gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
# Copyright 2013 OpenStack Foundation
# Copyright 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
from oslo_config import cfg
from oslo_log import log
from manila.common import constants
from manila import exception
from manila.i18n import _
from manila import network
from manila.network.neutron import api as neutron_api
from manila.network.neutron import constants as neutron_constants
from manila import utils
# Module-level logger.
LOG = log.getLogger(__name__)

# Options common to every Neutron network plugin variant in this module.
neutron_network_plugin_opts = [
    cfg.StrOpt(
        'neutron_physical_net_name',
        help="The name of the physical network to determine which net segment "
             "is used. This opt is optional and will only be used for "
             "networks configured with multiple segments.",
        deprecated_group='DEFAULT'),
]

# Options used only by NeutronSingleNetworkPlugin: one fixed net/subnet.
neutron_single_network_plugin_opts = [
    cfg.StrOpt(
        'neutron_net_id',
        help="Default Neutron network that will be used for share server "
             "creation. This opt is used only with "
             "class 'NeutronSingleNetworkPlugin'.",
        deprecated_group='DEFAULT'),
    cfg.StrOpt(
        'neutron_subnet_id',
        help="Default Neutron subnet that will be used for share server "
             "creation. Should be assigned to network defined in opt "
             "'neutron_net_id'. This opt is used only with "
             "class 'NeutronSingleNetworkPlugin'.",
        deprecated_group='DEFAULT'),
]

# Options used by NeutronBindNetworkPlugin to control port binding.
neutron_bind_network_plugin_opts = [
    cfg.StrOpt(
        'neutron_vnic_type',
        help="vNIC type used for binding.",
        choices=['baremetal', 'normal', 'direct',
                 'direct-physical', 'macvtap'],
        default='baremetal'),
    cfg.StrOpt(
        "neutron_host_id",
        help="Host ID to be used when creating neutron port. If not set "
             "host is set to manila-share host by default.",
        # NOTE(review): evaluated once at import time, not per request;
        # a changed hostname requires a service restart to take effect.
        default=socket.gethostname()),
]

# Names of per-profile config sections (see neutron_binding_profile_opts).
neutron_binding_profile = [
    cfg.ListOpt(
        "neutron_binding_profiles",
        help="A list of binding profiles to be used during port binding. This "
        "option can be used with the NeutronBindNetworkPlugin. The value for "
        "this option has to be a comma separated list of names that "
        "correspond to each binding profile. Each binding profile needs to be "
        "specified as an individual configuration section using the binding "
        "profile name as the section name."),
]

# Options registered under each binding-profile section.
neutron_binding_profile_opts = [
    cfg.StrOpt(
        'neutron_switch_id',
        help="Switch ID for binding profile."),
    cfg.StrOpt(
        'neutron_port_id',
        help="Port ID on the given switch.",),
    cfg.DictOpt(
        'neutron_switch_info',
        help="Switch label. For example: 'switch_ip: 10.4.30.5'. Multiple "
             "key-value pairs separated by commas are accepted.",),
]

CONF = cfg.CONF
class NeutronNetworkPlugin(network.NetworkBaseAPI):
    """Manila network plugin that allocates ports through Neutron.

    Creates one Neutron port per requested allocation on the network and
    subnet referenced by the share network, and mirrors provider-network
    and subnet attributes into manila's database.
    """

    def __init__(self, *args, **kwargs):
        db_driver = kwargs.pop('db_driver', None)
        super(NeutronNetworkPlugin, self).__init__(db_driver=db_driver)
        self._neutron_api = None
        self._neutron_api_args = args
        self._neutron_api_kwargs = kwargs
        # NOTE: self._neutron_api_kwargs aliases kwargs, so popping
        # 'label' here also removes it from the kwargs that are later
        # handed to neutron_api.API().
        self._label = kwargs.pop('label', 'user')
        CONF.register_opts(
            neutron_network_plugin_opts,
            group=self.neutron_api.config_group_name)

    @property
    def label(self):
        # 'user' for tenant-facing allocations; other values (e.g. 'admin')
        # suppress share-network DB updates below.
        return self._label

    @property
    @utils.synchronized("instantiate_neutron_api")
    def neutron_api(self):
        """Lazily create the Neutron API client exactly once."""
        if not self._neutron_api:
            self._neutron_api = neutron_api.API(*self._neutron_api_args,
                                                **self._neutron_api_kwargs)
        return self._neutron_api

    def _store_neutron_net_info(self, context, share_network):
        # Fetch provider network and subnet details from Neutron and
        # persist them onto the share network.
        self._save_neutron_network_data(context, share_network)
        self._save_neutron_subnet_data(context, share_network)

    def allocate_network(self, context, share_server, share_network=None,
                         **kwargs):
        """Allocate network resources using given network information.

        Create neutron ports for a given neutron network and subnet,
        create manila db records for allocated neutron ports.

        :param context: RequestContext object
        :param share_network: share network data
        :param kwargs: allocations parameters given by the back-end
                       driver. Supported params:
                       'count' - how many allocations should be created
                       'device_owner' - set owner for network allocations
        :rtype: list of :class: 'dict'
        :raises NetworkBadConfigurationException: if the Neutron provider
            network extension is unavailable.
        """
        if not self._has_provider_network_extension():
            msg = "%s extension required" % neutron_constants.PROVIDER_NW_EXT
            raise exception.NetworkBadConfigurationException(reason=msg)
        self._verify_share_network(share_server['id'], share_network)
        self._store_neutron_net_info(context, share_network)
        allocation_count = kwargs.get('count', 1)
        device_owner = kwargs.get('device_owner', 'share')
        ports = []
        for __ in range(0, allocation_count):
            ports.append(self._create_port(context, share_server,
                                           share_network, device_owner))
        return ports

    def deallocate_network(self, context, share_server_id):
        """Deallocate neutron network resources for the given share server.

        Delete previously allocated neutron ports, delete manila db
        records for deleted ports.

        :param context: RequestContext object
        :param share_server_id: id of share server
        :rtype: None
        """
        ports = self.db.network_allocations_get_for_share_server(
            context, share_server_id)
        for port in ports:
            self._delete_port(context, port)

    def _get_port_create_args(self, share_server, share_network,
                              device_owner):
        # Arguments passed straight through to neutron_api.create_port().
        return {
            "network_id": share_network['neutron_net_id'],
            "subnet_id": share_network['neutron_subnet_id'],
            "device_owner": 'manila:' + device_owner,
            "device_id": share_server.get('id'),
        }

    def _create_port(self, context, share_server, share_network, device_owner):
        """Create one Neutron port and record it as a network allocation."""
        create_args = self._get_port_create_args(share_server, share_network,
                                                 device_owner)
        port = self.neutron_api.create_port(
            share_network['project_id'], **create_args)
        port_dict = {
            'id': port['id'],
            'share_server_id': share_server['id'],
            'ip_address': port['fixed_ips'][0]['ip_address'],
            'gateway': share_network['gateway'],
            'mac_address': port['mac_address'],
            'status': constants.STATUS_ACTIVE,
            'label': self.label,
            'network_type': share_network.get('network_type'),
            'segmentation_id': share_network.get('segmentation_id'),
            'ip_version': share_network['ip_version'],
            'cidr': share_network['cidr'],
            'mtu': share_network['mtu'],
        }
        return self.db.network_allocation_create(context, port_dict)

    def _delete_port(self, context, port):
        """Delete a Neutron port; mark the allocation ERROR on failure."""
        try:
            self.neutron_api.delete_port(port['id'])
        except exception.NetworkException:
            self.db.network_allocation_update(
                context, port['id'], {'status': constants.STATUS_ERROR})
            raise
        else:
            self.db.network_allocation_delete(context, port['id'])

    def _has_provider_network_extension(self):
        # The provider network extension exposes the provider:* attributes
        # read in _save_neutron_network_data().
        extensions = self.neutron_api.list_extensions()
        return neutron_constants.PROVIDER_NW_EXT in extensions

    def _is_neutron_multi_segment(self, share_network, net_info=None):
        """Return True if the share network's Neutron net is multi-segment."""
        if net_info is None:
            net_info = self.neutron_api.get_network(
                share_network['neutron_net_id'])
        return 'segments' in net_info

    def _save_neutron_network_data(self, context, share_network):
        """Copy provider network attributes onto the share network.

        For multi-segment networks, the segment matching the configured
        neutron_physical_net_name is selected.

        :raises NetworkBadConfigurationException: if no physical net name
            is configured for a multi-segment network, or no segment
            matches it.
        """
        net_info = self.neutron_api.get_network(
            share_network['neutron_net_id'])
        segmentation_id = None
        network_type = None
        if self._is_neutron_multi_segment(share_network, net_info):
            # we have a multi segment network and need to identify the
            # lowest segment used for binding
            phy_nets = []
            phy = self.neutron_api.configuration.neutron_physical_net_name
            if not phy:
                # Fixed: the two string literals were previously separate
                # statements, which truncated this error message.
                msg = ("Cannot identify segment used for binding. Please add "
                       "neutron_physical_net_name in configuration.")
                raise exception.NetworkBadConfigurationException(reason=msg)
            for segment in net_info['segments']:
                phy_nets.append(segment['provider:physical_network'])
                if segment['provider:physical_network'] == phy:
                    segmentation_id = segment['provider:segmentation_id']
                    network_type = segment['provider:network_type']
            if not (segmentation_id and network_type):
                msg = ("No matching neutron_physical_net_name found for %s "
                       "(found: %s)." % (phy, phy_nets))
                raise exception.NetworkBadConfigurationException(reason=msg)
        else:
            network_type = net_info['provider:network_type']
            segmentation_id = net_info['provider:segmentation_id']
        provider_nw_dict = {
            'network_type': network_type,
            'segmentation_id': segmentation_id,
            'mtu': net_info['mtu'],
        }
        share_network.update(provider_nw_dict)
        if self.label != 'admin':
            self.db.share_network_update(
                context, share_network['id'], provider_nw_dict)

    def _save_neutron_subnet_data(self, context, share_network):
        """Copy subnet CIDR/gateway/ip_version onto the share network."""
        subnet_info = self.neutron_api.get_subnet(
            share_network['neutron_subnet_id'])
        subnet_values = {
            'cidr': subnet_info['cidr'],
            'gateway': subnet_info['gateway_ip'],
            'ip_version': subnet_info['ip_version']
        }
        share_network.update(subnet_values)
        if self.label != 'admin':
            self.db.share_network_update(
                context, share_network['id'], subnet_values)
class NeutronSingleNetworkPlugin(NeutronNetworkPlugin):
    """Neutron plugin that always allocates from one preconfigured
    network/subnet pair (neutron_net_id / neutron_subnet_id)."""

    def __init__(self, *args, **kwargs):
        super(NeutronSingleNetworkPlugin, self).__init__(*args, **kwargs)
        CONF.register_opts(
            neutron_single_network_plugin_opts,
            group=self.neutron_api.config_group_name)
        config = self.neutron_api.configuration
        self.net = config.neutron_net_id
        self.subnet = config.neutron_subnet_id
        self._verify_net_and_subnet()

    def allocate_network(self, context, share_server, share_network=None,
                         **kwargs):
        """Allocate ports, forcing the configured net/subnet pair."""
        if self.label == 'admin':
            # Admin allocations ignore the tenant share network entirely.
            share_network = {
                'project_id': self.neutron_api.admin_project_id,
                'neutron_net_id': self.net,
                'neutron_subnet_id': self.subnet,
            }
        else:
            share_network = self._update_share_network_net_data(
                context, share_network)
        return super(NeutronSingleNetworkPlugin, self).allocate_network(
            context, share_server, share_network, **kwargs)

    def _verify_net_and_subnet(self):
        """Validate that both options are set and the subnet belongs to
        the network; raise NetworkBadConfigurationException otherwise."""
        data = {'net': self.net, 'subnet': self.subnet}
        if not (self.net and self.subnet):
            raise exception.NetworkBadConfigurationException(
                "Neutron net and subnet are expected to be both set. "
                "Got: net=%(net)s and subnet=%(subnet)s." % data)
        net_info = self.neutron_api.get_network(self.net)
        subnets = net_info.get('subnets')
        if not (subnets and self.subnet in subnets):
            raise exception.NetworkBadConfigurationException(
                "Subnet '%(subnet)s' does not belong to "
                "network '%(net)s'." % data)

    def _update_share_network_net_data(self, context, share_network):
        """Force the share network onto the configured net/subnet.

        Any conflicting, already-set net/subnet id is rejected; unset ids
        are filled in and persisted to the DB.
        """
        updates = {}
        if share_network.get('nova_net_id') is not None:
            raise exception.NetworkBadConfigurationException(
                "Share network has nova_net_id set.")
        current_net = share_network.get('neutron_net_id')
        if current_net != self.net:
            if current_net is not None:
                raise exception.NetworkBadConfigurationException(
                    "Using neutron net id different from None or value "
                    "specified in the config is forbidden for "
                    "NeutronSingleNetworkPlugin. Allowed values: (%(net)s, "
                    "None), received value: %(err)s" % {
                        "net": self.net,
                        "err": current_net})
            updates['neutron_net_id'] = self.net
        current_subnet = share_network.get('neutron_subnet_id')
        if current_subnet != self.subnet:
            if current_subnet is not None:
                raise exception.NetworkBadConfigurationException(
                    "Using neutron subnet id different from None or value "
                    "specified in the config is forbidden for "
                    "NeutronSingleNetworkPlugin. Allowed values: (%(snet)s, "
                    "None), received value: %(err)s" % {
                        "snet": self.subnet,
                        "err": current_subnet})
            updates['neutron_subnet_id'] = self.subnet
        if updates:
            share_network = self.db.share_network_update(
                context, share_network['id'], updates)
        return share_network
class NeutronBindNetworkPlugin(NeutronNetworkPlugin):
    """Neutron plugin that additionally binds the created ports.

    Supports per-profile binding sections (e.g. baremetal switch links)
    and waiting for ports to become ACTIVE after binding.
    """

    def __init__(self, *args, **kwargs):
        super(NeutronBindNetworkPlugin, self).__init__(*args, **kwargs)
        self.binding_profiles = []
        CONF.register_opts(
            neutron_binding_profile,
            group=self.neutron_api.config_group_name)
        conf = CONF[self.neutron_api.config_group_name]
        if conf.neutron_binding_profiles:
            for profile in conf.neutron_binding_profiles:
                # Each profile name is its own config section.
                CONF.register_opts(neutron_binding_profile_opts, group=profile)
                self.binding_profiles.append(profile)
        CONF.register_opts(
            neutron_bind_network_plugin_opts,
            group=self.neutron_api.config_group_name)
        self.config = self.neutron_api.configuration

    def update_network_allocation(self, context, share_server):
        """For 'normal' vNICs, wait for the ports to be bound after the
        share server (and its vNICs) exist; returns the allocations."""
        if self.config.neutron_vnic_type == 'normal':
            ports = self.db.network_allocations_get_for_share_server(
                context,
                share_server['id'])
            self._wait_for_ports_bind(ports, share_server)
            return ports

    @utils.retry(exception.NetworkBindException, retries=20)
    def _wait_for_ports_bind(self, ports, share_server):
        """Poll Neutron until every port is ACTIVE.

        :raises NetworkException: if a port is in ERROR state or its
            binding failed.
        :raises NetworkBindException: (retried by the decorator) while
            some ports are still inactive.
        """
        inactive_ports = []
        for port in ports:
            # Fixed: use the lazy 'neutron_api' property instead of the
            # private '_neutron_api' attribute, which is None until the
            # property has been accessed at least once.
            port = self.neutron_api.show_port(port['id'])
            if (port['status'] == neutron_constants.PORT_STATUS_ERROR or
                    ('binding:vif_type' in port and
                     port['binding:vif_type'] ==
                     neutron_constants.VIF_TYPE_BINDING_FAILED)):
                msg = _("Port binding %s failed.") % port['id']
                raise exception.NetworkException(msg)
            elif port['status'] != neutron_constants.PORT_STATUS_ACTIVE:
                LOG.debug("The port %(id)s is in state %(state)s. "
                          "Wait for active state.", {
                              "id": port['id'],
                              "state": port['status']})
                inactive_ports.append(port['id'])
        if len(inactive_ports) == 0:
            return
        msg = _("Ports are not fully bound for share server "
                "'%(s_id)s' (inactive ports: %(ports)s)") % {
            "s_id": share_server['id'],
            "ports": inactive_ports}
        raise exception.NetworkBindException(msg)

    def _get_port_create_args(self, share_server, share_network,
                              device_owner):
        """Extend base port args with binding host, vNIC type and any
        configured binding profiles."""
        # Fixed: share_network was previously passed twice to the parent
        # (in place of share_server), which made the port's device_id the
        # share network's id instead of the share server's id.
        arguments = super(
            NeutronBindNetworkPlugin, self)._get_port_create_args(
                share_server, share_network, device_owner)
        arguments['host_id'] = self.config.neutron_host_id
        arguments['binding:vnic_type'] = self.config.neutron_vnic_type
        if self.binding_profiles:
            local_links = []
            for profile in self.binding_profiles:
                local_links.append({
                    'switch_id': CONF[profile]['neutron_switch_id'],
                    'port_id': CONF[profile]['neutron_port_id'],
                    'switch_info': CONF[profile]['neutron_switch_info'],
                })
            arguments['binding:profile'] = {
                "local_link_information": local_links}
        return arguments

    def _store_neutron_net_info(self, context, share_network):
        """Store the Neutron network info.

        In case of dynamic multi segments the segment is determined while
        binding the port. Therefore this method will return for multi segments
        network without storing network information.

        Instead, multi segments network will wait until ports are bound and
        then store network information (see allocate_network()).
        """
        if self._is_neutron_multi_segment(share_network):
            # In case of dynamic multi segment the segment is determined while
            # binding the port
            return
        super(NeutronBindNetworkPlugin, self)._store_neutron_net_info(
            context, share_network)

    def allocate_network(self, context, share_server, share_network=None,
                         **kwargs):
        """Allocate ports and, unless vnic type is 'normal', wait for
        them to be bound; refresh segment data for multi-segment nets."""
        ports = super(NeutronBindNetworkPlugin, self).allocate_network(
            context, share_server, share_network, **kwargs)
        # If vnic type is 'normal' we expect a neutron agent to bind the
        # ports. This action requires a vnic to be spawned by the driver.
        # Therefore we do not wait for the port binding here, but
        # return the unbound ports and expect the share manager to call
        # update_network_allocation after the share server was created, in
        # order to update the ports with the correct binding.
        if self.config.neutron_vnic_type != 'normal':
            self._wait_for_ports_bind(ports, share_server)
            if self._is_neutron_multi_segment(share_network):
                # update segment information after port bind
                super(NeutronBindNetworkPlugin, self)._store_neutron_net_info(
                    context, share_network)
                for num, port in enumerate(ports):
                    port_info = {
                        'network_type': share_network['network_type'],
                        'segmentation_id': share_network['segmentation_id'],
                        'cidr': share_network['cidr'],
                        'ip_version': share_network['ip_version'],
                    }
                    ports[num] = self.db.network_allocation_update(
                        context, port['id'], port_info)
        return ports
class NeutronBindSingleNetworkPlugin(NeutronSingleNetworkPlugin,
                                     NeutronBindNetworkPlugin):
    """Single preconfigured network combined with port-binding support.

    All behavior comes from the two parents; MRO resolves single-network
    logic first, then the binding logic.
    """
    pass
|
|
import itertools
import random
import string
import re
import sys
import time
import math
# Letter pools and grid dimensions.  The two-letter variable names appear
# to group letters by tier/colour (e.g. 'yv'/'yc' vowels vs consonants) —
# inferred from naming, not verified.
yv = 'aeo'
yc = 'rst'
bv = 'i'
bc = 'bcdmp'
gv = 'u'
gc = 'gn'
pi = 'fwvhy'
og = 'lk'
pu = 'jx'
red = 'qz'
# Weighted letter bag: the repetition factor of each pool controls how
# often its letters are drawn when a random grid is generated.
grid_letters = (red*3)+(pu*3)+(og*4)+(pi*6)+(gc*12)+(gv*9)+(bc*11)+(bv*8)+(yc*20)+(yv*34)
grid_height = 9
grid_width = 13
# returns a randomized grid, similar to word soup
def generate_grid(height=None, width=None, letters=None):
    """Build a list of `height` row strings, each `width` letters drawn
    uniformly from the weighted pool `letters`.

    All arguments default to the module-level grid configuration
    (grid_height, grid_width, grid_letters), so existing calls like
    generate_grid() behave exactly as before.
    """
    if height is None:
        height = grid_height
    if width is None:
        width = grid_width
    if letters is None:
        letters = grid_letters
    grid = []
    for _ in itertools.repeat(None, height):
        grid.append(''.join(random.choice(letters) for _ in range(width)))
    return grid
# The working grid the solver operates on.
grid = generate_grid()
# Every distinct letter currently present in the grid.
alphabet = ''.join(set(''.join(grid)))
# Matches words of length >= 3 composed only of grid letters (case-insensitive).
bogglable = re.compile('[' + alphabet + ']{3,}$', re.I).match
# Candidate dictionary words.  NOTE(review): the file handle from open()
# is never closed; cleanup relies on interpreter exit.
words = set(word.rstrip('\n') for word in open("/usr/share/dict/words") if bogglable(word))
# Every prefix (length >= 2) of every candidate word; used to prune the search.
prefixes = set(word[:i] for word in words
for i in range(2, len(word)+1))
# Per-letter point values used by score_word() (word-soup style scoring).
score = {"a": 1, "c": 2, "b": 2, "e": 1, "d": 2, "g": 3, "f": 4, "i": 2, "h": 4, "k": 5, "j": 8, "m": 2, "l": 5, "o": 1, "n": 3, "q": 10, "p": 2, "s": 1, "r": 1, "u": 3, "t": 1, "w": 4, "v": 4, "y": 4, "x": 8, "z": 10}
# returns the best option with score and grid rating taken into account
# ERROR- SOMETIMES RETURNS NOTHING WHEN WORDS ARE AVAILABLE
# POSSIBLY DUE TO LESS THAN 40 OPTIONS BEING AVAILABLE- EXAMINE
def examine_options(griddle, stage):
    """Pick word options for the given stage.

    stage 8: all options for the module-level grid, sorted by word score.
    stage 9: all options for `griddle`, sorted by word score.
    stage 0: the single highest-scoring option.
    other stages: the option with the best combined word score plus
    rating (evaluate_grid0) of the grid left after playing it.
    """
    topscore = 0
    score = 0  # NOTE(review): shadows the module-level letter-score dict.
    choice = ()
    if stage == 8:
        return sorted(solve(), key=lambda (word, path): score_word(word))
    if stage == 9:
        return sorted(solve2(griddle), key=lambda (word, path): score_word(word))
    if stage == 0:
        return max(solve(), key=lambda (word, path): score_word(word))
    options = sorted(solve(), key=lambda (word, path): score_word(word))
    print 'options = ' + str(len(options))
    if len(options) > 50:
        # NOTE(review): options is sorted ascending, so [:-50] discards the
        # 50 *highest*-scoring words; [-50:] may have been intended — this
        # could be the cause of the ERROR noted above.
        options = options[:-50]
    for opt in options:
        # NOTE(review): opt[0] is the word; move_letters expects the path
        # (opt[1]) everywhere else in this file — verify.
        newgrid = move_letters(grid, opt[0])
        score = evaluate_grid0(newgrid, stage) + (score_word(opt[0]))
        if score >= topscore:
            topscore = score
            choice = opt
        print 'score = ' + str(score)
    if choice == ():
        print options
        print 'LEN = ' + str(len(options))
    return choice
# Stage-specific rating weights consumed by evaluate_grid0 /
# evaluate_grid_display.  Trailing digit is the stage number; prefixes
# match the letter_match() components (e.g. qumix2 weights q-u adjacency
# in stage 2), ratrat/vratio weight vowel-ratio deviation, sqrat weights
# grid unsquareness.
qumix1 = 50
qnum1 = 100
cmix2 = 2
zmix2 = 8
qumix2 = 25
ratrat2 = 350
sqrat2 = 0.1
znum2 = 12
qunum2 = 50
vratio2 = 0.7
cmix3 = 1
zmix3 = 4
qumix3 = 15
ratrat3 = 450
sqrat3 = 5
znum3 = 12
qunum3 = 50
vratio3 = 0.7
def evaluate_grid0(grid, stage):
    """Rate `grid` for the given stage.

    Stage 1 rewards only q/u adjacency and q count.  Stages 2 and 3
    blend letter mixing, vowel-ratio deviation and grid "unsquareness"
    with stage-specific weights.  Ratings carry large constant offsets
    (100000 / 1000000) so stages are comparable only within themselves.
    """
    # Hoisted: letter_match() walks the whole grid, so compute it once
    # instead of once per weighted component.
    matches = letter_match(grid)
    if stage == 1:
        rating = matches[2] * qumix1 + matches[4] * qnum1
        return 100000 + rating
    mixrating = 1
    ratiorating = 1
    sqrating = 1
    if stage == 2:
        mixrating += (matches[0] * cmix2 + matches[1] * zmix2 +
                      matches[2] * qumix2 + matches[3] * znum2 +
                      matches[4] * qunum2)
        ratiorating = abs(vowel_ratio(grid) - vratio2) * ratrat2
        sqrating = unsquareness(grid) * sqrat2
    if stage == 3:
        mixrating += (matches[0] * cmix3 + matches[1] * zmix3 +
                      matches[2] * qumix3 + matches[3] * znum3 +
                      matches[4] * qunum3)
        ratiorating = abs(vowel_ratio(grid) - vratio3) * ratrat3
        sqrating = unsquareness(grid) * sqrat3
    return 1000000 + mixrating - ratiorating - sqrating
def evaluate_grid_display(grid, stage):
    """Same rating computation as evaluate_grid0, but prints the rating
    components for debugging.

    NOTE(review): unlike evaluate_grid0 this returns the rating only for
    stage 1; for stages 2/3 it prints the rating and implicitly returns
    None, so callers must not use its return value.
    """
    rating = 0
    mixrating = 1
    ratiorating = 1
    sqrating = 1
    #print letter_match(grid)[0] * 4
    #print letter_match(grid)[1] * 20
    #print letter_match(grid)[2] * 50
    #print vowel_ratio(grid)
    if stage == 1:
        rating += letter_match(grid)[2] * qumix1
        rating += letter_match(grid)[4] * qnum1
        return 100000 + rating
    if stage == 2:
        mixrating += letter_match(grid)[0] * cmix2
        mixrating += letter_match(grid)[1] * zmix2
        mixrating += letter_match(grid)[2] * qumix2
        mixrating += letter_match(grid)[3] * znum2
        mixrating += letter_match(grid)[4] * qunum2
        ratiorating = abs(vowel_ratio(grid) - vratio2) * ratrat2
        sqrating = unsquareness(grid) * sqrat2
        print '----mixrating--cmix-zmix-qmix-znum-qnum'
        print 'mixrating = ' + str(mixrating) + "::" + str([letter_match(grid)[0]*cmix2]+[letter_match(grid)[1]*zmix2]+[letter_match(grid)[2]*qumix2]+[letter_match(grid)[3]*znum2]+[letter_match(grid)[4]*qunum2])
    if stage == 3:
        mixrating += letter_match(grid)[0] * cmix3
        mixrating += letter_match(grid)[1] * zmix3
        mixrating += letter_match(grid)[2] * qumix3
        mixrating += letter_match(grid)[3] * znum3
        mixrating += letter_match(grid)[4] * qunum3
        ratiorating = abs(vowel_ratio(grid) - vratio3) * ratrat3
        sqrating = unsquareness(grid) * sqrat3
        print '----mixrating--cmix-zmix-qmix-znum-qnum'
        print 'mixrating = ' + str(mixrating) + "::" + str([letter_match(grid)[0]*cmix3]+[letter_match(grid)[1]*zmix3]+[letter_match(grid)[2]*qumix3]+[letter_match(grid)[3]*znum3]+[letter_match(grid)[4]*qunum3])
    #print 'mixrating = ' + str(mixrating)
    #print 'ratiorating = ' + str(ratiorating)
    #print 'sqrating = ' + str(sqrating)
    rating = 1000000 + mixrating - ratiorating - sqrating
    print 'ratiorating = ' + str(ratiorating)
    print 'sqrating = ' + str(sqrating)
    print 'rating = ' + str(rating)
#def evaluate_grid(grid, stage):
# match_ratings = letter_match(grid)
# vcount, ccount = vowel_ratio(grid)[0], vowel_ratio(grid)[1]
# width, height, scount = squareness(grid)[0], squareness(grid)[1], squareness(grid)[2]
# #print width
# #print height
# #print scount
# #print vcount
# #print ccount
# rating = 0
# if stage == 1:
# rating += match_ratings[0]
# rating += match_ratings[1] * 3
# if match_ratings[2] > 0:
# rating += 50
# rmix = (float(vcount)/float(ccount)) - .4
# rating += -(rmix * rmix * 1500)
# rating += (grid_width - (grid_width - width)) * 30
# rating += (grid_height - (grid_height - width)) * 60
# if stage == 2:
# rating += match_ratings[0] * 2
# rating += match_ratings[1] * 5
# if match_ratings[2] > 0:
# rating += 50
# rmix = (float(vcount)/float(ccount)) - .4
# rating += - (rmix * rmix * 2000)
# rating -= height * 15
# rating -= width * 60
# return rating
#
## returns the rating of the grid
#def evaluate_grid1(grid, stage):
# match_ratings = letter_match(grid)
# vcount, ccount = vowel_ratio(grid)[0], vowel_ratio(grid)[1]
# #shape = widths(grid)
# unsq = unsquareness(grid)
# rating = 0
# if stage == 1:
# rating += match_ratings[0]
# rating += match_ratings[1] * 3
# if match_ratings[2] > 0:
# rating += 50
# rmix = (float(vcount)/float(ccount)) - .4
# rating += -(rmix * rmix * 1500)
# rating -= unsq * 10
# #rating += (grid_width - (grid_width - width)) * 30
# #rating += (grid_height - (grid_height - width)) * 60
# if stage == 2:
# rating += match_ratings[0] * 2
# rating += match_ratings[1] * 5
# if match_ratings[2] > 0:
# rating += 50
# rmix = (float(vcount)/float(ccount)) - .4
# rating += - (rmix * rmix * 2000)
# rating -= unsq * 10
# #rating -= height * 15
# #rating -= width * 60
# return rating
# returns the rating of how well vowels and consonants match up
def letter_match(grid):
    """Count vowel adjacency for the consonant classes across the grid.

    Returns [cmixing, zmixing, qrating, znum, qnum]:
      cmixing - vowel neighbours of common consonants
      zmixing - vowel neighbours of rare consonants (z j k v x)
      qrating - 'u' neighbours of 'q'
      znum    - number of rare consonants
      qnum    - number of 'q' tiles

    neighbors() includes the cell itself, matching the original counts.
    """
    vowels = 'aeiou'
    # Same letter set as the original or-chain (which listed 'm' twice).
    # NOTE(review): 's' and 'y' belong to no class and contribute
    # nothing — confirm this is intentional.
    common = 'bcdfghlmnprtw'
    rare = 'zjkvx'
    cmixing = 0
    zmixing = 0
    qrating = 0
    znum = 0
    qnum = 0
    for y, row in enumerate(grid):
        for x, letter in enumerate(row):
            coord = [x] + [y]
            if letter in common:
                for (nx, ny) in neighbors(coord):
                    if grid[ny][nx] in vowels:
                        cmixing += 1
            elif letter in rare:
                znum += 1
                for (nx, ny) in neighbors(coord):
                    if grid[ny][nx] in vowels:
                        zmixing += 1
            elif letter == 'q':
                qnum += 1
                for (nx, ny) in neighbors(coord):
                    if grid[ny][nx] == 'u':
                        qrating += 1
    return [cmixing, zmixing, qrating, znum, qnum]
# returns the fraction of grid cells that are vowels
def vowel_ratio(grid):
    """Return vowels / total cells over the whole grid.

    Every non-vowel character (including '*' placeholders) counts toward
    the denominator, exactly as before.  The previous header comment
    claimed separate counts were returned; the function has always
    returned a single float ratio.

    :raises ZeroDivisionError: for an empty grid (unchanged behavior).
    """
    vcount = 0
    total = 0
    for row in grid:
        for letter in row:
            total += 1
            if letter in 'aeiou':
                vcount += 1
    return float(vcount) / total
# Rates how far the grid's row widths are from a square-ish shape.
def unsquareness(grid):
    """Mean cubed absolute deviation of each row width from
    sqrt(total letter count); 0 means perfectly level rows."""
    ws = widths(grid)
    level = math.sqrt(float(sum(ws)))
    total = 0
    for w in ws:
        total += abs(level - w) ** 3
    return total / len(ws)
def squareness(grid):
    """Return [width, height, star_count].

    width: non-'*' cells in the first row; height: 1-based index of the
    last row that is not entirely stars (grid_height if none qualify);
    star_count: total '*' cells in the grid.
    """
    scount = 0
    height = grid_height
    for y, row in enumerate(grid):
        stars = row.count('*')
        scount += stars
        if stars != grid_width:
            height = y + 1
    width = grid_width - grid[0].count('*')
    return [width, height, scount]
# returns the width of each row
def widths(grid):
    """Non-'*' cell count of every row that is not entirely stars."""
    return [grid_width - row.count('*')
            for row in grid
            if row.count('*') != grid_width]
#def cvmatch
# all stages:
# 1. match up Qs and Us
# 2. match up ZXJKV with vowels
# later stages:
# 1. make the grid square
# 2. regulate vowel frequency
# 3. towards the end, start looking for clearances
#print grid
#grid = "fxie amlo ewbx astu".split()
#nrows, ncols = len(grid), len(grid[0])
# A dictionary word that could be a solution must use only the grid's
# letters and have length >= 3. (With a case-insensitive match.)
# NOTE(review): solve() always reads the module-level grid, so rebinding
# the local name `grid` below does not change what is solved, and the
# local grid is never reset between opt1 iterations — confirm whether
# this lookahead behaves as intended.
def triplemove(grid):
    """Three-move lookahead: for each of the top-5 words, try the top-5
    follow-ups and then the single best third word; return the best
    combined three-word score found."""
    scorelist = []
    score1 = 0
    score2 = 0
    options1 = sorted(solve(), key=lambda (word, path): score_word(word))[-5:]
    for opt1 in options1:
        grid = move_letters(grid, opt1[1])
        options2 = sorted(solve(), key=lambda (word, path): score_word(word))[-5:]
        for opt2 in options2:
            grid = move_letters(grid, opt2[1])
            topword = max(solve(), key=lambda (word, path): score_word(word))
            total_score = score_word(topword[0]) + score_word(opt2[0]) + score_word(opt1[0])
            print total_score
            scorelist.append(total_score)
    return max(scorelist)
#def clear_grid(grid):
# options1 = (solve(), key=lambda (word, path): score_word(word))
# for opt1 in options1:
# grid = move_letters(grid, opt1[1])
# options2 = (solve(), key=lambda (word, path): score_word(word))[-5:]
# for opt2 in options2:
# grid = move_letters(grid, opt2[1])
# topword = (solve(), key=lambda (word, path): score_word(word))
# returns word score according to word soup rules
def score_word(word):
    """Word-soup score: sum of per-letter point values, times length."""
    letter_points = sum(score[c] for c in word)
    return letter_points * len(word)
# finds all words in grid
def solve():
    """Yield every (word, path) reachable in the module-level grid,
    starting a search from each non-'*' cell."""
    for y, row in enumerate(grid):
        for x, letter in enumerate(row):
            if letter == '*':
                continue
            for found in extending(letter, ((x, y),)):
                yield found
# yields possible next steps in path to make words
def extending(prefix, path):
    """Depth-first extension of `prefix` along unvisited neighbour cells
    of the module grid; yields (word, path) whenever a full word forms.
    The module-level `prefixes` set prunes dead branches."""
    if prefix in words:
        yield (prefix, path)
    for (nx, ny) in neighbors(path[-1]):
        if (nx, ny) in path:
            continue
        longer = prefix + grid[ny][nx]
        if longer in prefixes:
            for found in extending(longer, path + ((nx, ny),)):
                yield found
# yields letter's neighbours in grid
def neighbors(coord):
    """Yield every (nx, ny) in the 3x3 box around `coord`, clipped to the
    module grid's bounds; includes `coord` itself.

    A plain parameter replaces the Python-2-only tuple-unpacking
    signature (removed by PEP 3113); callers still pass one (x, y) pair.
    """
    x, y = coord
    for nx in range(max(0, x-1), min(x+2, len(grid[0]))):
        for ny in range(max(0, y-1), min(y+2, len(grid))):
            yield (nx, ny)
def solve2(grid):
    """Grid-parameterized variant of solve(): yield every (word, path)
    reachable in the given grid."""
    for y, row in enumerate(grid):
        for x, letter in enumerate(row):
            if letter == '*':
                continue
            for found in extending2(letter, ((x, y),), grid):
                yield found
def extending2(prefix, path, grid):
    """Grid-parameterized variant of extending(): depth-first extension
    of `prefix` along unvisited neighbours of the given grid."""
    if prefix in words:
        yield (prefix, path)
    for (nx, ny) in neighbors2(path[-1], grid):
        if (nx, ny) in path:
            continue
        longer = prefix + grid[ny][nx]
        if longer in prefixes:
            for found in extending2(longer, path + ((nx, ny),), grid):
                yield found
# yields letter's neighbours in grid
def neighbors2(coord, grid):
    """Yield every (nx, ny) in the 3x3 box around `coord`, clipped to the
    given grid's bounds; includes `coord` itself.

    A plain parameter replaces the Python-2-only tuple-unpacking
    signature (removed by PEP 3113); callers still pass one (x, y) pair.
    """
    x, y = coord
    for nx in range(max(0, x-1), min(x+2, len(grid[0]))):
        for ny in range(max(0, y-1), min(y+2, len(grid))):
            yield (nx, ny)
# prints topword, path, and score
def display_word(topword):
    """Print the (word, path) option and, if non-empty, its word score."""
    print topword
    if len(topword) > 0:
        print "word score = " + str(score_word(topword[0]))
# rearranges grid after words are found
def move_letters(grid, empty_points):
    """Remove the cells listed in `empty_points` and collapse the grid.

    Each emptied cell makes its column shift down by one (top cells
    become '*'); afterwards each row is repacked with its '*' cells
    moved to the front.  Returns a new list of row strings; the input
    grid is not modified.
    """
    newgrid = [list(row) for row in grid]
    # Work on mutable copies: point coordinates are decremented as the
    # columns above them fall.
    empty_points = [list(pair) for pair in empty_points]
    for y, row in enumerate(newgrid):
        for x, letter in enumerate(row): #look at all squares
            for point in empty_points:
                if x == point[0] and y == point[1]: #if empty matches square location
                    location = [x, y]
                    # Keep collapsing this column while the current cell
                    # is still marked empty (handles stacked removals).
                    while location in empty_points:
                        for j, r in enumerate(newgrid):
                            for i, l in enumerate(r): #look at all squares
                                if j >= y and x == i: # and see which are above it
                                    if j == grid_height-1:
                                        newgrid[j][i] = '*'
                                    else:
                                        newgrid[j][i] = newgrid[j+1][i] #copy upper neighbour onto lower in all of column
                        # Remaining empties in this column moved down one.
                        for p in empty_points:
                            if p[0] == x:
                                p[1] -= 1
    # Repack每 row: all '*' placeholders are pushed to the row's front.
    newgrid2 = []
    for row in newgrid:
        stars = row.count('*')
        row[:] = (value for value in row if value != "*")
        if stars == 0:
            newgrid2.append(row)
        else:
            for _ in itertools.repeat(None, stars):
                row = ['*'] + row
            newgrid2.append(row)
    # Re-join each row list back into a string.
    resultgrid = []
    for row in newgrid2:
        resultgrid.append(''.join(row))
    return resultgrid
def find_squares(original_grid):
    """Search 2-3 move sequences that leave a small, square-ish grid.

    Returns [routes, squares]: the move sequences found and the grids
    they produce.  Aborts after 30 seconds.

    NOTE(review): examine_options(..., 8) solves the module-level grid
    regardless of its first argument, and the local `grid = ...`
    assignments shadow rather than update the module grid — confirm the
    search really evaluates the intermediate grids.

    NOTE(review): source indentation was lost; nesting reconstructed
    conservatively from statement order.
    """
    start = time.time()
    routes = []
    squares = []
    grid = original_grid
    options1 = examine_options(original_grid, 8)[:-20:-2]
    counter = [0,0,0,0]  # how many options were examined at each depth
    for opt1 in options1:
        counter[0] +=1
        moves = [(),(),()]
        moves[0] = opt1
        grid2 = move_letters(original_grid, opt1[1])
        grid = grid2
        options2 = examine_options(grid2, 8)[:-80:-1]
        for opt2 in options2:
            counter[1] +=1
            moves[1] = opt2
            grid3 = move_letters(grid2, opt2[1])
            wid = widths(grid3)
            # Accept grids 3-4 rows tall whose widths step up gently.
            if 2 < len(wid) < 5 and 2 < wid[0] <= wid[1] + 1 <= wid[2] + 2 < 7 and moves not in routes:
                routes.append(moves)
                squares.append(grid3)
                print '----------'
                for row in reversed(grid3):
                    print row
                print '----------'
                print grid3
            grid = grid3
            options3 = examine_options(grid3, 8)[::-1]
            for opt3 in options3:
                counter[2] +=1
                moves[2] = opt3
                grid4 = move_letters(grid3, opt3[1])
                grid = grid4
                wid = widths(grid4)
                if 2 < len(wid) < 5 and 2 < wid[0] <= wid[1] + 1 <= wid[2] + 2 < 7 and moves not in routes:
                    routes.append(moves)
                    squares.append(grid4)
                    print '----------'
                    for row in reversed(grid4):
                        print row
                    print '----------'
                    print grid4
                if time.time() - start > 30:
                    print 'TIMEOUT'
                    print len(routes)
                    print counter
                    return [routes] + [squares]
    return [routes] + [squares]
def finish(original_grid):
    """Try to clear the grid completely within up to four moves.

    Returns the move list on success, None on an early timeout, and
    falls through printing 'NO SOLUTION' otherwise.

    NOTE(review): source indentation was lost; nesting reconstructed
    conservatively from statement order.
    """
    timeout = 10  # seconds allowed for the whole search
    finish_start = time.time()
    # A fully cleared grid: every row is all '*'.
    complete = ['*'*grid_width]*grid_height
    result = [(),(),(),()]
    grid = original_grid
    options1 = examine_options(original_grid, 9)[::-7]
    timer = time.time()
    for opt1 in options1:
        result[0] = opt1
        grid1 = move_letters(original_grid, opt1[1])
        grid = grid1
        if grid1 == complete:
            return result
        options2 = examine_options(grid1, 9)[::-3]
        for opt2 in options2:
            result[1] = opt2
            grid2 = move_letters(grid1, opt2[1])
            grid = grid2
            if grid2 == complete:
                print '----------'
                for row in reversed(original_grid):
                    print row
                print '----------'
                for row in reversed(grid1):
                    print row
                print '----------'
                for row in reversed(grid2):
                    print row
                print time.time() - timer
                return result
                return result  # NOTE(review): unreachable duplicate kept verbatim
            if time.time() - finish_start > timeout:
                print 'TIMEOUT2'
                return None
            options3 = examine_options(grid2, 9)
            for opt3 in options3:
                result[2] = opt3
                grid3 = move_letters(grid2, opt3[1])
                if grid3 == complete:
                    print '----------'
                    for row in reversed(original_grid):
                        print row
                    print '----------'
                    for row in reversed(grid1):
                        print row
                    print '----------'
                    for row in reversed(grid2):
                        print row
                    print '----------'
                    for row in reversed(grid3):
                        print row
                    print time.time() - timer
                    return result
                options4 = reversed(examine_options(grid3, 9))
                for opt4 in options4:
                    result[3] = opt4
                    grid4 = move_letters(grid3, opt4[1])
                    if grid4 == complete:
                        print '----------'
                        for row in reversed(original_grid):
                            print row
                        print '----------'
                        for row in reversed(grid1):
                            print row
                        print '----------'
                        for row in reversed(grid2):
                            print row
                        print '----------'
                        for row in reversed(grid3):
                            print row
                        print '----------'
                        for row in reversed(grid4):
                            print row
                        return result
                    if time.time() - finish_start > timeout:
                        print result
                        print grid
                        print 'TIMEOUT4'
    if grid == complete:
        return result
    else:
        print 'NO SOLUTION'
# ---- Main script: play the best word per stage until each stage's
# ---- letter-count target is reached, then try to clear the grid.
timeout = 0  # loop-iteration guard; also set to 50 to force a stage to stop
letter_count = grid_height*grid_width
total_score = 0
print alphabet
for row in reversed(grid):
    print row
# Stage 1: play words until 90 or fewer letters remain.
while letter_count > 90 and timeout < 30:
    print '+++STAGE 1'
    topword = examine_options(grid, 1)
    display_word(topword)
    letter_count -= len(topword[0])
    grid = move_letters(grid, topword[1])
    total_score += score_word(topword[0])
    print 'letter count = ' + str(letter_count)
    evaluate_grid_display(grid, 1)
    timeout += 1
for row in reversed(grid):
    print row
print grid
s1grid = grid  # snapshot of the grid after stage 1
# Stage 2: play down to 55 letters; stop early if no word is found.
while letter_count > 55 and timeout < 50:
    print '+++STAGE 2'
    topword = examine_options(grid, 2)
    display_word(topword)
    if len(topword) > 0:
        letter_count -= len(topword[0])
        print 'letter count = ' + str(letter_count)
    if topword != ():
        grid = move_letters(grid, topword[1])
        total_score += score_word(topword[0])
    else:
        print 'complete'
        timeout = 50  # no playable word: force the loop to end
    print 'total score = ' + str(total_score)
    evaluate_grid_display(grid, 2)
    timeout += 1
for row in reversed(grid):
    print row
print grid
s2grid = grid  # snapshot after stage 2
# Stage 3: play down to 18 letters.
while letter_count > 18 and timeout < 50:
    print '+++STAGE 3'
    topword = examine_options(grid, 3)
    display_word(topword)
    if len(topword) > 0:
        letter_count -= len(topword[0])
        print 'letter count = ' + str(letter_count)
    if topword != ():
        grid = move_letters(grid, topword[1])
        total_score += score_word(topword[0])
    else:
        print 'complete'
        timeout = 50
    print 'total score = ' + str(total_score)
    evaluate_grid_display(grid, 3)
    timeout += 1
for row in reversed(grid):
    print row
print grid
s3grid = grid  # snapshot after stage 3
print "+++FINISHING"
print grid
# Attempt to clear the remaining letters entirely (result is printed by
# finish itself; return value is unused here).
finish(grid)
#print find_squares(grid)
print "+++DONE"
# print 'l = ' + str(letter_count)
for row in reversed(grid):
    print row
print grid
print 'total score = ' + str(total_score)
# Dump the per-stage snapshots for inspection.
print '++++++++++++++++++++'
for row in reversed(s1grid):
    print row
print '++++++++++++++++++++'
for row in reversed(s2grid):
    print row
print '++++++++++++++++++++'
for row in reversed(s3grid):
    print row
print '++++++++++++++++++++'
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts Flowers data to TFRecords of TF-Example protos.
This module downloads the Flowers data, uncompresses it, reads the files
that make up the Flowers data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contain a single image and label.
The script should take about a minute to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import tensorflow as tf
from datasets import dataset_utils
# The URL where the original Flowers data can be downloaded.
# NOTE(review): unused while the download call in run() remains commented out;
# this script has been adapted to read pre-existing celeb photos instead.
_DATA_URL = 'http://download.tensorflow.org/example_images/flower_photos.tgz'
# The number of images held out for the validation split.
_NUM_VALIDATION = 2000
# Seed for repeatability of the train/validation shuffle.
_RANDOM_SEED = 0
# The number of shards (output TFRecord files) per dataset split.
_NUM_SHARDS = 5
class ImageReader(object):
  """Decodes JPEG image bytes through a single reusable TensorFlow subgraph."""

  def __init__(self):
    # Build the decode op once; every image is fed through the same
    # placeholder instead of adding new graph nodes per image.
    self._jpeg_input = tf.placeholder(dtype=tf.string)
    self._jpeg_decoder = tf.image.decode_jpeg(self._jpeg_input, channels=3)

  def read_image_dims(self, sess, image_data):
    """Return (height, width) of the encoded JPEG in `image_data`."""
    decoded = self.decode_jpeg(sess, image_data)
    return decoded.shape[0], decoded.shape[1]

  def decode_jpeg(self, sess, image_data):
    """Decode `image_data` into an HxWx3 array using session `sess`."""
    decoded = sess.run(self._jpeg_decoder,
                       feed_dict={self._jpeg_input: image_data})
    assert len(decoded.shape) == 3
    assert decoded.shape[2] == 3
    return decoded
def _get_filenames_and_classes(dataset_dir):
"""Returns a list of filenames and inferred class names.
Args:
dataset_dir: A directory containing a set of subdirectories representing
class names. Each subdirectory should contain PNG or JPG encoded images.
Returns:
A list of image file paths, relative to `dataset_dir` and the list of
subdirectories, representing class names.
"""
celeb_root = os.path.join(dataset_dir, 'celeb_photos')
directories = []
class_names = []
for filename in os.listdir(celeb_root):
path = os.path.join(celeb_root, filename)
if os.path.isdir(path):
directories.append(path)
class_names.append(filename)
photo_filenames = []
for directory in directories:
for filename in os.listdir(directory):
path = os.path.join(directory, filename)
photo_filenames.append(path)
return photo_filenames, sorted(class_names)
def _get_dataset_filename(dataset_dir, split_name, shard_id):
  """Return the TFRecord path for one shard of the given split."""
  shard_basename = 'celebs_%s_%05d-of-%05d.tfrecord' % (split_name,
                                                        shard_id,
                                                        _NUM_SHARDS)
  return os.path.join(dataset_dir, shard_basename)
def _convert_dataset(split_name, filenames, class_names_to_ids, dataset_dir):
  """Converts the given filenames to a TFRecord dataset.

  Args:
    split_name: The name of the dataset, either 'train' or 'validation'.
    filenames: A list of absolute paths to png or jpg images.
    class_names_to_ids: A dictionary from class names (strings) to ids
      (integers).
    dataset_dir: The directory where the converted datasets are stored.
  """
  assert split_name in ['train', 'validation']
  # Spread the images as evenly as possible across _NUM_SHARDS files.
  num_per_shard = int(math.ceil(len(filenames) / float(_NUM_SHARDS)))
  with tf.Graph().as_default():
    image_reader = ImageReader()
    with tf.Session('') as sess:
      for shard_id in range(_NUM_SHARDS):
        output_filename = _get_dataset_filename(
            dataset_dir, split_name, shard_id)
        with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
          start_ndx = shard_id * num_per_shard
          end_ndx = min((shard_id+1) * num_per_shard, len(filenames))
          for i in range(start_ndx, end_ndx):
            sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
                i+1, len(filenames), shard_id))
            sys.stdout.flush()
            # Read the raw encoded bytes. BUGFIX: binary mode ('rb') is
            # required -- text mode ('r') corrupts JPEG data on Windows and
            # fails to decode on Python 3.
            image_data = tf.gfile.FastGFile(filenames[i], 'rb').read()
            height, width = image_reader.read_image_dims(sess, image_data)
            # The class name is the photo's immediate parent directory name.
            class_name = os.path.basename(os.path.dirname(filenames[i]))
            class_id = class_names_to_ids[class_name]
            example = dataset_utils.image_to_tfexample(
                image_data, 'jpg', height, width, class_id, filenames[i])
            tfrecord_writer.write(example.SerializeToString())
  # Terminate the in-place progress line.
  sys.stdout.write('\n')
  sys.stdout.flush()
def _clean_up_temporary_files(dataset_dir):
  """Removes temporary files used to create the dataset.

  Args:
    dataset_dir: The directory where the temporary files are stored.
  """
  # Delete the downloaded tarball (named after the last URL component) ...
  tarball_name = _DATA_URL.split('/')[-1]
  tf.gfile.Remove(os.path.join(dataset_dir, tarball_name))
  # ... and the unpacked photo tree.
  tf.gfile.DeleteRecursively(os.path.join(dataset_dir, 'celeb_photos'))
def _dataset_exists(dataset_dir):
  """Return True iff every expected shard file of every split exists."""
  # all() short-circuits on the first missing shard, exactly like the
  # original early-return loop.
  return all(
      tf.gfile.Exists(_get_dataset_filename(dataset_dir, split_name, shard_id))
      for split_name in ['train', 'validation']
      for shard_id in range(_NUM_SHARDS))
def run(dataset_dir):
  """Runs the download and conversion operation.

  Args:
    dataset_dir: The dataset directory where the dataset is stored.
  """
  if not tf.gfile.Exists(dataset_dir):
    tf.gfile.MakeDirs(dataset_dir)
  # Idempotence guard: never clobber previously generated shards.
  if _dataset_exists(dataset_dir):
    print('Dataset files already exist. Exiting without re-creating them.')
    return
  # Download intentionally disabled: the celeb photos are expected to
  # already exist under <dataset_dir>/celeb_photos.
  #dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
  photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)
  # Class ids are assigned by sorted class-name order.
  class_names_to_ids = dict(zip(class_names, range(len(class_names))))
  # Divide into train and test: shuffle with a fixed seed so the split is
  # reproducible across runs.
  random.seed(_RANDOM_SEED)
  random.shuffle(photo_filenames)
  training_filenames = photo_filenames[_NUM_VALIDATION:]
  validation_filenames = photo_filenames[:_NUM_VALIDATION]
  # First, convert the training and validation sets.
  _convert_dataset('train', training_filenames, class_names_to_ids,
                   dataset_dir)
  _convert_dataset('validation', validation_filenames, class_names_to_ids,
                   dataset_dir)
  # Finally, write the labels file:
  labels_to_class_names = dict(zip(range(len(class_names)), class_names))
  dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
  # Cleanup intentionally skipped so the source photos remain on disk.
  #_clean_up_temporary_files(dataset_dir)
  print('\nFinished converting the celeb dataset!')
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
from __future__ import unicode_literals
import awx.main.fields
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
from django.utils.timezone import now
import jsonfield.fields
import taggit.managers
def create_system_job_templates(apps, schema_editor):
    '''
    Create default system job templates if not present. Create default schedules
    only if new system job templates were created (i.e. new database).
    '''
    SystemJobTemplate = apps.get_model('main', 'SystemJobTemplate')
    Schedule = apps.get_model('main', 'Schedule')
    ContentType = apps.get_model('contenttypes', 'ContentType')
    sjt_ct = ContentType.objects.get_for_model(SystemJobTemplate)
    now_dt = now()
    now_str = now_dt.strftime('%Y%m%dT%H%M%SZ')

    def ensure_template(job_type, name, description, sched_name, rrule, extra_data):
        # get_or_create keeps re-running the migration idempotent; a default
        # schedule is attached only when the template was just created
        # (i.e. on a brand-new database).
        template, created = SystemJobTemplate.objects.get_or_create(
            job_type=job_type,
            defaults=dict(
                name=name,
                description=description,
                created=now_dt,
                modified=now_dt,
                polymorphic_ctype=sjt_ct,
            ),
        )
        if created:
            schedule = Schedule(
                name=sched_name,
                rrule=rrule,
                description='Automatically Generated Schedule',
                enabled=True,
                extra_data=extra_data,
                created=now_dt,
                modified=now_dt,
            )
            schedule.unified_job_template = template
            schedule.save()

    ensure_template(
        job_type='cleanup_jobs',
        name='Cleanup Job Details',
        description='Remove job history',
        sched_name='Cleanup Job Schedule',
        rrule='DTSTART:%s RRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=SU' % now_str,
        extra_data={'days': '120'},
    )

    # 'cleanup_deleted' system jobs are obsolete: purge their schedules and
    # the templates themselves before creating the remaining defaults.
    existing_cd_jobs = SystemJobTemplate.objects.filter(job_type='cleanup_deleted')
    Schedule.objects.filter(unified_job_template__in=existing_cd_jobs).delete()
    existing_cd_jobs.delete()

    ensure_template(
        job_type='cleanup_activitystream',
        name='Cleanup Activity Stream',
        description='Remove activity stream history',
        sched_name='Cleanup Activity Schedule',
        rrule='DTSTART:%s RRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=TU' % now_str,
        extra_data={'days': '355'},
    )

    ensure_template(
        job_type='cleanup_facts',
        name='Cleanup Fact Details',
        description='Remove system tracking history',
        sched_name='Cleanup Fact Schedule',
        rrule='DTSTART:%s RRULE:FREQ=MONTHLY;INTERVAL=1;BYMONTHDAY=1' % now_str,
        extra_data={'older_than': '120d', 'granularity': '1w'},
    )
class Migration(migrations.Migration):
replaces = [('main', '0002_v300_tower_settings_changes'),
('main', '0003_v300_notification_changes'),
('main', '0004_v300_fact_changes'),
('main', '0005_v300_migrate_facts'),
('main', '0006_v300_active_flag_cleanup'),
('main', '0007_v300_active_flag_removal'),
('main', '0008_v300_rbac_changes'),
('main', '0009_v300_rbac_migrations'),
('main', '0010_v300_create_system_job_templates'),
('main', '0011_v300_credential_domain_field'),
('main', '0012_v300_create_labels'),
('main', '0013_v300_label_changes'),
('main', '0014_v300_invsource_cred'),
('main', '0015_v300_label_changes'),
('main', '0016_v300_prompting_changes'),
('main', '0017_v300_prompting_migrations'),
('main', '0018_v300_host_ordering'),
('main', '0019_v300_new_azure_credential'),]
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0001_initial'),
]
operations = [
# Tower settings changes
migrations.CreateModel(
name='TowerSettings',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=None, editable=False)),
('modified', models.DateTimeField(default=None, editable=False)),
('key', models.CharField(unique=True, max_length=255)),
('description', models.TextField()),
('category', models.CharField(max_length=128)),
('value', models.TextField(blank=True)),
('value_type', models.CharField(max_length=12, choices=[('string', 'String'), ('int', 'Integer'), ('float', 'Decimal'), ('json', 'JSON'), ('bool', 'Boolean'), ('password', 'Password'), ('list', 'List')])),
('user', models.ForeignKey(related_name='settings', default=None, editable=False, to=settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True)),
],
),
# Notification changes
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=None, editable=False)),
('modified', models.DateTimeField(default=None, editable=False)),
('status', models.CharField(default='pending', max_length=20, editable=False, choices=[('pending', 'Pending'), ('successful', 'Successful'), ('failed', 'Failed')])),
('error', models.TextField(default='', editable=False, blank=True)),
('notifications_sent', models.IntegerField(default=0, editable=False)),
('notification_type', models.CharField(max_length=32, choices=[('email', 'Email'), ('slack', 'Slack'), ('twilio', 'Twilio'), ('pagerduty', 'Pagerduty'), ('hipchat', 'HipChat'), ('webhook', 'Webhook'), ('mattermost', 'Mattermost'), ('rocketchat', 'Rocket.Chat'), ('irc', 'IRC')])),
('recipients', models.TextField(default='', editable=False, blank=True)),
('subject', models.TextField(default='', editable=False, blank=True)),
('body', jsonfield.fields.JSONField(default=dict, blank=True)),
],
options={
'ordering': ('pk',),
},
),
migrations.CreateModel(
name='NotificationTemplate',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=None, editable=False)),
('modified', models.DateTimeField(default=None, editable=False)),
('description', models.TextField(default='', blank=True)),
('name', models.CharField(unique=True, max_length=512)),
('notification_type', models.CharField(max_length=32, choices=[('email', 'Email'), ('slack', 'Slack'), ('twilio', 'Twilio'), ('pagerduty', 'Pagerduty'), ('hipchat', 'HipChat'), ('webhook', 'Webhook'), ('mattermost', 'Mattermost'), ('rocketchat', 'Rocket.Chat'), ('irc', 'IRC')])),
('notification_configuration', jsonfield.fields.JSONField(default=dict)),
('created_by', models.ForeignKey(related_name="{u'class': 'notificationtemplate', u'app_label': 'main'}(class)s_created+", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
('modified_by', models.ForeignKey(related_name="{u'class': 'notificationtemplate', u'app_label': 'main'}(class)s_modified+", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
('organization', models.ForeignKey(related_name='notification_templates', on_delete=django.db.models.deletion.SET_NULL, to='main.Organization', null=True)),
('tags', taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags')),
],
),
migrations.AddField(
model_name='notification',
name='notification_template',
field=models.ForeignKey(related_name='notifications', editable=False, on_delete=models.CASCADE, to='main.NotificationTemplate'),
),
migrations.AddField(
model_name='activitystream',
name='notification',
field=models.ManyToManyField(to='main.Notification', blank=True),
),
migrations.AddField(
model_name='activitystream',
name='notification_template',
field=models.ManyToManyField(to='main.NotificationTemplate', blank=True),
),
migrations.AddField(
model_name='organization',
name='notification_templates_any',
field=models.ManyToManyField(related_name='organization_notification_templates_for_any', to='main.NotificationTemplate', blank=True),
),
migrations.AddField(
model_name='organization',
name='notification_templates_error',
field=models.ManyToManyField(related_name='organization_notification_templates_for_errors', to='main.NotificationTemplate', blank=True),
),
migrations.AddField(
model_name='organization',
name='notification_templates_success',
field=models.ManyToManyField(related_name='organization_notification_templates_for_success', to='main.NotificationTemplate', blank=True),
),
migrations.AddField(
model_name='unifiedjob',
name='notifications',
field=models.ManyToManyField(related_name='unifiedjob_notifications', editable=False, to='main.Notification'),
),
migrations.AddField(
model_name='unifiedjobtemplate',
name='notification_templates_any',
field=models.ManyToManyField(related_name='unifiedjobtemplate_notification_templates_for_any', to='main.NotificationTemplate', blank=True),
),
migrations.AddField(
model_name='unifiedjobtemplate',
name='notification_templates_error',
field=models.ManyToManyField(related_name='unifiedjobtemplate_notification_templates_for_errors', to='main.NotificationTemplate', blank=True),
),
migrations.AddField(
model_name='unifiedjobtemplate',
name='notification_templates_success',
field=models.ManyToManyField(related_name='unifiedjobtemplate_notification_templates_for_success', to='main.NotificationTemplate', blank=True),
),
# Fact changes
migrations.CreateModel(
name='Fact',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('timestamp', models.DateTimeField(default=None, help_text='Date and time of the corresponding fact scan gathering time.', editable=False)),
('module', models.CharField(max_length=128)),
('facts', awx.main.fields.JSONBField(default=dict, help_text='Arbitrary JSON structure of module facts captured at timestamp for a single host.', blank=True)),
('host', models.ForeignKey(related_name='facts', to='main.Host', on_delete=models.CASCADE, help_text='Host for the facts that the fact scan captured.')),
],
),
migrations.AlterIndexTogether(
name='fact',
index_together=set([('timestamp', 'module', 'host')]),
),
# Active flag removal
migrations.RemoveField(
model_name='credential',
name='active',
),
migrations.RemoveField(
model_name='custominventoryscript',
name='active',
),
migrations.RemoveField(
model_name='group',
name='active',
),
migrations.RemoveField(
model_name='host',
name='active',
),
migrations.RemoveField(
model_name='inventory',
name='active',
),
migrations.RemoveField(
model_name='organization',
name='active',
),
migrations.RemoveField(
model_name='permission',
name='active',
),
migrations.RemoveField(
model_name='schedule',
name='active',
),
migrations.RemoveField(
model_name='team',
name='active',
),
migrations.RemoveField(
model_name='unifiedjob',
name='active',
),
migrations.RemoveField(
model_name='unifiedjobtemplate',
name='active',
),
# RBAC Changes
# ############
migrations.RenameField(
'Organization',
'admins',
'deprecated_admins',
),
migrations.RenameField(
'Organization',
'users',
'deprecated_users',
),
migrations.RenameField(
'Team',
'users',
'deprecated_users',
),
migrations.RenameField(
'Team',
'projects',
'deprecated_projects',
),
migrations.AddField(
model_name='project',
name='organization',
field=models.ForeignKey(related_name='projects', to='main.Organization', on_delete=models.CASCADE, blank=True, null=True),
),
migrations.AlterField(
model_name='team',
name='deprecated_projects',
field=models.ManyToManyField(related_name='deprecated_teams', to='main.Project', blank=True),
),
migrations.RenameField(
model_name='organization',
old_name='projects',
new_name='deprecated_projects',
),
migrations.AlterField(
model_name='organization',
name='deprecated_projects',
field=models.ManyToManyField(related_name='deprecated_organizations', to='main.Project', blank=True),
),
migrations.RenameField(
'Credential',
'team',
'deprecated_team',
),
migrations.RenameField(
'Credential',
'user',
'deprecated_user',
),
migrations.AlterField(
model_name='organization',
name='deprecated_admins',
field=models.ManyToManyField(related_name='deprecated_admin_of_organizations', to=settings.AUTH_USER_MODEL, blank=True),
),
migrations.AlterField(
model_name='organization',
name='deprecated_users',
field=models.ManyToManyField(related_name='deprecated_organizations', to=settings.AUTH_USER_MODEL, blank=True),
),
migrations.AlterField(
model_name='team',
name='deprecated_users',
field=models.ManyToManyField(related_name='deprecated_teams', to=settings.AUTH_USER_MODEL, blank=True),
),
migrations.AlterUniqueTogether(
name='credential',
unique_together=set([]),
),
migrations.AddField(
model_name='credential',
name='organization',
field=models.ForeignKey(related_name='credentials', on_delete=models.CASCADE, default=None, blank=True, to='main.Organization', null=True),
),
#
# New RBAC models and fields
#
migrations.CreateModel(
name='Role',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('role_field', models.TextField()),
('singleton_name', models.TextField(default=None, unique=True, null=True, db_index=True)),
('members', models.ManyToManyField(related_name='roles', to=settings.AUTH_USER_MODEL)),
('parents', models.ManyToManyField(related_name='children', to='main.Role')),
('implicit_parents', models.TextField(default='[]')),
('content_type', models.ForeignKey(default=None, to='contenttypes.ContentType', on_delete=models.CASCADE, null=True)),
('object_id', models.PositiveIntegerField(default=None, null=True)),
],
options={
'db_table': 'main_rbac_roles',
'verbose_name_plural': 'roles',
},
),
migrations.CreateModel(
name='RoleAncestorEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('role_field', models.TextField()),
('content_type_id', models.PositiveIntegerField()),
('object_id', models.PositiveIntegerField()),
('ancestor', models.ForeignKey(on_delete=models.CASCADE, related_name='+', to='main.Role')),
('descendent', models.ForeignKey(on_delete=models.CASCADE, related_name='+', to='main.Role')),
],
options={
'db_table': 'main_rbac_role_ancestors',
'verbose_name_plural': 'role_ancestors',
},
),
migrations.AddField(
model_name='role',
name='ancestors',
field=models.ManyToManyField(related_name='descendents', through='main.RoleAncestorEntry', to='main.Role'),
),
migrations.AlterIndexTogether(
name='role',
index_together=set([('content_type', 'object_id')]),
),
migrations.AlterIndexTogether(
name='roleancestorentry',
index_together=set([('ancestor', 'content_type_id', 'object_id'), ('ancestor', 'content_type_id', 'role_field'), ('ancestor', 'descendent')]),
),
migrations.AddField(
model_name='credential',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['singleton:system_administrator'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='credential',
name='use_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['admin_role'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='credential',
name='read_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['singleton:system_auditor', 'organization.auditor_role', 'use_role', 'admin_role'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='custominventoryscript',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='organization.admin_role', to='main.Role', null='True'),
),
migrations.AddField(
model_name='custominventoryscript',
name='read_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['organization.auditor_role', 'organization.member_role', 'admin_role'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='inventory',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='organization.admin_role', to='main.Role', null='True'),
),
migrations.AddField(
model_name='inventory',
name='adhoc_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='admin_role', to='main.Role', null='True'),
),
migrations.AddField(
model_name='inventory',
name='update_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='admin_role', to='main.Role', null='True'),
),
migrations.AddField(
model_name='inventory',
name='use_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='adhoc_role', to='main.Role', null='True'),
),
migrations.AddField(
model_name='inventory',
name='read_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['organization.auditor_role', 'update_role', 'use_role', 'admin_role'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='jobtemplate',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['project.organization.admin_role', 'inventory.organization.admin_role'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='jobtemplate',
name='execute_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['admin_role'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='jobtemplate',
name='read_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['project.organization.auditor_role', 'inventory.organization.auditor_role', 'execute_role', 'admin_role'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='organization',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='singleton:system_administrator', to='main.Role', null='True'),
),
migrations.AddField(
model_name='organization',
name='auditor_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='singleton:system_auditor', to='main.Role', null='True'),
),
migrations.AddField(
model_name='organization',
name='member_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='admin_role', to='main.Role', null='True'),
),
migrations.AddField(
model_name='organization',
name='read_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['member_role', 'auditor_role'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='project',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['organization.admin_role', 'singleton:system_administrator'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='project',
name='use_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='admin_role', to='main.Role', null='True'),
),
migrations.AddField(
model_name='project',
name='update_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='admin_role', to='main.Role', null='True'),
),
migrations.AddField(
model_name='project',
name='read_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['organization.auditor_role', 'singleton:system_auditor', 'use_role', 'update_role'], to='main.Role', null='True'),
),
migrations.AddField(
model_name='team',
name='admin_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role='organization.admin_role', to='main.Role', null='True'),
),
migrations.AddField(
model_name='team',
name='member_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=None, to='main.Role', null='True'),
),
migrations.AddField(
model_name='team',
name='read_role',
field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=['admin_role', 'organization.auditor_role', 'member_role'], to='main.Role', null='True'),
),
# System Job Templates
migrations.RunPython(create_system_job_templates, migrations.RunPython.noop),
migrations.AlterField(
model_name='systemjob',
name='job_type',
field=models.CharField(default='', max_length=32, blank=True, choices=[('cleanup_jobs', 'Remove jobs older than a certain number of days'), ('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'), ('cleanup_facts', 'Purge and/or reduce the granularity of system tracking data')]),
),
migrations.AlterField(
model_name='systemjobtemplate',
name='job_type',
field=models.CharField(default='', max_length=32, blank=True, choices=[('cleanup_jobs', 'Remove jobs older than a certain number of days'), ('cleanup_activitystream', 'Remove activity stream entries older than a certain number of days'), ('cleanup_facts', 'Purge and/or reduce the granularity of system tracking data')]),
),
# Credential domain field
migrations.AddField(
model_name='credential',
name='domain',
field=models.CharField(default='', help_text='The identifier for the domain.', max_length=100, verbose_name='Domain', blank=True),
),
# Create Labels
migrations.CreateModel(
name='Label',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(default=None, editable=False)),
('modified', models.DateTimeField(default=None, editable=False)),
('description', models.TextField(default='', blank=True)),
('name', models.CharField(max_length=512)),
('created_by', models.ForeignKey(related_name="{u'class': 'label', u'app_label': 'main'}(class)s_created+", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
('modified_by', models.ForeignKey(related_name="{u'class': 'label', u'app_label': 'main'}(class)s_modified+", on_delete=django.db.models.deletion.SET_NULL, default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)),
('organization', models.ForeignKey(related_name='labels', on_delete=django.db.models.deletion.CASCADE, to='main.Organization', help_text='Organization this label belongs to.')),
('tags', taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags')),
],
options={
'ordering': ('organization', 'name'),
},
),
migrations.AddField(
model_name='activitystream',
name='label',
field=models.ManyToManyField(to='main.Label', blank=True),
),
migrations.AddField(
model_name='job',
name='labels',
field=models.ManyToManyField(related_name='job_labels', to='main.Label', blank=True),
),
migrations.AddField(
model_name='jobtemplate',
name='labels',
field=models.ManyToManyField(related_name='jobtemplate_labels', to='main.Label', blank=True),
),
migrations.AlterUniqueTogether(
name='label',
unique_together=set([('name', 'organization')]),
),
# Label changes
migrations.AlterField(
model_name='label',
name='organization',
field=models.ForeignKey(related_name='labels', on_delete=django.db.models.deletion.CASCADE, default=None, blank=True, to='main.Organization', help_text='Organization this label belongs to.', null=True),
),
migrations.AlterField(
model_name='label',
name='organization',
field=models.ForeignKey(related_name='labels', on_delete=django.db.models.deletion.CASCADE, to='main.Organization', help_text='Organization this label belongs to.'),
),
# InventorySource Credential
migrations.AddField(
model_name='job',
name='network_credential',
field=models.ForeignKey(related_name='jobs_as_network_credential+', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Credential', null=True),
),
migrations.AddField(
model_name='jobtemplate',
name='network_credential',
field=models.ForeignKey(related_name='jobtemplates_as_network_credential+', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Credential', null=True),
),
migrations.AddField(
model_name='credential',
name='authorize',
field=models.BooleanField(default=False, help_text='Whether to use the authorize mechanism.'),
),
migrations.AddField(
model_name='credential',
name='authorize_password',
field=models.CharField(default='', help_text='Password used by the authorize mechanism.', max_length=1024, blank=True),
),
migrations.AlterField(
model_name='credential',
name='deprecated_team',
field=models.ForeignKey(related_name='deprecated_credentials', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Team', null=True),
),
migrations.AlterField(
model_name='credential',
name='deprecated_user',
field=models.ForeignKey(related_name='deprecated_credentials', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AlterField(
model_name='credential',
name='kind',
field=models.CharField(default='ssh', max_length=32, choices=[('ssh', 'Machine'), ('net', 'Network'), ('scm', 'Source Control'), ('aws', 'Amazon Web Services'), ('rax', 'Rackspace'), ('vmware', 'VMware vCenter'), ('satellite6', 'Red Hat Satellite 6'), ('cloudforms', 'Red Hat CloudForms'), ('gce', 'Google Compute Engine'), ('azure', 'Microsoft Azure'), ('openstack', 'OpenStack')]),
),
migrations.AlterField(
model_name='inventorysource',
name='source',
field=models.CharField(default='', max_length=32, blank=True, choices=[('', 'Manual'), ('file', 'Local File, Directory or Script'), ('rax', 'Rackspace Cloud Servers'), ('ec2', 'Amazon EC2'), ('gce', 'Google Compute Engine'), ('azure', 'Microsoft Azure'), ('vmware', 'VMware vCenter'), ('satellite6', 'Red Hat Satellite 6'), ('cloudforms', 'Red Hat CloudForms'), ('openstack', 'OpenStack'), ('custom', 'Custom Script')]),
),
migrations.AlterField(
model_name='inventoryupdate',
name='source',
field=models.CharField(default='', max_length=32, blank=True, choices=[('', 'Manual'), ('file', 'Local File, Directory or Script'), ('rax', 'Rackspace Cloud Servers'), ('ec2', 'Amazon EC2'), ('gce', 'Google Compute Engine'), ('azure', 'Microsoft Azure'), ('vmware', 'VMware vCenter'), ('satellite6', 'Red Hat Satellite 6'), ('cloudforms', 'Red Hat CloudForms'), ('openstack', 'OpenStack'), ('custom', 'Custom Script')]),
),
migrations.AlterField(
model_name='team',
name='deprecated_projects',
field=models.ManyToManyField(related_name='deprecated_teams', to='main.Project', blank=True),
),
# Prompting changes
migrations.AddField(
model_name='jobtemplate',
name='ask_limit_on_launch',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_inventory_on_launch',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_credential_on_launch',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_job_type_on_launch',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_tags_on_launch',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='job',
name='inventory',
field=models.ForeignKey(related_name='jobs', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Inventory', null=True),
),
migrations.AlterField(
model_name='jobtemplate',
name='inventory',
field=models.ForeignKey(related_name='jobtemplates', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Inventory', null=True),
),
# Host ordering
migrations.AlterModelOptions(
name='host',
options={'ordering': ('name',)},
),
# New Azure credential
migrations.AddField(
model_name='credential',
name='client',
field=models.CharField(default='', help_text='Client Id or Application Id for the credential', max_length=128, blank=True),
),
migrations.AddField(
model_name='credential',
name='secret',
field=models.CharField(default='', help_text='Secret Token for this credential', max_length=1024, blank=True),
),
migrations.AddField(
model_name='credential',
name='subscription',
field=models.CharField(default='', help_text='Subscription identifier for this credential', max_length=1024, blank=True),
),
migrations.AddField(
model_name='credential',
name='tenant',
field=models.CharField(default='', help_text='Tenant identifier for this credential', max_length=1024, blank=True),
),
migrations.AlterField(
model_name='credential',
name='kind',
field=models.CharField(default='ssh', max_length=32, choices=[('ssh', 'Machine'), ('net', 'Network'), ('scm', 'Source Control'), ('aws', 'Amazon Web Services'), ('rax', 'Rackspace'), ('vmware', 'VMware vCenter'), ('satellite6', 'Satellite 6'), ('cloudforms', 'CloudForms'), ('gce', 'Google Compute Engine'), ('azure', 'Microsoft Azure Classic (deprecated)'), ('azure_rm', 'Microsoft Azure Resource Manager'), ('openstack', 'OpenStack')]),
),
migrations.AlterField(
model_name='host',
name='instance_id',
field=models.CharField(default='', max_length=1024, blank=True),
),
migrations.AlterField(
model_name='inventorysource',
name='source',
field=models.CharField(default='', max_length=32, blank=True, choices=[('', 'Manual'), ('file', 'Local File, Directory or Script'), ('rax', 'Rackspace Cloud Servers'), ('ec2', 'Amazon EC2'), ('gce', 'Google Compute Engine'), ('azure', 'Microsoft Azure Classic (deprecated)'), ('azure_rm', 'Microsoft Azure Resource Manager'), ('vmware', 'VMware vCenter'), ('satellite6', 'Satellite 6'), ('cloudforms', 'CloudForms'), ('openstack', 'OpenStack'), ('custom', 'Custom Script')]),
),
migrations.AlterField(
model_name='inventoryupdate',
name='source',
field=models.CharField(default='', max_length=32, blank=True, choices=[('', 'Manual'), ('file', 'Local File, Directory or Script'), ('rax', 'Rackspace Cloud Servers'), ('ec2', 'Amazon EC2'), ('gce', 'Google Compute Engine'), ('azure', 'Microsoft Azure Classic (deprecated)'), ('azure_rm', 'Microsoft Azure Resource Manager'), ('vmware', 'VMware vCenter'), ('satellite6', 'Satellite 6'), ('cloudforms', 'CloudForms'), ('openstack', 'OpenStack'), ('custom', 'Custom Script')]),
),
]
|
|
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""OpenStack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import re
import sys
import traceback
from oslo.config import cfg
import six
from six import moves
from rally.openstack.common.gettextutils import _
from rally.openstack.common import importutils
from rally.openstack.common import jsonutils
from rally.openstack.common import local
# Default timestamp format used when log-date-format is not configured.
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
# Keys whose values must never appear in clear text in log output.
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
# Each template matches one serialization style for a sensitive key:
# key=value assignments, XML elements, and two JSON-ish "key": "value"
# spellings; group 1/2 capture the surrounding text kept by mask_password.
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
                    r'(<%(key)s>).*?(</%(key)s>)',
                    r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
                    r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']
for key in _SANITIZE_KEYS:
    for pattern in _FORMAT_PATTERNS:
        # DOTALL so secret values containing newlines are still masked.
        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
        _SANITIZE_PATTERNS.append(reg_ex)
# CLI options shared by every service: -d/--debug and -v/--verbose raise
# the effective root log level from the WARNING default.
common_cli_opts = [
    cfg.BoolOpt('debug',
                short='d',
                default=False,
                help='Print debugging output (set logging level to '
                     'DEBUG instead of default WARNING level).'),
    cfg.BoolOpt('verbose',
                short='v',
                default=False,
                help='Print more verbose output (set logging level to '
                     'INFO instead of default WARNING level).'),
]
# CLI options controlling log destinations (config file, log file/dir,
# syslog) and the deprecated low-level format overrides.
logging_cli_opts = [
    cfg.StrOpt('log-config-append',
               metavar='PATH',
               deprecated_name='log-config',
               help='The name of logging configuration file. It does not '
                    'disable existing loggers, but just appends specified '
                    'logging configuration to any other existing logging '
                    'options. Please see the Python logging module '
                    'documentation for details on logging configuration '
                    'files.'),
    cfg.StrOpt('log-format',
               default=None,
               metavar='FORMAT',
               help='DEPRECATED. '
                    'A logging.Formatter log message format string which may '
                    'use any of the available logging.LogRecord attributes. '
                    'This option is deprecated. Please use '
                    'logging_context_format_string and '
                    'logging_default_format_string instead.'),
    cfg.StrOpt('log-date-format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               metavar='DATE_FORMAT',
               help='Format string for %%(asctime)s in log records. '
                    'Default: %(default)s'),
    cfg.StrOpt('log-file',
               metavar='PATH',
               deprecated_name='logfile',
               help='(Optional) Name of log file to output to. '
                    'If no default is set, logging will go to stdout.'),
    cfg.StrOpt('log-dir',
               deprecated_name='logdir',
               help='(Optional) The base directory used for relative '
                    '--log-file paths'),
    cfg.BoolOpt('use-syslog',
                default=False,
                help='Use syslog for logging. '
                     'Existing syslog format is DEPRECATED during I, '
                     'and then will be changed in J to honor RFC5424'),
    cfg.BoolOpt('use-syslog-rfc-format',
                # TODO(bogdando) remove or use True after existing
                # syslog format deprecation in J
                default=False,
                help='(Optional) Use syslog rfc5424 format for logging. '
                     'If enabled, will add APP-NAME (RFC5424) before the '
                     'MSG part of the syslog message. The old format '
                     'without APP-NAME is deprecated in I, '
                     'and will be removed in J.'),
    cfg.StrOpt('syslog-log-facility',
               default='LOG_USER',
               help='Syslog facility to receive log lines')
]
generic_log_opts = [
    cfg.BoolOpt('use_stderr',
                default=True,
                help='Log output to standard error')
]
# Config-file-only options: the context-aware format strings consumed by
# ContextFormatter, per-module level overrides, and instance formatting.
log_opts = [
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [%(request_id)s %(user_identity)s] '
                       '%(instance)s%(message)s',
               help='Format string to use for log messages with context'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [-] %(instance)s%(message)s',
               help='Format string to use for log messages without context'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='Data to append to log format when level is DEBUG'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                       '%(instance)s',
               help='Prefix each line of exception output with this format'),
    cfg.ListOpt('default_log_levels',
                default=[
                    'amqp=WARN',
                    'amqplib=WARN',
                    'boto=WARN',
                    'qpid=WARN',
                    'sqlalchemy=WARN',
                    'suds=INFO',
                    'oslo.messaging=INFO',
                    'iso8601=WARN',
                    'requests.packages.urllib3.connectionpool=WARN'
                ],
                help='List of logger=LEVEL pairs'),
    cfg.BoolOpt('publish_errors',
                default=False,
                help='Publish error events'),
    cfg.BoolOpt('fatal_deprecations',
                default=False,
                help='Make deprecations fatal'),
    # NOTE(mikal): there are two options here because sometimes we are handed
    # a full instance (and could include more information), and other times we
    # are just handed a UUID for the instance.
    cfg.StrOpt('instance_format',
               default='[instance: %(uuid)s] ',
               help='If an instance is passed with the log message, format '
                    'it like this'),
    cfg.StrOpt('instance_uuid_format',
               default='[instance: %(uuid)s] ',
               help='If an instance UUID is passed with the log message, '
                    'format it like this'),
]
CONF = cfg.CONF
# Register everything at import time so the options exist before any
# config files or command lines are parsed.
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
# Compatibility shim: logging.NullHandler exists only on Python >= 2.7;
# provide a minimal no-op replacement on older interpreters.
try:
    NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
    class NullHandler(logging.Handler):
        # Swallow every record without emitting anything.
        def handle(self, record):
            pass
        def emit(self, record):
            pass
        def createLock(self):
            # No lock needed since nothing is ever written.
            self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
    """Compute the log file path from CONF.log_file / CONF.log_dir.

    :param binary: program name used to synthesize '<binary>.log' when
        only a log directory is configured; defaults to the running
        binary's name.
    :returns: a path string, or None when no file logging is configured.
    """
    logfile = CONF.log_file
    logdir = CONF.log_dir
    if logfile:
        # A bare log_file is used as-is; combined with log_dir it is joined.
        return os.path.join(logdir, logfile) if logdir else logfile
    if logdir:
        binary = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, binary),)
    return None
def mask_password(message, secret="***"):
    """Replace any password values embedded in *message* with *secret*.

    Values are located via the precompiled ``_SANITIZE_PATTERNS``, which
    cover key=value, XML and JSON-style spellings of the keys listed in
    ``_SANITIZE_KEYS``.

    :param message: text which may include security-sensitive values.
    :param secret: replacement text for the masked values.
    :returns: the unicode value of *message* with password fields masked.
    """
    message = six.text_type(message)
    # NOTE(ldbragst): cheap pre-check -- skip all regex passes when no
    # sensitive key occurs anywhere in the text.
    if not any(sensitive in message for sensitive in _SANITIZE_KEYS):
        return message
    replacement = r'\g<1>' + secret + r'\g<2>'
    for regex in _SANITIZE_PATTERNS:
        message = re.sub(regex, replacement, message)
    return message
class BaseLoggerAdapter(logging.LoggerAdapter):
    """LoggerAdapter that understands the synthesized AUDIT level."""

    def audit(self, msg, *args, **kwargs):
        """Log *msg* at AUDIT severity (one above INFO)."""
        self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
    """Adapter that defers creation of the wrapped logger.

    The real logger is only built (via :func:`getLogger`) the first time
    the ``logger`` property is read; until then no logger exists.
    """

    def __init__(self, name='unknown', version='unknown'):
        self._logger = None
        self.extra = {}
        self.name = name
        self.version = version

    @property
    def logger(self):
        if self._logger is None:
            self._logger = getLogger(self.name, self.version)
        return self._logger
class ContextAdapter(BaseLoggerAdapter):
    """Adapter that injects request-context data into every log record."""
    # Alias so callers may use .warn() as well as .warning().
    warn = logging.LoggerAdapter.warning
    def __init__(self, logger, project_name, version_string):
        """Wrap *logger*, remembering project name/version for formatting."""
        self.logger = logger
        self.project = project_name
        self.version = version_string
        # Cache of deprecation messages already emitted: msg -> [args, ...]
        self._deprecated_messages_sent = dict()
    @property
    def handlers(self):
        # Expose the wrapped logger's handlers (plain LoggerAdapter hides them).
        return self.logger.handlers
    def deprecated(self, msg, *args, **kwargs):
        """Call this method when a deprecated feature is used.
        If the system is configured for fatal deprecations then the message
        is logged at the 'critical' level and :class:`DeprecatedConfig` will
        be raised.
        Otherwise, the message will be logged (once) at the 'warn' level.
        :raises: :class:`DeprecatedConfig` if the system is configured for
                 fatal deprecations.
        """
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)
        # Using a list because a tuple with dict can't be stored in a set.
        sent_args = self._deprecated_messages_sent.setdefault(msg, list())
        if args in sent_args:
            # Already logged this message, so don't log it again.
            return
        sent_args.append(args)
        self.warn(stdmsg, *args, **kwargs)
    def process(self, msg, kwargs):
        """Populate kwargs['extra'] with context/instance/project info."""
        # NOTE(mrodden): catch any Message/other object and
        # coerce to unicode before they can get
        # to the python logging and possibly
        # cause string encoding trouble
        if not isinstance(msg, six.string_types):
            msg = six.text_type(msg)
        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']
        # Context may be passed explicitly or picked up from thread-local
        # storage (local.store).
        context = kwargs.pop('context', None)
        if not context:
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))
        # An instance object or a bare instance UUID selects which of the
        # two instance format options is applied.
        instance = kwargs.pop('instance', None)
        instance_uuid = (extra.get('instance_uuid') or
                         kwargs.pop('instance_uuid', None))
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        elif instance_uuid:
            instance_extra = (CONF.instance_uuid_format
                              % {'uuid': instance_uuid})
        extra['instance'] = instance_extra
        extra.setdefault('user_identity', kwargs.pop('user_identity', None))
        extra['project'] = self.project
        extra['version'] = self.version
        # Snapshot everything under 'extra' so JSONFormatter can serialize it.
        extra['extra'] = extra.copy()
        return msg, kwargs
class JSONFormatter(logging.Formatter):
    """Formatter that renders each log record as a JSON document."""

    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker): fmt is accepted (logging.config.fileConfig
        # passes it) but deliberately ignored -- the output structure
        # is fixed JSON.
        self.datefmt = datefmt

    def formatException(self, ei, strip_newlines=True):
        """Return the traceback for *ei* as a list of strings."""
        tb_lines = traceback.format_exception(*ei)
        if strip_newlines:
            # Split multi-line entries and drop empty strings.
            tb_lines = [moves.filter(
                lambda x: x,
                line.rstrip().splitlines()) for line in tb_lines]
            tb_lines = list(itertools.chain(*tb_lines))
        return tb_lines

    def format(self, record):
        """Serialize *record* (and any traceback) to a JSON string."""
        payload = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}
        # 'extra' is attached by ContextAdapter.process when present.
        if hasattr(record, 'extra'):
            payload['extra'] = record.extra
        if record.exc_info:
            payload['traceback'] = self.formatException(record.exc_info)
        return jsonutils.dumps(payload)
def _create_logging_excepthook(product_name):
def logging_excepthook(exc_type, value, tb):
extra = {}
if CONF.verbose or CONF.debug:
extra['exc_info'] = (exc_type, value, tb)
getLogger(product_name).critical(
"".join(traceback.format_exception_only(exc_type, value)),
**extra)
return logging_excepthook
class LogConfigError(Exception):
    """Raised when a logging configuration file cannot be loaded."""

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % {'log_config': self.log_config,
                               'err_msg': self.err_msg}
def _load_log_config(log_config_append):
    """Append the configuration in *log_config_append* to current logging.

    :raises LogConfigError: when the file cannot be parsed.
    """
    try:
        logging.config.fileConfig(log_config_append,
                                  disable_existing_loggers=False)
    except moves.configparser.Error as exc:
        # Surface parse failures as our own exception type.
        raise LogConfigError(log_config_append, str(exc))
def setup(product_name, version='unknown'):
    """Initialize logging for *product_name* and install the excepthook."""
    config_file = CONF.log_config_append
    if config_file:
        _load_log_config(config_file)
    else:
        _setup_logging_from_conf(product_name, version)
    sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
    """Override the default of the context log format string option."""
    cfg.set_defaults(
        log_opts,
        logging_context_format_string=logging_context_format_string)
def _find_facility_from_conf():
    """Resolve CONF.syslog_log_facility to a SysLogHandler facility code.

    Accepts both the LOG_* attribute spelling and the short names in
    SysLogHandler.facility_names (e.g. 'user', 'daemon').

    :returns: the numeric facility value for SysLogHandler.
    :raises TypeError: if the configured facility name is unknown.
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)
    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)
    if facility is None:
        # BUG FIX: dict.keys() returns a view on Python 3 which has no
        # .extend(); materialize a real list before extending it.
        valid_facilities = list(facility_names.keys())
        # BUG FIX: removed the duplicated 'LOG_AUTH' entry.
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))
    return facility
class RFCSysLogHandler(logging.handlers.SysLogHandler):
    """SysLogHandler that prefixes each message with the APP-NAME field
    expected by RFC 5424."""

    def __init__(self, *args, **kwargs):
        # Capture the binary name once; it is prepended to every message.
        self.binary_name = _get_binary_name()
        super(RFCSysLogHandler, self).__init__(*args, **kwargs)

    def format(self, record):
        formatted = super(RFCSysLogHandler, self).format(record)
        return '%s %s' % (self.binary_name, formatted)
def _setup_logging_from_conf(project, version):
    """Configure root logger handlers, formatters and levels from CONF.

    :param project: project name handed to ContextFormatter.
    :param version: project version handed to ContextFormatter.
    """
    log_root = getLogger(None).logger
    # BUG FIX: iterate over a copy -- removeHandler() mutates the very
    # list being iterated, which used to skip every other handler.
    for handler in list(log_root.handlers):
        log_root.removeHandler(handler)
    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        # TODO(bogdando) use the format provided by RFCSysLogHandler
        #   after existing syslog format deprecation in J
        if CONF.use_syslog_rfc_format:
            syslog = RFCSysLogHandler(address='/dev/log',
                                      facility=facility)
        else:
            syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                    facility=facility)
        log_root.addHandler(syslog)
    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)
    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)
    elif not logpath:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)
    if CONF.publish_errors:
        handler = importutils.import_object(
            "rally.openstack.common.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)
    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently. This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(project=project,
                                                  version=version,
                                                  datefmt=datefmt))
    # Root level: DEBUG / INFO / WARNING depending on the CLI flags.
    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)
    # Apply per-module overrides such as 'amqp=WARN'.
    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        level = logging.getLevelName(level_name)
        logger = logging.getLogger(mod)
        logger.setLevel(level)
# Process-wide cache of ContextAdapter instances, keyed by logger name.
_loggers = {}


def getLogger(name='unknown', version='unknown'):
    """Return (and cache) a ContextAdapter wrapping the named logger."""
    try:
        return _loggers[name]
    except KeyError:
        adapter = ContextAdapter(logging.getLogger(name), name, version)
        _loggers[name] = adapter
        return adapter
def getLazyLogger(name='unknown', version='unknown'):
    """Return a lazy pass-through logger.

    The returned adapter does not create the real logger until it is
    actually needed, and from then on delegates every call to it.
    """
    return LazyAdapter(name, version)
class WritableLogger(object):
    """File-like shim whose ``write`` calls are forwarded to a logger."""

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, msg):
        # Trailing newlines would produce empty log lines; strip them.
        self.logger.log(self.level, msg.rstrip())
class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.
    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string. You can also specify
    logging_debug_format_suffix to append extra formatting if the log level is
    debug.
    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter
    If available, uses the context value stored in TLS - local.store.context
    """
    def __init__(self, *args, **kwargs):
        """Initialize ContextFormatter instance
        Takes additional keyword arguments which can be used in the message
        format string.
        :keyword project: project name
        :type project: string
        :keyword version: project version
        :type version: string
        """
        # Pop our keywords before delegating so logging.Formatter does
        # not choke on them.
        self.project = kwargs.pop('project', 'unknown')
        self.version = kwargs.pop('version', 'unknown')
        logging.Formatter.__init__(self, *args, **kwargs)
    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""
        # store project info
        record.project = self.project
        record.version = self.version
        # store request info
        context = getattr(local.store, 'context', None)
        if context:
            # Copy every context field onto the record so the format
            # string can reference them directly.
            d = _dictify_context(context)
            for k, v in d.items():
                setattr(record, k, v)
        # NOTE(sdague): default the fancier formatting params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color', 'user_identity'):
            if key not in record.__dict__:
                record.__dict__[key] = ''
        # Select the per-record format string: context-aware when a
        # request_id is present, the plain default otherwise.  self._fmt
        # is what logging.Formatter.format() consults.
        if record.__dict__.get('request_id'):
            self._fmt = CONF.logging_context_format_string
        else:
            self._fmt = CONF.logging_default_format_string
        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            self._fmt += " " + CONF.logging_debug_format_suffix
        # Cache this on the record, Logger will respect our formatted copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)
    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            return logging.Formatter.formatException(self, exc_info)
        stringbuffer = moves.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()
        # Only compute asctime when the configured prefix actually uses it.
        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)
        formatted_lines = []
        for line in lines:
            # Prefix every traceback line (e.g. with 'TRACE <name>').
            pl = CONF.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
    """StreamHandler that exposes an ANSI color code as ``record.color``.

    Format strings may reference %(color)s to colorize output per level.
    """
    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',  # GREEN
        logging.INFO: '\033[00;36m',  # CYAN
        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }

    def format(self, record):
        # BUG FIX: records logged at a level not in LEVEL_COLORS (custom
        # levels) used to raise KeyError; fall back to no color instead.
        record.color = self.LEVEL_COLORS.get(record.levelno, '')
        return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
    """Raised by ContextAdapter.deprecated when deprecations are fatal."""
    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        # BUG FIX: the original called super(Exception, self), which skips
        # Exception in the MRO; name this class so Exception.__init__ runs.
        super(DeprecatedConfig, self).__init__(self.message % dict(msg=msg))
|
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/MSCommon/vs.py 4369 2009/09/19 15:58:29 scons"
__doc__ = """Module to detect Visual Studio and/or Visual C/C++
"""
import os
import SCons.Errors
import SCons.Util
from common import debug, \
get_output, \
is_win64, \
normalize_env, \
parse_output, \
read_reg
import SCons.Tool.MSCommon.vc
class VisualStudio:
    """
    An abstract base class for trying to find installed versions of
    Visual Studio.
    """
    def __init__(self, version, **kw):
        """Record the VS version plus arbitrary per-version attributes.

        vc_version and sdk_version default to the VS version itself when
        not supplied in the keyword arguments.
        """
        self.version = version
        kw['vc_version'] = kw.get('vc_version', version)
        kw['sdk_version'] = kw.get('sdk_version', version)
        self.__dict__.update(kw)
        self._cache = {}
    #
    def find_batch_file(self):
        """Return the full path of the vsvars batch file, or None."""
        vs_dir = self.get_vs_dir()
        if not vs_dir:
            # BUG FIX: the debug message named find_executable() here.
            debug('find_batch_file(): no vs_dir')
            return None
        batch_file = os.path.join(vs_dir, self.batch_file_path)
        batch_file = os.path.normpath(batch_file)
        if not os.path.isfile(batch_file):
            debug('find_batch_file(): %s not on file system' % batch_file)
            return None
        return batch_file
    def find_vs_dir_by_vc(self):
        """Derive the VS root from the installed VC of matching version."""
        SCons.Tool.MSCommon.vc.get_installed_vcs()
        ivc = SCons.Tool.MSCommon.vc.InstalledVCMap.get(self.vc_version)
        if not ivc:
            debug('find_vs_dir(): no installed VC %s' % self.vc_version)
            return None
        # The VS root is the VC dir with the VC subdirectory stripped off.
        return ivc.get_vc_dir()[:-len(ivc.vc_subdir)]
    def find_vs_dir_by_reg(self):
        """Look the VS product directory up in the Windows registry."""
        root = 'Software\\'
        if is_win64():
            root = root + 'Wow6432Node\\'
        for key in self.hkeys:
            if key == 'use_dir':
                # Sentinel meaning "derive from the VC location instead".
                return self.find_vs_dir_by_vc()
            key = root + key
            try:
                comps = read_reg(key)
            except WindowsError:
                # BUG FIX: was "except WindowsError, e:" -- Python-2-only
                # syntax and the bound exception was never used.
                debug('find_vs_dir_by_reg(): no VS registry key %s' % repr(key))
            else:
                debug('find_vs_dir_by_reg(): found VS in registry: %s' % comps)
                return comps
        return None
    def find_vs_dir(self):
        """Find the Visual Studio install directory.

        Only the registry lookup is used; it can itself fall back to the
        VC location via the 'use_dir' sentinel key.  (An unreachable
        `else` branch behind `if True:` was removed.)
        """
        return self.find_vs_dir_by_reg()
    def find_executable(self):
        """Return the full path of the IDE executable, or None."""
        vs_dir = self.get_vs_dir()
        if not vs_dir:
            debug('find_executable(): no vs_dir (%s)' % vs_dir)
            return None
        executable = os.path.join(vs_dir, self.executable_path)
        executable = os.path.normpath(executable)
        if not os.path.isfile(executable):
            debug('find_executable(): %s not on file system' % executable)
            return None
        return executable
    # Cached accessors: each find_* result is memoized in self._cache.
    def get_batch_file(self):
        try:
            return self._cache['batch_file']
        except KeyError:
            batch_file = self.find_batch_file()
            self._cache['batch_file'] = batch_file
            return batch_file
    def get_executable(self):
        try:
            debug('get_executable using cache:%s' % self._cache['executable'])
            return self._cache['executable']
        except KeyError:
            executable = self.find_executable()
            self._cache['executable'] = executable
            debug('get_executable not in cache:%s' % executable)
            return executable
    def get_vs_dir(self):
        try:
            return self._cache['vs_dir']
        except KeyError:
            vs_dir = self.find_vs_dir()
            self._cache['vs_dir'] = vs_dir
            return vs_dir
    def get_supported_arch(self):
        try:
            return self._cache['supported_arch']
        except KeyError:
            # RDEVE: for the time being use hardcoded lists
            # supported_arch = self.find_supported_arch()
            self._cache['supported_arch'] = self.supported_arch
            return self.supported_arch
    def reset(self):
        """Drop all memoized lookups."""
        self._cache = {}
# The list of supported Visual Studio versions we know how to detect.
#
# How to look for .bat file ?
# - VS 2008 Express (x86):
# * from registry key productdir, gives the full path to vsvarsall.bat. In
# HKEY_LOCAL_MACHINE):
# Software\Microsoft\VCExpress\9.0\Setup\VC\productdir
# * from environment variable VS90COMNTOOLS: the path is then ..\..\VC
# relative to the path given by the variable.
#
# - VS 2008 Express (WoW6432: 32 bits on windows x64):
# Software\Wow6432Node\Microsoft\VCExpress\9.0\Setup\VC\productdir
#
# - VS 2005 Express (x86):
# * from registry key productdir, gives the full path to vsvarsall.bat. In
# HKEY_LOCAL_MACHINE):
# Software\Microsoft\VCExpress\8.0\Setup\VC\productdir
# * from environment variable VS80COMNTOOLS: the path is then ..\..\VC
# relative to the path given by the variable.
#
# - VS 2005 Express (WoW6432: 32 bits on windows x64): does not seem to have a
# productdir ?
#
# - VS 2003 .Net (pro edition ? x86):
# * from registry key productdir. The path is then ..\Common7\Tools\
# relative to the key. The key is in HKEY_LOCAL_MACHINE):
# Software\Microsoft\VisualStudio\7.1\Setup\VC\productdir
# * from environment variable VS71COMNTOOLS: the path is the full path to
# vsvars32.bat
#
# - VS 98 (VS 6):
# * from registry key productdir. The path is then Bin
# relative to the key. The key is in HKEY_LOCAL_MACHINE):
# Software\Microsoft\VisualStudio\6.0\Setup\VC98\productdir
#
# The first version found in the list is the one used by default if
# there are multiple versions installed. Barring good reasons to
# the contrary, this means we should list versions from most recent
# to oldest. Pro versions get listed before Express versions on the
# assumption that, by default, you'd rather use the version you paid
# good money for in preference to whatever Microsoft makes available
# for free.
#
# If you update this list, update the documentation in Tool/msvs.xml.
# Table of every Visual Studio release this module knows how to detect.
# Order matters: the first entry found installed wins, so newest-first,
# pro-before-Express (see the comment block above).
SupportedVSList = [
    # Visual Studio 2010
    # TODO: find the settings, perhaps from someone with a CTP copy?
    #VisualStudio('TBD',
    #             hkey_root=r'TBD',
    #             common_tools_var='TBD',
    #             executable_path=r'TBD',
    #             default_dirname='TBD',
    #),
    # Visual Studio 2008
    # The batch file we look for is in the VC directory,
    # so the devenv.com executable is up in ..\..\Common7\IDE.
    VisualStudio('9.0',
                 sdk_version='6.1',
                 hkeys=[r'Microsoft\VisualStudio\9.0\Setup\VS\ProductDir'],
                 common_tools_var='VS90COMNTOOLS',
                 executable_path=r'Common7\IDE\devenv.com',
                 batch_file_path=r'Common7\Tools\vsvars32.bat',
                 default_dirname='Microsoft Visual Studio 9',
                 supported_arch=['x86', 'amd64'],
    ),
    # Visual C++ 2008 Express Edition
    # The batch file we look for is in the VC directory,
    # so the VCExpress.exe executable is up in ..\..\Common7\IDE.
    VisualStudio('9.0Exp',
                 vc_version='9.0',
                 sdk_version='6.1',
                 hkeys=[r'Microsoft\VCExpress\9.0\Setup\VS\ProductDir'],
                 common_tools_var='VS90COMNTOOLS',
                 executable_path=r'Common7\IDE\VCExpress.exe',
                 batch_file_path=r'Common7\Tools\vsvars32.bat',
                 default_dirname='Microsoft Visual Studio 9',
                 supported_arch=['x86'],
    ),
    # Visual Studio 2005
    # The batch file we look for is in the VC directory,
    # so the devenv.com executable is up in ..\..\Common7\IDE.
    VisualStudio('8.0',
                 sdk_version='6.0A',
                 hkeys=[r'Microsoft\VisualStudio\8.0\Setup\VS\ProductDir'],
                 common_tools_var='VS80COMNTOOLS',
                 executable_path=r'Common7\IDE\devenv.com',
                 batch_file_path=r'Common7\Tools\vsvars32.bat',
                 default_dirname='Microsoft Visual Studio 8',
                 supported_arch=['x86', 'amd64'],
    ),
    # Visual C++ 2005 Express Edition
    # The batch file we look for is in the VC directory,
    # so the VCExpress.exe executable is up in ..\..\Common7\IDE.
    VisualStudio('8.0Exp',
                 vc_version='8.0',
                 sdk_version='6.0A',
                 hkeys=[r'Microsoft\VCExpress\8.0\Setup\VS\ProductDir'],
                 common_tools_var='VS80COMNTOOLS',
                 executable_path=r'Common7\IDE\VCExpress.exe',
                 batch_file_path=r'Common7\Tools\vsvars32.bat',
                 default_dirname='Microsoft Visual Studio 8',
                 supported_arch=['x86'],
    ),
    # Visual Studio .NET 2003
    # The batch file we look for is in the Common7\Tools directory,
    # so the devenv.com executable is next door in ..\IDE.
    VisualStudio('7.1',
                 sdk_version='6.0',
                 hkeys=[r'Microsoft\VisualStudio\7.1\Setup\VS\ProductDir'],
                 common_tools_var='VS71COMNTOOLS',
                 executable_path=r'Common7\IDE\devenv.com',
                 batch_file_path=r'Common7\Tools\vsvars32.bat',
                 default_dirname='Microsoft Visual Studio .NET 2003',
                 supported_arch=['x86'],
    ),
    # Visual Studio .NET
    # The batch file we look for is in the Common7\Tools directory,
    # so the devenv.com executable is next door in ..\IDE.
    VisualStudio('7.0',
                 sdk_version='2003R2',
                 hkeys=[r'Microsoft\VisualStudio\7.0\Setup\VS\ProductDir'],
                 common_tools_var='VS70COMNTOOLS',
                 executable_path=r'IDE\devenv.com',
                 batch_file_path=r'Common7\Tools\vsvars32.bat',
                 default_dirname='Microsoft Visual Studio .NET',
                 supported_arch=['x86'],
    ),
    # Visual Studio 6.0
    VisualStudio('6.0',
                 sdk_version='2003R1',
                 hkeys=[r'Microsoft\VisualStudio\6.0\Setup\Microsoft Visual Studio\ProductDir',
                        'use_dir'],
                 common_tools_var='VS60COMNTOOLS',
                 executable_path=r'Common\MSDev98\Bin\MSDEV.COM',
                 batch_file_path=r'Common7\Tools\vsvars32.bat',
                 default_dirname='Microsoft Visual Studio',
                 supported_arch=['x86'],
    ),
]
# Version-string -> VisualStudio lookup over the table above.
SupportedVSMap = {}
for vs in SupportedVSList:
    SupportedVSMap[vs.version] = vs
# Finding installed versions of Visual Studio isn't cheap, because it
# goes not only to the registry but also to the disk to sanity-check
# that there is, in fact, a Visual Studio directory there and that the
# registry entry isn't just stale. Find this information once, when
# requested, and cache it.
# None means "not probed yet"; get_installed_visual_studios() fills these.
InstalledVSList = None
InstalledVSMap = None
def get_installed_visual_studios():
    """Return the list of supported Visual Studio versions that are
    actually installed, probing (and caching) on the first call.
    """
    global InstalledVSList
    global InstalledVSMap
    # Cached from an earlier probe: hand it straight back.
    if InstalledVSList is not None:
        return InstalledVSList
    found = []
    found_map = {}
    for vs in SupportedVSList:
        debug('trying to find VS %s' % vs.version)
        if vs.get_executable():
            debug('found VS %s' % vs.version)
            found.append(vs)
            found_map[vs.version] = vs
    InstalledVSList = found
    InstalledVSMap = found_map
    return InstalledVSList
def reset_installed_visual_studios():
    """Throw away every cached Visual Studio detection result so the
    next lookup re-probes the registry and disk from scratch.
    """
    global InstalledVSList, InstalledVSMap
    InstalledVSList = None
    InstalledVSMap = None
    for vs in SupportedVSList:
        vs.reset()
    # Need to clear installed VC's as well as they are used in finding
    # installed VS's
    SCons.Tool.MSCommon.vc.reset_installed_vcs()
# We may be asked to update multiple construction environments with
# SDK information. When doing this, we check on-disk for whether
# the SDK has 'mfc' and 'atl' subdirectories. Since going to disk
# is expensive, cache results by directory.
#SDKEnvironmentUpdates = {}
#
#def set_sdk_by_directory(env, sdk_dir):
# global SDKEnvironmentUpdates
# try:
# env_tuple_list = SDKEnvironmentUpdates[sdk_dir]
# except KeyError:
# env_tuple_list = []
# SDKEnvironmentUpdates[sdk_dir] = env_tuple_list
#
# include_path = os.path.join(sdk_dir, 'include')
# mfc_path = os.path.join(include_path, 'mfc')
# atl_path = os.path.join(include_path, 'atl')
#
# if os.path.exists(mfc_path):
# env_tuple_list.append(('INCLUDE', mfc_path))
# if os.path.exists(atl_path):
# env_tuple_list.append(('INCLUDE', atl_path))
# env_tuple_list.append(('INCLUDE', include_path))
#
# env_tuple_list.append(('LIB', os.path.join(sdk_dir, 'lib')))
# env_tuple_list.append(('LIBPATH', os.path.join(sdk_dir, 'lib')))
# env_tuple_list.append(('PATH', os.path.join(sdk_dir, 'bin')))
#
# for variable, directory in env_tuple_list:
# env.PrependENVPath(variable, directory)
def msvs_exists():
    """Return True if at least one supported Visual Studio is installed."""
    installed = get_installed_visual_studios()
    return len(installed) > 0
def get_vs_by_version(msvs):
    """Return the installed VisualStudio object for version string *msvs*.

    Raises SCons.Errors.UserError when *msvs* is not a supported version
    at all; returns None when it is supported but not installed.
    """
    global InstalledVSMap
    global SupportedVSMap
    # 'dict.has_key' and the comma form of raise are Python-2-only; the
    # 'in' test and the call form behave identically on Python 2 as well.
    if msvs not in SupportedVSMap:
        msg = "Visual Studio version %s is not supported" % repr(msvs)
        raise SCons.Errors.UserError(msg)
    # Called for its side effect of populating InstalledVSMap.
    get_installed_visual_studios()
    vs = InstalledVSMap.get(msvs)
    debug('InstalledVSMap:%s' % InstalledVSMap)
    # Some check like this would let us provide a useful error message
    # if they try to set a Visual Studio version that's not installed.
    # However, we also want to be able to run tests (like the unit
    # tests) on systems that don't, or won't ever, have it installed.
    # It might be worth resurrecting this, with some configurable
    # setting that the tests can use to bypass the check.
    #if not vs:
    #    msg = "Visual Studio version %s is not installed" % repr(msvs)
    #    raise SCons.Errors.UserError(msg)
    return vs
def get_default_version(env):
    """Returns the default version string to use for MSVS.
    If no version was requested by the user through the MSVS environment
    variable, query all the available the visual studios through
    query_versions, and take the highest one.
    Return
    ------
    version: str
        the default version.
    """
    if 'MSVS' not in env or not SCons.Util.is_Dict(env['MSVS']):
        # Installed list is already ordered newest-first (SupportedVSList
        # order), so index 0 below is the highest installed version.
        versions = [vs.version for vs in get_installed_visual_studios()]
        env['MSVS'] = {'VERSIONS' : versions}
    else:
        versions = env['MSVS'].get('VERSIONS', [])
    if 'MSVS_VERSION' not in env:
        if versions:
            env['MSVS_VERSION'] = versions[0] #use highest version by default
        else:
            # Nothing installed: fall back to the newest supported version.
            env['MSVS_VERSION'] = SupportedVSList[0].version
    env['MSVS']['VERSION'] = env['MSVS_VERSION']
    return env['MSVS_VERSION']
def get_default_arch(env):
    """Return the default arch to use for MSVS
    if no version was requested by the user through the MSVS_ARCH environment
    variable, select x86
    Return
    ------
    arch: str
    """
    arch = env.get('MSVS_ARCH', 'x86')
    msvs = InstalledVSMap.get(env['MSVS_VERSION'])
    if not msvs:
        # Unknown/uninstalled version: x86 is the safe universal default.
        arch = 'x86'
    elif arch not in msvs.get_supported_arch():
        fmt = "Visual Studio version %s does not support architecture %s"
        # Call-form raise works on both Python 2 and 3; the comma form
        # used previously is Python-2-only syntax.
        raise SCons.Errors.UserError(fmt % (env['MSVS_VERSION'], arch))
    return arch
def merge_default_version(env):
    """Merge the MSVC tool variables (LIB, LIBPATH, PATH, INCLUDE) from
    the default Visual Studio's batch file into env['ENV'].

    Does nothing when no Visual Studio is installed for the default
    version.
    """
    version = get_default_version(env)
    arch = get_default_arch(env)
    msvs = get_vs_by_version(version)
    if msvs is None:
        return
    batfilename = msvs.get_batch_file()
    # XXX: I think this is broken. This will silently set a bogus tool instead
    # of failing, but there is no other way with the current scons tool
    # framework
    if batfilename is not None:
        # 'vars' previously shadowed the builtin; renamed for clarity.
        wanted_vars = ('LIB', 'LIBPATH', 'PATH', 'INCLUDE')
        msvs_list = get_installed_visual_studios()
        vscommonvarnames = [vs.common_tools_var for vs in msvs_list]
        nenv = normalize_env(env['ENV'], vscommonvarnames + ['COMSPEC'])
        output = get_output(batfilename, arch, env=nenv)
        parsed_vars = parse_output(output, wanted_vars)
        for k, v in parsed_vars.items():
            env.PrependENVPath(k, v, delete_existing=1)
def query_versions():
    """Query the system to get available versions of VS. A version is
    considered when a batfile is found."""
    msvs_list = get_installed_visual_studios()
    # List comprehension replaces the old map(lambda ...) form, per the
    # file's own TODO(1.5) annotation; identical result.
    versions = [msvs.version for msvs in msvs_list]
    return versions
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import mox
from oslo.config import cfg
from oslo.serialization import jsonutils
from webob import exc
from nova.api.openstack.compute import extensions
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import block_device_mapping
from nova.api.openstack.compute.plugins.v3 import servers as servers_v3
from nova.api.openstack.compute import servers as servers_v2
from nova import block_device
from nova.compute import api as compute_api
from nova import exception
from nova import objects
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.image import fake
from nova.tests import matchers
CONF = cfg.CONF
class BlockDeviceMappingTestV21(test.TestCase):
    """Tests for the os-block-device-mapping extension on the v2.1 servers
    API: server creation requests carrying block-device-mapping data, both
    with the extension enabled and with it blacklisted.
    """
    def _setup_controller(self):
        # Build two controllers: one with all extensions loaded and one
        # with os-block-device-mapping blacklisted, so tests can exercise
        # both the enabled and disabled behaviour.
        ext_info = plugins.LoadedExtensionInfo()
        self.controller = servers_v3.ServersController(extension_info=ext_info)
        CONF.set_override('extensions_blacklist', 'os-block-device-mapping',
                          'osapi_v3')
        self.no_bdm_v2_controller = servers_v3.ServersController(
            extension_info=ext_info)
        # Restore the override so later controller builds are unaffected.
        CONF.set_override('extensions_blacklist', '', 'osapi_v3')
    def setUp(self):
        super(BlockDeviceMappingTestV21, self).setUp()
        self._setup_controller()
        fake.stub_out_image_service(self.stubs)
        # Baseline valid v2-style bdm; individual tests mutate copies of it.
        self.bdm = [{
            'no_device': None,
            'source_type': 'volume',
            'destination_type': 'volume',
            'uuid': 'fake',
            'device_name': 'vda',
            'delete_on_termination': False,
        }]
    def _get_servers_body(self, no_image=False):
        # Minimal valid server-create body; drop imageRef for boot-from-volume
        # scenarios when no_image is set.
        body = {
            'server': {
                'name': 'server_test',
                'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                'flavorRef': 'http://localhost/123/flavors/3',
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
            },
        }
        if no_image:
            del body['server']['imageRef']
        return body
    def _test_create(self, params, no_image=False, override_controller=None):
        # Merge test-specific params into the server body and POST it through
        # either the default controller or an explicitly supplied one.
        body = self._get_servers_body(no_image)
        body['server'].update(params)
        req = fakes.HTTPRequest.blank('/v2/fake/servers')
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.body = jsonutils.dumps(body)
        if override_controller:
            override_controller.create(req, body=body).obj['server']
        else:
            self.controller.create(req, body=body).obj['server']
    def test_create_instance_with_block_device_mapping_disabled(self):
        # With the extension blacklisted, the bdm param must not reach
        # compute_api.API.create at all.
        bdm = [{'device_name': 'foo'}]
        old_create = compute_api.API.create
        def create(*args, **kwargs):
            self.assertNotIn('block_device_mapping', kwargs)
            return old_create(*args, **kwargs)
        self.stubs.Set(compute_api.API, 'create', create)
        params = {block_device_mapping.ATTRIBUTE_NAME: bdm}
        self._test_create(params,
                          override_controller=self.no_bdm_v2_controller)
    def test_create_instance_with_volumes_enabled_no_image(self):
        """Test that the create will fail if there is no image
        and no bdms supplied in the request
        """
        old_create = compute_api.API.create
        def create(*args, **kwargs):
            self.assertNotIn('imageRef', kwargs)
            return old_create(*args, **kwargs)
        self.stubs.Set(compute_api.API, 'create', create)
        self.assertRaises(exc.HTTPBadRequest,
                          self._test_create, {}, no_image=True)
    def test_create_instance_with_bdms_and_no_image(self):
        # Boot-from-volume: the bdm list must be passed through to create()
        # unchanged, with bdm-derived image metadata looked up instead.
        old_create = compute_api.API.create
        def create(*args, **kwargs):
            self.assertThat(
                block_device.BlockDeviceDict(self.bdm[0]),
                matchers.DictMatches(kwargs['block_device_mapping'][0])
            )
            return old_create(*args, **kwargs)
        self.stubs.Set(compute_api.API, 'create', create)
        self.mox.StubOutWithMock(compute_api.API, '_validate_bdm')
        self.mox.StubOutWithMock(compute_api.API, '_get_bdm_image_metadata')
        compute_api.API._validate_bdm(
            mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
            mox.IgnoreArg()).AndReturn(True)
        compute_api.API._get_bdm_image_metadata(
            mox.IgnoreArg(), mox.IgnoreArg(), False).AndReturn({})
        self.mox.ReplayAll()
        params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
        self._test_create(params, no_image=True)
    def test_create_instance_with_device_name_not_string(self):
        # Non-string device_name must be rejected with 400 before create().
        self.bdm[0]['device_name'] = 123
        old_create = compute_api.API.create
        def create(*args, **kwargs):
            self.assertEqual(kwargs['block_device_mapping'], self.bdm)
            return old_create(*args, **kwargs)
        self.stubs.Set(compute_api.API, 'create', create)
        params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
        self.assertRaises(exc.HTTPBadRequest,
                          self._test_create, params, no_image=True)
    @mock.patch.object(compute_api.API, 'create')
    def test_create_instance_with_bdm_param_not_list(self, mock_create):
        # The bdm attribute must be a list; a bare string is a 400.
        self.params = {'block_device_mapping': '/dev/vdb'}
        self.assertRaises(exc.HTTPBadRequest,
                          self._test_create, self.params)
    def test_create_instance_with_device_name_empty(self):
        # Empty device_name must be rejected with 400.
        self.bdm[0]['device_name'] = ''
        old_create = compute_api.API.create
        def create(*args, **kwargs):
            self.assertEqual(kwargs['block_device_mapping'], self.bdm)
            return old_create(*args, **kwargs)
        self.stubs.Set(compute_api.API, 'create', create)
        params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
        self.assertRaises(exc.HTTPBadRequest,
                          self._test_create, params, no_image=True)
    def test_create_instance_with_device_name_too_long(self):
        # Over-long (256 char) device_name must be rejected with 400.
        self.bdm[0]['device_name'] = 'a' * 256
        old_create = compute_api.API.create
        def create(*args, **kwargs):
            self.assertEqual(kwargs['block_device_mapping'], self.bdm)
            return old_create(*args, **kwargs)
        self.stubs.Set(compute_api.API, 'create', create)
        params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
        self.assertRaises(exc.HTTPBadRequest,
                          self._test_create, params, no_image=True)
    def test_create_instance_with_space_in_device_name(self):
        # Whitespace in device_name must be rejected with 400.
        self.bdm[0]['device_name'] = 'v da'
        old_create = compute_api.API.create
        def create(*args, **kwargs):
            self.assertTrue(kwargs['legacy_bdm'])
            self.assertEqual(kwargs['block_device_mapping'], self.bdm)
            return old_create(*args, **kwargs)
        self.stubs.Set(compute_api.API, 'create', create)
        params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
        self.assertRaises(exc.HTTPBadRequest,
                          self._test_create, params, no_image=True)
    def test_create_instance_with_invalid_size(self):
        # Non-numeric volume_size must be rejected with 400.
        self.bdm[0]['volume_size'] = 'hello world'
        old_create = compute_api.API.create
        def create(*args, **kwargs):
            self.assertEqual(kwargs['block_device_mapping'], self.bdm)
            return old_create(*args, **kwargs)
        self.stubs.Set(compute_api.API, 'create', create)
        params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
        self.assertRaises(exc.HTTPBadRequest,
                          self._test_create, params, no_image=True)
    def test_create_instance_bdm(self):
        # v2-style bdm: 'uuid' must be translated to 'volume_id' and the
        # request marked as non-legacy (legacy_bdm False).
        bdm = [{
            'source_type': 'volume',
            'device_name': 'fake_dev',
            'uuid': 'fake_vol'
        }]
        bdm_expected = [{
            'source_type': 'volume',
            'device_name': 'fake_dev',
            'volume_id': 'fake_vol'
        }]
        old_create = compute_api.API.create
        def create(*args, **kwargs):
            self.assertFalse(kwargs['legacy_bdm'])
            for expected, received in zip(bdm_expected,
                                          kwargs['block_device_mapping']):
                self.assertThat(block_device.BlockDeviceDict(expected),
                                matchers.DictMatches(received))
            return old_create(*args, **kwargs)
        def _validate_bdm(*args, **kwargs):
            pass
        self.stubs.Set(compute_api.API, 'create', create)
        self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
        params = {block_device_mapping.ATTRIBUTE_NAME: bdm}
        self._test_create(params, no_image=True)
    def test_create_instance_bdm_missing_device_name(self):
        # device_name is optional; a bdm without one must still reach
        # create() with a non-None device_name slot.
        del self.bdm[0]['device_name']
        old_create = compute_api.API.create
        def create(*args, **kwargs):
            self.assertFalse(kwargs['legacy_bdm'])
            self.assertNotIn(None,
                             kwargs['block_device_mapping'][0]['device_name'])
            return old_create(*args, **kwargs)
        def _validate_bdm(*args, **kwargs):
            pass
        self.stubs.Set(compute_api.API, 'create', create)
        self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
        params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
        self._test_create(params, no_image=True)
    def test_create_instance_bdm_validation_error(self):
        # An InvalidBDMFormat raised during dict validation becomes a 400.
        def _validate(*args, **kwargs):
            raise exception.InvalidBDMFormat(details='Wrong BDM')
        self.stubs.Set(block_device.BlockDeviceDict,
                      '_validate', _validate)
        params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
        self.assertRaises(exc.HTTPBadRequest,
                          self._test_create, params, no_image=True)
    @mock.patch('nova.compute.api.API._get_bdm_image_metadata')
    def test_create_instance_non_bootable_volume_fails(self, fake_bdm_meta):
        # A non-bootable boot volume must surface as a 400.
        params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
        fake_bdm_meta.side_effect = exception.InvalidBDMVolumeNotBootable(id=1)
        self.assertRaises(exc.HTTPBadRequest, self._test_create, params,
                          no_image=True)
    def test_create_instance_bdm_api_validation_fails(self):
        # Each bdm validation failure type must: raise 400, actually run the
        # validator, and destroy the half-created instance.
        self.validation_fail_test_validate_called = False
        self.validation_fail_instance_destroy_called = False
        bdm_exceptions = ((exception.InvalidBDMSnapshot, {'id': 'fake'}),
                          (exception.InvalidBDMVolume, {'id': 'fake'}),
                          (exception.InvalidBDMImage, {'id': 'fake'}),
                          (exception.InvalidBDMBootSequence, {}),
                          (exception.InvalidBDMLocalsLimit, {}))
        ex_iter = iter(bdm_exceptions)
        def _validate_bdm(*args, **kwargs):
            self.validation_fail_test_validate_called = True
            ex, kargs = ex_iter.next()
            raise ex(**kargs)
        def _instance_destroy(*args, **kwargs):
            self.validation_fail_instance_destroy_called = True
        self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
        self.stubs.Set(objects.Instance, 'destroy', _instance_destroy)
        for _unused in xrange(len(bdm_exceptions)):
            params = {block_device_mapping.ATTRIBUTE_NAME:
                      [self.bdm[0].copy()]}
            self.assertRaises(exc.HTTPBadRequest,
                              self._test_create, params)
            self.assertTrue(self.validation_fail_test_validate_called)
            self.assertTrue(self.validation_fail_instance_destroy_called)
            # Reset the flags so the next exception type is checked cleanly.
            self.validation_fail_test_validate_called = False
            self.validation_fail_instance_destroy_called = False
class BlockDeviceMappingTestV2(BlockDeviceMappingTestV21):
    """Re-runs the v2.1 block-device-mapping tests against the legacy v2
    servers controller, which is configured via an ExtensionManager rather
    than LoadedExtensionInfo.
    """
    def _setup_controller(self):
        # 'with extension' controller: both os-volumes and the v2 boot
        # extension present.
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {'os-volumes': 'fake',
                                   'os-block-device-mapping-v2-boot': 'fake'}
        self.controller = servers_v2.Controller(self.ext_mgr)
        # 'without extension' controller: only os-volumes.
        self.ext_mgr_bdm_v2 = extensions.ExtensionManager()
        self.ext_mgr_bdm_v2.extensions = {'os-volumes': 'fake'}
        self.no_bdm_v2_controller = servers_v2.Controller(
            self.ext_mgr_bdm_v2)
    def test_create_instance_with_block_device_mapping_disabled(self):
        # v2 passes block_device_mapping=None (rather than omitting the key)
        # when the extension is disabled.
        bdm = [{'device_name': 'foo'}]
        old_create = compute_api.API.create
        def create(*args, **kwargs):
            # assertIsNone takes (obj, msg=None); the previous extra None
            # argument was being passed as the failure *message*.
            self.assertIsNone(kwargs['block_device_mapping'])
            return old_create(*args, **kwargs)
        self.stubs.Set(compute_api.API, 'create', create)
        params = {block_device_mapping.ATTRIBUTE_NAME: bdm}
        self._test_create(params,
                          override_controller=self.no_bdm_v2_controller)
|
|
import numpy as np
import _correlate
import numpy.fft as dft
import iraf_frame
# Integer codes for the output-size modes shared by correlate()/convolve():
#   VALID - only positions where the kernel fully overlaps the data
#   SAME  - output the same length as the input
#   FULL  - every position with any overlap at all
#   PASS  - no boundary padding; correlate the raw data as-is
VALID = 0
SAME = 1
FULL = 2
PASS = 3
# String aliases accepted by correlate()/convolve() for the codes above.
convolution_modes = {
    "valid":0,
    "same":1,
    "full":2,
    "pass":3,
}
def _condition_inputs(data, kernel):
    """Coerce *data* and *kernel* to 1-D arrays and return (data, kernel)
    with the longer operand first.

    Scalars are promoted to shape (1,); anything with more than one
    dimension raises ValueError.
    """
    data, kernel = np.asarray(data), np.asarray(kernel)
    # np.rank() was removed from numpy (use ndim instead); reshaping a 0-d
    # array to (1,) is always legal since the sizes match.
    if data.ndim == 0:
        data.shape = (1,)
    if kernel.ndim == 0:
        kernel.shape = (1,)
    if data.ndim > 1 or kernel.ndim > 1:
        raise ValueError("arrays must be 1D")
    # The correlation kernels must fit in the data, so swap if needed.
    if len(data) < len(kernel):
        data, kernel = kernel, data
    return data, kernel
def correlate(data, kernel, mode=FULL):
    """correlate(data, kernel, mode=FULL)
    >>> correlate(np.arange(8), [1, 2], mode=VALID)
    array([ 2, 5, 8, 11, 14, 17, 20])
    >>> correlate(np.arange(8), [1, 2], mode=SAME)
    array([ 0, 2, 5, 8, 11, 14, 17, 20])
    >>> correlate(np.arange(8), [1, 2], mode=FULL)
    array([ 0, 2, 5, 8, 11, 14, 17, 20, 7])
    >>> correlate(np.arange(8), [1, 2, 3], mode=VALID)
    array([ 8, 14, 20, 26, 32, 38])
    >>> correlate(np.arange(8), [1, 2, 3], mode=SAME)
    array([ 3, 8, 14, 20, 26, 32, 38, 20])
    >>> correlate(np.arange(8), [1, 2, 3], mode=FULL)
    array([ 0, 3, 8, 14, 20, 26, 32, 38, 20, 7])
    >>> correlate(np.arange(8), [1, 2, 3, 4, 5, 6], mode=VALID)
    array([ 70, 91, 112])
    >>> correlate(np.arange(8), [1, 2, 3, 4, 5, 6], mode=SAME)
    array([ 17, 32, 50, 70, 91, 112, 85, 60])
    >>> correlate(np.arange(8), [1, 2, 3, 4, 5, 6], mode=FULL)
    array([ 0, 6, 17, 32, 50, 70, 91, 112, 85, 60, 38, 20, 7])
    >>> correlate(np.arange(8), 1+1j)
    Traceback (most recent call last):
    ...
    TypeError: array cannot be safely cast to required type
    """
    data, kernel = _condition_inputs(data, kernel)
    lenk = len(kernel)
    halfk = int(lenk/2)
    even = (lenk % 2 == 0)
    # Zero padding appended to both ends of 'data' for the windowed modes.
    kdata = [0] * lenk
    # Accept a mode by name as well as by integer code; testing membership
    # on the dict directly avoids building a key list on every call.
    if mode in convolution_modes:
        mode = convolution_modes[mode]
    # NOTE(review): this ranks dtypes by comparing their *names* as strings;
    # it happens to work for the numeric types this module handles but is
    # not a general promotion rule -- confirm before extending.
    result_type = max(kernel.dtype.name, data.dtype.name)
    if mode == PASS:
        # No padding at all: correlate the raw data.
        result = data.astype(result_type)
        _correlate.Correlate1d(kernel, data, result)
        return result
    if mode not in (VALID, SAME, FULL):
        raise ValueError("Invalid convolution mode.")
    # VALID, SAME and FULL share the same padded correlation and differ
    # only in how the padded result is trimmed.
    wdata = np.concatenate((kdata, data, kdata))
    result = wdata.astype(result_type)
    _correlate.Correlate1d(kernel, wdata, result)
    if mode == VALID:
        return result[lenk+halfk:-lenk-halfk+even]
    elif mode == SAME:
        return result[lenk:-lenk]
    else:  # FULL
        return result[halfk+1:-halfk-1+even]
# Correlation and cross-correlation are the same operation here.
cross_correlate = correlate
# String -> integer codes for the 2-D boundary-handling modes understood
# by the _correlate C extension (see the 2-D function docstrings).
pix_modes = {
    "nearest" : 0,
    "reflect": 1,
    "wrap" : 2,
    "constant": 3
}
def convolve(data, kernel, mode=FULL):
    """convolve(data, kernel, mode=FULL)
    Returns the discrete, linear convolution of 1-D
    sequences a and v; mode can be 0 (VALID), 1 (SAME), or 2 (FULL)
    to specify size of the resulting sequence.
    >>> convolve(np.arange(8), [1, 2], mode=VALID)
    array([ 1, 4, 7, 10, 13, 16, 19])
    >>> convolve(np.arange(8), [1, 2], mode=SAME)
    array([ 0, 1, 4, 7, 10, 13, 16, 19])
    >>> convolve(np.arange(8), [1, 2], mode=FULL)
    array([ 0, 1, 4, 7, 10, 13, 16, 19, 14])
    >>> convolve(np.arange(8), [1, 2, 3], mode=VALID)
    array([ 4, 10, 16, 22, 28, 34])
    >>> convolve(np.arange(8), [1, 2, 3], mode=SAME)
    array([ 1, 4, 10, 16, 22, 28, 34, 32])
    >>> convolve(np.arange(8), [1, 2, 3], mode=FULL)
    array([ 0, 1, 4, 10, 16, 22, 28, 34, 32, 21])
    >>> convolve(np.arange(8), [1, 2, 3, 4, 5, 6], mode=VALID)
    array([35, 56, 77])
    >>> convolve(np.arange(8), [1, 2, 3, 4, 5, 6], mode=SAME)
    array([ 4, 10, 20, 35, 56, 77, 90, 94])
    >>> convolve(np.arange(8), [1, 2, 3, 4, 5, 6], mode=FULL)
    array([ 0, 1, 4, 10, 20, 35, 56, 77, 90, 94, 88, 71, 42])
    >>> convolve([1.,2.], np.arange(10.))
    array([ 0., 1., 4., 7., 10., 13., 16., 19., 22., 25., 18.])
    """
    # Convolution is correlation with a reversed kernel.
    data, kernel = _condition_inputs(data, kernel)
    # _condition_inputs already swaps so len(data) >= len(kernel); the else
    # branch is a belt-and-braces fallback.
    if len(data) >= len(kernel):
        return correlate(data, kernel[::-1], mode)
    else:
        return correlate(kernel, data[::-1], mode)
def _gaussian(sigma, mew, npoints, sigmas):
    """Return (x, y) samples of a normalized Gaussian with standard
    deviation *sigma* and mean *mew*, sampled at *npoints* points over
    the interval mew +/- sigmas*sigma.
    """
    # numpy.arange takes 'dtype', not the old numarray 'type' keyword.
    # 2.0 forces true division of the step even for integer arguments.
    ox = np.arange(mew-sigmas*sigma,
                   mew+sigmas*sigma,
                   2.0*sigmas*sigma/npoints, dtype=np.float64)
    x = ox-mew
    x /= sigma
    x = x * x
    # Exponent is -((x-mew)/sigma)**2 / 2.  Using -0.5 (not -1/2) avoids
    # Python 2 integer division, where -1/2 == -1 and the factor was lost.
    x *= -0.5
    x = np.exp(x)
    return ox, 1/(sigma * np.sqrt(2*np.pi)) * x
def _correlate2d_fft(data0, kernel0, output=None, mode="nearest", cval=0.0):
    """_correlate2d_fft does 2d correlation of 'data' with 'kernel', storing
    the result in 'output' using the FFT to perform the correlation.
    supported 'mode's include:
        'nearest'   elements beyond boundary come from nearest edge pixel.
        'wrap'      elements beyond boundary come from the opposite array edge.
        'reflect'   elements beyond boundary come from reflection on same array edge.
        'constant'  elements beyond boundary are set to 'cval'
    """
    shape = data0.shape
    kshape = kernel0.shape
    # Pad both operands to data+kernel size so the circular FFT convolution
    # reproduces a linear one.
    oversized = (np.array(shape) + np.array(kshape))
    dy = kshape[0] // 2
    dx = kshape[1] // 2
    kernel = np.zeros(oversized, dtype=np.float64)
    kernel[:kshape[0], :kshape[1]] = kernel0[::-1,::-1] # convolution <-> correlation
    # iraf_frame.frame extends the data according to the boundary 'mode'.
    data = iraf_frame.frame(data0, oversized, mode=mode, cval=cval)
    # NOTE(review): 'data' and 'kernel' are ndarrays, so these isinstance
    # checks against the *scalar* type np.complexfloating are always False;
    # a dtype test (e.g. data.dtype.kind == 'c') was probably intended.
    complex_result = (isinstance(data, np.complexfloating) or
                      isinstance(kernel, np.complexfloating))
    Fdata = dft.fft2(data)
    del data
    Fkernel = dft.fft2(kernel)
    del kernel
    # Pointwise product in frequency space, written back into Fdata in place.
    np.multiply(Fdata, Fkernel, Fdata)
    del Fkernel
    # NOTE(review): both branches are identical; the complex branch was
    # presumably meant to use a full complex inverse FFT -- confirm.
    if complex_result:
        convolved = dft.irfft2( Fdata, s=oversized)
    else:
        convolved = dft.irfft2( Fdata, s=oversized)
    # Trim the padding back off to return a result the size of data0.
    result = convolved[ kshape[0]-1:shape[0]+kshape[0]-1, kshape[1]-1:shape[1]+kshape[1]-1 ]
    if output is not None:
        output._copyFrom( result )
    else:
        return result
def _correlate2d_naive(data, kernel, output=None, mode="nearest", cval=0.0):
    # Direct (non-FFT) 2-D correlation performed entirely in the C
    # extension; the string boundary mode is translated to its integer code.
    return _correlate.Correlate2d(kernel, data, output, pix_modes[mode], cval)
def _fix_data_kernel(data, kernel):
    """The _correlate.Correlate2d C-code can only handle kernels which
    fit inside the data array.  Since convolution and correlation are
    commutative, _fix_data_kernel reverses kernel and data if necessary.

    Both operands are coerced to 2-D arrays; scalars become (1, 1) and
    1-D arrays become single-row (1, n) arrays.
    """
    data, kernel = np.asarray(data), np.asarray(kernel)
    # np.rank() was removed from numpy; ndim is the equivalent attribute.
    if data.ndim == 0:
        data.shape = (1, 1)
    elif data.ndim == 1:
        data.shape = (1,) + data.shape
    if kernel.ndim == 0:
        kernel.shape = (1, 1)
    elif kernel.ndim == 1:
        kernel.shape = (1,) + kernel.shape
    # Swap only when the kernel is strictly larger in *both* dimensions.
    # NOTE(review): mixed sizes (larger in one dim, smaller in the other)
    # pass through unchanged -- no error is raised for them.
    if (kernel.shape[0] > data.shape[0] and
        kernel.shape[1] > data.shape[1]):
        kernel, data = data, kernel
    return data, kernel
def correlate2d(data, kernel, output=None, mode="nearest", cval=0.0, fft=0):
    """correlate2d does 2d correlation of 'data' with 'kernel', storing
    the result in 'output'.
    supported 'mode's include:
        'nearest'   elements beyond boundary come from nearest edge pixel.
        'wrap'      elements beyond boundary come from the opposite array edge.
        'reflect'   elements beyond boundary come from reflection on same array edge.
        'constant'  elements beyond boundary are set to 'cval'
    If fft is True,  the correlation is performed using the FFT, else the
    correlation is performed using the naive approach.
    >>> a = np.arange(20*20)
    >>> a = a.reshape((20,20))
    >>> b = np.ones((5,5), dtype=np.float64)
    >>> rn = correlate2d(a, b, fft=0)
    >>> rf = correlate2d(a, b, fft=1)
    >>> np.alltrue(np.ravel(rn-rf<1e-10))
    True
    """
    # Normalize shapes / operand order for the C code, then dispatch to the
    # FFT or direct implementation.
    data, kernel = _fix_data_kernel(data, kernel)
    if fft:
        return _correlate2d_fft(data, kernel, output, mode, cval)
    else:
        a = _correlate2d_naive(data, kernel, output, mode, cval)
        #a = a.byteswap()
        return a
def convolve2d(data, kernel, output=None, mode="nearest", cval=0.0, fft=0):
    """convolve2d does 2d convolution of 'data' with 'kernel', storing
    the result in 'output'.
    supported 'mode's include:
        'nearest'   elements beyond boundary come from nearest edge pixel.
        'wrap'      elements beyond boundary come from the opposite array edge.
        'reflect'   elements beyond boundary come from reflection on same array edge.
        'constant'  elements beyond boundary are set to 'cval'
    >>> a = np.arange(20*20)
    >>> a = a.reshape((20,20))
    >>> b = np.ones((5,5), dtype=np.float64)
    >>> rn = convolve2d(a, b, fft=0)
    >>> rf = convolve2d(a, b, fft=1)
    >>> np.alltrue(np.ravel(rn-rf<1e-10))
    True
    """
    data, kernel = _fix_data_kernel(data, kernel)
    # Convolution is correlation with the kernel flipped in both axes.
    # (_correlate2d_fft flips again internally, so it receives the
    # doubly-flipped -- i.e. original -- kernel.)
    kernel = kernel[::-1,::-1] # convolution -> correlation
    if fft:
        return _correlate2d_fft(data, kernel, output, mode, cval)
    else:
        return _correlate2d_naive(data, kernel, output, mode, cval)
def _boxcar(data, output, boxshape, mode, cval):
    """Dispatch a single 1-D or 2-D boxcar filter to the C extension."""
    ndim = len(boxshape)
    if ndim == 1:
        # Present the 1-D case to the C code as a 1 x N two-dimensional
        # boxcar over a single-row view of the data.
        _correlate.Boxcar2d(data[np.newaxis,...], 1, boxshape[0],
                            output[np.newaxis,...], mode, cval)
    elif ndim == 2:
        _correlate.Boxcar2d(data, boxshape[0], boxshape[1], output, mode, cval)
    else:
        raise ValueError("boxshape must be a 1D or 2D shape.")
def boxcar(data, boxshape, output=None, mode="nearest", cval=0.0):
    """boxcar computes a 1D or 2D boxcar filter on every 1D or 2D subarray of data.
    'boxshape' is a tuple of integers specifying the dimensions of the filter: e.g. (3,3)
    if 'output' is specified, it should be the same shape as 'data' and
    None will be returned.
    supported 'mode's include:
        'nearest'   elements beyond boundary come from nearest edge pixel.
        'wrap'      elements beyond boundary come from the opposite array edge.
        'reflect'   elements beyond boundary come from reflection on same array edge.
        'constant'  elements beyond boundary are set to 'cval'
    >>> boxcar(np.array([10, 0, 0, 0, 0, 0, 1000]), (3,), mode="nearest").astype(np.longlong)
    array([ 6, 3, 0, 0, 0, 333, 666], dtype=int64)
    >>> boxcar(np.array([10, 0, 0, 0, 0, 0, 1000]), (3,), mode="wrap").astype(np.longlong)
    array([336, 3, 0, 0, 0, 333, 336], dtype=int64)
    >>> boxcar(np.array([10, 0, 0, 0, 0, 0, 1000]), (3,), mode="reflect").astype(np.longlong)
    array([ 6, 3, 0, 0, 0, 333, 666], dtype=int64)
    >>> boxcar(np.array([10, 0, 0, 0, 0, 0, 1000]), (3,), mode="constant").astype(np.longlong)
    array([ 3, 3, 0, 0, 0, 333, 333], dtype=int64)
    >>> a = np.zeros((10,10))
    >>> a[0,0] = 100
    >>> a[5,5] = 1000
    >>> a[9,9] = 10000
    >>> boxcar(a, (3,3)).astype(np.longlong)
    array([[ 44, 22, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 22, 11, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 0, 0, 0, 0, 111, 111, 111, 0, 0, 0],
           [ 0, 0, 0, 0, 111, 111, 111, 0, 0, 0],
           [ 0, 0, 0, 0, 111, 111, 111, 0, 0, 0],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 1111, 2222],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 2222, 4444]], dtype=int64)
    >>> boxcar(a, (3,3), mode="wrap").astype(np.longlong)
    array([[1122, 11, 0, 0, 0, 0, 0, 0, 1111, 1122],
           [ 11, 11, 0, 0, 0, 0, 0, 0, 0, 11],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 0, 0, 0, 0, 111, 111, 111, 0, 0, 0],
           [ 0, 0, 0, 0, 111, 111, 111, 0, 0, 0],
           [ 0, 0, 0, 0, 111, 111, 111, 0, 0, 0],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [1111, 0, 0, 0, 0, 0, 0, 0, 1111, 1111],
           [1122, 11, 0, 0, 0, 0, 0, 0, 1111, 1122]], dtype=int64)
    >>> boxcar(a, (3,3), mode="reflect").astype(np.longlong)
    array([[ 44, 22, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 22, 11, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 0, 0, 0, 0, 111, 111, 111, 0, 0, 0],
           [ 0, 0, 0, 0, 111, 111, 111, 0, 0, 0],
           [ 0, 0, 0, 0, 111, 111, 111, 0, 0, 0],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 1111, 2222],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 2222, 4444]], dtype=int64)
    >>> boxcar(a, (3,3), mode="constant").astype(np.longlong)
    array([[ 11, 11, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 11, 11, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 0, 0, 0, 0, 111, 111, 111, 0, 0, 0],
           [ 0, 0, 0, 0, 111, 111, 111, 0, 0, 0],
           [ 0, 0, 0, 0, 111, 111, 111, 0, 0, 0],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 1111, 1111],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 1111, 1111]], dtype=int64)
    >>> a = np.zeros((10,10))
    >>> a[3:6,3:6] = 111
    >>> boxcar(a, (3,3)).astype(np.longlong)
    array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 0, 0, 12, 24, 37, 24, 12, 0, 0, 0],
           [ 0, 0, 24, 49, 74, 49, 24, 0, 0, 0],
           [ 0, 0, 37, 74, 111, 74, 37, 0, 0, 0],
           [ 0, 0, 24, 49, 74, 49, 24, 0, 0, 0],
           [ 0, 0, 12, 24, 37, 24, 12, 0, 0, 0],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int64)
    """
    # Translate the boundary-mode name to the C extension's integer code.
    mode = pix_modes[ mode ]
    # When the caller gave no output array, accumulate into a float64 copy
    # that will be returned; otherwise fill the caller's array in place.
    if output is None:
        woutput = data.astype(np.float64)
    else:
        woutput = output
    # Apply _boxcar to every trailing len(boxshape)-dimensional subarray.
    _fbroadcast(_boxcar, len(boxshape), data.shape,
                (data, woutput), (boxshape, mode, cval))
    if output is None:
        return woutput
def _fbroadcast(f, N, shape, args, params=()):
"""_fbroadcast(f, N, args, shape, params=()) calls 'f' for each of the
'N'-dimensional inner subnumarray of 'args'. Each subarray has
.shape == 'shape'[-N:]. There are a total of product(shape[:-N],axis=0)
calls to 'f'.
"""
if len(shape) == N:
apply(f, tuple(args)+params)
else:
for i in range(shape[0]):
_fbroadcast(f, N, shape[1:], [x[i] for x in args], params)
def test():
    """Run this module's doctests and return the (failures, attempts) result."""
    import doctest
    import Convolve
    return doctest.testmod(Convolve)
if __name__ == "__main__":
    # print() call syntax: 'print test()' is a SyntaxError on Python 3,
    # while print(test()) behaves identically on Python 2 and 3.
    print(test())
|
|
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.http import (HttpResponseForbidden, HttpResponseRedirect, Http404,
HttpResponseNotAllowed)
from django.shortcuts import get_object_or_404, render
from django.template.loader import render_to_string
from django.utils.translation import gettext_lazy as _
from django.views.decorators.http import require_POST
from django.views.generic import View, DetailView
from taggit.utils import edit_string_for_tags
from scampcat.scamp.models import Scamp, Annotation
from scampcat.scamp.emitters import json_response
from scampcat.scamp.forms import (AnnotationForm, ScampForm, ScampCloneForm,
ScampUploadForm, ScampTagsForm)
from scampcat.scamp.sessions import set_scamp_session
from scampcat.scamp.utils import coerce_put_post
def homepage(request):
    """Upload page for the scamp; handles the POST
    request for either uploading an image or linking to one.
    """
    if request.method == 'POST':
        upload_form = ScampUploadForm(request.user, request.POST, request.FILES)
        if upload_form.is_valid():
            scamp, image = upload_form.save()
            # Record the scamp key on the session so the (possibly
            # anonymous) uploader retains edit rights; otherwise only a
            # logged-in owner may edit.
            set_scamp_session(request, scamp)
            return HttpResponseRedirect(scamp.get_absolute_url())
    else:
        upload_form = ScampUploadForm()
    # Invalid POSTs fall through here so the bound form re-renders
    # with its validation errors.
    return render(request, 'homepage.html', {'form': upload_form})
class ScampDetailView(DetailView):
    """Class based view to render the scamp details.

    POST updates attributes on the scamp (title/description); PUT
    creates a new annotation attached to it.
    """
    context_object_name = 'object'

    def get_object(self):
        return get_object_or_404(Scamp, slug=self.kwargs['slug'])

    def get_context_data(self, **kwargs):
        context = super(ScampDetailView, self).get_context_data(**kwargs)
        # Expose edit rights to the template as a plain flag.
        context['editable'] = bool(
            self.object.is_editable(self.request.user,
                                    self.request.session.get('scamp_key')))
        return context

    def post(self, request, *args, **kwargs):
        """POSTing to this view is used to update attributes
        on the scamp such as the title and description.
        """
        self.object = self.get_object()
        if not self.object.is_editable(request.user,
                                       request.session.get('scamp_key')):
            return json_response(403, message="Permission denied.")
        form = ScampForm(request.POST, instance=self.object)
        if not form.is_valid():
            return json_response(400, message=form.errors.as_ul())
        scamp = form.save()
        return json_response(200,
                             title=scamp.title,
                             description=scamp.description.rendered,
                             description_raw=scamp.description.raw)

    def put(self, request, *args, **kwargs):
        """PUTing to this view is used to create
        a specific annotation which is attached
        to the scamp at this URL.
        """
        # Due to Django's handling of PUT requests
        # we have to coerce our PUT variables via
        # the POST handling stuff first.
        coerce_put_post(request)
        self.object = self.get_object()
        if not self.object.is_editable(request.user,
                                       request.session.get('scamp_key')):
            return json_response(403, message="Permission denied.")
        form = AnnotationForm(self.object, request.PUT)
        if not form.is_valid():
            return json_response(400, message=form.errors.as_ul())
        annotation = form.save()
        edit_url = reverse('annotation_detail',
                           args=[self.object.slug, annotation.id])
        return json_response(200,
                             id=annotation.id,
                             text_raw=annotation.text.raw,
                             text_rendered=annotation.text.rendered,
                             edit_url=edit_url)
class AnnotationDetailView(DetailView):
    """Updates (POST) or removes (DELETE) a single annotation.

    Annotations have no stand-alone page, so plain GETs 404.
    """

    def get_object(self):
        return get_object_or_404(Annotation,
                                 scamp__slug=self.kwargs['scamp_slug'],
                                 id=self.kwargs['annotation_id'])

    def get(self, request, *args, **kwargs):
        raise Http404

    def post(self, request, *args, **kwargs):
        """POSTing here updates a specific existing annotation.
        """
        self.object = self.get_object()
        if not self.object.scamp.is_editable(request.user,
                                             request.session.get('scamp_key')):
            return json_response(403, message="Permission denied.")
        form = AnnotationForm(self.object.scamp, request.POST,
                              instance=self.object)
        if not form.is_valid():
            return json_response(400, message=form.errors.as_ul())
        annotation = form.save()
        return json_response(200,
                             id=annotation.id,
                             text_rendered=annotation.text.rendered)

    def delete(self, request, *args, **kwargs):
        """DELETEing here deletes a specific existing annotation.
        """
        self.object = self.get_object()
        if not self.object.scamp.is_editable(request.user,
                                             request.session.get('scamp_key')):
            return json_response(403, message="Permission denied.")
        self.object.delete()
        return json_response(200)
class ScampReorderView(View):
    """Provides the API for reordering of annotations
    on a specific scamp.
    """

    def get_object(self):
        """Return the scamp named by the URL slug, 404 if missing."""
        return get_object_or_404(Scamp, slug=self.kwargs['slug'])

    def get(self, request, *args, **kwargs):
        """Currently this is a POST only view so
        we return an http 405.
        """
        # BUG FIX: HttpResponseNotAllowed requires the list of permitted
        # methods; calling it with no arguments raised TypeError at
        # request time instead of returning a 405.
        return HttpResponseNotAllowed(['POST'])

    def post(self, request, *args, **kwargs):
        """Post a list of ordered annotation IDs to
        this view to get them reordered on the scamp.
        """
        self.object = self.get_object()
        if not self.object.is_editable(request.user,
                                       request.session.get('scamp_key')):
            return json_response(403, message="Permission denied.")
        new_order = [int(x) for x in request.POST.getlist('order')]
        old_order = self.object.annotations.all().values_list('id',
                                                              flat=True)
        # Check we have the same IDs before and
        # after before we start reordering.
        if set(new_order) != set(old_order):
            return json_response(400,
                                 message="IDs sent for reordering do"
                                 " not match those already saved.")
        for i, pk in enumerate(new_order):
            Annotation.objects.filter(pk=pk).update(order=i + 1)
        return json_response(200)
def scamp_add_tags(request, slug):
    """Add or edit the tags on a ``Scamp``.

    GET renders the tag form pre-filled with the current tags; a valid
    POST replaces the scamp's tag set and redirects back to the scamp.
    """
    scamp = get_object_or_404(Scamp, slug=slug)
    if request.method == 'POST':
        form = ScampTagsForm(request.POST)
        if form.is_valid():
            # (Removed a bare ``form.cleaned_data['tags']`` expression
            # that was a no-op.)
            scamp.tags.set(*form.cleaned_data['tags'])
            return HttpResponseRedirect(scamp.get_absolute_url())
    else:
        tags = edit_string_for_tags(list(scamp.tags.all()))
        if tags:
            form = ScampTagsForm({'tags': tags})
        else:
            form = ScampTagsForm()
    extra_context = {'form': form,
                     'object': scamp}
    return render(request, 'scamp/scamp_tags.html', extra_context)
@login_required
def scamp_claim(request, slug):
    """Allows a user to claim an anonymous scamp and
    attach it to the account that they are logged into.
    """
    scamp = get_object_or_404(Scamp, slug=slug)
    if scamp.is_editable(request.user,
                         request.session.get('scamp_key')):
        # The scamp is editable, either because it's attached to the
        # logged in user or because the scamp key is set on their session
        # key. Therefore if the scamp is not attached to a user, we know
        # the user is logged in (due to the decorator) so we can safely
        # attach it to the user.
        if not scamp.user:
            scamp.user = request.user
            scamp.save()
            messages.success(request,
                             _("This Scamp is now attached to your account."))
        else:
            # Typo fix: "attaced" -> "attached" in the user-facing message.
            messages.warning(request,
                             _("This Scamp is already attached to another "
                               "account."))
    else:
        messages.error(request,
                       _("You do not have permission to claim this Scamp."))
    url = scamp.get_absolute_url()
    return HttpResponseRedirect(url)
@require_POST
def scamp_clone(request, slug):
    """Clones the current Scamp and everything
    relevant that is attached to it.
    """
    scamp = get_object_or_404(Scamp, slug=slug)
    form = ScampCloneForm(request.user, {'scamp_to_clone': scamp.id})
    if form.is_valid():
        new_scamp = form.save()
        # Grant the cloner edit rights over the new copy.
        set_scamp_session(request, new_scamp)
        # Typo fix: "succesfully" -> "successfully".
        messages.success(request, _("Scamp cloned successfully!"))
        return HttpResponseRedirect(new_scamp.get_absolute_url())
    else:
        return HttpResponseForbidden()
def scamp_delete(request, slug):
    """Scamp delete view, handles confirmation
    and the deletion.

    GET renders the confirmation template; POST performs the delete
    (permission permitting) and redirects.
    """
    scamp = get_object_or_404(Scamp, slug=slug)
    if request.method == 'POST':
        if scamp.is_editable(request.user,
                             request.session.get('scamp_key')):
            scamp.delete()
            # Typo fix: "succesfully" -> "successfully".
            messages.success(request, _("The Scamp was deleted successfully."))
            if request.user.is_authenticated():
                url = reverse('accounts_profile', args=[request.user.username])
            else:
                url = reverse('homepage')
            return HttpResponseRedirect(url)
        else:
            messages.error(request, _("You do not have permission to delete "
                                      "this Scamp."))
            return HttpResponseRedirect(scamp.get_absolute_url())
    context = {'object': scamp}
    return render(request, 'scamp/scamp_delete.html', context)
@login_required
def scamp_report(request, slug):
    """Report a ``Scamp`` as inappropriate by mailing the scamp manager."""
    scamp = get_object_or_404(Scamp, slug=slug)
    if request.method == 'POST':
        context = {
            'user': request.user,
            'site': Site.objects.get_current(),
            'EMAIL_SUBJECT_PREFIX': settings.EMAIL_SUBJECT_PREFIX,
            'object': scamp,
        }
        # Collapse the rendered subject onto one line — mail header
        # values must not contain newlines.
        subject = ''.join(
            render_to_string('scamp/email/subject.txt', context).splitlines())
        body = render_to_string('scamp/email/message.txt', context)
        send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,
                  [settings.SCAMP_MANAGER])
        messages.success(request, _("The Scamp has been reported for "
                                    "violation of terms, thank you."))
    return HttpResponseRedirect(scamp.get_absolute_url())
def scamp_tags(request, tag):
    """List every scamp labelled with any of the '+'-separated tags."""
    tag_list = tag.split('+')
    matches = Scamp.objects.filter(tags__name__in=tag_list)
    context = {
        'object_list': matches,
        'tag_list': tag_list,
    }
    return render(request, 'scamp/scamp_list_tags.html', context)
@require_POST
def scamp_toggle_lock(request, slug):
    """Toggle a scamp's editable (locked) status.

    Only users with edit rights may toggle; everyone is redirected back
    to the scamp afterwards.
    """
    scamp = get_object_or_404(Scamp, slug=slug)
    if scamp.is_editable(request.user,
                         request.session.get('scamp_key')):
        # Flip the flag directly; the previous
        # ``False if scamp.is_locked else True`` was a verbose ``not``.
        scamp.is_locked = not scamp.is_locked
        scamp.save()
        if scamp.is_locked:
            message = _('Scamp locked for editing.')
        else:
            message = _('Scamp unlocked for editing.')
        messages.success(request, message)
    return HttpResponseRedirect(scamp.get_absolute_url())
|
|
""" Vanilla RNN
@author Graham Taylor
"""
import numpy as np
import theano
import theano.tensor as T
from sklearn.base import BaseEstimator
import logging
import time
import os
import datetime
import pickle as pickle
import math
import matplotlib.pyplot as plt
plt.ion()  # interactive matplotlib mode: figures update without blocking
mode = theano.Mode(linker='cvm')  # compile Theano functions with the C VM linker
#mode = 'DEBUG_MODE'
class RNN(object):
    """ Recurrent neural network class

    Builds a Theano symbolic graph (via theano.scan over ``input``,
    whose first dimension is time) mapping an input sequence to an
    output sequence.

    Supported output types:
    real : linear output units, use mean-squared error
    binary : binary output units, use cross-entropy error
    softmax : single softmax out, use cross-entropy error
    """
    def __init__(self, input, n_in, n_hidden, n_out, activation=T.tanh,
                 output_type='real', use_symbolic_softmax=False):
        self.input = input
        self.activation = activation
        self.output_type = output_type

        # when using HF, SoftmaxGrad.grad is not implemented
        # use a symbolic softmax which is slightly slower than T.nnet.softmax
        # See: http://groups.google.com/group/theano-dev/browse_thread/
        # thread/3930bd5a6a67d27a
        if use_symbolic_softmax:
            def symbolic_softmax(x):
                e = T.exp(x)
                return e / T.sum(e, axis=1).dimshuffle(0, 'x')
            self.softmax = symbolic_softmax
        else:
            self.softmax = T.nnet.softmax

        # recurrent weights as a shared variable, uniform in [-0.01, 0.01]
        W_init = np.asarray(np.random.uniform(size=(n_hidden, n_hidden),
                                              low=-.01, high=.01),
                            dtype=theano.config.floatX)
        self.W = theano.shared(value=W_init, name='W')

        # input to hidden layer weights
        W_in_init = np.asarray(np.random.uniform(size=(n_in, n_hidden),
                                                 low=-.01, high=.01),
                               dtype=theano.config.floatX)
        self.W_in = theano.shared(value=W_in_init, name='W_in')

        # hidden to output layer weights
        W_out_init = np.asarray(np.random.uniform(size=(n_hidden, n_out),
                                                  low=-.01, high=.01),
                                dtype=theano.config.floatX)
        self.W_out = theano.shared(value=W_out_init, name='W_out')

        # initial hidden state and biases all start at zero
        h0_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
        self.h0 = theano.shared(value=h0_init, name='h0')

        bh_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
        self.bh = theano.shared(value=bh_init, name='bh')

        by_init = np.zeros((n_out,), dtype=theano.config.floatX)
        self.by = theano.shared(value=by_init, name='by')

        self.params = [self.W, self.W_in, self.W_out, self.h0,
                       self.bh, self.by]

        # for every parameter, we maintain it's last update
        # the idea here is to use "momentum"
        # keep moving mostly in the same direction
        self.updates = {}
        for param in self.params:
            init = np.zeros(param.get_value(borrow=True).shape,
                            dtype=theano.config.floatX)
            self.updates[param] = theano.shared(init)

        # recurrent function (using tanh activation function) and linear output
        # activation function
        def step(x_t, h_tm1):
            h_t = self.activation(T.dot(x_t, self.W_in) + \
                                  T.dot(h_tm1, self.W) + self.bh)
            y_t = T.dot(h_t, self.W_out) + self.by
            return h_t, y_t

        # the hidden state `h` for the entire sequence, and the output for the
        # entire sequence `y` (first dimension is always time)
        [self.h, self.y_pred], _ = theano.scan(step,
                                               sequences=self.input,
                                               outputs_info=[self.h0, None])

        # L1 norm ; one regularization option is to enforce L1 norm to
        # be small
        self.L1 = 0
        self.L1 += abs(self.W.sum())
        self.L1 += abs(self.W_in.sum())
        self.L1 += abs(self.W_out.sum())

        # square of L2 norm ; one regularization option is to enforce
        # square of L2 norm to be small
        self.L2_sqr = 0
        self.L2_sqr += (self.W ** 2).sum()
        self.L2_sqr += (self.W_in ** 2).sum()
        self.L2_sqr += (self.W_out ** 2).sum()

        if self.output_type == 'real':
            self.loss = lambda y: self.mse(y)
        elif self.output_type == 'binary':
            # push through sigmoid
            self.p_y_given_x = T.nnet.sigmoid(self.y_pred)  # apply sigmoid
            self.y_out = T.round(self.p_y_given_x)  # round to {0,1}
            self.loss = lambda y: self.nll_binary(y)
        elif self.output_type == 'softmax':
            # push through softmax, computing vector of class-membership
            # probabilities in symbolic form
            self.p_y_given_x = self.softmax(self.y_pred)
            # compute prediction as class whose probability is maximal
            self.y_out = T.argmax(self.p_y_given_x, axis=-1)
            self.loss = lambda y: self.nll_multiclass(y)
        else:
            raise NotImplementedError

    def mse(self, y):
        # error between output and target
        return T.mean((self.y_pred - y) ** 2)

    def nll_binary(self, y):
        # negative log likelihood based on binary cross entropy error
        return T.mean(T.nnet.binary_crossentropy(self.p_y_given_x, y))

    def nll_multiclass(self, y):
        # negative log likelihood based on multiclass cross entropy error
        # y.shape[0] is (symbolically) the number of rows in y, i.e.,
        # number of time steps (call it T) in the sequence
        # T.arange(y.shape[0]) is a symbolic vector which will contain
        # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
        # Log-Probabilities (call it LP) with one row per example and
        # one column per class LP[T.arange(y.shape[0]),y] is a vector
        # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
        # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
        # the mean (across minibatch examples) of the elements in v,
        # i.e., the mean log-likelihood across the minibatch.
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])

    def errors(self, y):
        """Return a float representing the number of errors in the sequence
        over the total number of examples in the sequence ; zero one
        loss over the size of the sequence

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label

        NOTE(review): for output_type 'real' no branch below matches, so
        this method falls through and returns None — confirm callers only
        use errors() for 'binary'/'softmax' networks.
        """
        # check if y has same dimension of y_pred
        if y.ndim != self.y_out.ndim:
            raise TypeError('y should have the same shape as self.y_out',
                            ('y', y.type, 'y_out', self.y_out.type))

        if self.output_type in ('binary', 'softmax'):
            # check if y is of the correct datatype
            if y.dtype.startswith('int'):
                # the T.neq operator returns a vector of 0s and 1s, where 1
                # represents a mistake in prediction
                return T.mean(T.neq(self.y_out, y))
            else:
                raise NotImplementedError()
class MetaRNN(BaseEstimator):
    """scikit-learn style wrapper around :class:`RNN`.

    ``ready`` compiles the Theano prediction functions; ``fit`` trains
    with SGD plus momentum, optionally reporting test error.  The state
    (constructor params + weights) is picklable via __getstate__ /
    __setstate__ and save/load.
    """

    def __init__(self, n_in=5, n_hidden=50, n_out=5, learning_rate=0.01,
                 n_epochs=100, L1_reg=0.00, L2_reg=0.00, learning_rate_decay=1,
                 activation='tanh', output_type='real',
                 final_momentum=0.9, initial_momentum=0.5,
                 momentum_switchover=5,
                 use_symbolic_softmax=False):
        self.n_in = int(n_in)
        self.n_hidden = int(n_hidden)
        self.n_out = int(n_out)
        self.learning_rate = float(learning_rate)
        self.learning_rate_decay = float(learning_rate_decay)
        self.n_epochs = int(n_epochs)
        self.L1_reg = float(L1_reg)
        self.L2_reg = float(L2_reg)
        self.activation = activation
        self.output_type = output_type
        self.initial_momentum = float(initial_momentum)
        self.final_momentum = float(final_momentum)
        self.momentum_switchover = int(momentum_switchover)
        self.use_symbolic_softmax = use_symbolic_softmax
        self.ready()

    def ready(self):
        """Build the symbolic RNN and compile the prediction functions."""
        # input (where first dimension is time)
        self.x = T.matrix()
        # target (where first dimension is time)
        if self.output_type == 'real':
            self.y = T.matrix(name='y', dtype=theano.config.floatX)
        elif self.output_type == 'binary':
            self.y = T.matrix(name='y', dtype='int32')
        elif self.output_type == 'softmax':  # only vector labels supported
            self.y = T.vector(name='y', dtype='int32')
        else:
            raise NotImplementedError
        # initial hidden state of the RNN
        self.h0 = T.vector()
        # learning rate
        self.lr = T.scalar()

        if self.activation == 'tanh':
            activation = T.tanh
        elif self.activation == 'sigmoid':
            activation = T.nnet.sigmoid
        elif self.activation == 'relu':
            activation = lambda x: x * (x > 0)
        elif self.activation == 'cappedrelu':
            activation = lambda x: T.minimum(x * (x > 0), 6)
        else:
            raise NotImplementedError

        self.rnn = RNN(input=self.x, n_in=self.n_in,
                       n_hidden=self.n_hidden, n_out=self.n_out,
                       activation=activation, output_type=self.output_type,
                       use_symbolic_softmax=self.use_symbolic_softmax)

        if self.output_type == 'real':
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=self.rnn.y_pred,
                                           mode=mode)
        elif self.output_type == 'binary':
            self.predict_proba = theano.function(inputs=[self.x, ],
                                                 outputs=self.rnn.p_y_given_x,
                                                 mode=mode)
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=T.round(self.rnn.p_y_given_x),
                                           mode=mode)
        elif self.output_type == 'softmax':
            self.predict_proba = theano.function(inputs=[self.x, ],
                                                 outputs=self.rnn.p_y_given_x,
                                                 mode=mode)
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=self.rnn.y_out, mode=mode)
        else:
            raise NotImplementedError

    def shared_dataset(self, data_xy):
        """ Load the dataset into shared variables """
        data_x, data_y = data_xy
        shared_x = theano.shared(np.asarray(data_x,
                                            dtype=theano.config.floatX))
        shared_y = theano.shared(np.asarray(data_y,
                                            dtype=theano.config.floatX))
        if self.output_type in ('binary', 'softmax'):
            # labels must be integers for the cross-entropy losses
            return shared_x, T.cast(shared_y, 'int32')
        else:
            return shared_x, shared_y

    def __getstate__(self):
        """ Return state sequence: (constructor params, weight arrays)."""
        # NOTE(review): _get_params is the old sklearn BaseEstimator API
        # (modern sklearn exposes get_params) — confirm installed version.
        params = self._get_params()  # parameters set in constructor
        weights = [p.get_value() for p in self.rnn.params]
        state = (params, weights)
        return state

    def _set_weights(self, weights):
        """ Set fittable parameters from weights sequence.

        Parameters must be in the order defined by self.params:
            W, W_in, W_out, h0, bh, by
        """
        i = iter(weights)
        for param in self.rnn.params:
            # next(i) replaces the Python-2-only i.next()
            param.set_value(next(i))

    def __setstate__(self, state):
        """ Set parameters from state sequence.

        Parameters must be in the order defined by self.params:
            W, W_in, W_out, h0, bh, by
        """
        params, weights = state
        self.set_params(**params)
        self.ready()
        self._set_weights(weights)

    def save(self, fpath='.', fname=None):
        """ Save a pickled representation of Model state. """
        fpathstart, fpathext = os.path.splitext(fpath)
        if fpathext == '.pkl':
            # User supplied an absolute path to a pickle file
            fpath, fname = os.path.split(fpath)
        elif fname is None:
            # Generate filename based on date
            date_obj = datetime.datetime.now()
            date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')
            class_name = self.__class__.__name__
            fname = '%s.%s.pkl' % (class_name, date_str)

        fabspath = os.path.join(fpath, fname)
        logging.info("Saving to %s ..." % fabspath)
        # ``with`` closes the handle even if pickling raises (the old code
        # leaked it on error, and ``file`` shadowed the py2 builtin).
        with open(fabspath, 'wb') as fh:
            pickle.dump(self.__getstate__(), fh,
                        protocol=pickle.HIGHEST_PROTOCOL)

    def load(self, path):
        """ Load model parameters from path. """
        logging.info("Loading from %s ..." % path)
        with open(path, 'rb') as fh:
            state = pickle.load(fh)
        self.__setstate__(state)

    def fit(self, X_train, Y_train, X_test=None, Y_test=None,
            validation_frequency=100):
        """ Fit model

        Pass in X_test, Y_test to compute test error and report during
        training.

        X_train : ndarray (n_seq x n_steps x n_in)
        Y_train : ndarray (n_seq x n_steps x n_out)

        validation_frequency : int
            in terms of number of sequences (or number of weight updates)
        """
        # BUG FIX: ``file(...)`` was removed in Python 3; open() is the
        # portable spelling.  NOTE(review): the hard-coded log directory
        # must already exist.
        f = open('trainProcess/trainOutput-b04-500-200-20.txt', 'a+')
        if X_test is not None:
            assert(Y_test is not None)
            self.interactive = True
            test_set_x, test_set_y = self.shared_dataset((X_test, Y_test))
        else:
            self.interactive = False

        train_set_x, train_set_y = self.shared_dataset((X_train, Y_train))
        n_train = train_set_x.get_value(borrow=True).shape[0]
        if self.interactive:
            n_test = test_set_x.get_value(borrow=True).shape[0]

        ######################
        # BUILD ACTUAL MODEL #
        ######################
        logging.info('... building the model')

        index = T.lscalar('index')    # index to a case
        # learning rate (may change)
        l_r = T.scalar('l_r', dtype=theano.config.floatX)
        mom = T.scalar('mom', dtype=theano.config.floatX)  # momentum

        cost = self.rnn.loss(self.y) \
            + self.L1_reg * self.rnn.L1 \
            + self.L2_reg * self.rnn.L2_sqr

        compute_train_error = theano.function(inputs=[index, ],
                                              outputs=self.rnn.loss(self.y),
                                              givens={
                                                  self.x: train_set_x[index],
                                                  self.y: train_set_y[index]},
                                              mode=mode)

        if self.interactive:
            compute_test_error = theano.function(inputs=[index, ],
                                                 outputs=self.rnn.loss(self.y),
                                                 givens={
                                                     self.x: test_set_x[index],
                                                     self.y: test_set_y[index]},
                                                 mode=mode)

        # compute the gradient of cost with respect to theta = (W, W_in, W_out)
        # gradients on the weights using BPTT
        gparams = [T.grad(cost, param) for param in self.rnn.params]

        # momentum updates: each parameter carries its previous step in
        # self.rnn.updates[param]
        updates = {}
        for param, gparam in zip(self.rnn.params, gparams):
            weight_update = self.rnn.updates[param]
            upd = mom * weight_update - l_r * gparam
            updates[weight_update] = upd
            updates[param] = param + upd

        # compiling a Theano function `train_model` that returns the
        # cost, but in the same time updates the parameter of the
        # model based on the rules defined in `updates`
        train_model = theano.function(inputs=[index, l_r, mom],
                                      outputs=cost,
                                      updates=updates,
                                      givens={
                                          self.x: train_set_x[index],
                                          self.y: train_set_y[index]},
                                      mode=mode)

        ###############
        # TRAIN MODEL #
        ###############
        logging.info('... training')
        epoch = 0

        while (epoch < self.n_epochs):
            epoch = epoch + 1
            for idx in range(n_train):  # range: xrange is Python 2 only
                effective_momentum = self.final_momentum \
                    if epoch > self.momentum_switchover \
                    else self.initial_momentum
                train_model(idx, self.learning_rate, effective_momentum)

                # iteration number (how many weight updates have we made?)
                # epoch is 1-based, index is 0 based
                # (renamed from ``iter`` to avoid shadowing the builtin)
                n_iter = (epoch - 1) * n_train + idx + 1
                if n_iter % validation_frequency == 0:
                    # compute loss on training set
                    train_losses = [compute_train_error(i)
                                    for i in range(n_train)]
                    this_train_loss = np.mean(train_losses)

                    if self.interactive:
                        test_losses = [compute_test_error(i)
                                       for i in range(n_test)]
                        this_test_loss = np.mean(test_losses)
                        f.write('epoch %i, seq %i/%i, tr loss %f '
                                'te loss %f lr: %f \n' %
                                (epoch, idx + 1, n_train,
                                 this_train_loss, this_test_loss,
                                 self.learning_rate))
                        print('epoch %i, seq %i/%i, tr loss %f '
                              'te loss %f lr: %f' %
                              (epoch, idx + 1, n_train,
                               this_train_loss, this_test_loss,
                               self.learning_rate))
                    else:
                        f.write('epoch %i, seq %i/%i, train loss %f '
                                'lr: %f \n' %
                                (epoch, idx + 1, n_train, this_train_loss,
                                 self.learning_rate))
                        print('epoch %i, seq %i/%i, train loss %f '
                              'lr: %f' %
                              (epoch, idx + 1, n_train, this_train_loss,
                               self.learning_rate))

            self.learning_rate *= self.learning_rate_decay
        f.close()
def test_real():
    """ Test RNN with real-valued outputs.

    Trains on a lag task (each target channel echoes a delayed input
    channel plus noise), plots true vs. predicted output for the first
    sequence and prints per-channel RMS error.
    """
    n_hidden = 200
    n_in = 20
    n_out = 5
    n_steps = 10
    n_seq = 100

    np.random.seed(0)
    # simple lag test
    seq = np.random.randn(n_seq, n_steps, n_in)
    targets = np.zeros((n_seq, n_steps, n_out))

    targets[:, 1:, 0] = seq[:, :-1, 3]  # delayed 1
    targets[:, 1:, 1] = seq[:, :-1, 2]  # delayed 1
    targets[:, 2:, 2] = seq[:, :-2, 0]  # delayed 2
    targets += 0.01 * np.random.standard_normal(targets.shape)

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    learning_rate=0.001, learning_rate_decay=0.999,
                    n_epochs=400, activation='tanh')
    model.fit(seq, targets, validation_frequency=1000)

    [seqNum, lineNum, colNum] = targets.shape
    print(seqNum, lineNum, colNum)
    error = [0 for i in range(colNum)]

    plt.close('all')
    fig = plt.figure()
    ax1 = plt.subplot(211)
    plt.plot(seq[0])
    ax1.set_title('input')
    ax2 = plt.subplot(212)
    true_targets = plt.plot(targets[0])

    guess = model.predict(seq[0])
    guessed_targets = plt.plot(guess, linestyle='--')
    for i, x in enumerate(guessed_targets):
        x.set_color(true_targets[i].get_color())
    ax2.set_title('solid: true output, dashed: model output')

    # per-channel RMS error over the first sequence
    dif = abs(guess - targets[0])
    [linedif, coldif] = dif.shape
    print(linedif, coldif)
    errorsum = 0
    for i in range(colNum):
        sq_sum = 0  # renamed from ``sum`` to avoid shadowing the builtin
        for j in range(lineNum):
            sq_sum += dif[j][i] ** 2
        error[i] = math.sqrt(sq_sum / lineNum)
        errorsum += error[i]
        print(error[i])
    print("average error = ", errorsum / colNum)
def test_binary(multiple_out=False, n_epochs=250):
    """ Test RNN with binary outputs.

    Reads whitespace-separated integer rows from the train/test data
    files, groups every ``n_steps`` rows into one sequence (first n_in
    columns are inputs, the rest targets), trains, then writes
    per-sequence error counts and the average error rate to a file.
    """
    n_hidden = 80
    n_in = 11
    n_out = 66
    n_steps = 40
    n_seq = 1000

    np.random.seed(0)

    def read_sequences(path):
        """Parse ``path`` into an array of n_steps-row integer sequences."""
        seqlist = []
        data = []
        count = 0
        for l in open(path):
            count += 1
            row = [int(x) for x in l.split()]
            if len(row) > 0:
                data.append(row)
            if count == n_steps:
                count = 0
                if len(data) > 0:
                    seqlist.append(data)
                data = []
        return np.asarray(seqlist)

    BASE_DIR = os.path.dirname(__file__)
    seqarray = read_sequences(
        os.path.join(BASE_DIR, "traindata/inputdata-b04-40-20-50.txt"))
    seq = seqarray[:, :, :n_in]
    targets = seqarray[:, :, n_in:]

    seqarrayTest = read_sequences(
        os.path.join(BASE_DIR, "testdata/inputdata-b04-20-20-30.txt"))
    seqTest = seqarrayTest[:, :, :n_in]
    targetsTest = seqarrayTest[:, :, n_in:]

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    learning_rate=0.15, learning_rate_decay=0.999,
                    n_epochs=n_epochs, activation='tanh', output_type='binary')
    model.fit(seq, targets, seqTest, targetsTest, validation_frequency=1000)

    # BUG FIX: ``file(...)`` was removed in Python 3; also close the
    # handle when done (it previously leaked).
    ferror = open('errorRate/errorRate-b04-2000-600-20.txt', 'a+')
    [seqNum, lineNum, colNum] = targetsTest.shape
    error = [0 for i in range(lineNum * seqNum)]
    errorsum = 0
    for k in range(seqNum):
        guess = model.predict_proba(seqTest[k])
        dif = abs(guess - targetsTest[k])
        [lineDif, colDif] = dif.shape
        for i in range(lineDif):
            ki = k * lineDif + i
            # count target bits whose predicted probability is on the
            # wrong side of 0.5
            for j in range(colDif):
                if dif[i][j] > 0.5:
                    error[ki] += 1
            ferror.write('error %d = %d \n' % (ki, error[ki]))
            if error[ki] > 0:
                errorsum += 1
    print(errorsum)
    errorRate = errorsum / 1.0 / seqNum / lineNum
    ferror.write("average error = %f \n" % (errorRate))
    ferror.close()
def test_softmax(n_epochs=250):
    """ Test RNN with softmax outputs.

    Builds a three-class lag task, trains, and plots the true classes
    against the model's class probabilities for the first ten sequences.
    """
    n_hidden = 10
    n_in = 5
    n_steps = 10
    n_seq = 100
    n_classes = 3
    n_out = n_classes  # restricted to single softmax per time step

    np.random.seed(0)
    # simple lag test
    seq = np.random.randn(n_seq, n_steps, n_in)
    # BUG FIX: np.int was deprecated and removed (NumPy 1.24); the
    # builtin int is the documented replacement.
    targets = np.zeros((n_seq, n_steps), dtype=int)

    thresh = 0.5
    # if lag 1 (dim 3) is greater than lag 2 (dim 0) + thresh
    # class 1
    # if lag 1 (dim 3) is less than lag 2 (dim 0) - thresh
    # class 2
    # if lag 2(dim0) - thresh <= lag 1 (dim 3) <= lag2(dim0) + thresh
    # class 0
    targets[:, 2:][seq[:, 1:-1, 3] > seq[:, :-2, 0] + thresh] = 1
    targets[:, 2:][seq[:, 1:-1, 3] < seq[:, :-2, 0] - thresh] = 2

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    learning_rate=0.001, learning_rate_decay=0.999,
                    n_epochs=n_epochs, activation='tanh',
                    output_type='softmax', use_symbolic_softmax=False)
    model.fit(seq, targets, validation_frequency=1000)

    seqs = range(10)  # range: xrange is Python 2 only
    [seqNum, lineNum, colNum] = seq.shape
    print(seqNum, lineNum, colNum)
    error = [0 for i in range(colNum)]

    plt.close('all')
    for seq_num in seqs:
        fig = plt.figure()
        ax1 = plt.subplot(211)
        plt.plot(seq[seq_num])
        ax1.set_title('input??')
        ax2 = plt.subplot(212)

        # blue line will represent true classes
        true_targets = plt.step(range(n_steps), targets[seq_num], marker='o')

        # show probabilities (in b/w) output by model
        guess = model.predict_proba(seq[seq_num])
        guessed_probs = plt.imshow(guess.T, interpolation='nearest',
                                   cmap='gray')
        ax2.set_title('blue: true class, grayscale: probs assigned by model')

        # NOTE(review): seq[seq_num] is (n_steps, n_in) while
        # targets[seq_num] is (n_steps,) — these do not broadcast, so the
        # subtraction below raises; the "error" it would compute (input
        # minus class label) also looks meaningless.  Confirm intent —
        # probably dif should compare ``guess`` with one-hot targets.
        dif = abs(seq[seq_num] - targets[seq_num])
        for i in range(colNum):
            sq_sum = 0  # renamed from ``sum`` to avoid shadowing the builtin
            for j in range(lineNum):
                sq_sum += dif[i, j] ** 2
            error[i] = math.sqrt(sq_sum / lineNum)
            print(error[i])
if __name__ == "__main__":
    # Optional file-logging configuration, kept commented for reference:
    ##logging.basicConfig(
    ##    level = logging.INFO,
    ##    format = 'LINE %(lineno)-4d  %(levelname)-8s %(message)s',
    ##    datafmt = '%m-%d %H:%M',
    ##    filename = "D:/logresult20160123/one.log",
    ##    filemode = 'w')
    # Time whichever experiment is enabled below.
    t0 = time.time()
    #test_real()
    # problem takes more epochs to solve
    test_binary(multiple_out=True, n_epochs=30)
    #test_softmax(n_epochs=250)
    print ("Elapsed time: %f" % (time.time() - t0))
|
|
#!/usr/bin/env python
#
# Protein Engineering Analysis Tool DataBase (PEATDB)
# Copyright (C) 2010 Damien Farrell & Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
"""
DB Actions class to handle peat actions from the within the GUI Application
Author: Damien Farrell 2010
"""
from Tkinter import *
import os
import tkSimpleDialog, tkFileDialog, tkMessageBox
from Base import zDatabase
from Record import PEATRecord
from Extfile import FileHandler
class DBActions(object):
    """This class handles misc application tasks like opening sub apps"""
    # Protool mutation engine; created lazily by initProtool().
    MUT = None
    # Handle to the imported yasaramodule, set by importYasara().
    yasara = None
    # Maps pdb file path -> yasara object handle for structures loaded so far.
    yasaraobjects = {}
    # Three-letter -> one-letter amino acid code table ('***' maps to stop '*').
    three_to_one={'ALA':'A','CYS':'C','ASP':'D','GLU':'E','PHE':'F','GLY':'G','HIS':'H','ILE':'I',
                  'LYS':'K','LEU':'L','MET':'M','ASN':'N','PRO':'P','GLN':'Q','ARG':'R','SER':'S',
                  'THR':'T','VAL':'V','TRP':'W','TYR':'Y','***':'*'}
    def __init__(self, DB=None):
        # All functionality is class-level; the DB argument is accepted for
        # API symmetry but intentionally not stored.
        return
@classmethod
def AAList2String(self, aalist):
"""Convert the amino acid sequence as stored in aaseq to a string"""
s = ''
for a in aalist:
s+=self.three_to_one[a[1]]
return s
    @classmethod
    def string2AAseq(self, seq, chain='A'):
        """Convert string to amino acid aaseq as stored in PEAT"""
        # codemap: one-letter -> three-letter residue codes (from PEATSA).
        from PEATSA.Core import Utilities
        codemap = Utilities.InvertedCodeDictionary()
        codes = [codemap[el] for el in list(seq)]
        # Residue ids are numbered 1..len(seq) on the given chain.
        indexes = [Utilities.CreateResidueCode(chain=chain, number=index) for index in range(1, len(seq) + 1)]
        # NOTE: Python 2 zip -> list of (residue_id, three_letter_code) pairs.
        return zip(indexes, codes)
def addProteinSeq(self, DB, name):
"""Add a protein sequence"""
seq_win=Toplevel()
seq_win.title('Enter amino acid sequence')
lbl=Label(seq_win,text='Please enter AA sequence below (1-letter code) or click browse to select a file')
lbl.grid(row=0,column=0,columnspan=8,sticky='w')
seq_box=Text(seq_win,height=10,width=85)
seq_box.grid(row=1,column=0,columnspan=15,sticky='news')
self.seq_file=''
self.seq_text=''
self.seq_start=StringVar()
self.seq_start.set("1")
Label(seq_win,text='Sequence starts at number').grid(row=2,column=0,sticky='e',columnspan=2)
Entry(seq_win,textvariable=self.seq_start,width=5).grid(row=2,column=2,sticky='w')
def get_seq_file():
import tkFileDialog
savedir=self.preferences.get('datadir')
self.seq_file=tkFileDialog.askopenfilename(defaultextension='.PIR',
initialdir=savedir,
filetypes=[("PIR file","*.PIR"),
("All files","*.*")],
parent=seq_win)
import AA_sequence
SEQ=AA_sequence.sequenceIO()
SEQ.readpir(self.seq_file)
seq_box.insert(END,SEQ.sequence)
return
Button(seq_win,text='Browse',command=get_seq_file).grid(row=2,column=4)
def get_seq_text():
self.seq_text=seq_box.get('1.0',END)
seq_win.destroy()
Button(seq_win,text='Create protein',command=get_seq_text).grid(row=2,column=6)
def cancel_get_seq():
self.seq_file=None
seq_win.destroy()
return
Button(seq_win,text='Cancel',command=cancel_get_seq).grid(row=2,column=7)
self.master.wait_window(seq_win)
# Add the protein after checking
if self.seq_file is None:
#self.data['DBinstance'].delete_complete_record(protein_name)
return
import string
if string.strip(self.seq_text)!='':
import string
sequence=string.strip(self.seq_text)
else:
import tkMessageBox
tkMessageBox.showwarning('No sequence',
'No sequence found\nNo protein added',
parent=self.master)
return
# Change the sequence to three-letter code
import DNAtool.mutation
three_lt_seq=[]
for aa in sequence:
if DNAtool.mutation.one_to_three.has_key(aa.upper()):
three_lt_seq.append(DNAtool.mutation.one_to_three[aa.upper()])
else:
import tkMessageBox
tkMessageBox.showwarning('Invalid sequence',
'Sequence contains invalid character: "%s"' %aa,
parent=self.master)
self.data['DBinstance'].delete_complete_record(protein_name)
return
# Add the protein
ok=self.data['DBinstance'].add_protseq(protein_name,
three_lt_seq,
int(self.seq_start.get()))
if not ok:
raise 'Something went wrong when adding the protein sequence'
return
    @classmethod
    def checkMutation(self, DB, name, ref=None, X=None):
        """Check mutations based on ref sequence and current mutant
        sequence, should be triggered whenever ref protein is altered so
        that the mutation codes are updated."""
        prot = DB.get(name)
        if prot.aaseq == None:
            return
        if ref == None:
            # NOTE(review): reads self.DB here but the DB argument elsewhere;
            # presumably they refer to the same database -- confirm.
            ref = self.DB.meta.refprotein
        refseq = self.AAList2String(DB.get(ref).aaseq)
        if prot.aaseq == None:
            return
        #get mutations from sequence
        seq = self.AAList2String(prot.aaseq)
        if seq == refseq:
            # Identical to the reference: nothing to record.
            return
        #get alignment for pdb seq and AA from DNA seq
        import PEATSA.Core as Core
        if X == None:
            #we need to also provide the ref structure
            import Protool
            X=Protool.structureIO()
            X.parsepdb(DB.get(ref).Structure)
            # Debug output of the freshly parsed reference structure.
            print X
        mset = Core.Data.mutationSetFromSequencesAndStructure(refseq, seq, X)
        #prot.Mutations = '+'.join(mset.mutationCodes())
        prot.Mutations = mset.codeString(X)
        return
    @classmethod
    def addPDBFile(self, DB=None, name=None, pdbfile=None,
                   pdbdata=None, pdbname=None, gui=True):
        """Add a PDB file to the record given as argument.

        The structure may come from *pdbdata* (raw text), *pdbfile* (path),
        or a file-open dialog when neither is supplied.  If this record is
        the reference protein, all mutation codes are re-checked.
        """
        import os, tkMessageBox
        if pdbdata == None and pdbfile == None:
            # No data supplied: ask the user for a file.
            savedir=os.getcwd()
            global PDB_code
            pdbfile=tkFileDialog.askopenfilename(defaultextension='.pdb',
                                                 initialdir=savedir,
                                                 filetypes=[("PDB file","*.pdb"),
                                                            ("PDB file","*.brk"),
                                                            ("All files","*.*")])
            if not pdbfile:
                return
        if pdbfile:
            pdbname = os.path.basename(pdbfile)
        import Protool
        self.X=Protool.structureIO()
        # Extracting PDB_code from pdbfile
        if pdbdata != None:
            self.X.readpdb(data=pdbdata)
        elif os.path.isfile(pdbfile):
            PDB_code=pdbfile.split('/').pop().split('.')[0]
            # Try to read it using Protool
            try:
                self.X.readpdb(filename=pdbfile)
            except:
                tkMessageBox.showwarning('Error reading PDB file',
                                         'I could not read the PDB file. This probably means that the PDB file is corrupt in some way.')
                return
        AlignmentMap = None
        if gui==True:
            if tkMessageBox.askyesno('Reset AA Seq?',
                                     'Do you want to reset the amino acid Sequence?'):
                AlignmentMap = self.checkPDBSequence(name)
        #store it
        DB.storePDB(name, self.X, AlignmentMap)
        if hasattr(DB.meta,'refprotein'):
            ref = DB.meta.refprotein
            #if this is the reference protein remodel mutations and rewrite mut codes
            if name == ref:
                print name, ref
                print 'rechecking mutation codes, ref prot structure has changed'
                #get new mutation codes
                import PEATSA.Core as Core
                for p in DB.getRecs():
                    self.checkMutation(DB, p, ref, self.X)
                #self.checkModels(DB)
        #add the original pdb name
        DB.data[name]['pdbname'] = pdbname
        return
@classmethod
def checkPDBSequence(self, name):
"""Check the PDB sequence against a newly added structure, optional.
Adds the amino acid seq of the PDB file, overwriting the old one"""
# Extract the sequence
import sequence_alignment
pdb_1,ignored_res1=sequence_alignment.Protool2pir(self.X.sequence)
print 'IGNORED',ignored_res1
if ignored_res1!={}:
igroups=ignored_res1.keys()
igroups.sort()
import tkMessageBox
tkMessageBox.showwarning('Unknown entities in PDB file',
'I ignored the following residue types/molecules in the PDB file:\n%s' %(str(igroups)))
# Get the entry sequence
accept_alignment_automatically=None
record_AA = DB.get_AA_sequence(name)
if record_AA:
record_AA1,ignored_res=sequence_alignment.Protool2pir(record_AA)
# If we do not have an amino acid sequence for the record, then
# we simply use the one from the PDB file and accept the alignment
# straight away
accept_alignment_automatically=1
import copy
record_AA1=copy.deepcopy(pdb_1)
# Also deposit the amino acid sequence in the protein record
DB.data[name]['aaseq'] = copy.deepcopy(self.X.sequence)
# Align the two sequences
NW_align = sequence_alignment.NW(pdb_1,record_AA1)
al_pdb,al_record,map_pdb,map_record = NW_align.Align()
self.al_pdb = al_pdb
self.al_record = al_record
# Find regions of overlap
ids=0
for count in range(len(al_pdb)):
res_pdb=al_pdb[count]
res_rec=al_record[count]
if res_pdb==res_rec:
ids=ids+1
print 'Sequence identity %5.3f' %(100.0*float(ids)/float(len(al_pdb)))
AlignmentMap = {}
AlignmentMap['OrigAa']=al_record
AlignmentMap['AlignedAa']=al_pdb
#Make alignment window
AlignWindow=Toplevel()
self.AlingWindow=AlignWindow
AlignWindow.geometry('+100+200')
AlignWindow.title('Please check alignment')
AlignWindow.button = Button(AlignWindow,
{"text": "Alignment OK", "fg": "black",
"command":storePDB})
AlignWindow.button.grid(row=3,column=0)
AlignWindow.button = Button(AlignWindow,
{"text": "Alignment not OK", "fg": "black",
"command": AlignWindow.destroy})
AlignWindow.button.grid(row=3,column=1)
AlignWindow.Slider=Scrollbar(AlignWindow,orient=HORIZONTAL)
AlignWindow.Slider.grid(row=1,column=0,sticky='news',columnspan=2)
listbox = Listbox(AlignWindow,{"height": 2,"width":80,"font":"courier 14"})
listbox.insert('end',"PEAT_DB record: "+al_record)
listbox.insert('end',"PDB file : "+al_pdb)
listbox.grid(row=0,column=0,columnspan=2)
listbox.config(xscrollcommand=AlignWindow.Slider.set)
AlignWindow.Slider.config(command=listbox.xview)
return AlignmentMap
    @classmethod
    def fetchPDB(self, pdbid):
        """Download a PDB file from rcsb.org and return its text, or None
        when the id is unknown (server returned an HTML page or junk)."""
        import urllib
        url = 'http://www.rcsb.org/pdb/files/%s.pdb' % pdbid
        stream = urllib.urlopen(url).read()
        # An HTML response or a tiny body means the entry was not found.
        if stream.startswith('<html>') or len(stream)<10:
            return None
        else:
            print 'pdb file %s found' %pdbid
            return stream
    @classmethod
    def displayStructure(self, protein, field_name='Structure',
                         molapp=None, path=None, color='green', clear=True):
        """Display a structure"""
        # NOTE(review): self.DB is not defined on this class -- presumably
        # assigned by the GUI before this is called; confirm.
        pdblines,X = self.DB.getStructure(protein, field_name)
        if not pdblines:
            return
        # Dump the structure to a hidden scratch file in the cwd.
        pdbname=os.path.join(os.getcwd(),'._'+protein)
        # Write the pdbpipe
        fd=open(pdbname,'w')
        fd.writelines(pdblines)
        fd.close()
        if molapp == 'pymol':
            try:
                self.launchPyMol(pdbname)
            except:
                # Fall back to spawning an external pymol binary at 'path'.
                import subprocess
                print path,pdbname
                p=subprocess.Popen([path,pdbname])
        elif molapp == 'yasara':
            self.launchYasara(protein, pdbname, path, color, clear)
        elif molapp in ['vmd','rasmol']:
            import subprocess
            try:
                # Try the app on the PATH first, then the configured path.
                p=subprocess.Popen([molapp,pdbname])
            except:
                print 'command not found, trying path..'
                if path == '' or path == None:
                    print 'no path for application set...'
                else:
                    p=subprocess.Popen([path,pdbname])
        else:
            import tkMessageBox
            tkMessageBox.showwarning('Cannot display structure',
                                     'The structure cannot be displayed because no molecular '
                                     'graphics program has been set up.')
        return
    @classmethod
    def launchPyMol(self, pdbfile):
        """Open pdb in PyMOL (in-process, via the pymol Python module)."""
        import pymol
        # Call the function below before using any PyMOL modules.
        pymol.finish_launching()
        from pymol import cmd
        cmd.load(pdbfile)
        cmd.show_as('sticks')
        return
    @classmethod
    def launchYasara(self, protein, pdbfile, path, color='green', clear=False):
        """Launch yasara"""
        yasaradir = path
        import os
        # If given a file path, use its directory; handle the OS X
        # 'yasara.app' bundle layout by descending into its 'yasara' subdir.
        if not os.path.isdir(yasaradir):
            yasaradir=os.path.split(yasaradir)[0]
        dirname=os.path.split(yasaradir)[1]
        if dirname.lower()=='yasara.app':
            yasaradir=os.path.join(yasaradir,'yasara')
        else:
            pass
        # Import the yasara python module once and cache it on the class.
        if self.yasara == None:
            self.importYasara(yasaradir)
        yasara = self.yasara
        # Get Yasara to load the PDB file
        if clear == True:
            yasara.Clear()
            yasara.run('OriAll 0,0,0')
        obj=yasara.LoadPDB(pdbfile)
        yasara.Style('stick')
        yasara.HideRes('Hoh')
        yasara.ColorObj(obj, color)
        yasara.ColorBG('black')
        self.yasaraobjects[pdbfile] = obj
        #If a reference protein is selected then try to colour the mutations
        # NOTE(review): self.DB assumed to be set by the application; confirm.
        DB = self.DB
        refprot = DB.meta.refprotein
        rec = DB.get(protein)
        if refprot is None:
            pass
        else:
            parentrec = DB.get(refprot)
            is_parent, operations = rec.getAncestry(parentrec)
            if is_parent:
                # Colour each mutated residue red.
                for op in operations:
                    import pKa.pKD_tools
                    number=int(pKa.pKD_tools.get_resnum_from_mut(op))
                    yasara.ColorRes(number,'red')
        return
@classmethod
def importYasara(self, yasaradir):
import sys,os
sys.path.append(os.path.join(yasaradir,'pym'))
sys.path.append(os.path.join(yasaradir,'plg'))
import yasaramodule as yasara
self.yasara = yasara
return
    @classmethod
    def initProtool(self, callback=None):
        """Import Protool"""
        # Init the modelling routines
        # (callback is currently unused; kept for API compatibility.)
        print 'Importing Protool'
        import Protool.mutate
        # Cache the mutation engine at class level for later reuse.
        self.MUT = Protool.mutate.Mutate()
        print 'Done importing Protool'
        return self.MUT
@classmethod
def writePDB(self, pdblines, filename):
fd=open(filename,'w')
for line in pdblines:
fd.write(line)
fd.close()
return
@classmethod
def makemutantSequence(self, sequence, operations):
"""Apply the specified mutations to a sequence and return the mutant seq
Sequence must be in the [[A:0001:ALA],[A:0002:GLU]] format
Operations is a list of the following types:
Mutations: A:0001:ALA:ASP
Deletions: delete:A:0002:GLU
Insertions: insert:1:A:0003:THR:ALA, insert:2:A:0003:THR:TRP (insert THR,TRP after A:0003:THR)
Operations are always performed in sequence numbering order """
if operations==[]:
return sequence
ops_sorted={}
insertions=[]
for operation in operations:
s_op=operation.split(':')
# Normal mutation
import pKa.pKD_tools as pKD_tools
resid=pKD_tools.get_resid_from_mut(operation)
if ops_sorted.has_key(resid):
raise Exception('More than one operation on the same residue: %s' %resid)
ops_sorted[resid]=['mutate',operation]
# Perform the operations
new_seq=[]
new_count=None
new_chain=None
for resid,restyp in sequence:
# Make sure that the chain hasn't changed or if we are at the beginning then init
if resid.split(':')[0]!=new_chain:
#Initialise
sp_resid=resid.split(':')
new_chain=sp_resid[0]
new_count=int(sp_resid[1])
newresid='%s:%s' %(new_chain,string.zfill(new_count,4))
# Does this residue have an operation?
if ops_sorted.has_key(resid):
op=ops_sorted[resid]
if op[0]=='delete':
# Deletion
if op[1]==restyp:
pass # This deletes the residue
else:
raise Exception('Incorrect org residue in deletion: %s' %op)
elif op[0]=='insert':
# Insertion
inserts=op[1].keys()
inserts.sort()
for i in inserts:
if i[0]==restyp:
new_seq.append([newresid,i[1]])
new_count=new_count+1
newresid='%s:%s' %(new_chain,string.zfill(new_count,4))
elif op[0]=='mutate':
# Mutation
import pKa.pKD_tools as pKD_tools
orgres=pKD_tools.get_oldrestyp_from_mut(op[1])
if orgres==restyp:
new_seq.append([newresid,pKD_tools.get_newrestyp_from_mut(op[1])])
new_count=new_count+1
newresid='%s:%s' %(new_chain,string.zfill(new_count,4))
pass
else:
raise Exception('Unknown mutations spec: %s' %op)
else:
new_seq.append([resid,restyp])
new_count=new_count+1
newresid='%s:%s' %(new_chain,string.zfill(new_count,4))
return new_seq
@classmethod
def checkModels(self, DB=None, callback=None, selected=None, usemutationcodes=False):
"""Check that we have modelled a structure for everything we can"""
if DB == None:
return
proteins = DB.getRecs()
refprot = DB.meta.refprotein
refseq = DB[refprot].aaseq
refaa = self.AAList2String(refseq)
refpdb = DB[refprot].Structure
refpdbfile = os.path.join(os.getcwd(), 'ref.pdb')
self.writePDB(refpdb, refpdbfile)
failed = []
# Check that Protool is loaded
if not self.MUT:
self.initProtool()
#Create protool oinstance for ref pdb
import Protool
Xref = Protool.structureIO()
Xref.parsepdb(refpdb)
# Find all potential parents
records_with_structure=[]
for protein in proteins:
rec = DB.get(protein)
if rec.hasStructure() == 'available':
records_with_structure.append(protein)
# Loop over selected or all
if selected == None:
selected = list(set(proteins) - set(records_with_structure))
numrecords=len(selected)
count=1
for protein in selected:
rec = DB.get(protein)
if rec.hasStructure() == 'available':
continue
print 'Protein:', protein
#if no sequence try create one from mutation code
if rec.aaseq == None and rec.Mutations != None:
#print refaa
print 'no sequence, using mutation code and ref protein seq'
import PEATSA.Core as Core
print 'Record has mutation code %s' %rec.Mutations
mutationSet = Core.Data.MutationSet(rec.Mutations)
mutseq = mutationSet.applyToSequence(refaa, id='A', pdb=Xref)
rec.aaseq = self.string2AAseq(mutseq)
parent_with_structure = []
for parent in records_with_structure:
parentrec = DB.get(parent)
is_parent, operations = rec.getAncestry(parentrec)
# We can only model on X-ray structures
if parentrec.hasStructure() == 'available' and is_parent:
parent_with_structure.append([parent, len(operations)])
# Record failure to model
if parent_with_structure == []:
continue
# Find the best parent
def compare_func(x,y):
if x[1]>y[1]:
return 1
elif x[1]==y[1]:
return 0
if x[1]<y[1]:
return -1
parent_with_structure.sort(cmp=compare_func)
parent = parent_with_structure[0][0]
operations = rec.getAncestry(parentrec)[1]
print 'Using %s as template with %d operations.' %(parent, len(operations))
# Start the modelling
pdblines = parentrec.Structure
# Load the pdb file
import Protool
X=Protool.structureIO()
X.parsepdb(pdblines)
self.MUT.new_PDB(X)
self.MUT.max_tolerated_bump=0.5
atom_changes=[]
skip_protein=None
self.MUT.new_mutation()
for operation in operations:
# Is this a deletion?
if operation.find('delete')!=-1:
print 'This is a deletion - Jens should write code for modelling this'
print 'Deletion ignored for now'
continue
elif operation.find('insert')!=-1:
print 'This is an insertion - Jens should write code for modelling insertions'
print 'Insertion ignored for now'
continue
# This is a normal mutation
# Get the residue number, old residue and new residue
import pKa.pKD_tools as pKD_tools
new_res = pKD_tools.get_newrestyp_from_mut(operation)
old_res = pKD_tools.get_oldrestyp_from_mut(operation)
resid = pKD_tools.get_resid_from_mut(operation)
#print operation, resid
if not X.residues.has_key(resid):
print 'No structural info for mutation %8s. Not modelling this mutation\n' %operation
skip_protein=True
continue
# Actually make the mutation
bump_score=self.MUT.mutate(resid,new_res,orgtype=old_res)
print 'Mutation: %s, bump_score: %s' %(resid+new_res,str(bump_score))
if bump_score is None:
skip_protein=True
break
else:
atom_changes=atom_changes+self.MUT.mutate_operations
self.MUT.mutate_operations=[]
# Update progress
completion = float(count)/float(numrecords)*100.0
if callback != None:
callback(completion)
else:
print '%4d of %4d, completion; %5.2f%%' %(count,float(numrecords),completion)
count=count+1
# Did it work?
if skip_protein:
print
print 'Modelling failed for %s' %protein
failed.append(protein)
rec.Structure = 'Bumps'
rec.structuretype = 'failed model'
continue
# We have all sets of changes in atom_changes
rec.Structure = {'Rotamer_operations': atom_changes}
rec.Structure['parent'] = parent
rec.structuretype = 'peat model'
print 'Done'
if len(failed)>0:
print 'Failed to model the following proteins:'
for f in failed: print f
return
    @classmethod
    def setSequencesfromMutationCodes(self, DB=None, callback=None, selected=None):
        """Set the aa sequence using wt ref aa and mutation code
        Assumes mutation code is consistent with ref aa seq"""
        # NOTE: 'callback' and 'proteins' are currently unused.
        if DB == None:
            return
        proteins = DB.getRecs()
        refprot = DB.meta.refprotein
        refseq = DB[refprot].aaseq
        refaa = self.AAList2String(refseq)
        refpdb = DB[refprot].Structure
        #Create protool oinstance for ref pdb
        import Protool
        Xref = Protool.structureIO()
        Xref.parsepdb(refpdb)
        # NOTE(review): iterating 'selected' raises TypeError if it is left
        # as None -- presumably callers always pass a record list; confirm.
        for protein in selected:
            rec = DB.get(protein)
            if rec.hasStructure() == 'available':
                continue
            print 'Protein:', protein
            #if no sequence try create one from mutation code
            if rec.aaseq == None and rec.Mutations != None:
                print 'no sequence, using mutation code and ref protein seq'
                import PEATSA.Core as Core
                print 'Record has mutation code %s' %rec.Mutations
                mutationSet = Core.Data.MutationSet(rec.Mutations)
                Xref.Remove_All_NonAminoAcids()
                refaa = Core.Data.GetChainSequences(Xref)['A']
                #print refaa
                mutseq = mutationSet.applyToSequence(refaa, id='A', offset=None, pdb=Xref)
                rec.aaseq = self.string2AAseq(mutseq)
        return
    @classmethod
    def modelFromMutationCode(self):
        """Model directly from mutation code"""
        # NOTE(review): this method is dead/broken code -- 'rec', 'Xref',
        # 'refpdbfile' and 'Protool' are all undefined in this scope.  It
        # looks like a fragment lifted from checkModels; do not call as-is.
        import PEATSA.Core as Core
        print 'Record has mutation code %s' %rec.Mutations
        mutationSet = Core.Data.MutationSet(rec.Mutations)
        mutationCodes = mutationSet.mutationCodes(Xref, reduced=False)
        #print refpdbfile, mutationCodes
        result, score = Protool.mutate.Model_Mutations(refpdbfile, [],
                                                       mutationCodes,return_score=True)
        if result == False:
            print 'failed to model'
        else:
            mutant = result.PI
            mutant.writepdb('mutant.pdb')
            #rec.Structure = mutant.write_pdb('dummy',nowrite=1)
            rec.structuretype = 'protool model'
        return
    @classmethod
    def getRecordsSelector(self, DB, dwin):
        """Selection boxes for record/col selection.

        Builds two scrollable multi-select listboxes (records, fields)
        inside the dialog window *dwin*, populates them from the DB and
        returns (recsbox, colsbox)."""
        recs = DB.getRecs()
        fields = DB.getFields()
        yscrollbar=Scrollbar(dwin,orient='vertical',width=12)
        yscrollbar.grid(row=1,column=2,sticky='news',padx=2)
        recsbox=Listbox(dwin,bg='white',
                        exportselection=0,
                        height=18,
                        width=20,
                        yscrollcommand=yscrollbar.set,
                        selectmode=EXTENDED)
        yscrollbar.config(command=recsbox.yview)
        Label(dwin,text='Records:').grid(row=0,column=0,sticky='news')
        recsbox.grid(row=1,column=0,columnspan=2,sticky='news',padx=1,pady=3)
        recsbox.config(state=NORMAL)
        y1scrollbar=Scrollbar(dwin,orient='vertical',width=12)
        y1scrollbar.grid(row=1,column=5,sticky='news',padx=2)
        colsbox=Listbox(dwin,bg='white',
                        exportselection=0,
                        height=20,
                        width=20,
                        yscrollcommand=y1scrollbar.set,
                        selectmode=EXTENDED)
        y1scrollbar.config(command=colsbox.yview)
        Label(dwin,text='Fields:').grid(row=0,column=3,sticky='news')
        colsbox.grid(row=1,column=3,columnspan=2,sticky='news',padx=1,pady=3)
        colsbox.config(state=NORMAL)
        # Let the listboxes grow with the dialog.
        dwin.rowconfigure(1, weight=1)
        dwin.columnconfigure(0, weight=1)
        for r in recs:
            recsbox.insert('end', r)
        for f in fields:
            colsbox.insert('end', f)
        return recsbox, colsbox
    @classmethod
    def sendDB2Labbook(self, DB, recs=None, cols=None, name='main'):
        """Send copy of selected DB cols to a labbook table
        convenience method to allow quick duplication of main DB data"""
        from PEATDB.TableModels import TableModel
        if cols == None:
            cols = DB.meta.userfields.keys()
        if recs == None:
            recs = DB.getRecs()
        # NOTE(review): mutates the caller's 'cols' list when one is passed in.
        cols.append('name')
        M = TableModel()
        #M.addColumn('name')
        for rec in recs:
            M.addRow(rec)
            for c in cols:
                # NOTE(review): addColumn is invoked once per record per
                # column; presumably idempotent in TableModel -- confirm.
                M.addColumn(c)
                if DB[rec].has_key(c):
                    M.data[rec][c] = DB[rec][c]
        DB.createLabbookSheet(name, M)
        return M
@classmethod
def showTkFigure(self, fig, parent=False, side=TOP):
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
if parent == True:
fr=Frame(parent)
else:
fr = Toplevel()
canvas = FigureCanvasTkAgg(fig, master=fr)
#self.canvas.show()
canvas.get_tk_widget().pack(side=side, fill=BOTH, expand=1)
canvas._tkcanvas.pack(side=side, fill=BOTH, expand=1)
mtoolbar = NavigationToolbar2TkAgg( canvas, fr )
mtoolbar.update()
return fr
|
|
from hamcrest import assert_that, is_, contains, has_entries, has_entry
from backdrop.core.nested_merge import nested_merge, group_by, \
apply_collect_to_group, collect_all_values
from backdrop.core.timeseries import WEEK, MONTH
def datum(name=None, version=None, place=None, age=None, stamp=None, count=1):
    """Build a fake query-result datapoint dict.

    Only the keyword arguments that are not None end up in the dict; a
    timestamp additionally produces the derived week/month period starts.
    """
    result = {"_count": count}
    for key, value in (('name', name), ('version', version),
                       ('place', place), ('age', age)):
        if value is not None:
            result[key] = value
    if stamp is not None:
        result['_timestamp'] = stamp
        result['_week_start_at'] = WEEK.start(stamp)
        result['_month_start_at'] = MONTH.start(stamp)
    return result
class TestNestedMerge(object):
    """End-to-end tests for nested_merge: grouping plus collect aggregation.

    Groups come back sorted by group key; 'mean' collapses the collected
    age lists into a single value per (sub)group."""
    def test_one_level_grouping_with_collect(self):
        data = [
            datum(name='Jill', age=[12, 45]),
            datum(name='Jack', age=[34, 34]),
            datum(name='John', age=[56, 65])
        ]
        results = nested_merge([['name']], [('age', 'mean')], data)
        assert_that(results,
                    contains(
                        has_entries({'name': 'Jack', 'age:mean': 34}),
                        has_entries({'name': 'Jill', 'age:mean': 28.5}),
                        has_entries({'name': 'John', 'age:mean': 60.5}),
                    ))
    def test_two_level_grouping_with_collect(self):
        data = [
            datum(name='Jill', place='Kettering', age=[34, 36], count=2),
            datum(name='Jack', place='Kennington', age=[23], count=1),
            datum(name='James', place='Keswick', age=[10, 21, 32], count=3),
            datum(name='James', place='Kettering', age=[43, 87], count=2),
            datum(name='Jill', place='Keswick', age=[76, 32], count=2),
        ]
        results = nested_merge([['name'], ['place']], [('age', 'mean')], data)
        # Top-level mean is over all of a name's ages; each _subgroup holds
        # the per-place means.
        assert_that(results,
                    contains(
                        has_entries({
                            'name': 'Jack',
                            'age:mean': 23,
                            '_subgroup': contains(
                                has_entries({
                                    'place': 'Kennington',
                                    'age:mean': 23
                                })
                            )}),
                        has_entries({
                            'name': 'James',
                            'age:mean': 38.6,
                            '_subgroup': contains(
                                has_entries({
                                    'place': 'Keswick',
                                    'age:mean': 21.0
                                }),
                                has_entries({
                                    'place': 'Kettering',
                                    'age:mean': 65.0
                                })
                            )
                        }),
                        has_entries({
                            'name': 'Jill',
                            'age:mean': 44.5,
                            '_subgroup': contains(
                                has_entries({
                                    'place': 'Keswick',
                                    'age:mean': 54.0
                                }),
                                has_entries({
                                    'place': 'Kettering',
                                    'age:mean': 35.0
                                })
                            )
                        }),
                    ))
    def test_two_level_grouping_combination_of_keys(self):
        # First grouping level uses a compound key (name AND version).
        data = [
            datum(name='IE', version='6', place='England', age=[13, 12], count=2),
            datum(name='IE', version='6', place='Wales', age=[13, 14], count=2),
            datum(name='IE', version='7', place='England', age=[8, 7], count=2),
            datum(name='IE', version='7', place='Wales', age=[8, 9], count=2),
            datum(name='IE', version='8', place='England', age=[5, 4], count=2),
            datum(name='IE', version='8', place='Wales', age=[5, 6], count=2),
            datum(name='Chrome', version='20', place='England', age=[2, 1], count=2),
            datum(name='Chrome', version='20', place='Wales', age=[2, 3], count=2),
        ]
        results = nested_merge([['name', 'version'], ['place']], [('age', 'mean')], data)
        assert_that(results,
                    contains(
                        has_entries({
                            'name': 'Chrome',
                            'version': '20',
                            'age:mean': 2,
                            '_subgroup': contains(
                                has_entries({
                                    'place': 'England',
                                    'age:mean': 1.5
                                }),
                                has_entries({
                                    'place': 'Wales',
                                    'age:mean': 2.5
                                })
                            )
                        }),
                        has_entries({
                            'name': 'IE',
                            'version': '6',
                            'age:mean': 13,
                            '_subgroup': contains(
                                has_entries({
                                    'place': 'England',
                                    'age:mean': 12.5
                                }),
                                has_entries({
                                    'place': 'Wales',
                                    'age:mean': 13.5
                                })
                            )
                        }),
                        has_entries({
                            'name': 'IE',
                            'version': '7',
                            'age:mean': 8,
                            '_subgroup': contains(
                                has_entries({
                                    'place': 'England',
                                    'age:mean': 7.5
                                }),
                                has_entries({
                                    'place': 'Wales',
                                    'age:mean': 8.5
                                })
                            )
                        }),
                        has_entries({
                            'name': 'IE',
                            'version': '8',
                            'age:mean': 5,
                            '_subgroup': contains(
                                has_entries({
                                    'place': 'England',
                                    'age:mean': 4.5
                                }),
                                has_entries({
                                    'place': 'Wales',
                                    'age:mean': 5.5
                                })
                            )
                        }),
                    ))
class TestGroupBy(object):
    """Tests for group_by alone (no collect step): data is merged into
    groups/subgroups, preserving raw collected field values."""
    def test_one_level_grouping(self):
        data = [
            datum(name='Jill', age=[12, 45]),
            datum(name='Jack', age=[34, 34]),
            datum(name='John', age=[56, 65])
        ]
        results = group_by(data, [['name']])
        # Exact dict equality: grouped output is sorted by name.
        assert_that(results,
                    contains(
                        is_({'name': 'Jack', 'age': [34, 34], '_count': 1}),
                        is_({'name': 'Jill', 'age': [12, 45], '_count': 1}),
                        is_({'name': 'John', 'age': [56, 65], '_count': 1}),
                    ))
    def test_two_level_grouping(self):
        data = [
            datum(name='Jill', place='Kettering', age=[34, 36], count=2),
            datum(name='James', place='Kettering', age=[43, 87], count=2),
            datum(name='Jill', place='Keswick', age=[76, 32], count=2),
        ]
        results = group_by(data, [['name'], ['place']])
        assert_that(results,
                    contains(
                        is_({
                            'name': 'James',
                            '_subgroup': [
                                {'place': 'Kettering', 'age': [43, 87], '_count': 2}
                            ]}),
                        is_({
                            'name': 'Jill',
                            '_subgroup': [
                                {'place': 'Keswick', 'age': [76, 32], '_count': 2},
                                {'place': 'Kettering', 'age': [34, 36], '_count': 2},
                            ]}),
                    ))
class TestApplyCollectToGroup(object):
    """Tests for apply_collect_to_group: folding collected field lists into
    aggregate 'field:method' entries, recursing into _subgroup."""
    def test_single_level_collect_sum(self):
        group = {'name': 'Joanne', 'age': [34, 56]}
        assert_that(apply_collect_to_group(group, [('age', 'sum')]),
                    has_entry('age:sum', 90))
    def test_single_level_collect_default(self):
        group = {'name': 'Joanne', 'age': [34, 56]}
        # 'default' yields both the 'age:set' entry and the plain 'age' list.
        assert_that(apply_collect_to_group(group, [('age', 'default')]),
                    is_({
                        'name': 'Joanne', 'age:set': [34, 56], 'age': [34, 56]}))
    def test_double_level_collect_sum(self):
        group = {'name': 'Joanne', '_subgroup': [
            {'place': 'Kettering', 'age': [34, 56]},
            {'place': 'Keswick', 'age': [87, 2]},
        ]}
        collected = apply_collect_to_group(group, [('age', 'sum')])
        # level one
        assert_that(collected, has_entry('age:sum', 179))
        # level two
        assert_that(collected, has_entry('_subgroup',
                                         contains(
                                             has_entry('age:sum', 90),
                                             has_entry('age:sum', 89)
                                         )))
    def test_double_level_collect_default(self):
        group = {'name': 'Joanne', '_subgroup': [
            {'place': 'Kettering', 'age': [34, 56]},
            {'place': 'Keswick', 'age': [87, 2]},
        ]}
        collected = apply_collect_to_group(group, [('age', 'default')])
        # Top level aggregates (sorted) over all subgroup values.
        assert_that(collected, has_entries({
            'age:set': [2, 34, 56, 87],
            'age': [2, 34, 56, 87],
        }))
        assert_that(collected, has_entry('_subgroup',
                                         contains(
                                             has_entries({
                                                 'age:set': [34, 56],
                                                 'age': [34, 56],
                                             }),
                                             has_entries({
                                                 'age:set': [2, 87],
                                                 'age': [2, 87],
                                             }),
                                         )))
class TestCollectAllValues(object):
    """Tests for collect_all_values: gathering a field's values across a
    (possibly nested) group dict."""
    def test_single_level_collect(self):
        group = {
            'age': [5]
        }
        # BUG FIX: assert_that(value, <non-matcher>) merely truth-tests the
        # first argument in PyHamcrest, making this assertion vacuous; wrap
        # the expectation in is_() so the result is actually compared.
        assert_that(collect_all_values(group, 'age'), is_([5]))
    def test_double_level_collect(self):
        group = {
            'age': [5], # This will be discarded as there are sub groups.
            '_subgroup': [
                {'age': [1, 2]},
                {'age': [3, 4]},
            ]
        }
        # BUG FIX: same vacuous-assertion problem; wrap in is_().
        assert_that(collect_all_values(group, 'age'), is_([1, 2, 3, 4]))
|
|
"""
Test breakpoint names.
"""
from __future__ import print_function
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class BreakpointNames(TestBase):
    """Tests for setting, matching, using and configuring breakpoint names."""
    mydir = TestBase.compute_mydir(__file__)
    # These tests do not inspect variables, so debug info is not required.
    NO_DEBUG_INFO_TESTCASE = True
    # Thin driver methods: each builds the test binary, sets up the target
    # and delegates to the matching do_check_* helper.
    @add_test_categories(['pyapi'])
    def test_setting_names(self):
        """Use Python APIs to test that we can set breakpoint names."""
        self.build()
        self.setup_target()
        self.do_check_names()
    def test_illegal_names(self):
        """Use Python APIs to test that we don't allow illegal names."""
        self.build()
        self.setup_target()
        self.do_check_illegal_names()
    def test_using_names(self):
        """Use Python APIs to test that operations on names works correctly."""
        self.build()
        self.setup_target()
        self.do_check_using_names()
    def test_configuring_names(self):
        """Use Python APIs to test that configuring options on breakpoint names works correctly."""
        self.build()
        # Note: the dummy name is created before the target exists --
        # presumably to exercise pre-target name configuration; see
        # do_check_configuring_names for the checks.
        self.make_a_dummy_name()
        self.setup_target()
        self.do_check_configuring_names()
    def test_configuring_permissions_sb(self):
        """Use Python APIs to test that configuring permissions on names works correctly."""
        self.build()
        self.setup_target()
        self.do_check_configuring_permissions_sb()
    def test_configuring_permissions_cli(self):
        """Use Python APIs to test that configuring permissions on names works correctly."""
        self.build()
        self.setup_target()
        self.do_check_configuring_permissions_cli()
    def setup_target(self):
        """Create the test target from the built binary and cache the
        file spec for main.c used when placing breakpoints."""
        exe = self.getBuildArtifact("a.out")
        # Create a targets we are making breakpoint in and copying to:
        self.target = self.dbg.CreateTarget(exe)
        self.assertTrue(self.target, VALID_TARGET)
        self.main_file_spec = lldb.SBFileSpec(os.path.join(self.getSourceDir(), "main.c"))
def check_name_in_target(self, bkpt_name):
name_list = lldb.SBStringList()
self.target.GetBreakpointNames(name_list)
found_it = False
for name in name_list:
if name == bkpt_name:
found_it = True
break
self.assertTrue(found_it, "Didn't find the name %s in the target's name list:"%(bkpt_name))
    def setUp(self):
        """Stash the option values that the configuring tests apply to
        names and breakpoints."""
        # Call super's setUp().
        TestBase.setUp(self)
        # These are the settings we're going to be putting into names & breakpoints:
        self.bp_name_string = "ABreakpoint"
        self.is_one_shot = True
        self.ignore_count = 1000
        self.condition = "1 == 2"
        self.auto_continue = True
        self.tid = 0xaaaa
        self.tidx = 10
        self.thread_name = "Fooey"
        self.queue_name = "Blooey"
        # Commands to attach to the breakpoint name under test.
        self.cmd_list = lldb.SBStringList()
        self.cmd_list.AppendString("frame var")
        self.cmd_list.AppendString("bt")
        self.help_string = "I do something interesting"
    def do_check_names(self):
        """Use Python APIs to check that we can set & retrieve breakpoint names"""
        bkpt = self.target.BreakpointCreateByLocation(self.main_file_spec, 10)
        bkpt_name = "ABreakpoint"
        other_bkpt_name = "_AnotherBreakpoint"
        # Add a name and make sure we match it:
        success = bkpt.AddName(bkpt_name)
        self.assertTrue(success, "We couldn't add a legal name to a breakpoint.")
        matches = bkpt.MatchesName(bkpt_name)
        self.assertTrue(matches, "We didn't match the name we just set")
        # Make sure we don't match irrelevant names:
        matches = bkpt.MatchesName("NotABreakpoint")
        self.assertTrue(not matches, "We matched a name we didn't set.")
        # Make sure the name is also in the target:
        self.check_name_in_target(bkpt_name)
        # Add another name, make sure that works too:
        bkpt.AddName(other_bkpt_name)
        # Adding a second name must not break matching of the first one.
        matches = bkpt.MatchesName(bkpt_name)
        self.assertTrue(matches, "Adding a name means we didn't match the name we just set")
        self.check_name_in_target(other_bkpt_name)
        # Remove the name and make sure we no longer match it:
        bkpt.RemoveName(bkpt_name)
        matches = bkpt.MatchesName(bkpt_name)
        self.assertTrue(not matches,"We still match a name after removing it.")
        # Make sure the name list has the remaining name:
        name_list = lldb.SBStringList()
        bkpt.GetNames(name_list)
        num_names = name_list.GetSize()
        self.assertTrue(num_names == 1, "Name list has %d items, expected 1."%(num_names))
        name = name_list.GetStringAtIndex(0)
        self.assertTrue(name == other_bkpt_name, "Remaining name was: %s expected %s."%(name, other_bkpt_name))
def do_check_illegal_names(self):
    """Use Python APIs to check that we reject illegal names."""
    bkpt = self.target.BreakpointCreateByLocation(self.main_file_spec, 10)
    # Names must start with a letter or underscore and contain no dashes or spaces.
    bad_names = ["-CantStartWithADash",
                 "1CantStartWithANumber",
                 "^CantStartWithNonAlpha",
                 "CantHave-ADash",
                 "Cant Have Spaces"]
    for bad_name in bad_names:
        # Adding the name directly to a breakpoint must fail:
        success = bkpt.AddName(bad_name)
        self.assertFalse(success, "We allowed an illegal name: %s" % (bad_name))
        # Constructing an SBBreakpointName from it must yield an invalid object:
        bp_name = lldb.SBBreakpointName(self.target, bad_name)
        self.assertFalse(bp_name.IsValid(), "We made a breakpoint name with an illegal name: %s" % (bad_name))
        # The command line must reject it as well:
        retval = lldb.SBCommandReturnObject()
        self.dbg.GetCommandInterpreter().HandleCommand("break set -n whatever -N '%s'" % (bad_name), retval)
        self.assertFalse(retval.Succeeded(), "break set succeeded with: illegal name: %s" % (bad_name))
def do_check_using_names(self):
    """Use Python APIs to check names work in place of breakpoint IDs."""
    bkpt = self.target.BreakpointCreateByLocation(self.main_file_spec, 10)
    bkpt_name = "ABreakpoint"
    other_bkpt_name = "_AnotherBreakpoint"
    # Add a name and make sure we can find the breakpoint by it:
    success = bkpt.AddName(bkpt_name)
    self.assertTrue(success, "We couldn't add a legal name to a breakpoint.")
    bkpts = lldb.SBBreakpointList(self.target)
    self.target.FindBreakpointsByName(bkpt_name, bkpts)
    self.assertEqual(bkpts.GetSize(), 1, "One breakpoint matched.")
    found_bkpt = bkpts.GetBreakpointAtIndex(0)
    self.assertEqual(bkpt.GetID(), found_bkpt.GetID(), "The right breakpoint.")
    # Names can stand in for breakpoint IDs on the command line:
    retval = lldb.SBCommandReturnObject()
    self.dbg.GetCommandInterpreter().HandleCommand("break disable %s" % (bkpt_name), retval)
    self.assertTrue(retval.Succeeded(), "break disable failed with: %s." % (retval.GetError()))
    self.assertFalse(bkpt.IsEnabled(), "We didn't disable the breakpoint.")
    # Also make sure we don't apply commands to non-matching names:
    self.dbg.GetCommandInterpreter().HandleCommand("break modify --one-shot 1 %s" % (other_bkpt_name), retval)
    self.assertTrue(retval.Succeeded(), "break modify failed with: %s." % (retval.GetError()))
    self.assertFalse(bkpt.IsOneShot(), "We applied one-shot to the wrong breakpoint.")
def check_option_values(self, bp_object):
    """Verify bp_object carries exactly the option values stored in setUp."""
    expectations = [
        (bp_object.IsOneShot(), self.is_one_shot, "IsOneShot"),
        (bp_object.GetIgnoreCount(), self.ignore_count, "IgnoreCount"),
        (bp_object.GetCondition(), self.condition, "Condition"),
        (bp_object.GetAutoContinue(), self.auto_continue, "AutoContinue"),
        (bp_object.GetThreadID(), self.tid, "Thread ID"),
        (bp_object.GetThreadIndex(), self.tidx, "Thread Index"),
        (bp_object.GetThreadName(), self.thread_name, "Thread Name"),
        (bp_object.GetQueueName(), self.queue_name, "Queue Name"),
    ]
    for actual, expected, label in expectations:
        self.assertEqual(actual, expected, label)
    # The attached command list must match, element by element:
    set_cmds = lldb.SBStringList()
    bp_object.GetCommandLineCommands(set_cmds)
    self.assertEqual(set_cmds.GetSize(), self.cmd_list.GetSize(),
                     "Size of command line commands")
    for i in range(set_cmds.GetSize()):
        self.assertEqual(self.cmd_list.GetStringAtIndex(i),
                         set_cmds.GetStringAtIndex(i), "Command %d" % (i))
def make_a_dummy_name(self):
    """Make a breakpoint name in the dummy target, configured with the options
    stored in setUp, so later tests can check it gets copied to real targets.
    """
    dummy_target = self.dbg.GetDummyTarget()
    self.assertTrue(dummy_target.IsValid(), "Dummy target was not valid.")
    def cleanup ():
        # Remove the injected name so later test cases start from a clean slate.
        self.dbg.GetDummyTarget().DeleteBreakpointName(self.bp_name_string)
    # Execute the cleanup function during test case tear down.
    self.addTearDownHook(cleanup)
    # Now find it in the dummy target, and make sure these settings took:
    bp_name = lldb.SBBreakpointName(dummy_target, self.bp_name_string)
    # Make sure the name is right:
    self.assertTrue (bp_name.GetName() == self.bp_name_string, "Wrong bp_name: %s"%(bp_name.GetName()))
    # Configure every option on the name; these settings should later be
    # inherited by any breakpoint that picks up this name.
    bp_name.SetOneShot(self.is_one_shot)
    bp_name.SetIgnoreCount(self.ignore_count)
    bp_name.SetCondition(self.condition)
    bp_name.SetAutoContinue(self.auto_continue)
    bp_name.SetThreadID(self.tid)
    bp_name.SetThreadIndex(self.tidx)
    bp_name.SetThreadName(self.thread_name)
    bp_name.SetQueueName(self.queue_name)
    bp_name.SetCommandLineCommands(self.cmd_list)
    # Now look it up again, and make sure it got set correctly.
    bp_name = lldb.SBBreakpointName(dummy_target, self.bp_name_string)
    self.assertTrue(bp_name.IsValid(), "Failed to make breakpoint name.")
    self.check_option_values(bp_name)
def do_check_configuring_names(self):
    """Use Python APIs to check that configuring breakpoint names works correctly."""
    other_bp_name_string = "AnotherBreakpointName"
    cl_bp_name_string = "CLBreakpointName"
    # Now find the version copied in from the dummy target, and make sure these settings took:
    bp_name = lldb.SBBreakpointName(self.target, self.bp_name_string)
    self.assertTrue(bp_name.IsValid(), "Failed to make breakpoint name.")
    self.check_option_values(bp_name)
    # Now add this name to a breakpoint, and make sure it gets configured properly
    bkpt = self.target.BreakpointCreateByLocation(self.main_file_spec, 10)
    success = bkpt.AddName(self.bp_name_string)
    self.assertTrue(success, "Couldn't add this name to the breakpoint")
    self.check_option_values(bkpt)
    # Now make a name from this breakpoint, and make sure the new name is properly configured:
    new_name = lldb.SBBreakpointName(bkpt, other_bp_name_string)
    self.assertTrue(new_name.IsValid(), "Couldn't make a valid bp_name from a breakpoint.")
    # BUGFIX: verify the newly created name, not the breakpoint (the breakpoint
    # was already checked just above).
    self.check_option_values(new_name)
    # Now change the name's option and make sure it gets propagated to
    # the breakpoint:
    new_auto_continue = not self.auto_continue
    bp_name.SetAutoContinue(new_auto_continue)
    self.assertEqual(bp_name.GetAutoContinue(), new_auto_continue, "Couldn't change auto-continue on the name")
    self.assertEqual(bkpt.GetAutoContinue(), new_auto_continue, "Option didn't propagate to the breakpoint.")
    # Now make this same breakpoint name - but from the command line
    cmd_str = "breakpoint name configure %s -o %d -i %d -c '%s' -G %d -t %d -x %d -T '%s' -q '%s' -H '%s'"%(cl_bp_name_string,
                                                                                                           self.is_one_shot,
                                                                                                           self.ignore_count,
                                                                                                           self.condition,
                                                                                                           self.auto_continue,
                                                                                                           self.tid,
                                                                                                           self.tidx,
                                                                                                           self.thread_name,
                                                                                                           self.queue_name,
                                                                                                           self.help_string)
    for cmd in self.cmd_list:
        cmd_str += " -C '%s'"%(cmd)
    self.runCmd(cmd_str, check=True)
    # Now look up this name again and check its options:
    cl_name = lldb.SBBreakpointName(self.target, cl_bp_name_string)
    self.check_option_values(cl_name)
    # Also check the help string:
    self.assertEqual(self.help_string, cl_name.GetHelpString(), "Help string didn't match")
    # Change the name and make sure that works:
    new_help = "I do something even more interesting"
    cl_name.SetHelpString(new_help)
    self.assertEqual(new_help, cl_name.GetHelpString(), "SetHelpString didn't")
    # We should have three names now, make sure the target can list them:
    name_list = lldb.SBStringList()
    self.target.GetBreakpointNames(name_list)
    for name_string in [self.bp_name_string, other_bp_name_string, cl_bp_name_string]:
        self.assertTrue(name_string in name_list, "Didn't find %s in names"%(name_string))
    # Delete the name from the current target.  Make sure that works and deletes the
    # name from the breakpoint as well:
    self.target.DeleteBreakpointName(self.bp_name_string)
    name_list.Clear()
    self.target.GetBreakpointNames(name_list)
    self.assertTrue(self.bp_name_string not in name_list, "Didn't delete %s from a real target"%(self.bp_name_string))
    # Also make sure the name got removed from breakpoints holding it:
    self.assertFalse(bkpt.MatchesName(self.bp_name_string), "Didn't remove the name from the breakpoint.")
    # Test that deleting the name we injected into the dummy target works (there's also a
    # cleanup that will do this, but that won't test the result...
    dummy_target = self.dbg.GetDummyTarget()
    dummy_target.DeleteBreakpointName(self.bp_name_string)
    name_list.Clear()
    dummy_target.GetBreakpointNames(name_list)
    self.assertTrue(self.bp_name_string not in name_list, "Didn't delete %s from the dummy target"%(self.bp_name_string))
    # Also make sure the name got removed from breakpoints holding it:
    self.assertFalse(bkpt.MatchesName(self.bp_name_string), "Didn't remove the name from the breakpoint.")
def check_permission_results(self, bp_name):
    """Verify that a name with delete/disable disallowed protects its breakpoints
    from bulk disable/delete, while unnamed breakpoints are still affected.
    """
    self.assertEqual(bp_name.GetAllowDelete(), False, "Didn't set allow delete.")
    protected_bkpt = self.target.BreakpointCreateByLocation(self.main_file_spec, 10)
    protected_id = protected_bkpt.GetID()
    unprotected_bkpt = self.target.BreakpointCreateByLocation(self.main_file_spec, 10)
    unprotected_id = unprotected_bkpt.GetID()
    success = protected_bkpt.AddName(self.bp_name_string)
    self.assertTrue(success, "Couldn't add this name to the breakpoint")
    # Bulk disable must skip the protected breakpoint only:
    self.target.DisableAllBreakpoints()
    self.assertEqual(protected_bkpt.IsEnabled(), True, "Didn't keep breakpoint from being disabled")
    self.assertEqual(unprotected_bkpt.IsEnabled(), False, "Protected too many breakpoints from disabling.")
    # Try from the command line too:
    unprotected_bkpt.SetEnabled(True)
    result = lldb.SBCommandReturnObject()
    self.dbg.GetCommandInterpreter().HandleCommand("break disable", result)
    self.assertTrue(result.Succeeded())
    self.assertEqual(protected_bkpt.IsEnabled(), True, "Didn't keep breakpoint from being disabled")
    self.assertEqual(unprotected_bkpt.IsEnabled(), False, "Protected too many breakpoints from disabling.")
    # Bulk delete must likewise skip the protected breakpoint only:
    self.target.DeleteAllBreakpoints()
    bkpt = self.target.FindBreakpointByID(protected_id)
    self.assertTrue(bkpt.IsValid(), "Didn't keep the breakpoint from being deleted.")
    bkpt = self.target.FindBreakpointByID(unprotected_id)
    self.assertFalse(bkpt.IsValid(), "Protected too many breakpoints from deletion.")
    # Remake the unprotected breakpoint and try again from the command line:
    unprotected_bkpt = self.target.BreakpointCreateByLocation(self.main_file_spec, 10)
    unprotected_id = unprotected_bkpt.GetID()
    self.dbg.GetCommandInterpreter().HandleCommand("break delete -f", result)
    self.assertTrue(result.Succeeded())
    bkpt = self.target.FindBreakpointByID(protected_id)
    self.assertTrue(bkpt.IsValid(), "Didn't keep the breakpoint from being deleted.")
    bkpt = self.target.FindBreakpointByID(unprotected_id)
    self.assertFalse(bkpt.IsValid(), "Protected too many breakpoints from deletion.")
def do_check_configuring_permissions_sb(self):
    """Configure name permissions through the SB API and verify they are honored."""
    # Make a breakpoint name with delete, disable and list disallowed.
    # (The original constructed the same SBBreakpointName twice; once is enough.)
    bp_name = lldb.SBBreakpointName(self.target, self.bp_name_string)
    self.assertTrue(bp_name.IsValid(), "Failed to make breakpoint name for valid name.")
    bp_name.SetAllowDelete(False)
    bp_name.SetAllowDisable(False)
    bp_name.SetAllowList(False)
    self.check_permission_results(bp_name)
def do_check_configuring_permissions_cli(self):
    """Configure name permissions via the command line and verify they are honored."""
    # Make the name with the right options using the command line:
    configure_cmd = "breakpoint name configure -L 0 -D 0 -A 0 %s"%(self.bp_name_string)
    self.runCmd(configure_cmd, check=True)
    # Now look up the breakpoint name we made, and check that it works.
    bp_name = lldb.SBBreakpointName(self.target, self.bp_name_string)
    self.assertTrue(bp_name.IsValid(), "Didn't make a breakpoint name we could find.")
    self.check_permission_results(bp_name)
|
|
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import datetime
import pytest
from dateutil import tz
from ..util import isclose
from datacube.api.query import Query, DescriptorQuery, _datetime_to_timestamp, query_group_by
from datacube.model import Range
def test_convert_descriptor_query_to_search_query():
    """A lat/lon/time descriptor maps onto Range-valued search terms."""
    dims = {
        'latitude': {'range': (-35.5, -36.5)},
        'longitude': {'range': (148.3, 149.9)},
        'time': {'range': (datetime.datetime(2001, 5, 7),
                           datetime.datetime(2002, 3, 9))},
    }
    search_query = DescriptorQuery({'dimensions': dims}).search_terms
    # Spatial ranges are normalised so begin <= end:
    lat_lo, lat_hi = sorted(dims['latitude']['range'])
    lon_lo, lon_hi = sorted(dims['longitude']['range'])
    assert search_query['lat'].begin == lat_lo
    assert search_query['lat'].end == lat_hi
    assert search_query['lon'].begin == lon_lo
    assert search_query['lon'].end == lon_hi
    # Naive datetimes come back as UTC-aware:
    assert search_query['time'].begin == datetime.datetime(2001, 5, 7, tzinfo=tz.tzutc())
    assert search_query['time'].end == datetime.datetime(2002, 3, 9, tzinfo=tz.tzutc())
def test_convert_descriptor_query_to_search_query_with_slices():
    """'array_range' entries become slice objects on the query."""
    descriptor_query = {
        'dimensions': {
            'latitude': {'range': (-35.5, -36.5), 'array_range': (100, 200)},
            'longitude': {'range': (148.3, 149.9), 'array_range': (100, 200)},
            'time': {'range': (datetime.datetime(2001, 5, 7),
                               datetime.datetime(2002, 3, 9)),
                     'array_range': (5, 10)},
        }
    }
    query = DescriptorQuery(descriptor_query)
    assert query.slices
    expected_slices = {'latitude': slice(100, 200),
                       'longitude': slice(100, 200),
                       'time': slice(5, 10)}
    for dim, expected in expected_slices.items():
        assert query.slices[dim] == expected
def test_convert_descriptor_query_to_search_query_with_groupby():
    """A 'group_by' entry on the time dimension yields a usable GroupBy spec."""
    time_spec = {
        'range': (datetime.datetime(2001, 5, 7), datetime.datetime(2002, 3, 9)),
        'group_by': 'solar_day',
    }
    query = DescriptorQuery({'dimensions': {'time': time_spec}})
    group_by = query.group_by
    assert group_by
    assert callable(group_by.group_by_func)
    assert group_by.dimension == 'time'
    assert group_by.units == 'seconds since 1970-01-01 00:00:00'
def test_convert_descriptor_query_to_search_query_with_crs_conversion():
    """Ranges given in EPSG:3577 are reprojected to lat/lon search terms."""
    descriptor_query = {
        'dimensions': {
            'latitude': {
                'range': (-3971790.0737348166, -4101004.3359463234),
                'crs': 'EPSG:3577',
            },
            'longitude': {
                'range': (1458629.8414059384, 1616407.8831088375),
                'crs': 'EPSG:3577',
            },
        }
    }
    expected_result = {
        'lat': Range(-36.6715565808, -35.3276413143),
        'lon': Range(148.145408153, 150.070966341),
    }
    search_query = DescriptorQuery(descriptor_query).search_terms
    # Reprojection is inexact, so compare element-wise with a tolerance:
    for dim in ('lat', 'lon'):
        assert all(map(isclose, search_query[dim], expected_result[dim]))
def test_convert_descriptor_query_to_search_query_with_single_value():
    """A scalar 'range' is widened to a small non-empty Range around the point."""
    descriptor_query = {
        'dimensions': {
            'latitude': {'range': -3971790.0737348166, 'crs': 'EPSG:3577'},
            'longitude': {'range': 1458629.8414059384, 'crs': 'EPSG:3577'},
        }
    }
    expected_points = {'lat': -35.5160921229, 'lon': 148.145408153}
    search_query = DescriptorQuery(descriptor_query).search_terms
    for dim, point in expected_points.items():
        dim_range = search_query[dim]
        # The reprojected point must fall inside the widened range...
        assert min(*dim_range) <= point <= max(*dim_range)
        # ...and the range must not be degenerate.
        assert dim_range.begin != dim_range.end
def test_descriptor_handles_bad_input():
    """Malformed descriptors must raise ValueError rather than mis-query."""
    mixed_crs_query = {
        'dimensions': {
            'latitude': {
                'range': -35,
                'crs': 'EPSG:4326',
            },
            'longitude': {
                'range': 1458629.8414059384,
                'crs': 'EPSG:3577',
            },
        }
    }
    # A non-dict, a list, and a descriptor mixing CRSes are all invalid:
    for bad_query in ("Not a descriptor", ["Not a descriptor"], mixed_crs_query):
        with pytest.raises(ValueError):
            DescriptorQuery(bad_query)
def test_datetime_to_timestamp():
    """All accepted time representations convert to the same Unix timestamp."""
    expected = 631670400
    representations = [
        (1990, 1, 7),                       # date tuple
        datetime.datetime(1990, 1, 7),      # datetime object
        631670400,                          # already a timestamp
        '1990-01-07T00:00:00.0Z',           # ISO-8601 string
    ]
    for value in representations:
        assert _datetime_to_timestamp(value) == expected
def test_query_kwargs():
    """Exercise Query construction from the various supported keyword forms."""
    from mock import MagicMock
    mock_index = MagicMock()
    # Field names the mocked index claims to support for searching.
    mock_index.datasets.get_field_names = lambda: [u'product', u'lat', u'sat_path', 'type_id', u'time', u'lon',
                                                   u'orbit', u'instrument', u'sat_row', u'platform', 'metadata_type',
                                                   u'gsi', 'type', 'id', ]
    query = Query(index=mock_index, product='ls5_nbar_albers')
    assert str(query)
    assert query.product == 'ls5_nbar_albers'
    assert query.search_terms['product'] == 'ls5_nbar_albers'
    # Lat/lon ranges:
    query = Query(index=mock_index, latitude=(-35, -36), longitude=(148, 149))
    assert query.geopolygon
    assert 'lat' in query.search_terms
    assert 'lon' in query.search_terms
    # Scalar lat/lon:
    query = Query(index=mock_index, latitude=-35, longitude=148)
    assert query.geopolygon
    assert 'lat' in query.search_terms
    assert 'lon' in query.search_terms
    # Projected x/y ranges with an explicit CRS:
    query = Query(index=mock_index, y=(-4174726, -4180011), x=(1515184, 1523263), crs='EPSG:3577')
    assert query.geopolygon
    assert 'lat' in query.search_terms
    assert 'lon' in query.search_terms
    # Scalar projected coordinates. (The original repeated this stanza twice
    # verbatim — an apparent copy-paste slip; one check is sufficient.)
    query = Query(index=mock_index, y=-4174726, x=1515184, crs='EPSG:3577')
    assert query.geopolygon
    assert 'lat' in query.search_terms
    assert 'lon' in query.search_terms
    # Time queries land in the raw 'search' dict (not search_terms):
    query = Query(index=mock_index, time='2001')
    assert 'time' in query.search
    query = Query(index=mock_index, time=('2001', '2002'))
    assert 'time' in query.search
    # Conflicting CRS specifications must be rejected:
    with pytest.raises(ValueError):
        Query(index=mock_index,
              y=-4174726, coordinate_reference_system='WGS84',
              x=1515184, crs='EPSG:3577')
    # Unknown keywords and unknown group-by names must be rejected:
    with pytest.raises(LookupError):
        Query(index=mock_index, y=-4174726, x=1515184, crs='EPSG:3577', made_up_key='NotReal')
    with pytest.raises(LookupError):
        query_group_by(group_by='magic')
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to configure behavior, define $CQL_TEST_HOST to the destination address
# for Thrift connections, and $CQL_TEST_PORT to the associated port.
from __future__ import with_statement
import re
from itertools import izip
from .basecase import (BaseTestCase, cqlshlog, dedent, at_a_time, cql,
TEST_HOST, TEST_PORT)
from .cassconnect import (get_test_keyspace, testrun_cqlsh, testcall_cqlsh,
cassandra_cursor, split_cql_commands, quote_name)
from .ansi_colors import (ColoredText, lookup_colorcode, lookup_colorname,
lookup_colorletter, ansi_seq)
CONTROL_C = '\x03'
CONTROL_D = '\x04'
class TestCqlshOutput(BaseTestCase):
def setUp(self):
    # No per-test fixture needed; the schema and data are created once by
    # the suite harness (see cassconnect), not per test.
    pass
def tearDown(self):
    # Nothing to clean up; each test manages its own cqlsh session via
    # context managers.
    pass
def assertNoHasColors(self, text, msg=None):
    """Assert that text contains no ANSI CSI (color) escape sequences.

    Fixes a defect where the msg parameter was accepted but silently
    ignored; a caller-supplied message now takes precedence over the
    default one.
    """
    if msg is None:
        msg = 'ANSI CSI sequence found in %r' % text
    self.assertNotRegexpMatches(text, ansi_seq, msg=msg)
def assertHasColors(self, text, msg=None):
    """Assert that text contains at least one ANSI CSI (color) escape sequence."""
    self.assertRegexpMatches(text, ansi_seq, msg=msg)
def assertColored(self, coloredtext, colorname):
    """Assert every non-whitespace character of coloredtext is colored colorname."""
    wanted_colorcode = lookup_colorcode(colorname)
    for num, c in enumerate(coloredtext):
        # Whitespace carries no color information; skip it.
        if not c.isspace():
            ccolor = c.colorcode()
            self.assertEqual(ccolor, wanted_colorcode,
                             msg='Output text %r (char #%d) is colored %s, not %s'
                                 % (coloredtext, num, lookup_colorname(ccolor), colorname))
def assertColorFromTags(self, coloredtext, tags):
    """Assert each character of coloredtext has the color named by the
    corresponding tag letter. Whitespace in the output is skipped; whitespace
    in the tag string means "neutral" (uncolored).
    """
    # izip stops at the shorter sequence, so trailing extra tags are ignored.
    for (char, tag) in izip(coloredtext, tags):
        if char.isspace():
            continue
        if tag.isspace():
            tag = 'n'  # neutral
        self.assertEqual(char.colorcode(), lookup_colorletter(tag),
                         msg='Coloring mismatch.\nExpected coloring: %s\n'
                             'Actually got: %s\ncolor code: %s'
                             % (tags, coloredtext.colored_version(), coloredtext.colortags()))
def assertCqlverQueriesGiveColoredOutput(self, queries_and_expected_outputs,
                                         cqlver=(), **kwargs):
    """Run the colored-output check once for each requested CQL version."""
    versions = cqlver if isinstance(cqlver, (tuple, list)) else (cqlver,)
    for version in versions:
        self.assertQueriesGiveColoredOutput(queries_and_expected_outputs,
                                            cqlver=version, **kwargs)
def assertQueriesGiveColoredOutput(self, queries_and_expected_outputs, **kwargs):
    """
    Allow queries and expected output to be specified in structured tuples,
    along with expected color information.
    """
    with testrun_cqlsh(tty=True, **kwargs) as c:
        for query, expected in queries_and_expected_outputs:
            cqlshlog.debug('Testing %r' % (query,))
            output = c.cmd_and_response(query).lstrip("\r\n")
            c_output = ColoredText(output)
            # Expected text is given as alternating (plain-line, color-tag-line) pairs.
            pairs = at_a_time(dedent(expected).split('\n'), 2)
            outlines = c_output.splitlines()
            for (plain, colorcodes), outputline in zip(pairs, outlines):
                # Compare visible text (sans trailing whitespace), then colors.
                self.assertEqual(outputline.plain().rstrip(), plain)
                self.assertColorFromTags(outputline, colorcodes)
def test_no_color_output(self):
    """With a dumb/colorless terminal, cqlsh must emit no ANSI sequences."""
    statements = ('select * from has_all_types;\n',
                  'select * from has_value_encoding_errors;\n',
                  'select count(*) from has_all_types;\n',
                  'totally invalid cql;\n')
    for termname in ('', 'dumb', 'vt100'):
        cqlshlog.debug('TERM=%r' % termname)
        with testrun_cqlsh(tty=True, env={'TERM': termname}) as c:
            for stmt in statements:
                c.send(stmt)
                self.assertNoHasColors(c.read_to_next_prompt())
def test_no_prompt_or_colors_output(self):
    """Non-tty (piped) runs must print neither prompts nor color escapes."""
    # CQL queries and number of lines expected in output:
    queries = (('select * from has_all_types limit 1;', 7),
               ('select * from has_value_encoding_errors limit 1;', 8))
    for termname in ('', 'dumb', 'vt100', 'xterm'):
        cqlshlog.debug('TERM=%r' % termname)
        for cql, lines_expected in queries:
            output, result = testcall_cqlsh(prompt=None, env={'TERM': termname},
                                            tty=False, input=cql + '\n')
            output = output.splitlines()
            for line in output:
                self.assertNoHasColors(line)
                # No "cqlsh>"-style prompt may leak into piped output.
                self.assertNotRegexpMatches(line, r'^cqlsh\S*>')
            self.assertEqual(len(output), lines_expected,
                             msg='output: %r' % '\n'.join(output))
            # Table layout: blank line, header, rule, data, blank line.
            self.assertEqual(output[0], '')
            self.assertNicelyFormattedTableHeader(output[1])
            self.assertNicelyFormattedTableRule(output[2])
            self.assertNicelyFormattedTableData(output[3])
            self.assertEqual(output[4].strip(), '')
def test_color_output(self):
    """With a color-capable (or unknown) terminal, output must contain colors."""
    statements = ('select * from has_all_types;\n',
                  'select * from has_value_encoding_errors;\n',
                  'select count(*) from has_all_types;\n',
                  'totally invalid cql;\n')
    for termname in ('xterm', 'unknown-garbage'):
        cqlshlog.debug('TERM=%r' % termname)
        with testrun_cqlsh(tty=True, env={'TERM': termname}) as c:
            for stmt in statements:
                c.send(stmt)
                self.assertHasColors(c.read_to_next_prompt())
def test_count_output(self):
    """count(*) results: magenta column name, green count value."""
    self.assertCqlverQueriesGiveColoredOutput((
        ('select count(*) from has_all_types;', """
 count
 MMMMM
-------
 5
 G
(1 rows)
nnnnnnnn
"""),
        ('select COUNT(*) FROM empty_table;', """
 count
 MMMMM
-------
 0
 G
(1 rows)
nnnnnnnn
"""),
        ('select COUNT(*) FROM empty_composite_table;', """
 count
 MMMMM
-------
 0
 G
(1 rows)
nnnnnnnn
"""),
        ('select COUNT(*) FROM twenty_rows_table limit 10;', """
 count
 MMMMM
-------
 10
 GG
(1 rows)
nnnnnnnn
"""),
        ('select COUNT(*) FROM twenty_rows_table limit 1000000;', """
 count
 MMMMM
-------
 20
 GG
(1 rows)
nnnnnnnn
"""),
    ), cqlver=3)
    q = 'select COUNT(*) FROM twenty_rows_composite_table limit 1000000;'
    self.assertQueriesGiveColoredOutput((
        (q, """
 count
 MMMMM
-------
 20
 GG
(1 rows)
nnnnnnnn
"""),
    ), cqlver=3)
def test_static_cf_output(self):
    """Static and dynamic column families render with per-type coloring."""
    self.assertCqlverQueriesGiveColoredOutput((
        ("select a, b from twenty_rows_table where a in ('1', '13', '2');", """
 a | b
 MM MM
----+----
 1 | 1
 YY YY
 13 | 13
 YY YY
 2 | 2
 YY YY
(3 rows)
nnnnnnnn
"""),
    ), cqlver=3)
    self.assertQueriesGiveColoredOutput((
        ('select * from dynamic_columns;', """
 somekey | column1 | value
 MMMMMMM MMMMMMM MMMMM
---------+---------+-------------------------
 1 | 1.2 | one point two
 GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
 2 | 2.3 | two point three
 GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
 3 | -0.0001 | negative ten thousandth
 GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
 3 | 3.46 | three point four six
 GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
 3 | 99 | ninety-nine point oh
 GG GGGGGGG YYYYYYYYYYYYYYYYYYYYYYY
(5 rows)
nnnnnnnn
"""),
    ), cqlver=3)
def test_empty_cf_output(self):
    """Empty result sets print only the row count (no color tags to check)."""
    self.assertCqlverQueriesGiveColoredOutput((
        ('select * from empty_table;', """
(0 rows)
"""),
    ), cqlver=3)
    q = 'select * from has_all_types where num = 999;'
    # same query should show up as empty in cql 3
    self.assertQueriesGiveColoredOutput((
        (q, """
(0 rows)
"""),
    ), cqlver=3)
def test_columnless_key_output(self):
    """Selecting only the key column still renders as a colored table."""
    q = "select a from twenty_rows_table where a in ('1', '2', '-9192');"
    self.assertQueriesGiveColoredOutput((
        (q, """
 a
 M
---
 1
 Y
 2
 Y
(2 rows)
nnnnnnnn
"""),
    ), cqlver=3)
def test_numeric_output(self):
    """Numeric types (int, bigint, varint, decimal, double, float) render green;
    null cells render neutral."""
    self.assertCqlverQueriesGiveColoredOutput((
        ('''select intcol, bigintcol, varintcol \
from has_all_types \
where num in (0, 1, 2, 3, 4);''', """
 intcol | bigintcol | varintcol
 MMMMMM MMMMMMMMM MMMMMMMMM
-------------+----------------------+-----------------------------
 -12 | 1234567890123456789 | 10000000000000000000000000
 GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
 2147483647 | 9223372036854775807 | 9
 GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
 0 | 0 | 0
 GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
 -2147483648 | -9223372036854775808 | -10000000000000000000000000
 GGGGGGGGGGG GGGGGGGGGGGGGGGGGGGG GGGGGGGGGGGGGGGGGGGGGGGGGGG
 | |
 nnnnnnnnnnn nnnnnnnnnnnnnnnnnnnn nnnnnnnnnnnnnnnnnnnnnnnnnnn
(5 rows)
nnnnnnnn
"""),
        ('''select decimalcol, doublecol, floatcol \
from has_all_types \
where num in (0, 1, 2, 3, 4);''', """
 decimalcol | doublecol | floatcol
 MMMMMMMMMM MMMMMMMMM MMMMMMMM
------------------+-----------+----------
 19952.11882 | 1 | -2.1
 GGGGGGGGGGGGGGGG GGGGGGG GGGGG
 1E-14 | 1e+07 | 1e+05
 GGGGGGGGGGGGGGGG GGGGGGG GGGGG
 0.0 | 0 | 0
 GGGGGGGGGGGGGGGG GGGGGGG GGGGG
 10.0000000000000 | -1004.1 | 1e+08
 GGGGGGGGGGGGGGGG GGGGGGG GGGGG
 | |
 nnnnnnnnnnnnnnnn nnnnnnn nnnnn
(5 rows)
nnnnnnnn
"""),
    ), cqlver=3)
def test_timestamp_output(self):
    """Timestamps render green and honor the TZ environment variable."""
    self.assertQueriesGiveColoredOutput((
        ('''select timestampcol from has_all_types where num = 0;''', """
 timestampcol
 MMMMMMMMMMMM
--------------------------
 2012-05-14 12:53:20+0000
 GGGGGGGGGGGGGGGGGGGGGGGG
(1 rows)
nnnnnnnn
"""),
    ), env={'TZ': 'Etc/UTC'})
    # Same row, but shown in EST (UTC-5):
    self.assertQueriesGiveColoredOutput((
        ('''select timestampcol from has_all_types where num = 0;''', """
 timestampcol
 MMMMMMMMMMMM
--------------------------
 2012-05-14 07:53:20-0500
 GGGGGGGGGGGGGGGGGGGGGGGG
(1 rows)
nnnnnnnn
"""),
    ), env={'TZ': 'EST'})
def test_boolean_output(self):
    """Boolean values (True/False) render green."""
    self.assertCqlverQueriesGiveColoredOutput((
        ('select num, booleancol from has_all_types where num in (0, 1, 2, 3);', """
 num | booleancol
 MMM MMMMMMMMMM
-----+------------
 0 | True
 G GGGGG
 1 | True
 G GGGGG
 2 | False
 G GGGGG
 3 | False
 G GGGGG
(4 rows)
nnnnnnnn
"""),
    ), cqlver=3)
def test_null_output(self):
    """null placeholders render red, both for named and wildcard selects."""
    # column with metainfo but no values
    self.assertCqlverQueriesGiveColoredOutput((
        ("select k, c, notthere from undefined_values_table where k in ('k1', 'k2');", """
 k | c | notthere
 M M MMMMMMMM
----+----+----------
 k1 | c1 | null
 YY YY RRRR
 k2 | c2 | null
 YY YY RRRR
(2 rows)
nnnnnnnn
"""),
    ), cqlver=3)
    # all-columns, including a metainfo column has no values (cql3)
    self.assertQueriesGiveColoredOutput((
        ("select * from undefined_values_table where k in ('k1', 'k2');", """
 k | c | notthere
 M M MMMMMMMM
----+----+----------
 k1 | c1 | null
 YY YY RRRR
 k2 | c2 | null
 YY YY RRRR
(2 rows)
nnnnnnnn
"""),
    ), cqlver=3)
def test_string_output_ascii(self):
    """ASCII values: printable text yellow, escaped control chars magenta."""
    self.assertCqlverQueriesGiveColoredOutput((
        ("select * from ascii_with_invalid_and_special_chars where k in (0, 1, 2, 3, 4);", r"""
 k | val
 M MMM
---+-----------------------------------------------
 0 | newline:\n
 G YYYYYYYYmm
 1 | return\rand null\x00!
 G YYYYYYmmYYYYYYYYmmmmY
 2 | \x00\x01\x02\x03\x04\x05control chars\x06\x07
 G mmmmmmmmmmmmmmmmmmmmmmmmYYYYYYYYYYYYYmmmmmmmm
 3 | \xfe\xffbyte order mark
 G mmmmmmmmYYYYYYYYYYYYYYY
 4 | fake special chars\x00\n
 G YYYYYYYYYYYYYYYYYYYYYYYY
(5 rows)
nnnnnnnn
"""),
    ), cqlver=3)
def test_string_output_utf8(self):
    """UTF-8 values render correctly under a UTF-8 locale."""
    # many of these won't line up visually here, to keep the source code
    # here ascii-only.  note that some of the special Unicode characters
    # here will render as double-width or zero-width in unicode-aware
    # terminals, but the color-checking machinery here will still treat
    # it as one character, so those won't seem to line up visually either.
    self.assertCqlverQueriesGiveColoredOutput((
        ("select * from utf8_with_special_chars where k in (0, 1, 2, 3, 4, 5, 6);", u"""
 k | val
 M MMM
---+-------------------------------
 0 | Normal string
 G YYYYYYYYYYYYY
 1 | Text with\\nnewlines\\n
 G YYYYYYYYYmmYYYYYYYYmm
 2 | Text with embedded \\x01 char
 G YYYYYYYYYYYYYYYYYYYmmmmYYYYY
 3 | \u24c8\u24c5\u24ba\u24b8\u24be\u24b6\u24c1\u2008\u249e\u24a3\u249c\u24ad\u24ae and normal ones
 G YYYYYYYYYYYYYYYYYYYYYYYYYYYYY
 4 | double wides: \u2f91\u2fa4\u2f9a
 G YYYYYYYYYYYYYYYYY
 5 | zero width\u200bspace
 G YYYYYYYYYYYYYYYY
 6 | fake special chars\\x00\\n
 G YYYYYYYYYYYYYYYYYYYYYYYY
(7 rows)
nnnnnnnn
""".encode('utf-8')),
    ), cqlver=3, env={'LANG': 'en_US.UTF-8'})
def test_blob_output(self):
    """Blob values render as 0x-hex, colored magenta."""
    self.assertCqlverQueriesGiveColoredOutput((
        ("select num, blobcol from has_all_types where num in (0, 1, 2, 3);", r"""
 num | blobcol
 MMM MMMMMMM
-----+----------------------
 0 | 0x000102030405fffefd
 G mmmmmmmmmmmmmmmmmmmm
 1 | 0xffffffffffffffffff
 G mmmmmmmmmmmmmmmmmmmm
 2 | 0x
 G mmmmmmmmmmmmmmmmmmmm
 3 | 0x80
 G mmmmmmmmmmmmmmmmmmmm
(4 rows)
nnnnnnnn
"""),
    ), cqlver=3)
def test_colname_decoding_errors(self):
    """Placeholder: coloring of undecodable column *names* is untestable for now."""
    # not clear how to achieve this situation in the first place. the
    # validator works pretty well, and we can't change the comparator
    # after insertion.
    #
    # guess we could monkey-patch cqlsh or python-cql source to
    # explicitly generate an exception on the deserialization of type X..
    pass
def test_colval_decoding_errors(self):
    """Undecodable column *values* render red, with a red error line below."""
    self.assertCqlverQueriesGiveColoredOutput((
        ("select * from has_value_encoding_errors;", r"""
 pkey | utf8col
 MMMM MMMMMMM
------+--------------------
 A | '\x00\xff\x00\xff'
 Y RRRRRRRRRRRRRRRRRR
(1 rows)
nnnnnnnn
Failed to decode value '\x00\xff\x00\xff' (for column 'utf8col') as text: 'utf8' codec can't decode byte 0xff in position 1: invalid start byte
RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR
"""),
    ), cqlver=3)
def test_key_decoding_errors(self):
    """Undecodable *key* values render red, with a red error line below."""
    self.assertCqlverQueriesGiveColoredOutput((
        ("select * from has_key_encoding_errors;", r"""
 pkey | col
 MMMM MMM
--------------------+----------
 '\x00\xff\x02\x8f' | whatever
 RRRRRRRRRRRRRRRRRR YYYYYYYY
(1 rows)
nnnnnnnn
Failed to decode value '\x00\xff\x02\x8f' (for column 'pkey') as text: 'utf8' codec can't decode byte 0xff in position 1: invalid start byte
RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR
"""),
    ), cqlver=3)
def test_prompt(self):
    """Check prompt rendering: bare prompt, per-keyspace prompt, and a failed USE."""
    with testrun_cqlsh(tty=True, keyspace=None, cqlver=3) as c:
        self.assertEqual(c.output_header.splitlines()[-1], 'cqlsh> ')
        # An empty line just re-displays the prompt.
        c.send('\n')
        output = c.read_to_next_prompt().replace('\r\n', '\n')
        self.assertEqual(output, '\ncqlsh> ')
        # USE-ing a keyspace puts its name in the prompt:
        cmd = "USE \"%s\";\n" % get_test_keyspace().replace('"', '""')
        c.send(cmd)
        output = c.read_to_next_prompt().replace('\r\n', '\n')
        self.assertEqual(output, '%scqlsh:%s> ' % (cmd, get_test_keyspace()))
        c.send('use system;\n')
        output = c.read_to_next_prompt().replace('\r\n', '\n')
        self.assertEqual(output, 'use system;\ncqlsh:system> ')
        # A failed USE prints a red error line and keeps the old prompt:
        c.send('use NONEXISTENTKEYSPACE;\n')
        outputlines = c.read_to_next_prompt().splitlines()
        self.assertEqual(outputlines[0], 'use NONEXISTENTKEYSPACE;')
        self.assertEqual(outputlines[2], 'cqlsh:system> ')
        midline = ColoredText(outputlines[1])
        self.assertEqual(midline.plain(),
                         "Bad Request: Keyspace 'nonexistentkeyspace' does not exist")
        self.assertColorFromTags(midline,
                                 "RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR")
    def test_describe_keyspace_output(self):
        # DESCRIBE KEYSPACE (and the `desc` abbreviation) must work with and
        # without an explicit keyspace name and trailing semicolon, and its
        # output must be valid, executable CQL.
        fullcqlver = '3.1.0'
        with testrun_cqlsh(tty=True, cqlver=fullcqlver) as c:
            ks = get_test_keyspace()
            qks = quote_name(fullcqlver, ks)
            for cmd in ('describe keyspace', 'desc keyspace'):
                for givename in ('system', '', qks):
                    for semicolon in ('', ';'):
                        fullcmd = cmd + (' ' if givename else '') + givename + semicolon
                        desc = c.cmd_and_response(fullcmd)
                        self.check_describe_keyspace_output(desc, givename or qks, fullcqlver)

            # try to actually execute that last keyspace description, with a
            # new keyspace name
            # (NOTE: `desc` is deliberately the leaked loop variable from
            # above -- the last iteration described the test keyspace `qks`.)
            new_ks_name = 'COPY_OF_' + ks
            copy_desc = desc.replace(ks, new_ks_name)
            statements = split_cql_commands(copy_desc, cqlver=fullcqlver)
            do_drop = True

            with cassandra_cursor(cql_version=fullcqlver) as curs:
                try:
                    for stmt in statements:
                        cqlshlog.debug('TEST EXEC: %s' % stmt)
                        curs.execute(stmt)
                finally:
                    # clean up the copied keyspace even if execution failed
                    curs.execute('use system')
                    if do_drop:
                        curs.execute('drop keyspace %s' % quote_name(fullcqlver, new_ks_name))
def check_describe_keyspace_output(self, output, qksname, fullcqlver):
expected_bits = [r'(?im)^CREATE KEYSPACE %s WITH\b' % re.escape(qksname),
r'(?im)^USE \S+;$',
r';\s*$',
r'\breplication = {\n \'class\':']
for expr in expected_bits:
self.assertRegexpMatches(output, expr)
    def test_describe_columnfamily_output(self):
        # we can change these to regular expressions if/when it makes sense
        # to do so; these will likely be subject to lots of adjustments.

        # note columns are now comparator-ordered instead of original-order.
        table_desc3 = dedent("""
            CREATE TABLE has_all_types (
              num int,
              asciicol ascii,
              bigintcol bigint,
              blobcol blob,
              booleancol boolean,
              decimalcol decimal,
              doublecol double,
              floatcol float,
              intcol int,
              textcol text,
              timestampcol timestamp,
              uuidcol uuid,
              varcharcol text,
              varintcol varint,
              PRIMARY KEY (num)
            ) WITH
              bloom_filter_fp_chance=0.010000 AND
              caching='KEYS_ONLY' AND
              comment='' AND
              dclocal_read_repair_chance=0.000000 AND
              gc_grace_seconds=864000 AND
              min_index_interval=128 AND
              max_index_interval=2048 AND
              read_repair_chance=0.100000 AND
              populate_io_cache_on_flush='false' AND
              default_time_to_live=0 AND
              speculative_retry='NONE' AND
              memtable_flush_period_in_ms=0 AND
              compaction={'class': 'SizeTieredCompactionStrategy'} AND
              compression={'sstable_compression': 'LZ4Compressor'};
            """)

        # DESCRIBE TABLE / `desc columnfamily` must emit exactly this CQL,
        # uncolored, with or without a trailing semicolon.
        with testrun_cqlsh(tty=True, cqlver='3.0.0') as c:
            for cmdword in ('describe table', 'desc columnfamily'):
                for semicolon in (';', ''):
                    output = c.cmd_and_response('%s has_all_types%s' % (cmdword, semicolon))
                    self.assertNoHasColors(output)
                    self.assertEqual(output, table_desc3)
    def test_describe_columnfamilies_output(self):
        # DESCRIBE COLUMNFAMILIES / `desc tables` lists tables grouped under
        # per-keyspace headers when no keyspace is active, and a flat list
        # (no header) for the current keyspace once one has been USEd.
        output_re = r'''
            \n
            Keyspace [ ] (?P<ksname> \S+ ) \n
            -----------* \n
            (?P<cfnames> .*? )
            \n
        '''
        ks = get_test_keyspace()
        with testrun_cqlsh(tty=True, keyspace=None, cqlver=3) as c:
            # when not in a keyspace
            for cmdword in ('DESCRIBE COLUMNFAMILIES', 'desc tables'):
                for semicolon in (';', ''):
                    ksnames = []
                    output = c.cmd_and_response(cmdword + semicolon)
                    self.assertNoHasColors(output)
                    # the whole output must be a sequence of keyspace sections
                    self.assertRegexpMatches(output, '(?xs) ^ ( %s )+ $' % output_re)
                    for section in re.finditer('(?xs)' + output_re, output):
                        ksname = section.group('ksname')
                        ksnames.append(ksname)
                        cfnames = section.group('cfnames')
                        # no blank lines inside a section's table list
                        self.assertNotIn('\n\n', cfnames)
                        if ksname == ks:
                            self.assertIn('ascii_with_invalid_and_special_chars', cfnames)
                    self.assertIn('system', ksnames)
                    self.assertIn(quote_name('3.0.0', ks), ksnames)

            # when in a keyspace
            c.send('USE %s;\n' % quote_name('3.0.0', ks))
            c.read_to_next_prompt()
            for cmdword in ('DESCRIBE COLUMNFAMILIES', 'desc tables'):
                for semicolon in (';', ''):
                    output = c.cmd_and_response(cmdword + semicolon)
                    self.assertNoHasColors(output)
                    self.assertEqual(output[0], '\n')
                    self.assertEqual(output[-1], '\n')
                    # no "Keyspace <ks>" header inside the current keyspace
                    self.assertNotIn('Keyspace %s' % quote_name('3.0.0', ks), output)
                    self.assertIn('has_value_encoding_errors', output)
                    self.assertIn('undefined_values_table', output)
    def test_describe_cluster_output(self):
        # DESCRIBE CLUSTER shows cluster name / partitioner / snitch, and
        # additionally range-ownership info once a keyspace is in use.
        output_re = r'''(?x)
            ^
            \n
            Cluster: [ ] (?P<clustername> .* ) \n
            Partitioner: [ ] (?P<partitionername> .* ) \n
            Snitch: [ ] (?P<snitchname> .* ) \n
            \n
        '''
        ringinfo_re = r'''
            Range[ ]ownership: \n
            (
              [ ] .*? [ ][ ] \[ ( \d+ \. ){3} \d+ \] \n
            )+
            \n
        '''
        with testrun_cqlsh(tty=True, keyspace=None, cqlver=3) as c:
            # not in a keyspace: header only, no ring info
            for semicolon in ('', ';'):
                output = c.cmd_and_response('describe cluster' + semicolon)
                self.assertNoHasColors(output)
                self.assertRegexpMatches(output, output_re + '$')

            c.send('USE %s;\n' % quote_name('3.0.0', get_test_keyspace()))
            c.read_to_next_prompt()
            # in a keyspace: header followed by the ring ownership listing
            for semicolon in ('', ';'):
                output = c.cmd_and_response('describe cluster' + semicolon)
                self.assertNoHasColors(output)
                self.assertRegexpMatches(output, output_re + ringinfo_re + '$')
    def test_describe_schema_output(self):
        # `desc full schema` must dump every keyspace (system included) as
        # plain, uncolored CQL terminated by a semicolon.
        with testrun_cqlsh(tty=True) as c:
            for semicolon in ('', ';'):
                output = c.cmd_and_response('desc full schema' + semicolon)
                self.assertNoHasColors(output)
                self.assertRegexpMatches(output, '^\nCREATE KEYSPACE')
                self.assertIn("\nCREATE KEYSPACE system WITH replication = {\n 'class': 'LocalStrategy'\n};\n",
                              output)
                self.assertRegexpMatches(output, ';\s*$')
    def test_show_output(self):
        # SHOW VERSION prints the uncolored version banner; SHOW HOST prints
        # a colored connection summary with the host and port.
        with testrun_cqlsh(tty=True) as c:
            output = c.cmd_and_response('show version;')
            self.assertRegexpMatches(output,
                    '^\[cqlsh \S+ \| Cassandra \S+ \| CQL spec \S+ \| Thrift protocol \S+\]$')
            output = c.cmd_and_response('show host;')
            self.assertHasColors(output)
            self.assertRegexpMatches(output, '^Connected to .* at %s:%d\.$'
                                             % (re.escape(TEST_HOST), TEST_PORT))
def test_eof_prints_newline(self):
with testrun_cqlsh(tty=True) as c:
c.send(CONTROL_D)
out = c.read_lines(1)[0].replace('\r', '')
self.assertEqual(out, '\n')
with self.assertRaises(BaseException) as cm:
c.read_lines(1)
self.assertIn(type(cm.exception), (EOFError, OSError))
def test_exit_prints_no_newline(self):
for semicolon in ('', ';'):
with testrun_cqlsh(tty=True) as c:
cmd = 'exit%s\n' % semicolon
c.send(cmd)
out = c.read_lines(1)[0].replace('\r', '')
self.assertEqual(out, cmd)
with self.assertRaises(BaseException) as cm:
c.read_lines(1)
self.assertIn(type(cm.exception), (EOFError, OSError))
    def test_help_types(self):
        # NOTE(review): this only verifies that `help types` does not crash
        # the shell -- the captured output is never asserted on.  Consider at
        # least checking it is non-empty.
        with testrun_cqlsh(tty=True) as c:
            output = c.cmd_and_response('help types')
    # Placeholder tests: the scenarios below are currently unautomated.
    # Each should eventually drive cqlsh through the named interaction and
    # assert on its output.
    def test_help(self):
        pass

    def test_printing_parse_error(self):
        pass

    def test_printing_lex_error(self):
        pass

    def test_multiline_statements(self):
        pass

    def test_cancel_statement(self):
        pass

    def test_printing_integrity_error(self):
        pass

    def test_printing_cql_error(self):
        pass

    def test_empty_line(self):
        pass
|
|
import os, sys
from datetime import datetime, timedelta
from time import sleep
from timeit import default_timer as timer
from story import Story
from utils import ERROR, SUCCESS, WARNING, CAPTURE_WAIT
from utils import DateIterator, clear_screen, ffi_channel, fmt, fmt_text, force_input, get_lang
def build_paths(session, date_start, date_end):
    """Collect the story file paths between two dates, plus the session key.

    Raises AssertionError when no stories exist in the range; callers treat
    that as "nothing to search".
    """
    candidates = [Story(session, day).get_path()
                  for _i, day in DateIterator(date_start, date_end, 'Building the path list... %s')]
    # drop days that have no story file
    path_list = [path for path in candidates if path]
    assert path_list
    # the decryption key travels at the end of the list (FFI protocol)
    path_list.append(session.key)
    return path_list
def rusty_search(session, date_start, date_end, word):      # FFI for giving the searching job to Rust
    """Search the stories via the Rust library (fast path).

    Returns (occurrences, timing) where occurrences is a list of
    (day_offset, count, indices) tuples, mirroring py_search().
    """
    occurrences = []
    list_to_send = build_paths(session, date_start, date_end)
    # the word to search for travels last, after the paths and the key
    list_to_send.append(word)
    count_string, timing = ffi_channel(list_to_send, mode = 1)
    print 'Parsing the data stream from Rust...'
    for i, string in enumerate(count_string.split(' ')):    # spaces in the data stream represent individual files
        idx = map(int, string.split(':'))   # ... and colons represent the indices where the word has occurred
        # Rust fills the indices of the file paths with the number of occurrences
        # So, "i" indicates the Nth day from the birthday
        if idx[0] > 0:
            occurrences.append((i, len(idx), idx))
    return occurrences, timing
# NOTE: Exhaustive process (that's why I've written a Rust library for this!)
# The library accelerates the searching time by ~100 times!
def py_search(session, date_start, date_end, word):
occurrences, errors, no_stories, = [], 0, 0
start = timer()
date_iter = DateIterator(date_start, date_end)
for i, day in date_iter:
occurred, story = [], Story(session, day)
try:
if not story.get_path():
no_stories += 1
continue
data = story.decrypt() # AssertionError (if any) is caught here
idx, jump, data_len = 0, len(word), len(data)
# probably an inefficient way to find the word indices
while idx < data_len:
idx = data.find(word, idx)
if idx == -1: break
occurred.append(idx)
idx += jump
except AssertionError:
errors += 1
if errors > 10:
print ERROR, "More than 10 files couldn't be decrypted! Terminating the search..."
return [], (timer() - start)
if occurred and occurred[0] > 0: # "i" indicates the Nth day from the birthday
occurrences.append((i, len(occurred), occurred))
sum_value = sum(map(lambda stuff: stuff[1], occurrences))
date_iter.send_msg('[Found: %d]' % sum_value)
assert no_stories < (i + 1)
return occurrences, (timer() - start)
def find_line_boundary(text, idx, limit, direction_value):      # find the closest boundary of text for a given limit
    """Walk from `idx` in steps of `direction_value` (+1 or -1) and return the
    index where either a line boundary ('\\n' or '\\t') is adjacent, `limit`
    spaces have been passed, or the end of `text` is reached.

    BUGFIX: the walk now stops at the string boundaries -- the old version
    raised IndexError at the right edge and silently wrapped around to
    text[-1] at the left edge.
    """
    i, num = idx, 0
    while 0 <= i + direction_value < len(text) and text[i + direction_value] not in ('\n', '\t'):
        if text[i] == ' ':
            num += 1
        if num == limit:
            return i
        i += direction_value
    return i
def mark_text(text, indices, length, color = 'red'):    # Mark text and return corrected indices
    """Wrap each occurrence (starting at `indices`, each `length` chars long)
    in terminal color codes; return (colored_text, shifted_indices)."""
    if sys.platform == 'win32':     # damn OS doesn't even support coloring
        return text, indices
    text = list(text)
    formatter = fmt(color), fmt()   # (color-on, color-off) escape sequences
    lengths = map(len, formatter)   # we gotta update the indices when we introduce colored text
    i, limit = 0, len(indices)
    new_indices = indices[:]
    while i < limit:
        idx = indices[i]
        # prepend the color-on code at the match start, append color-off
        # after its last character
        text[idx] = formatter[0] + text[idx]
        text[idx + length - 1] += formatter[1]
        # NOTE(review): every index from i onward shifts right by the two
        # inserted codes; the i-th is first pulled back by the prefix length
        # so it lands on the colored character -- verify against the slicing
        # done in search().
        new_indices[i] -= lengths[0]
        j = i
        while j < limit:
            new_indices[j] += sum(lengths)
            j += 1
        i += 1
    return ''.join(text), new_indices
def search(session, word = None, lang = None, start = None, end = None, grep = 7):
'''Invokes one of the searching functions and does some useful stuff'''
clear_screen()
now = datetime.now()
def check_date(date):
if date in ['today', 'now', 'end']:
return now
elif date in ['start', 'birthday']:
return session.birthday
try:
return datetime.strptime(date, '%Y-%m-%d')
except (TypeError, ValueError):
return None
sys.stdout.set_mode(1, 0.01)
# Phase 1: Get the user input required for searching through the stories
word = force_input(word, "\nEnter a word: ", ERROR + ' You must enter a word to continue!')
lang = get_lang(lang)
start, end = map(check_date, [start, end])
while not all([start, end]):
try:
print WARNING, 'Enter dates in the form YYYY-MM-DD (Mind you, with hyphen!)\n'
if not start:
lower_bound = session.birthday
start_date = raw_input('Start date (Press [Enter] to begin from the start of your diary): ')
start = datetime.strptime(start_date, '%Y-%m-%d') if start_date else session.birthday
assert (start >= lower_bound and start <= now), 'S'
if not end:
lower_bound = start
end_date = raw_input("End date (Press [Enter] for today's date): ")
end = datetime.strptime(end_date, '%Y-%m-%d') if end_date else now
assert (end > lower_bound and end <= now), 'E'
except AssertionError as msg:
print ERROR, '%s date should be after %s and before %s' % \
(msg, lower_bound.strftime('%b. %d, %Y'), now.strftime('%b. %d, %Y'))
if str(msg) == 'S':
start = None
else:
end = None
except ValueError:
print ERROR, 'Oops! Error in input. Try again...'
# Phase 2: Send the datetimes to the respective searching functions
print "\nSearching your stories for the word '%s'..." % word
search_function = rusty_search if lang == 'r' else py_search
try:
occurrences, timing = search_function(session, start, end, word)
except AssertionError:
print ERROR, 'There are no stories in the given location!'
return
def print_stuff(grep): # function to choose between pretty and ugly printing
sys.stdout.set_mode(0)
results_begin = '\nSearch results from %s to %s:' % (start.strftime('%B %d, %Y'), end.strftime('%B %d, %Y')) + \
"\n\nStories on these days have the word '%s' in them...\n" % word
if grep: # pretty printing the output (at the cost of decrypting time)
try:
timer_start = timer()
print results_begin
for i, (n, word_count, indices) in enumerate(occurrences):
colored = []
date = start + timedelta(n)
content = Story(session, date).decrypt()
numbers = str(i + 1) + '. ' + date.strftime('%B %d, %Y (%A)')
text, indices = mark_text(content, indices, jump) # precisely indicate the word in text
for idx in indices: # find the word occurrences
left_bound = find_line_boundary(text, idx, grep, -1)
right_bound = find_line_boundary(text, idx, grep, 1)
sliced = '\t' + '... ' + text[left_bound:right_bound].strip() + ' ...'
colored.append(sliced)
print numbers, '\n%s' % '\n'.join(colored) # print the numbers along with the word occurrences
timer_stop = timer()
except (KeyboardInterrupt, EOFError):
sleep(CAPTURE_WAIT)
grep = 0 # default back to ugly printing
clear_screen()
print "Yep, it takes time! Let's go back to the good ol' days..."
if not grep: # Yuck, but cleaner way to print the results
sys.stdout.set_mode(0)
print results_begin
for i, (n, word_count, _indices) in enumerate(occurrences):
date = session.birthday + timedelta(n)
numbers = ' ' + str(i + 1) + '. ' + date.strftime('%B %d, %Y (%A)')
spaces = 40 - len(numbers)
print numbers, ' ' * spaces, '[ %s ]' % word_count # print only the datetime and counts in each file
sys.stdout.set_mode(1, 0.015)
msg = fmt_text('Found a total of %d occurrences in %d stories!' % (total_count, num_stories), 'yellow')
print '\n%s %s\n' % (SUCCESS, msg)
print fmt_text(' Time taken for searching: ', 'blue') + \
fmt_text('%s seconds!' % timing, 'green')
if grep:
print fmt_text(' Time taken for pretty printing: ', 'blue') + \
fmt_text('%s seconds!' % (timer_stop - timer_start), 'green')
# Phase 3: Print the results (in a pretty or ugly way) using the giant function below
jump, num_stories = len(word), len(occurrences)
total_count = sum(map(lambda stuff: stuff[1], occurrences))
print SUCCESS, 'Done! Time taken: %s seconds! (%d occurrences in %d stories!)' \
% (timing, total_count, num_stories)
if not total_count:
print ERROR, "Bummer! There are no stories containing '%s'..." % word
return
print_stuff(grep)
# Phase 4: Get the user input and display the stories
while occurrences:
try:
sys.stdout.set_mode(2)
print '\nEnter a number to see the corresponding story...'
print "\r(Enter 'pretty' or 'ugly' to print those search results again, or press [Enter] to exit)"
ch = raw_input('\nInput: ')
if ch == 'pretty':
clear_screen()
print_stuff(grep = 7) # '7' is default, because it looks kinda nice
elif ch == 'ugly':
clear_screen()
print_stuff(grep = 0)
elif not ch:
return
elif int(ch) <= 0:
raise ValueError
else:
n_day, word_count, indices = occurrences[int(ch) - 1]
date = start + timedelta(n_day)
(data, top, bottom) = Story(session, date).view(return_text = True)
sys.stdout.set_mode(3)
print top, mark_text(data, indices, jump, 'skyblue')[0], bottom
except (ValueError, IndexError):
print ERROR, 'Oops! Bad input! Try again...'
|
|
from ..robotsim import *
from ..math import vectorops,so3,se3
import random
from .. import vis
import time
import math
from ..model.trajectory import Trajectory
from ..model.contact import ContactPoint
def settle(world,obj,
           forcedir=(0,0,-1),forcept=(0,0,0),
           settletol=1e-4,orientationDamping=0.0,
           perturb=0,margin=None,
           debug=False):
    """Assuming that all other elements in the world besides object are frozen,
    this "settles" the object by applying a force in the direction forcedir
    and simulating until the object stops moving.

    An exception is raised if the object is already colliding with the world.

    Args:
        world (WorldModel): the world containing other static and moving
            objects
        obj: a RigidObjectModel, RobotModelLink, or floating-base Robot that
            will be settled.
        forcedir (list of 3 floats, optional): a vector parallel to the
            direction of force whose magnitude is the maximum distance this
            procedure will try to move the object.
        forcept (list of 3 floats, optional): local coordinates of the center
            of force application.
        settletol (float, optional): the simulation will stop when two
            subsequent transforms lie within this tolerance.
        orientationDamping (float, optional): a virtual spring will attempt
            to keep the initial orientation with this torsional spring constant
        perturb (float, optional): if nonzero, the application force will be
            perturbed at random by this amount every step. If equal to 1, this
            means the force is sampled from a 45 degree cone in the direction
            forcedir.
        margin (float, optional): the collision detection margin used in
            simulation. If None, uses the Simulator default. Otherwise,
            overrides the default. Must be at least settletol.
        debug (bool, optional): if True, uses the visualization to debug the
            settling process

    Returns:
        tuple: A pair (transform,touched) with:

            - transform (se3 transform): The resulting se3 transform of the
              object, or None if the object didn't hit anything by the time it
              translated by ||forcedir|| units.
            - touched (dict): a dictionary whose keys are object IDs touched by
              the object at the final transform, and whose values are lists of
              ContactPoints (see :mod:`klampt.model.contact`) giving the
              contacts between obj and the touched object.

    To convert the result to a hold, call::

        h = Hold()
        h.setFixed(obj,sum(touched.values(),[]))
    """
    assert isinstance(world,WorldModel)
    # --- normalize the obj argument to a RigidObjectModel ---
    if isinstance(obj,(str,int)):
        obj = world.rigidObject(obj)
        assert obj.index >= 0,"Object "+str(obj)+" does not exist in world"
    elif isinstance(obj,RobotModel):
        raise NotImplementedError("TODO: settle free-floating robots")
    elif isinstance(obj,RobotModelLink):
        # a robot link is settled by rebuilding the world with the link
        # turned into a free rigid object (its robot dropped), then recursing
        if world.index != obj.world:
            raise ValueError("Object is not present in the given world")
        assert obj.robotIndex >= 0 and obj.robotIndex < world.numRobots()
        robot = world.robot(obj.robotIndex)
        assert obj.index >= 0 and obj.index < robot.numLinks()
        newWorld = WorldModel()
        for i in range(world.numRobots()):
            if i == obj.robotIndex:
                continue
            newWorld.add(world.robot(i).getName(),world.robot(i))
        for i in range(world.numRigidObjects()):
            newWorld.add(world.rigidObject(i).getName(),world.rigidObject(i))
        for i in range(world.numTerrains()):
            newWorld.add(world.terrain(i).getName(),world.terrain(i))
        newObj = newWorld.makeRigidObject("obj")
        newObj.geometry().set(obj.geometry())
        newObj.setMass(obj.getMass())
        #newObj.setContactParameters(obj.getContactParameters())
        newObj.setTransform(*obj.getTransform())
        return settle(newWorld,newObj,
                      forcedir,forcept,
                      settletol,orientationDamping,
                      perturb,margin,debug)
    elif isinstance(obj,RigidObjectModel):
        pass
    else:
        raise ValueError("Invalid object type given, only supports RigidObjectModels, RobotModelLinks, and RobotModels")

    #get a bounding box around the object
    forcept_world = se3.apply(obj.getTransform(),forcept)
    bmin,bmax = obj.geometry().getBB()
    #compute radius about forcept, expand BB due to potential for orientation change
    R = 0
    for i in range(3):
        R += pow(max(forcept_world[i]-bmin[i],bmax[i]-forcept_world[i]),2)
    R = math.sqrt(R)
    bmin = [x - R for x in forcept_world]
    bmax = [x + R for x in forcept_world]
    #expand BB about force direction
    for i in range(3):
        if forcedir[i] < 0:
            bmin[i] += forcedir[i]
        else:
            bmax[i] += forcedir[i]
    if world.index != obj.world:
        raise ValueError("Object is not present in the given world")
    assert obj.index >= 0 and obj.index < world.numRigidObjects()

    #exclude objects that have no chance of being in way of object
    # (build a pruned copy of the world; robot links become static
    # infinite-mass rigid objects)
    newWorld = WorldModel()
    newObj = None
    for i in range(world.numRobots()):
        robot = world.robot(i)
        for j in range(robot.numLinks()):
            link = robot.link(j)
            if _bboverlap((bmin,bmax),link):
                #add robot link as static geometry
                newObj = newWorld.makeRigidObject(robot.getName()+":"+link.getName())
                newObj.geometry().set(link.geometry())
                mass = Mass()
                mass.setMass(float('inf'))
                mass.setCom([0]*3)
                mass.setInertia([float('inf')]*3)
                newObj.setMass(mass)
                newObj.setTransform(*link.getTransform())
                #TODO: what surface properties?
    for i in range(world.numRigidObjects()):
        if _bboverlap((bmin,bmax),world.rigidObject(i)):
            o = newWorld.add(world.rigidObject(i).getName(),world.rigidObject(i))
            if i == obj.index:
                newObj = o
        else:
            # the settled object itself must always survive the pruning
            assert i != obj.index
    for i in range(world.numTerrains()):
        if _bboverlap((bmin,bmax),world.terrain(i)):
            newWorld.add(world.terrain(i).getName(),world.terrain(i))
    world = newWorld
    obj = newObj

    movedist = vectorops.norm(forcedir)
    if movedist < settletol:
        print("sim.settle(): warning, force movement distance less than settletol. Was this intended?")
        # NOTE(review): returns a list here while the docstring promises a
        # dict for `touched` -- confirm callers tolerate both.
        return (obj.getTransform(),[])
    forcedir = vectorops.div(forcedir,movedist)
    forceamt = obj.getMass().mass

    # --- set up the simulation: the object vs. everything else ---
    sim = Simulator(world)
    body = sim.body(obj)
    otherbodies = []
    otherids = []
    for i in range(world.numRigidObjects()):
        otherids.append(world.rigidObject(i).getID())
        otherbodies.append(sim.body(world.rigidObject(i)))
    for i in range(world.numTerrains()):
        otherids.append(world.terrain(i).getID())
        otherbodies.append(sim.body(world.terrain(i)))
    otherids.remove(obj.getID())
    otherbodies = [b for b in otherbodies if b.body != body.body]
    if len(otherbodies) == 0:
        print("sim.settle(): no objects in direction",vectorops.mul(forcedir,movedist))
        return (None,[])
    if margin != None:
        assert margin >= settletol,"Collision margin must be at least settletol"
        for b in otherbodies:
            b.setCollisionPadding(0)
        body.setCollisionPadding(margin)
    else:
        # derive an effective margin from the simulator's default paddings
        margin = body.getCollisionPadding()
        margin += min([b.getCollisionPadding() for b in otherbodies])
    #set up simulation
    dt = max(settletol,margin*0.5)
    forceamt /= dt*0.5
    sim.setGravity((0,0,0))
    sim.setSimStep(dt)
    for id in otherids:
        sim.enableContactFeedback(obj.getID(),id)
    #turn off all restitution
    s = body.getSurface()
    s.kRestitution = 0
    body.setSurface(s)
    for b in otherbodies:
        s = b.getSurface()
        s.kRestitution = 0
        b.setSurface(s)
    #disable other bodies
    for b in otherbodies:
        b.setVelocity([0.0]*3,[0.0]*3)
        b.enableDynamics(False)
    body.setVelocity([0.0]*3,vectorops.mul(forcedir,movedist))
    sim.simulate(0)
    s = sim.getStatus()
    if s == Simulator.STATUS_CONTACT_UNRELIABLE:
        # already penetrating: back off by the margin and try once more
        print("sim.settle(): warning, object already penetrating other objects. Trying to pull back...")
        T0 = body.getTransform()
        body.setTransform(T0[0],vectorops.madd(T0[1],forcedir,margin))
        sim.simulate(0)
        s = sim.getStatus()
        if s == Simulator.STATUS_CONTACT_UNRELIABLE:
            print(" pulling back failed.")
            return (None,[])
    if debug:
        vis.createWindow("settle")
        vis.add("world",world)
        vis.show()
        time.sleep(1.0)

    # --- main settling loop: push with a virtual spring until the transform
    # stops changing for two consecutive steps, or t reaches 1 ---
    springanchorworld = se3.apply(obj.getTransform(),forcept)
    Rspringanchor = obj.getTransform()[0]
    numSettled = 0
    Told = body.getTransform()
    t = 0
    while t < 1:
        #print("Simulating, t =",t)
        if perturb:
            # random force jitter, projected orthogonal to forcedir
            fpert = (random.gauss(0,perturb),random.gauss(0,perturb),random.gauss(0,perturb))
            fpert = vectorops.sub(fpert,vectorops.mul(forcedir,vectorops.dot(forcedir,fpert)))
            f = vectorops.add(forcedir,fpert)
        else:
            f = forcedir
        springanchorbody = se3.apply(body.getObjectTransform(),forcept)
        Rspringbody = body.getObjectTransform()[0]
        #vis.add("Tobject",body.getObjectTransform())
        kSpring = forceamt
        #kSpring = 0
        # total force = push along f plus spring pulling toward the (moving)
        # world-space anchor point
        f = vectorops.madd(vectorops.mul(f,forceamt),vectorops.sub(springanchorworld,springanchorbody),kSpring)
        body.applyForceAtLocalPoint(f,forcept)
        if orientationDamping > 0:
            #local orientation change: transform from body to anchor frame
            wlocal = so3.moment(so3.mul(Rspringanchor,so3.inv(Rspringbody)))
            #world orientation spring
            w = so3.apply(Rspringbody,wlocal)
            #w = so3.apply(Rspringbody,so3.error(Rspringanchor,Rspringbody))
            #vis.add("orientation",Trajectory([0,1],[Told[1],vectorops.madd(Told[1],w,1)]))
            #vis.add("orientationloc",Trajectory([0,1],[Told[1],vectorops.madd(Told[1],wlocal,1)]))
            #vis.setColor("orientationloc",0,1,0)
            #body.applyWrench([0]*3,vectorops.mul(w,orientationDamping))
            body.applyWrench([0]*3,vectorops.mul(wlocal,orientationDamping))
        if debug:
            vis.lock()
            sim.simulate(dt)
            sim.updateWorld()
            vis.unlock()
            time.sleep(0)
            time.sleep(0.1)
        else:
            sim.simulate(dt)
            sim.updateWorld()
        #test for settling
        w,v = body.getVelocity()
        T = body.getTransform()
        err = se3.error(T,Told)
        #if debug:
        #    print("Status:",sim.getStatus(),"velocity",w,v,"error",vectorops.norm(err))
        if vectorops.norm(err) < settletol:
            numSettled += 1
        else:
            numSettled = 0
        if numSettled >= 2:
            # settled: gather the contact state and return
            print("sim.settle(): Settled at time",t)
            touched = [id for id in otherids if sim.inContact(obj.getID(),id)]
            cps = [sim.getContacts(obj.getID(),id) for id in touched]
            tdict = dict()
            for id,cplist in zip(touched,cps):
                # contact rows are (x,y,z, nx,ny,nz, kFriction)
                tdict[id] = [ContactPoint(ci[0:3],ci[3:6],ci[6]) for ci in cplist]
            if debug:
                vis.show(False)
            return (body.getObjectTransform(),tdict)
        #apply drag
        body.setVelocity(vectorops.mul(w,0.8),vectorops.mul(v,0.8))
        Told = T
        # advance the spring anchor along the push direction
        springanchorworld = vectorops.madd(springanchorworld,forcedir,dt*movedist)
        t += dt
    if debug:
        vis.show(False)
    # never settled within the travel budget: return whatever contacts exist
    print("Failed to settle? Final velocity",body.getVelocity())
    touched = [id for id in otherids if sim.inContact(obj.getID(),id)]
    cps = [sim.getContacts(obj.getID(),id) for id in touched]
    tdict = dict()
    for id,cplist in zip(touched,cps):
        tdict[id] = [ContactPoint(ci[0:3],ci[3:6],ci[6]) for ci in cplist]
    return (body.getObjectTransform(),tdict)
def _bboverlap(bb, element):
    """Return True if the axis-aligned box `bb` = (bmin,bmax) overlaps
    `element`'s bounding box.  RobotModels are tested link by link.

    (The unconditional per-call debug prints that used to live here were
    removed; they flooded stdout on every settle() call.)
    """
    if isinstance(element, RobotModel):
        return any(_bboverlap(bb, element.link(i)) for i in range(element.numLinks()))
    bb2 = element.geometry().getBB()
    # boxes overlap iff their intervals overlap on every axis
    for (a, b, c, d) in zip(bb[0], bb[1], bb2[0], bb2[1]):
        if not (a >= c and a <= d or b >= c and b <= d) and not (c >= a and c <= b or d >= a and d <= b):
            return False
    return True
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_monitor_https
short_description: Manages F5 BIG-IP LTM HTTPS monitors
description: Manages F5 BIG-IP LTM HTTPS monitors.
version_added: "1.0.0"
options:
name:
description:
- Monitor name.
type: str
required: True
description:
description:
- The description of the monitor.
type: str
parent:
description:
- The parent template of this monitor template. Once this value has
been set, it cannot be changed. By default, this value is the C(https)
parent on the C(Common) partition.
type: str
default: /Common/https
send:
description:
- The Send string for the monitor call. When creating a new monitor, if
this value is not provided, the default C(GET /\\r\\n) is used.
type: str
receive:
description:
- The Receive string for the monitor call.
type: str
receive_disable:
description:
- This setting works like C(receive), except the system marks the node
or pool member disabled when its response matches the C(receive_disable)
string but not C(receive). To use this setting, you must specify both
C(receive_disable) and C(receive).
type: str
ip:
description:
- IP address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, the default value is '*'.
type: str
port:
description:
- Port address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, the default value is
'*'. If specifying an IP address, you must specify a value between 1 and 65535.
type: str
interval:
description:
- The interval specifying how frequently the monitor instance of this
template will run. If this parameter is not provided when creating
a new monitor, the default value is 5. This value B(must)
be less than the C(timeout) value.
type: int
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
to any number, however, it should be 3 times the
interval number of seconds plus 1 second. If this parameter is not
provided when creating a new monitor, the default value is 16.
type: int
time_until_up:
description:
- Specifies the amount of time in seconds after the first successful
response before a node will be marked up. A value of 0 causes a
node to be marked up immediately after a valid response is received
from the node. If this parameter is not provided when creating
a new monitor, then the default value will be 0.
type: int
target_username:
description:
- Specifies the user name, if the monitored target requires authentication.
type: str
target_password:
description:
- Specifies the password, if the monitored target requires authentication.
type: str
ssl_profile:
description:
- Specifies the SSL profile to use for the HTTPS monitor.
- Defining SSL profiles enables refined customization of the SSL attributes
for an HTTPS monitor.
- This parameter is only supported on BIG-IP versions 13.x and later.
type: str
up_interval:
description:
- Specifies the interval for the system to use to perform the health check
when a resource is up.
- When C(0), specifies the system uses the interval specified in
C(interval) to check the health of the resource.
- When any other number, enables you to specify a different interval
when checking the health of a resource that is up.
type: int
cipher_list:
description:
- Specifies the list of ciphers for this monitor.
- The items in the cipher list are separated with the colon C(:) symbol.
type: str
version_added: "1.3.0"
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(present), ensures the monitor exists.
- When C(absent), ensures the monitor is removed.
type: str
choices:
- present
- absent
default: present
notes:
- Requires BIG-IP software version >= 12
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create HTTPS Monitor
bigip_monitor_https:
    name: my_https_monitor
state: present
ip: 10.10.10.10
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Remove HTTPS Monitor
bigip_monitor_https:
    name: my_https_monitor
state: absent
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
'''
RETURN = r'''
parent:
description: New parent template of the monitor.
returned: changed
type: str
sample: https
ip:
description: The new IP of IP/port definition.
returned: changed
type: str
sample: 10.12.13.14
interval:
description: The new interval at which to run the monitor check.
returned: changed
type: int
sample: 2
description:
description: The description of the monitor.
returned: changed
type: str
sample: Important Monitor
timeout:
description: The new timeout in which the remote system must respond to the monitor.
returned: changed
type: int
sample: 10
time_until_up:
description: The new time in which to mark a system as up after first successful response.
returned: changed
type: int
sample: 2
up_interval:
description: Interval for the system to use to perform the health check when a resource is up.
returned: changed
type: int
sample: 0
cipher_list:
description: The new value for the cipher list.
returned: changed
type: str
sample: +3DES:+kEDH
'''
from datetime import datetime
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, transform_name, f5_argument_spec, fq_name
)
from ..module_utils.compare import cmp_str_with_none
from ..module_utils.ipaddress import is_valid_ip
from ..module_utils.icontrol import tmos_version
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
    """Base mapping between the module's options and the BIG-IP REST API
    representation of an HTTPS monitor.
    """

    # API attribute name -> module option name.
    api_map = {
        'timeUntilUp': 'time_until_up',
        'defaultsFrom': 'parent',
        'recv': 'receive',
        'recvDisable': 'receive_disable',
        'sslProfile': 'ssl_profile',
        'upInterval': 'up_interval',
        'cipherlist': 'cipher_list',
    }

    # Attributes that are sent to the API on create/update.
    api_attributes = [
        'timeUntilUp',
        'defaultsFrom',
        'interval',
        'timeout',
        'recv',
        'send',
        'destination',
        'username',
        'password',
        'recvDisable',
        'description',
        'sslProfile',
        'upInterval',
        'cipherlist',
    ]

    # Values reported back to the user when they change.
    returnables = [
        'parent',
        'send',
        'receive',
        'ip',
        'port',
        'interval',
        'timeout',
        'time_until_up',
        'receive_disable',
        'description',
        'ssl_profile',
        'up_interval',
        'cipher_list',
    ]

    # Parameters compared to decide whether an update is required.
    updatables = [
        'destination',
        'send',
        'receive',
        'interval',
        'timeout',
        'time_until_up',
        'target_username',
        'target_password',
        'receive_disable',
        'description',
        'ssl_profile',
        'up_interval',
        'cipher_list',
    ]

    @property
    def username(self):
        """API 'username' attribute maps to the 'target_username' option."""
        return self._values['target_username']

    @property
    def password(self):
        """API 'password' attribute maps to the 'target_password' option."""
        return self._values['target_password']

    @property
    def destination(self):
        """Combined 'ip:port' monitor destination, or None if neither given."""
        if self.ip is None and self.port is None:
            return None
        destination = '{0}:{1}'.format(self.ip, self.port)
        return destination

    @destination.setter
    def destination(self, value):
        # Split an API-supplied 'ip:port' destination into its parts.
        ip, port = value.split(':')
        self._values['ip'] = ip
        self._values['port'] = port

    @property
    def interval(self):
        """Monitor check interval in seconds, validated to 1..86400.

        Raises:
            F5ModuleError: If the supplied value is outside the valid range.
        """
        if self._values['interval'] is None:
            return None
        # Per BZ617284, the BIG-IP UI does not raise a warning about this.
        # So I do.
        #
        # BUGFIX: the original chained comparison `1 > x > 86400` can never
        # be true, so out-of-range values were silently accepted. Use a
        # proper range check instead.
        if not 1 <= int(self._values['interval']) <= 86400:
            raise F5ModuleError(
                "Interval value must be between 1 and 86400"
            )
        return int(self._values['interval'])

    @property
    def timeout(self):
        """Monitor timeout in seconds, or None when unset."""
        if self._values['timeout'] is None:
            return None
        return int(self._values['timeout'])

    @property
    def ip(self):
        """Monitor IP; '*' (or '0.0.0.0') means 'any address'.

        Raises:
            F5ModuleError: If the value is not a valid IP address.
        """
        if self._values['ip'] is None:
            return None
        elif self._values['ip'] in ['*', '0.0.0.0']:
            return '*'
        elif is_valid_ip(self._values['ip']):
            return self._values['ip']
        raise F5ModuleError(
            "The provided 'ip' parameter is not an IP address."
        )

    @property
    def port(self):
        """Monitor port as int; '*' means 'any port'."""
        if self._values['port'] is None:
            return None
        elif self._values['port'] == '*':
            return '*'
        return int(self._values['port'])

    @property
    def time_until_up(self):
        """Seconds to wait before marking a resource up, or None when unset."""
        if self._values['time_until_up'] is None:
            return None
        return int(self._values['time_until_up'])

    @property
    def parent(self):
        """Fully-qualified name of the parent monitor template."""
        if self._values['parent'] is None:
            return None
        result = fq_name(self.partition, self._values['parent'])
        return result

    @property
    def type(self):
        """This module always manages monitors of type 'https'."""
        return 'https'
class ApiParameters(Parameters):
    """Parameters as read back from an existing device configuration."""

    @property
    def description(self):
        """Return the description, treating the API's 'none' marker as unset."""
        value = self._values['description']
        if value is None or value == 'none':
            return None
        return value
class ModuleParameters(Parameters):
    """Parameters supplied by the user through the Ansible module."""

    @property
    def description(self):
        """Normalize the user's description; '' or 'none' clear the field."""
        value = self._values['description']
        if value is None:
            return None
        if value in ('none', ''):
            return ''
        return value

    @property
    def ssl_profile(self):
        """Fully-qualified SSL profile name; '' or 'none' clear the profile."""
        value = self._values['ssl_profile']
        if value is None:
            return None
        if value in ('', 'none'):
            return ''
        return fq_name(self.partition, value)
class Changes(Parameters):
    """Collects changed parameter values for reporting and API submission."""

    def to_return(self):
        """Return a dict of all returnable values, filtered of empty entries.

        The original implementation wrapped this in a
        ``try/except Exception: raise`` block, which was a no-op;
        exceptions propagate unchanged either way, so it was removed.
        """
        result = {
            returnable: getattr(self, returnable)
            for returnable in self.returnables
        }
        return self._filter_params(result)
class UsableChanges(Changes):
    # Changes in the form submitted to the BIG-IP REST API; no extra
    # transformation is currently required beyond the base class.
    pass
class ReportableChanges(Changes):
    # Changes in the form reported back to the user; no extra
    # transformation is currently required beyond the base class.
    pass
class Difference(object):
    """Computes per-parameter differences between the desired state (want)
    and the current device state (have).

    Parameters with special comparison rules are implemented as properties;
    everything else falls back to a plain inequality check in __default.
    """
    def __init__(self, want, have=None):
        self.want = want
        self.have = have
    def compare(self, param):
        """Return the changed value for `param`, or None if unchanged."""
        try:
            # A property on this class implements custom comparison logic.
            result = getattr(self, param)
            return result
        except AttributeError:
            # No custom logic; use a simple want-vs-have comparison.
            result = self.__default(param)
            return result
    @property
    def parent(self):
        # The API does not support re-parenting an existing monitor.
        if self.want.parent != self.have.parent:
            raise F5ModuleError(
                "The parent monitor cannot be changed"
            )
    @property
    def destination(self):
        """Compare 'ip:port' destinations, filling missing halves from have.

        NOTE: this mutates self.want in place so that later comparisons see
        the completed destination.
        """
        if self.want.ip is None and self.want.port is None:
            return None
        if self.want.port is None:
            self.want.update({'port': self.have.port})
        if self.want.ip is None:
            self.want.update({'ip': self.have.ip})
        if self.want.port in [None, '*'] and self.want.ip != '*':
            raise F5ModuleError(
                "Specifying an IP address requires that a port number be specified"
            )
        if self.want.destination != self.have.destination:
            return self.want.destination
    @property
    def interval(self):
        """Validate interval < timeout across want/have, then compare."""
        if self.want.timeout is not None and self.want.interval is not None:
            if self.want.interval >= self.want.timeout:
                raise F5ModuleError(
                    "Parameter 'interval' must be less than 'timeout'."
                )
        elif self.want.timeout is not None:
            # Only timeout supplied; validate against the existing interval.
            if self.have.interval >= self.want.timeout:
                raise F5ModuleError(
                    "Parameter 'interval' must be less than 'timeout'."
                )
        elif self.want.interval is not None:
            # Only interval supplied; validate against the existing timeout.
            if self.want.interval >= self.have.timeout:
                raise F5ModuleError(
                    "Parameter 'interval' must be less than 'timeout'."
                )
        if self.want.interval != self.have.interval:
            return self.want.interval
    @property
    def ssl_profile(self):
        # '' (clear the profile) is only a change when a profile is set.
        if self.want.ssl_profile is None:
            return None
        if self.want.ssl_profile == '' and self.have.ssl_profile is None:
            return None
        if self.want.ssl_profile != self.have.ssl_profile:
            return self.want.ssl_profile
    def __default(self, param):
        """Plain comparison: report want's value when it differs from have's."""
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            # have does not define the attribute; treat want's value as new.
            return attr1
    @property
    def description(self):
        # String comparison tolerant of None vs 'none' style placeholders.
        return cmp_str_with_none(self.want.description, self.have.description)
    @property
    def receive(self):
        return cmp_str_with_none(self.want.receive, self.have.receive)
    @property
    def receive_disable(self):
        return cmp_str_with_none(self.want.receive_disable, self.have.receive_disable)
class ModuleManager(object):
    """Orchestrates create/update/delete of the HTTPS monitor via the
    BIG-IP iControl REST API, honoring Ansible check mode.
    """
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()
    def _set_changed_options(self):
        """Record all user-supplied values as changes (used on create)."""
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)
    def _update_changed_options(self):
        """Diff desired vs. current state; return True when anything differs."""
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                # Composite properties may report several API fields at once.
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False
    def should_update(self):
        """Return True when the device configuration needs an update."""
        result = self._update_changed_options()
        if result:
            return True
        return False
    def exec_module(self):
        """Entry point: apply the requested state and return the results."""
        start = datetime.now().isoformat()
        version = tmos_version(self.client)
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        # Usage telemetry (TEEM) reporting.
        send_teem(start, self.module, version)
        return result
    def _announce_deprecations(self, result):
        """Surface any queued deprecation warnings through Ansible."""
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def present(self):
        """Ensure the monitor exists; return True when a change was made."""
        if self.exists():
            return self.update()
        else:
            return self.create()
    def exists(self):
        """Return True when the monitor exists on the device.

        NOTE(review): status codes not covered by the 404/200/201/error
        branches fall through and return None (falsy) — confirm intended.
        """
        uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/https/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
            return True
        errors = [401, 403, 409, 500, 501, 502, 503, 504]
        if resp.status in errors or 'code' in response and response['code'] in errors:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def update(self):
        """Update the existing monitor if needed; True when changed."""
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            # Check mode: report the change without applying it.
            return True
        self.update_on_device()
        return True
    def remove(self):
        """Delete the monitor and verify the deletion took effect."""
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True
    def create(self):
        """Create the monitor with defaults filled in; always True."""
        self._set_changed_options()
        self._set_default_creation_values()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True
    def _set_default_creation_values(self):
        """Apply defaults for options the user did not supply on create."""
        if self.want.timeout is None:
            self.want.update({'timeout': 16})
        if self.want.interval is None:
            self.want.update({'interval': 5})
        if self.want.time_until_up is None:
            self.want.update({'time_until_up': 0})
        if self.want.ip is None:
            self.want.update({'ip': '*'})
        if self.want.port is None:
            self.want.update({'port': '*'})
        if self.want.send is None:
            self.want.update({'send': 'GET /\r\n'})
    def create_on_device(self):
        """POST the new monitor definition to the device."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/https/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
            return True
        raise F5ModuleError(resp.content)
    def update_on_device(self):
        """PATCH the changed attributes onto the existing monitor."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/https/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
            return True
        raise F5ModuleError(resp.content)
    def absent(self):
        """Ensure the monitor does not exist; True when it was removed."""
        if self.exists():
            return self.remove()
        return False
    def remove_from_device(self):
        """DELETE the monitor from the device.

        NOTE(review): non-200 responses return None here rather than raising;
        remove() re-checks exists() afterwards to detect failures.
        """
        uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/https/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.delete(uri)
        if resp.status == 200:
            return True
    def read_current_from_device(self):
        """GET the monitor's current configuration as ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/https/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
            return ApiParameters(params=response)
        raise F5ModuleError(resp.content)
class ArgumentSpec(object):
    """Argument specification for the bigip_monitor_https module."""

    def __init__(self):
        self.supports_check_mode = True
        # Module-specific options, layered on top of the common F5 options.
        monitor_options = dict(
            name=dict(required=True),
            parent=dict(default='/Common/https'),
            description=dict(),
            send=dict(),
            receive=dict(),
            receive_disable=dict(),
            ip=dict(),
            up_interval=dict(type='int'),
            port=dict(),
            interval=dict(type='int'),
            timeout=dict(type='int'),
            time_until_up=dict(type='int'),
            target_username=dict(),
            target_password=dict(no_log=True),
            ssl_profile=dict(),
            cipher_list=dict(),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        self.argument_spec = dict(f5_argument_spec)
        self.argument_spec.update(monitor_options)
def main():
    """Module entry point: build the spec, run the manager, report results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
    )
    try:
        manager = ModuleManager(module=module)
        module.exit_json(**manager.exec_module())
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))
# Run the module only when executed directly (not when imported).
if __name__ == '__main__':
    main()
|
|
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Test cases for the data store handler.
"""
import unittest
from xml.parsers.expat import ExpatError
from datafinder.core.configuration.datastores import handler, constants
from datafinder.core.configuration.gen import datastores
from datafinder.core.error import ConfigurationError
from datafinder.persistence.error import PersistenceError
from datafinder_test.mocks import SimpleMock
# Subversion keyword-expansion placeholder for the module revision.
__version__ = "$Revision-Id:$"
class DataStoreHandlerTestsCase(unittest.TestCase):
    """ Test cases for the data store handler. """
    #pylint: disable=R0904
    # NOTE: The deprecated unittest aliases assertEquals/assertNotEquals were
    # replaced by assertEqual/assertNotEqual; the aliases were removed in
    # Python 3.12.
    def setUp(self):
        """ Creates object under test. """
        # Monkey-patch the module-level collaborators with simple mocks.
        self._parseStringMock = SimpleMock()
        datastores.parseString = self._parseStringMock
        self._createFileStorerMock = SimpleMock()
        handler.createFileStorer = self._createFileStorerMock
        self._fileStorerMock = SimpleMock()
        self._datastoreHandler = handler.DataStoreHandler(self._fileStorerMock)
    def testCreate(self):
        """ Tests the creation of the data store configuration area. """
        self._datastoreHandler.create(dataUri="uri")
        self.assertEqual(len(self._datastoreHandler.datastores), 1)
        self.assertEqual(self._datastoreHandler.datastores[0].url, "uri")
        # Persistence failures must surface as ConfigurationError.
        self._fileStorerMock.error = PersistenceError("")
        self.assertRaises(ConfigurationError, self._datastoreHandler.create, dataUri="uri")
    def testLoad(self):
        """ Tests the initialization of the data store handler. """
        self._fileStorerMock.methodNameResultMap = {"exists": (True, None), "readData": (SimpleMock(""), None)}
        self._parseStringMock.value = datastores.datastores([datastores.default(name="name1", storeType=constants.DEFAULT_STORE),
                                                             datastores.default(name="name2", storeType=constants.DEFAULT_STORE)])
        self._datastoreHandler.load()
        self.assertEqual(len(self._datastoreHandler.datastores), 2)
        # Missing configuration file.
        self._fileStorerMock.methodNameResultMap = {"exists": (False, None)}
        self.assertRaises(ConfigurationError, self._datastoreHandler.load)
        # Persistence error while checking for the file.
        self._fileStorerMock.methodNameResultMap = {"exists": (None, PersistenceError(""))}
        self.assertRaises(ConfigurationError, self._datastoreHandler.load)
        # Persistence error while reading the file.
        self._fileStorerMock.methodNameResultMap = {"exists": (True, None), "readData": (None, PersistenceError(""))}
        self.assertRaises(ConfigurationError, self._datastoreHandler.load)
        # Malformed XML content.
        self._fileStorerMock.methodNameResultMap = {"exists": (True, None), "readData": (SimpleMock(""), None)}
        self._parseStringMock.error = ExpatError("")
        self.assertRaises(ConfigurationError, self._datastoreHandler.load)
        # Invalid content values.
        self._fileStorerMock.methodNameResultMap = {"exists": (True, None), "readData": (SimpleMock(""), None)}
        self._parseStringMock.error = ValueError("")
        self.assertRaises(ConfigurationError, self._datastoreHandler.load)
    def testStore(self):
        """ Tests the storage of the data store configuration. """
        self._datastoreHandler.store()
        self._fileStorerMock.error = PersistenceError("")
        self.assertRaises(ConfigurationError, self._datastoreHandler.store)
    def testDataStoreCreation(self):
        """ Tests the data store factory method. """
        datastore = self._datastoreHandler.createDataStore()
        self.assertFalse(self._datastoreHandler.hasDataStore(datastore.name))
        self.assertNotEqual(datastore.name, None)
        self.assertEqual(datastore.storeType, constants.DEFAULT_STORE)
        # A None storeType falls back to the default store type.
        datastore = self._datastoreHandler.createDataStore("name", None,
                                                           "iconName", "url", True, "owner")
        self.assertEqual(datastore.name, "name")
        self.assertEqual(datastore.iconName, "iconName")
        self.assertEqual(datastore.storeType, constants.DEFAULT_STORE)
        self.assertEqual(datastore.url, "url")
        self.assertEqual(datastore.isDefault, True)
        self.assertEqual(datastore.owner, "owner")
        datastore = self._datastoreHandler.createDataStore("name", constants.FTP_STORE,
                                                           "iconName", "url", True, "owner")
        self.assertEqual(datastore.name, "name")
        self.assertEqual(datastore.iconName, "iconName")
        self.assertEqual(datastore.storeType, constants.FTP_STORE)
        self.assertEqual(datastore.url, "url")
        self.assertEqual(datastore.isDefault, True)
        self.assertEqual(datastore.owner, "owner")
        self.assertRaises(ConfigurationError, self._datastoreHandler.createDataStore, storeType="unknown")
    def testImport(self):
        """ Tests the import of data store configurations. """
        self._createFileStorerMock.value = SimpleMock(SimpleMock())
        self._parseStringMock.value = datastores.datastores([datastores.default(name="name1", storeType=constants.DEFAULT_STORE),
                                                             datastores.default(name="name2", storeType=constants.DEFAULT_STORE)])
        self._datastoreHandler.importDataStores("/local/file/path")
        self.assertEqual(len(self._datastoreHandler.datastores), 2)
        # Failures must not modify the already imported data stores.
        self._parseStringMock.error = ExpatError("")
        self.assertRaises(ConfigurationError, self._datastoreHandler.importDataStores, "/local/file/path")
        self.assertEqual(len(self._datastoreHandler.datastores), 2)
        self._createFileStorerMock.value = SimpleMock(error=PersistenceError(""))
        self.assertRaises(ConfigurationError, self._datastoreHandler.importDataStores, "/local/file/path")
        self.assertEqual(len(self._datastoreHandler.datastores), 2)
        self._createFileStorerMock.error = PersistenceError("")
        self.assertRaises(ConfigurationError, self._datastoreHandler.importDataStores, "/local/file/path")
        self.assertEqual(len(self._datastoreHandler.datastores), 2)
    def testExport(self):
        """ Tests the export of data store configurations. """
        self._createFileStorerMock.value = SimpleMock()
        self._datastoreHandler.exportDataStores("/local/file/path")
        self._createFileStorerMock.value = SimpleMock(error=PersistenceError(""))
        self.assertRaises(ConfigurationError, self._datastoreHandler.exportDataStores, "/local/file/path")
        self._createFileStorerMock.error = PersistenceError("")
        self.assertRaises(ConfigurationError, self._datastoreHandler.exportDataStores, "/local/file/path")
    def testDataStoreHandling(self):
        """ Tests the management of data store configurations. """
        dataStore = self._datastoreHandler.createDataStore()
        self.assertFalse(self._datastoreHandler.hasDataStore(dataStore.name))
        self.assertEqual(self._datastoreHandler.getDataStore(dataStore.name), None)
        self._datastoreHandler.addDataStore(dataStore)
        self.assertTrue(self._datastoreHandler.hasDataStore(dataStore.name))
        self.assertEqual(self._datastoreHandler.getDataStore(dataStore.name), dataStore)
        self.assertEqual(len(self._datastoreHandler.defaultDatastores), 0)
        anotherDataStore = self._datastoreHandler.createDataStore()
        anotherDataStore.isDefault = True
        self._datastoreHandler.addDataStore(anotherDataStore)
        self.assertEqual(len(self._datastoreHandler.datastores), 2)
        self.assertEqual(len(self._datastoreHandler.defaultDatastores), 1)
        # Removing the same store twice must be harmless (idempotent).
        self._datastoreHandler.removeDataStore(dataStore.name)
        self._datastoreHandler.removeDataStore(dataStore.name)
        self.assertEqual(len(self._datastoreHandler.datastores), 1)
        self._datastoreHandler.removeDataStore(anotherDataStore.name)
        self.assertEqual(len(self._datastoreHandler.datastores), 0)
        self._datastoreHandler.setDataStores([dataStore, anotherDataStore])
        self.assertEqual(len(self._datastoreHandler.datastores), 2)
    def testDataStoreGetter(self):
        """ Tests the read-only properties of the handler allowing access to the data store configurations. """
        # Fine for testing: pylint: disable=W0212
        datastores_ = {"tsm": SimpleMock(storeType=constants.TSM_CONNECTOR_STORE, isMigrated=False),
                       "file": SimpleMock(storeType=constants.FILE_STORE, isMigrated=False),
                       "ftp": SimpleMock(storeType=constants.FTP_STORE, isMigrated=False),
                       "gridftp": SimpleMock(storeType=constants.GRIDFTP_STORE, isMigrated=False),
                       "default": SimpleMock(storeType=constants.DEFAULT_STORE, isMigrated=False),
                       "webdav": SimpleMock(storeType=constants.WEBDAV_STORE, isMigrated=False),
                       "offline": SimpleMock(storeType=constants.OFFLINE_STORE, isMigrated=False),
                       "s3": SimpleMock(storeType = constants.S3_STORE, isMigrated=False),
                       "svn": SimpleMock(storeType = constants.SUBVERSION_STORE, isMigrated=False),
                       "svn_migrated": SimpleMock(storeType = constants.SUBVERSION_STORE, isMigrated=True)}
        self._datastoreHandler._datastores = datastores_
        self.assertEqual(len(self._datastoreHandler.datastores), 10)
        self.assertEqual(len(self._datastoreHandler.archiveDatastores), 1)
        self.assertEqual(len(self._datastoreHandler.onlineDatastores), 7)
        self.assertEqual(len(self._datastoreHandler.offlineDatastores), 1)
        self.assertEqual(len(self._datastoreHandler.externalDatastores), 7)
|
|
#!/usr/bin/python
# Copyright (c) 2015, Michael LeBeane
# The University of Texas at Austin
# The Laboratory for Computer Architecture (LCA)
# All rights reserved.
#
# Redistribution of this source or derived binaries is not authorized without
# the express written consent of the original copyright holders.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Generates McPAT input files by populating a template
# @date: 11/21/2014
import csv
import os
import xml.etree.ElementTree as ET
import shutil
import sys
import collections
import argparse
def generate_mcpat(stats, output_dir, input_proc_model, l3_avail, bin_size, CORES, HW_THREADS, TSC_FREQUENCY):
print "********** Generating McPAT input files **********"
shutil.rmtree(output_dir,ignore_errors=True)
os.makedirs(output_dir)
# now, import the base McPat config file and augment the runtime statistics with our data
tree = ET.parse(input_proc_model)
root = tree.getroot()
file_num = 0
for time_stamp in stats.itervalues().next().keys():
total_cycles = []
idle_cycles = []
busy_cycles = []
for i in range(0,HW_THREADS):
total_cycles.append(stats["CPU" + str(i)][time_stamp]["total_cycles"])
idle_cycles.append(stats["CPU" + str(i)][time_stamp]["idle_cycles"])
busy_cycles.append(stats["CPU" + str(i)][time_stamp]["busy_cycles"])
total_cycles= max(total_cycles)
idle_cycles = min(idle_cycles)
busy_cycles = max(busy_cycles)
# cycle information
    # These cycles define the simulation time only, so just express them as the TSC frequency multiplied by the bin size
root.find(".//*[@id='system']/stat[@name='total_cycles']").set ('value',str(bin_size * TSC_FREQUENCY))
d = stats["TOTAL"][time_stamp]
#l3 stats
if l3_avail:
# lets say 1/4 are writes and 3/4 are reads
l3_reads = int(d["l3_accesses"]) * 0.75
l3_writes = int(d["l3_accesses"]) * 0.25
l3_write_misses = int(d["l3_misses"]) * 0.25
l3_read_misses = int(d["l3_misses"]) * 0.75
root.find(".//*[@id='system.L30']/stat[@name='read_accesses']").set ('value',str(l3_reads))
root.find(".//*[@id='system.L30']/stat[@name='read_misses']").set ('value',str(l3_read_misses))
root.find(".//*[@id='system.L30']/stat[@name='write_accesses']").set ('value',str(l3_writes))
root.find(".//*[@id='system.L30']/stat[@name='write_misses']").set ('value',str(l3_write_misses))
#mc stats
if l3_avail:
# lets say 1/4 are writes and 3/4 are reads
memory_reads = l3_write_misses
memory_writes = l3_read_misses
root.find(".//*[@id='system.mc']/stat[@name='memory_accesses']").set ('value',str(int(d["l3_misses"])))
root.find(".//*[@id='system.mc']/stat[@name='memory_reads']").set ('value',str(memory_reads))
root.find(".//*[@id='system.mc']/stat[@name='memory_writes']").set ('value',str(memory_writes))
else:
root.find(".//*[@id='system.mc']/stat[@name='memory_accesses']").set ('value',str(int(d["l2_write_misses"]) + int(d["l2_read_misses"])))
root.find(".//*[@id='system.mc']/stat[@name='memory_reads']").set ('value',str(int(d["l2_read_misses"])))
root.find(".//*[@id='system.mc']/stat[@name='memory_writes']").set ('value',str(int(d["l2_write_misses"])))
#Populate core level stats
# For core level stats, we need to merge all the HW_Threads into their shared physical resources
# threads are merged as follows (for a 4 core machine with 8 threads)
    # 0/4, 1/5, 2/6, 3/7
threads_per_core = HW_THREADS / CORES
core_id = 0
for k in range(0,CORES,1):
d = {}
for stat in stats.itervalues().next().itervalues().next().keys():
d[stat] = 0
for j in range(k, HW_THREADS, CORES):
d[stat] += stats["CPU" + str(j)][time_stamp][stat]
# The frequency and voltage depends on the host power state!
root.find(".//*[@id='system.core" + str(core_id) + "']/param[@name='clock_rate']").set ('value',str(int(TSC_FREQUENCY / 1000000)))
#root.find(".//*[@id='system.core" + str(core_id) + "']/param[@name='vdd']").set ('value',str(package_voltage))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='total_instructions']").set ('value',str(int(d["uops_dispatched"])))
# estimate int instructions as uops - FP - BR
int_estimate = (d["uops_dispatched"] - d["fp_uops_executed"] - d["branches_executed"])
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='int_instructions']").set ('value',str(int(int_estimate)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='fp_instructions']").set ('value',str(d["fp_uops_executed"]))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='branch_instructions']").set ('value',str(d["branches_executed"]))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='branch_mispredictions']").set ('value',str(d["branches_mispredicted"]))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='load_instructions']").set ('value',str(d["dcache_reads"]))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='store_instructions']").set ('value',str(d["dcache_writes"]))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='committed_instructions']").set ('value',str(d["uops_retired"]))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='committed_int_instructions']").set ('value',str(int_estimate))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='committed_fp_instructions']").set ('value',str(d["fp_uops_retired"]))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='context_switches']").set ('value',str(d["context_switches"]))
# this is a little complicated with hyperthreading
        # We don't want to count total cycles for each logical thread, only per physical core, so divide out the threads per core
# We assume that the busy cycles don't overlap, and can be added. This is an estimate and could result in a value greater than total_cycles, so fix that up!
per_core_total_cycles = d["total_cycles"] / threads_per_core
if (d["busy_cycles"]/2) > per_core_total_cycles:
per_core_busy_cycles = per_core_total_cycles
else:
per_core_busy_cycles = d["busy_cycles"]/2
per_core_idle_cycles = per_core_total_cycles - per_core_busy_cycles
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='total_cycles']").set ('value',str(bin_size * TSC_FREQUENCY))
# CORE STATS
rob_reads = d["uops_dispatched"]
rob_writes = d["uops_retired"]
rename_reads = 2 * int_estimate
rename_writes = int_estimate
fp_rename_reads = 2 * d["fp_uops_executed"]
fp_rename_writes = d["fp_uops_executed"]
inst_window_reads = int_estimate + d["branches_executed"]
inst_window_writes = int_estimate + d["branches_executed"]
inst_window_wakeup_accesses = int_estimate + d["branches_executed"]
fp_inst_window_reads = d["fp_uops_executed"]
fp_inst_window_writes = d["fp_uops_executed"]
fp_inst_window_wakeup_accesses = d["fp_uops_executed"]
int_regfile_reads = 2 * int_estimate
float_regfile_reads = 2 * d["fp_uops_executed"]
int_regfile_writes = int_estimate
float_regfile_writes = d["fp_uops_executed"]
ialu_accesses = int_estimate
fpu_accesses = d["fp_uops_executed"]
mul_accesses = 0.05 * int_estimate
cdb_alu_accesses = int_estimate
cdb_mul_accesses = 0.05 * int_estimate
cdb_fpu_accesses = d["fp_uops_executed"]
pipe_d = d["uops_dispatched"] / float(d["total_cycles"] )
IFU_d = d["uops_dispatched"] / float(d["total_cycles"] )
LSU_d = (d["dcache_reads"] + d["dcache_writes"]) / float(d["total_cycles"] )
MemManU_I_d = d["uops_dispatched"] / float(d["total_cycles"] )
MemManU_D_d = ( d["dcache_reads"] + d["dcache_writes"] ) / float(d["total_cycles"] )
ALU_d = int_estimate / float(d["total_cycles"] )
MUL_d = 0.3
FPU_d = ( d["fp_uops_executed"] * 20 ) / float(d["total_cycles"] )
ALU_cdb = int_estimate / float(d["total_cycles"] )
MUL_cdb = 0.3
FPU_cdb = ( d["fp_uops_executed"] ) / float(d["total_cycles"] )
pipeline_duty_cycle = 1.0 if pipe_d > 1.0 else pipe_d
IFU_duty_cycle = 1.0 if IFU_d > 1.0 else IFU_d
LSU_duty_cycle = 1.0 if LSU_d > 1.0 else LSU_d
MemManU_I_duty_cycle = 1.0 if MemManU_I_d > 1.0 else MemManU_I_d
MemManU_D_duty_cycle = 1.0 if MemManU_D_d > 1.0 else MemManU_D_d
ALU_duty_cycle = 1.0 if ALU_d > 1.0 else ALU_d
MUL_duty_cycle = 1.0 if MUL_d > 1.0 else MUL_d
FPU_duty_cycle = 1.0 if FPU_d > 1.0 else FPU_d
ALU_cdb_duty_cycle = 1.0 if ALU_cdb > 1.0 else ALU_cdb
MUL_cdb_duty_cycle = 1.0 if MUL_cdb > 1.0 else MUL_cdb
FPU_cdb_duty_cycle = 1.0 if FPU_cdb > 1.0 else FPU_cdb
# mcpat requires the duty cycles for max dynamic power AND for regular dynamic power
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='pipeline_duty_cycle']").set ('value',str(pipeline_duty_cycle))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='IFU_duty_cycle']").set ('value',str(IFU_duty_cycle))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='LSU_duty_cycle']").set ('value',str(LSU_duty_cycle))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='MemManU_I_duty_cycle']").set ('value',str(MemManU_I_duty_cycle))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='MemManU_D_duty_cycle']").set ('value',str(MemManU_D_duty_cycle))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='ALU_duty_cycle']").set ('value',str(ALU_duty_cycle))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='MUL_duty_cycle']").set ('value',str(MUL_duty_cycle))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='FPU_duty_cycle']").set ('value',str(FPU_duty_cycle))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='ALU_cdb_duty_cycle']").set ('value',str(ALU_cdb_duty_cycle))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='MUL_cdb_duty_cycle']").set ('value',str(MUL_cdb_duty_cycle))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='FPU_cdb_duty_cycle']").set ('value',str(FPU_cdb_duty_cycle))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='ROB_reads']").set ('value',str(int(rob_reads)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='ROB_writes']").set ('value',str(int(rob_writes)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='rename_reads']").set ('value',str(int(rename_reads)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='rename_writes']").set ('value',str(int(rename_writes)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='fp_rename_reads']").set ('value',str(int(fp_rename_reads)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='fp_rename_writes']").set ('value',str(int(fp_rename_writes)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='inst_window_reads']").set ('value',str(int(inst_window_reads)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='inst_window_writes']").set ('value',str(int(inst_window_writes)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='inst_window_wakeup_accesses']").set ('value',str(int(inst_window_wakeup_accesses)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='fp_inst_window_reads']").set ('value',str(int(fp_inst_window_reads)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='fp_inst_window_writes']").set ('value',str(int(fp_inst_window_writes)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='fp_inst_window_wakeup_accesses']").set ('value',str(int(fp_inst_window_wakeup_accesses)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='int_regfile_reads']").set ('value',str(int(int_regfile_reads)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='float_regfile_reads']").set ('value',str(int(float_regfile_reads)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='int_regfile_writes']").set ('value',str(int(int_regfile_writes)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='float_regfile_writes']").set ('value',str(int(float_regfile_writes)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='ialu_accesses']").set ('value',str(int(ialu_accesses)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='fpu_accesses']").set ('value',str(int(fpu_accesses)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='mul_accesses']").set ('value',str(int(mul_accesses)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='cdb_alu_accesses']").set ('value',str(int(cdb_alu_accesses)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='cdb_mul_accesses']").set ('value',str(int(cdb_mul_accesses)))
root.find(".//*[@id='system.core" + str(core_id) + "']/stat[@name='cdb_fpu_accesses']").set ('value',str(int(cdb_fpu_accesses)))
# haswell doesnt keep track of icache reads, so assume that we have a read every 3 instructions
icache_reads = int(d["instructions"]) / 3
dcache_accesses = int(d["dcache_reads"]) + int(d["dcache_writes"])
#itlb stats
root.find(".//*[@id='system.core" + str(core_id) + ".itlb']/stat[@name='total_accesses']").set ('value',str(icache_reads))
root.find(".//*[@id='system.core" + str(core_id) + ".itlb']/stat[@name='total_misses']").set ('value',str(int(d["itlb_misses"])))
#dtlb stats
root.find(".//*[@id='system.core" + str(core_id) + ".dtlb']/stat[@name='total_accesses']").set ('value',str(dcache_accesses))
root.find(".//*[@id='system.core" + str(core_id) + ".dtlb']/stat[@name='total_misses']").set ('value',str(int(d["dtlb_misses"])))
#icache stats
root.find(".//*[@id='system.core" + str(core_id) + ".icache']/stat[@name='read_accesses']").set ('value',str(icache_reads))
root.find(".//*[@id='system.core" + str(core_id) + ".icache']/stat[@name='read_misses']").set ('value',str(int(d["icache_misses"])))
#dcache stats
root.find(".//*[@id='system.core" + str(core_id) + ".dcache']/stat[@name='read_accesses']").set ('value',str(int(d["dcache_reads"])))
root.find(".//*[@id='system.core" + str(core_id) + ".dcache']/stat[@name='read_misses']").set ('value',str(int(d["dcache_read_misses"])))
root.find(".//*[@id='system.core" + str(core_id) + ".dcache']/stat[@name='write_accesses']").set ('value',str(int(d["dcache_writes"])))
root.find(".//*[@id='system.core" + str(core_id) + ".dcache']/stat[@name='write_misses']").set ('value',str(int(d["dcache_write_misses"])))
#l2 stats
# lets say 1/4 are writes and 3/4 are reads
l2_reads = int(d["l2_accesses"]) * 0.75
l2_writes = int(d["l2_accesses"]) * 0.25
l2_write_misses = int(d["l2_misses"]) * 0.25
l2_read_misses = int(d["l2_misses"]) * 0.75
root.find(".//*[@id='system.L2" + str(core_id) + "']/stat[@name='read_accesses']").set ('value',str(l2_reads))
root.find(".//*[@id='system.L2" + str(core_id) + "']/stat[@name='read_misses']").set ('value',str(l2_read_misses))
root.find(".//*[@id='system.L2" + str(core_id) + "']/stat[@name='write_accesses']").set ('value',str(l2_writes))
root.find(".//*[@id='system.L2" + str(core_id) + "']/stat[@name='write_misses']").set ('value',str(l2_write_misses))
core_id +=1
tree.write(output_dir + "/config_" + str(file_num) + ".xml")
file_num = file_num +1
print "********** McPAT input File Generation Complete **********"
# Run in standalone script mode
if __name__ == '__main__':
    # NOTE(review): the previous description/help strings ("Calculates DRAM
    # Energy/Power", "read MB", "write MB", "elapsed time in seconds") were
    # copy-pasted from an unrelated script; they now describe the actual
    # positional arguments passed to generate_mcpat().
    parser = argparse.ArgumentParser(description="Generates McPAT input XML files from performance-counter statistics")
    parser.add_argument("input_file", help="input statistics file (in csv format)")
    parser.add_argument("output_dir", help="directory to write the generated config_<n>.xml files to")
    parser.add_argument("input_proc_model", help="McPAT processor-model template XML file")
    parser.add_argument("microarch", help="target microarchitecture (e.g. haswell)")
    parser.add_argument("bin_size", help="size of each sampling bin in seconds", type=float)
    parser.add_argument("NUM_CORES", help="number of cores to generate per-core stats for", type=int)
    args = parser.parse_args()
    generate_mcpat(args.input_file, args.output_dir, args.input_proc_model, args.microarch, args.bin_size, args.NUM_CORES, 0)
|
|
from ..sequential import Sequential
from ..graph import Graph
from ..node import StartNode, HiddenNode, EndNode
from . import Conv2D, MaxPooling, RELU, ELU, BatchNormalization, Sum, \
Concat, AvgPooling, Conv2D_Transpose, Dropout, BaseModel
from ..utils import same_nd, valid_nd, devalid_nd, desame_nd
import numpy as np
class VGG16(BaseModel):
    @BaseModel.init_name_scope
    def __init__(self):
        '''
        Reference:
            Very Deep Convolutional Networks for Large-Scale Image Recognition
            (https://arxiv.org/abs/1409.1556)
        '''
        # (number of 3x3 conv layers, filter count) for each of the 5 blocks;
        # every conv is followed by batchnorm + relu, every block by a 2x2 maxpool
        block_spec = [(2, 64), (2, 128), (3, 256), (3, 512), (3, 512)]
        layers = []
        for nconv, nfilters in block_spec:
            for _ in range(nconv):
                layers.append(Conv2D(num_filters=nfilters, kernel_size=(3,3), stride=(1,1), padding='SAME'))
                layers.append(BatchNormalization())
                layers.append(RELU())
            layers.append(MaxPooling(poolsize=(2,2), stride=(2,2), padding='VALID'))
        self.startnode = StartNode(input_vars=[None])
        out_hn = HiddenNode(prev=[self.startnode], layers=layers)
        self.endnode = EndNode(prev=[out_hn])
class VGG19(BaseModel):
    @BaseModel.init_name_scope
    def __init__(self):
        '''
        Reference:
            Very Deep Convolutional Networks for Large-Scale Image Recognition
            (https://arxiv.org/abs/1409.1556)
        '''
        # (number of 3x3 conv layers, filter count) for each of the 5 blocks;
        # every conv is followed by batchnorm + relu, every block by a 2x2 maxpool
        block_spec = [(2, 64), (2, 128), (4, 256), (4, 512), (4, 512)]
        layers = []
        for nconv, nfilters in block_spec:
            for _ in range(nconv):
                layers.append(Conv2D(num_filters=nfilters, kernel_size=(3,3), stride=(1,1), padding='SAME'))
                layers.append(BatchNormalization())
                layers.append(RELU())
            layers.append(MaxPooling(poolsize=(2,2), stride=(2,2), padding='VALID'))
        self.startnode = StartNode(input_vars=[None])
        out_hn = HiddenNode(prev=[self.startnode], layers=layers)
        self.endnode = EndNode(prev=[out_hn])
class ResNetBase(BaseModel):
    @BaseModel.init_name_scope
    def __init__(self, config):
        '''
        Reference:
            Deep Residual Learning for Image Recognition (https://arxiv.org/abs/1512.03385)
        Args:
            config (list of ints): a list of 4 number of layers for each identity block
        '''
        # stem: 7x7 stride-2 conv followed by a 3x3 stride-2 maxpool
        layers = [
            Conv2D(num_filters=64, kernel_size=(7,7), stride=(2,2), padding='SAME'),
            BatchNormalization(),
            RELU(),
            MaxPooling(poolsize=(3,3), stride=(2,2), padding='VALID'),
        ]
        # four stages: each opens with a projection shortcut block whose output
        # width is 4x the stage width, then `config[i]` identity layers; the
        # first stage keeps stride 1, later stages downsample by 2
        stage_widths = [64, 128, 256, 512]
        stage_strides = [(1,1), (2,2), (2,2), (2,2)]
        for width, stage_stride, nlayers in zip(stage_widths, stage_strides, config):
            layers.append(ShortCutBlock(filters=[width, width, 4 * width], kernel_size=(3,3), stride=stage_stride))
            layers.append(IdentityBlock(filters=[width, width], nlayers=nlayers))
        self.startnode = StartNode(input_vars=[None])
        out_hn = HiddenNode(prev=[self.startnode], layers=layers)
        self.endnode = EndNode(prev=[out_hn])
class ResNetSmall(ResNetBase):
    @ResNetBase.init_name_scope
    def __init__(self, config):
        '''
        Reference:
            Deep Residual Learning for Image Recognition (https://arxiv.org/abs/1512.03385)
        Args:
            config (list of ints): a list of 2 number of layers for each identity block
        '''
        # stem: 7x7 stride-2 conv followed by a 3x3 stride-2 maxpool
        stem = [
            Conv2D(num_filters=64, kernel_size=(7,7), stride=(2,2), padding='SAME'),
            BatchNormalization(),
            RELU(),
            MaxPooling(poolsize=(3,3), stride=(2,2), padding='VALID'),
        ]
        # two residual stages, each a projection shortcut plus identity layers
        stages = [
            ShortCutBlock(filters=[64, 64, 128], kernel_size=(3,3), stride=(1,1)),
            IdentityBlock(filters=[64, 64], nlayers=config[0]),
            ShortCutBlock(filters=[128, 128, 128], kernel_size=(3,3), stride=(2,2)),
            IdentityBlock(filters=[128, 128], nlayers=config[1]),
        ]
        self.startnode = StartNode(input_vars=[None])
        out_hn = HiddenNode(prev=[self.startnode], layers=stem + stages)
        self.endnode = EndNode(prev=[out_hn])
class ResNet50(ResNetBase):
    def __init__(self, config=(2, 3, 5, 2)):
        '''ResNet-50: ResNetBase with the standard stage depths.
        Each stage contributes one ShortCutBlock plus `config[i]` identity
        layers, giving the paper's 3/4/6/3 blocks per stage.
        The default is a tuple rather than a list to avoid the shared
        mutable-default-argument pitfall (a list or tuple is accepted).
        '''
        super(ResNet50, self).__init__(config)
class ResNet101(ResNetBase):
    def __init__(self, config=(2, 3, 22, 2)):
        '''ResNet-101: ResNetBase with the standard stage depths.
        Each stage contributes one ShortCutBlock plus `config[i]` identity
        layers, giving the paper's 3/4/23/3 blocks per stage.
        The default is a tuple rather than a list to avoid the shared
        mutable-default-argument pitfall (a list or tuple is accepted).
        '''
        super(ResNet101, self).__init__(config)
class ResNet152(ResNetBase):
    def __init__(self, config=(2, 7, 35, 2)):
        '''ResNet-152: ResNetBase with the standard stage depths.
        Each stage contributes one ShortCutBlock plus `config[i]` identity
        layers, giving the paper's 3/8/36/3 blocks per stage.
        '''
        # BUG FIX: this previously called super(ResNet101, self), which walks
        # the wrong MRO and breaks ResNet152 construction; it must name this
        # class. (Default is also a tuple to avoid the mutable-default pitfall.)
        super(ResNet152, self).__init__(config)
class ShortCutBlock(BaseModel):
    @BaseModel.init_name_scope
    def __init__(self, filters, kernel_size=(3,3), stride=(1,1)):
        '''
        Reference:
            The shortcut block in Deep Residual Learning for Image Recognition
            (https://arxiv.org/abs/1512.03385)
        Args:
            filters (list of 3 ints): number of filters in different CNN layers
            kernel_size (tuple of 2 ints)
            stride (tuple of 2 ints)
        '''
        assert isinstance(filters, (list, tuple)) and len(filters) == 3
        reduce_ch, mid_ch, out_ch = filters
        # main path: 1x1 reduce -> kxk conv -> 1x1 expand, each conv followed
        # by batchnorm + relu; the stride is applied by the first 1x1 conv
        main_path = [
            Conv2D(num_filters=reduce_ch, kernel_size=(1,1), stride=stride, padding='VALID'),
            BatchNormalization(),
            RELU(),
            Conv2D(num_filters=mid_ch, kernel_size=kernel_size, stride=(1,1), padding='SAME'),
            BatchNormalization(),
            RELU(),
            Conv2D(num_filters=out_ch, kernel_size=(1,1), stride=(1,1), padding='VALID'),
            BatchNormalization(),
            RELU(),
        ]
        # projection shortcut matching the main path's channels and stride
        projection = [
            Conv2D(num_filters=out_ch, kernel_size=(1,1), stride=stride, padding='VALID'),
            BatchNormalization(),
            RELU(),
        ]
        self.startnode = StartNode(input_vars=[None])
        conv_hn = HiddenNode(prev=[self.startnode], layers=main_path)
        shortcuts_hn = HiddenNode(prev=[self.startnode], layers=projection)
        # element-wise sum of the two paths
        out_hn = HiddenNode(prev=[conv_hn, shortcuts_hn], input_merge_mode=Sum())
        self.endnode = EndNode(prev=[out_hn])
        self.output_channels = out_ch
class IdentityBlock(BaseModel):
    @BaseModel.init_name_scope
    def __init__(self, nlayers=2, filters=[32, 64]):
        '''
        Description:
            one identity block of a resnet in the paper Deep Residual Learning
            for Image Recognition (https://arxiv.org/abs/1512.03385)
        Args:
            nlayers (int): number recurrent cycles within one identity block
            filters (list of 2 ints): number of filters within one identity block
        '''
        # NOTE(review): mutable default for `filters` — benign here because it
        # is only read, never mutated, but a tuple default would be safer.
        assert isinstance(filters, (list, tuple)) and len(filters) == 2
        self.filters = filters
        self.nlayers = nlayers
    @BaseModel.init_name_scope
    def __init_var__(self, state_below):
        # Deferred graph construction: the last conv of each identity layer
        # must restore the input channel count `c`, which is only known once
        # the input tensor `state_below` is available.
        # Assumes state_below is (batch, height, width, channels) — the 4-way
        # unpack below fixes the rank.
        b, h, w, c = state_below.shape
        c = int(c)
        f1, f2 = self.filters
        def identity_layer(in_hn):
            # 1x1 reduce (f1) -> 3x3 conv (f2) -> 1x1 restore to c channels,
            # each conv followed by batchnorm + relu
            layers = []
            layers.append(Conv2D(num_filters=f1, kernel_size=(1,1), stride=(1,1), padding='VALID'))
            layers.append(BatchNormalization())
            layers.append(RELU())
            layers.append(Conv2D(num_filters=f2, kernel_size=(3,3), stride=(1,1), padding='SAME'))
            layers.append(BatchNormalization())
            layers.append(RELU())
            layers.append(Conv2D(num_filters=c, kernel_size=(1,1), stride=(1,1), padding='VALID'))
            layers.append(BatchNormalization())
            layers.append(RELU())
            out_hn = HiddenNode(prev=[in_hn], layers=layers)
            return out_hn
        self.startnode = in_hn = StartNode(input_vars=[None])
        # Chain nlayers residual units: each sums its own output with its input
        for _ in range(self.nlayers):
            out_hn = identity_layer(in_hn)
            in_hn = HiddenNode(prev=[out_hn, in_hn], input_merge_mode=Sum())
        self.endnode = EndNode(prev=[in_hn])
class DenseBlock(BaseModel):
    @BaseModel.init_name_scope
    def __init__(self, growth_rate, nlayers):
        '''
        Description:
            one dense block from the densely connected CNN (Densely Connected
            Convolutional Networks https://arxiv.org/abs/1608.06993)
        Args:
            growth_rate (int): number of filters to grow inside one denseblock
            nlayers (int): number of layers in one block, one layer refers to
                one group of batchnorm, relu and conv2d
        '''
        def _grow(node):
            # conv -> batchnorm -> relu producing `growth_rate` new channels
            conv_hn = HiddenNode(
                prev=[node],
                layers=[
                    Conv2D(num_filters=growth_rate, kernel_size=(3,3), stride=(1,1), padding='SAME'),
                    BatchNormalization(),
                    RELU(),
                ],
            )
            # concatenate the new feature maps onto the running input along
            # the channel axis (dense connectivity)
            return HiddenNode(prev=[node, conv_hn], input_merge_mode=Concat(axis=-1))
        node = self.startnode = StartNode(input_vars=[None])
        for _ in range(nlayers):
            node = _grow(node)
        self.endnode = EndNode(prev=[node])
class TransitionLayer(BaseModel):
    @BaseModel.init_name_scope
    def __init__(self, num_filters):
        '''
        Description:
            The transition layer of densenet (Densely Connected Convolutional Networks https://arxiv.org/abs/1608.06993)
        '''
        # 1x1 conv to set the channel count, then a 2x2 average-pool that
        # halves the spatial resolution between dense blocks
        pipeline = [
            Conv2D(num_filters=num_filters, kernel_size=(1,1), stride=(1,1), padding='SAME'),
            BatchNormalization(),
            RELU(),
            AvgPooling(poolsize=(2,2), stride=(2,2), padding='VALID'),
        ]
        self.startnode = StartNode(input_vars=[None])
        out_hn = HiddenNode(prev=[self.startnode], layers=pipeline)
        self.endnode = EndNode(prev=[out_hn])
class DenseNet(BaseModel):
    @BaseModel.init_name_scope
    def __init__(self, ndense=3, growth_rate=12, nlayer1blk=12):
        '''
        Reference:
            Densely Connected Convolutional Networks (https://arxiv.org/abs/1608.06993)
        Args:
            ndense (int): number of dense blocks
            growth_rate (int): number of filters each dense-block layer adds
            nlayer1blk (int): number of layers in one block, one layer refers to
                one group of batchnorm, relu and conv2d
        '''
        # stem: a single 3x3 conv + batchnorm + relu
        layers = []
        layers.append(Conv2D(num_filters=16, kernel_size=(3,3), stride=(1,1), padding='SAME'))
        layers.append(BatchNormalization())
        layers.append(RELU())
        # NOTE(review): this builds ndense+1 dense blocks in total (one here,
        # ndense-1 in the loop, one final below) even though the docstring
        # says `ndense` is the number of dense blocks — confirm intent.
        layers.append(DenseBlock(growth_rate=growth_rate, nlayers=nlayer1blk))
        layers.append(TransitionLayer(num_filters=16))
        for _ in range(ndense-1):
            layers.append(DenseBlock(growth_rate=growth_rate, nlayers=nlayer1blk))
            layers.append(TransitionLayer(num_filters=16))
        # final dense block without a trailing transition layer
        layers.append(DenseBlock(growth_rate=growth_rate, nlayers=nlayer1blk))
        # layers.append(AvgPooling(poolsize=dense.output_shape, stride=(1,1), padding='VALID'))
        self.startnode = StartNode(input_vars=[None])
        model_hn = HiddenNode(prev=[self.startnode], layers=layers)
        self.endnode = EndNode(prev=[model_hn])
# TODO
# class FeaturePyramidNetwork(BaseModel):
# '''
# reference: Feature Pyramid Networks for Object Detection (https://arxiv.org/abs/1612.03144)
# '''
# pass
# TODO
# class PyramidPoolingModule(BaseModel):
# '''reference: Pyramid Scene Parsing Network (https://arxiv.org/abs/1612.01105)
# '''
# pass
class UNet(BaseModel):
    @BaseModel.init_name_scope
    def __init__(self, input_shape):
        '''
        Description:
            Encoder-decoder with skip connections in the style of U-Net
            (https://arxiv.org/abs/1505.04597).
        Args:
            input_shape: spatial shape of the input, threaded through
                same_nd/valid_nd to track feature-map sizes down the encoder.
                Assumes 2D spatial dims — TODO confirm against callers.
        '''
        def _encode_block(in_hn, shape, out_ch):
            # Downsample by 2 with maxpool, then two 3x3 SAME convs
            # (ELU + batchnorm each); returns the new node and tracked shape.
            blk = []
            blk.append(MaxPooling(poolsize=(2,2), stride=(2,2), padding='VALID'))
            shape = valid_nd(shape, kernel_size=(2,2), stride=(2,2))
            blk.append(Conv2D(num_filters=out_ch, kernel_size=(3,3), stride=(1,1), padding='SAME'))
            blk.append(ELU())
            shape = same_nd(shape, kernel_size=(3,3), stride=(1,1))
            blk.append(BatchNormalization())
            blk.append(Conv2D(num_filters=out_ch, kernel_size=(3,3), stride=(1,1), padding='SAME'))
            blk.append(ELU())
            shape = same_nd(shape, kernel_size=(3,3), stride=(1,1))
            blk.append(BatchNormalization())
            out_hn = HiddenNode(prev=[in_hn], layers=blk)
            return out_hn, shape
        def _merge_decode_block(deblk_hn, blk_hn, out_ch):
            # Concatenate the decoder path with the matching encoder skip
            # connection, apply two 3x3 convs, then upsample by 2 with a
            # transposed conv (each followed by ELU + batchnorm).
            blk = []
            blk.append(Conv2D(num_filters=out_ch, kernel_size=(3,3), stride=(1,1), padding='SAME'))
            blk.append(ELU())
            blk.append(BatchNormalization())
            blk.append(Conv2D(num_filters=out_ch, kernel_size=(3,3), stride=(1,1), padding='SAME'))
            blk.append(ELU())
            blk.append(BatchNormalization())
            blk.append(Conv2D_Transpose(num_filters=out_ch,
                                        kernel_size=(2,2), stride=(2,2), padding='SAME'))
            blk.append(ELU())
            blk.append(BatchNormalization())
            out_hn = HiddenNode(prev=[deblk_hn, blk_hn],
                                input_merge_mode=Concat(axis=-1),
                                layers=blk)
            return out_hn
        # encoding
        # First block has no pooling: two 3x3 convs at full resolution.
        blk1 = []
        blk1.append(Conv2D(num_filters=64, kernel_size=(3,3), stride=(1,1), padding='SAME'))
        blk1.append(ELU())
        shape = same_nd(input_shape, kernel_size=(3,3), stride=(1,1))
        blk1.append(BatchNormalization())
        blk1.append(Conv2D(num_filters=64, kernel_size=(3,3), stride=(1,1), padding='SAME'))
        blk1.append(ELU())
        b1_shape = same_nd(shape, kernel_size=(3,3), stride=(1,1))
        blk1.append(BatchNormalization())
        self.startnode = StartNode(input_vars=[None])
        blk1_hn = HiddenNode(prev=[self.startnode], layers=blk1)
        # Encoder: each block halves the resolution and doubles the channels.
        blk2_hn, b2_shape = _encode_block(blk1_hn, b1_shape, 128)
        blk3_hn, b3_shape = _encode_block(blk2_hn, b2_shape, 256)
        blk4_hn, b4_shape = _encode_block(blk3_hn, b3_shape, 512)
        # downsampling + conv
        # Bottleneck: pool to the lowest resolution, two 1024-channel convs.
        deblk4 = []
        deblk4.append(MaxPooling(poolsize=(2,2), stride=(2,2), padding='VALID'))
        shape = valid_nd(b4_shape, kernel_size=(2,2), stride=(2,2))
        deblk4.append(Conv2D(num_filters=1024, kernel_size=(3,3), stride=(1,1), padding='SAME'))
        deblk4.append(ELU())
        shape = same_nd(shape, kernel_size=(3,3), stride=(1,1))
        deblk4.append(BatchNormalization())
        deblk4.append(Conv2D(num_filters=1024, kernel_size=(3,3), stride=(1,1), padding='SAME'))
        deblk4.append(ELU())
        out_shape = same_nd(shape, kernel_size=(3,3), stride=(1,1))
        deblk4.append(BatchNormalization())
        # deconvolve
        # Upsample back to the blk4 resolution before the first merge.
        deblk4.append(Conv2D_Transpose(num_filters=1024,
                                       kernel_size=(2,2), stride=(2,2), padding='SAME'))
        deblk4.append(ELU())
        deblk4.append(BatchNormalization())
        # decode and merge
        # Decoder: each step merges with the same-resolution encoder output.
        deblk4_hn = HiddenNode(prev=[blk4_hn], layers=deblk4)
        deblk3_hn = _merge_decode_block(deblk4_hn, blk4_hn, out_ch=256)
        deblk2_hn = _merge_decode_block(deblk3_hn, blk3_hn, out_ch=128)
        deblk1_hn = _merge_decode_block(deblk2_hn, blk2_hn, out_ch=64)
        # reduce channels
        # Final merge with the full-resolution blk1 features, narrowing to 16
        # channels for the output.
        blk = []
        blk.append(Conv2D(num_filters=32, kernel_size=(3,3), stride=(1,1), padding='SAME'))
        blk.append(ELU())
        blk.append(BatchNormalization())
        blk.append(Conv2D(num_filters=16, kernel_size=(3,3), stride=(1,1), padding='SAME'))
        blk.append(ELU())
        blk.append(BatchNormalization())
        deblk0_hn = HiddenNode(prev=[deblk1_hn, blk1_hn],
                               input_merge_mode=Concat(axis=-1),
                               layers=blk)
        self.endnode = EndNode(prev=[deblk0_hn])
|
|
"""Add the effects of instrumental noise into the simulation.
This is separated out into multiple tasks. The first, :class:`ReceiverTemperature`
adds in the effects of instrumental noise bias into the data. The second,
:class:`SampleNoise`, takes a timestream which is assumed to be the expected (or
average) value and returns an observed time stream. The :class:`GaussianNoise`
task adds the effects of Gaussian distributed noise into visibility data.
The :class:`GaussianNoiseDataset` task replaces visibility data with Gaussian distributed noise,
using the variance of the noise estimate in the existing data.
"""
import numpy as np
from caput import config
from caput.time import STELLAR_S
from ..core import task, containers, io
from ..util import tools, random
class ReceiverTemperature(task.SingleTask):
    """Add a basic receiver temperature term into the data.
    This class adds in an uncorrelated, frequency and time independent receiver
    noise temperature to the data. As it is uncorrelated this will only affect
    the auto-correlations. Note this only adds in the offset to the visibility,
    to add the corresponding random fluctuations to subsequently use the
    :class:`SampleNoise` task.
    Attributes
    ----------
    recv_temp : float
        The receiver temperature in Kelvin.
    """
    recv_temp = config.Property(proptype=float, default=0.0)
    def process(self, data):
        """Add the receiver temperature offset to every auto-correlation."""
        # The offset only appears in products correlating an input with
        # itself, so scan the product axis for autos.
        for prod_ind, prod in enumerate(data.prodstack):
            is_auto = prod[0] == prod[1]
            if is_auto:
                data.vis[:, prod_ind] += self.recv_temp
        return data
class GaussianNoiseDataset(task.SingleTask, random.RandomTask):
    """Generate a Gaussian distributed noise dataset using the
    noise estimates of an existing dataset.
    Attributes
    ----------
    dataset : string
        The dataset to fill with gaussian noise. If set to 'vis', will ensure
        autos are real. If not set, will look for a default dataset in a list
        of known containers.
    """
    dataset = config.Property(proptype=str, default=None)
    def process(self, data):
        """Generates a Gaussian distributed noise dataset,
        given the provided dataset's visibility weights
        Parameters
        ----------
        data : :class:`VisContainer`
            Any dataset which contains a vis and weight attribute.
            Note the modification is done in place.
        Returns
        -------
        data_noise : same as parameter `data`
            The previous dataset with the visibility replaced with
            a Gaussian distributed noise realisation.
        Raises
        ------
        ValueError
            If `dataset` is not configured and the container type is unknown.
        config.CaputConfigError
            If the requested dataset does not exist in the container.
        """
        # Container-type -> dataset name used when `self.dataset` is not set.
        _default_dataset = {
            containers.TimeStream: "vis",
            containers.SiderealStream: "vis",
            containers.HybridVisMModes: "vis",
            containers.RingMap: "map",
            containers.GridBeam: "beam",
            containers.TrackBeam: "beam",
        }
        if self.dataset is None:
            for cls, dataset in _default_dataset.items():
                if isinstance(data, cls):
                    dataset_name = dataset
                    break
            else:
                raise ValueError(
                    f"No default dataset known for {type(data)} container."
                )
        else:
            dataset_name = self.dataset
        # Idiomatic membership test (was `not dataset_name in data`).
        if dataset_name not in data:
            raise config.CaputConfigError(
                f"Dataset '{dataset_name}' does not exist in container {type(data)}."
            )
        # Distribute in something other than `stack`
        data.redistribute("freq")
        # Replace the dataset values with noise whose standard deviation is the
        # inverse square root of the stored weights (weight == 1/variance).
        dset = data[dataset_name][:]
        if np.iscomplexobj(dset):
            random.complex_normal(
                scale=tools.invert_no_zero(data.weight[:]) ** 0.5,
                out=dset,
                rng=self.rng,
            )
        else:
            self.rng.standard_normal(out=dset)
            dset *= tools.invert_no_zero(data.weight[:]) ** 0.5
        # We need to loop to ensure the autos are real and have the correct variance
        if dataset_name == "vis":
            for si, prod in enumerate(data.prodstack):
                if prod[0] == prod[1]:
                    # This is an auto-correlation: the real part carries all
                    # the power, so scale by sqrt(2) and zero the imaginary.
                    dset[:, si].real *= 2**0.5
                    dset[:, si].imag = 0.0
        return data
class GaussianNoise(task.SingleTask, random.RandomTask):
    """Add Gaussian distributed noise to a visibility dataset.
    Note that this is an approximation to the actual noise distribution good only
    when T_recv >> T_sky and delta_time * delta_freq >> 1.
    Attributes
    ----------
    ndays : float
        Multiplies the number of samples in each measurement.
    set_weights : bool
        Set the weights to the appropriate values.
    add_noise : bool
        Add Gaussian noise to the visibilities. By default this is True, but it may be
        desirable to only set the weights.
    recv_temp : float
        The temperature of the noise to add.
    """
    recv_temp = config.Property(proptype=float, default=50.0)
    ndays = config.Property(proptype=float, default=733.0)
    set_weights = config.Property(proptype=bool, default=True)
    add_noise = config.Property(proptype=bool, default=True)
    def setup(self, manager=None):
        """Set the telescope instance if a manager object is given.
        This is used to simulate noise for visibilities that are stacked
        over redundant baselines.
        Parameters
        ----------
        manager : manager.ProductManager, optional
            The telescope/manager used to set the `redundancy`. If not set,
            `redundancy` is derived from the data.
        """
        if manager is not None:
            self.telescope = io.get_telescope(manager)
        else:
            self.telescope = None
    def process(self, data):
        """Generate a noisy dataset.
        Parameters
        ----------
        data : :class:`containers.SiderealStream` or :class:`containers.TimeStream`
            The expected (i.e. noiseless) visibility dataset. Note the modification
            is done in place.
        Returns
        -------
        data_noise : same as parameter `data`
            The sampled (i.e. noisy) visibility dataset.
        """
        data.redistribute("freq")
        visdata = data.vis[:]
        # Get the time interval
        if isinstance(data, containers.SiderealStream):
            # 240 seconds of stellar time per degree of RA
            dt = 240 * (data.ra[1] - data.ra[0]) * STELLAR_S
            ntime = len(data.ra)
        else:
            dt = data.time[1] - data.time[0]
            ntime = len(data.time)
        # TODO: this assumes uniform channels
        df = data.index_map["freq"]["width"][0] * 1e6
        nfreq = data.vis.local_shape[0]
        nprod = len(data.prodstack)
        ninput = len(data.index_map["input"])
        # Consider if this data is stacked over redundant baselines or not.
        if (self.telescope is not None) and (nprod == self.telescope.nbase):
            redundancy = self.telescope.redundancy
        elif nprod == ninput * (ninput + 1) / 2:
            # Full upper triangle of products: every baseline appears once
            redundancy = np.ones(nprod)
        else:
            raise ValueError("Unexpected number of products")
        # Calculate the number of samples, this is a 1D array for the prod axis.
        nsamp = int(self.ndays * dt * df) * redundancy
        # Radiometer estimate of the per-sample noise standard deviation
        std = self.recv_temp / np.sqrt(nsamp)
        if self.add_noise:
            noise = random.complex_normal(
                (nfreq, nprod, ntime),
                scale=std[np.newaxis, :, np.newaxis],
                rng=self.rng,
            )
            # Iterate over the products to find the auto-correlations and add the noise
            for pi, prod in enumerate(data.prodstack):
                # Auto: multiply by sqrt(2) because auto has twice the variance
                if prod[0] == prod[1]:
                    # NOTE(review): only the real part of the noise is added
                    # here, so any imaginary part already present in the autos
                    # is left untouched — presumably zero; confirm.
                    visdata[:, pi].real += np.sqrt(2) * noise[:, pi].real
                else:
                    visdata[:, pi] += noise[:, pi]
        # Construct and set the correct weights in place
        if self.set_weights:
            for lfi, fi in visdata.enumerate(0):
                # weight = 1/variance; `std` is per-product, broadcast over time
                data.weight[fi] = 1.0 / std[:, np.newaxis] ** 2
        return data
class SampleNoise(task.SingleTask):
    """Add properly distributed noise to a visibility dataset.
    This task draws properly (complex Wishart) distributed samples from an input
    visibility dataset which is assumed to represent the expectation.
    See http://link.springer.com/article/10.1007%2Fs10440-010-9599-x for a
    discussion of the Bartlett decomposition for complex Wishart distributed
    quantities.
    Attributes
    ----------
    sample_frac : float
        Multiplies the number of samples in each measurement. For instance this
        could be a duty cycle if the correlator was not keeping up, or could be
        larger than one if multiple measurements were combined.
    set_weights : bool
        Set the weights to the appropriate values.
    """
    sample_frac = config.Property(proptype=float, default=1.0)
    # NOTE(review): `seed` appears unused in this class, and `self.rng` is
    # referenced below even though (unlike the sibling tasks in this module)
    # this class does not inherit random.RandomTask — confirm where `rng`
    # comes from, or whether the mixin is missing.
    seed = config.Property(proptype=int, default=None)
    set_weights = config.Property(proptype=bool, default=True)
    def process(self, data_exp):
        """Generate a noisy dataset.
        Parameters
        ----------
        data_exp : :class:`containers.SiderealStream` or :class:`containers.TimeStream`
            The expected (i.e. noiseless) visibility dataset. Must be the full
            triangle. Make sure you have added an instrumental noise bias if you
            want instrumental noise.
        Returns
        -------
        data_samp : same as parameter `data_exp`
            The sampled (i.e. noisy) visibility dataset.
        """
        from caput.time import STELLAR_S
        from ..util import _fast_tools
        data_exp.redistribute("freq")
        nfeed = len(data_exp.index_map["input"])
        # Get a reference to the base MPIArray. Attempting to do this in the
        # loop fails if not all ranks enter the loop (as there is an implied MPI
        # Barrier)
        vis_data = data_exp.vis[:]
        # Get the time interval
        if isinstance(data_exp, containers.SiderealStream):
            # 240 seconds of stellar time per degree of RA
            dt = 240 * (data_exp.ra[1] - data_exp.ra[0]) * STELLAR_S
        else:
            dt = data_exp.time[1] - data_exp.time[0]
        # Iterate over frequencies
        for lfi, fi in vis_data.enumerate(0):
            # Get the frequency interval
            df = data_exp.index_map["freq"]["width"][fi] * 1e6
            # Calculate the number of samples
            nsamp = int(self.sample_frac * dt * df)
            # Iterate over time
            for lti, ti in vis_data.enumerate(2):
                # Unpack visibilites into full matrix
                vis_utv = vis_data[lfi, :, lti].view(np.ndarray).copy()
                vis_mat = np.zeros((nfeed, nfeed), dtype=vis_utv.dtype)
                _fast_tools._unpack_product_array_fast(
                    vis_utv, vis_mat, np.arange(nfeed), nfeed
                )
                # Draw a complex Wishart sample with nsamp degrees of freedom
                # and renormalise it back into an average over samples
                vis_samp = random.complex_wishart(vis_mat, nsamp, rng=self.rng) / nsamp
                # Repack the upper triangle into the product axis
                vis_data[lfi, :, lti] = vis_samp[np.triu_indices(nfeed)]
            # Construct and set the correct weights in place
            if self.set_weights:
                autos = tools.extract_diagonal(vis_data[lfi], axis=0).real
                weight_fac = nsamp**0.5 / autos
                tools.apply_gain(
                    data_exp.weight[fi][np.newaxis, ...],
                    weight_fac[np.newaxis, ...],
                    out=data_exp.weight[fi][np.newaxis, ...],
                )
        return data_exp
|
|
#!/usr/bin/env python
# encoding: utf-8
import os
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from framework.auth.core import User
from website import settings
from website.app import init_app
from website.conferences.model import Conference
def main():
    """Bootstrap the OSF application, then seed/refresh conference records."""
    # Routes are unnecessary for a one-off data-population script.
    init_app(set_backends=True, routes=False)
    populate_conferences()
# Static registry of OSF meetings, keyed by endpoint slug.
# Per-entry schema (consumed by populate_conferences):
#   name            - human-readable conference title
#   info_url        - optional link to the conference page
#   logo_url        - optional banner/logo image URL (absolute or static path)
#   active          - whether the meeting currently accepts submissions
#   admins          - list of admin user emails; each must already be registered
#   public_projects - whether submissions are created as public projects
#     NOTE(review): 'spsp2015' omits this key — presumably the model default
#     applies; confirm against the Conference model.
MEETING_DATA = {
    'spsp2014': {
        'name': 'Society for Personality and Social Psychology 2014',
        'info_url': None,
        'logo_url': None,
        'active': False,
        'admins': [],
        'public_projects': True,
    },
    'asb2014': {
        'name': 'Association of Southeastern Biologists 2014',
        'info_url': 'http://www.sebiologists.org/meetings/talks_posters.html',
        'logo_url': None,
        'active': False,
        'admins': [],
        'public_projects': True,
    },
    'aps2014': {
        'name': 'Association for Psychological Science 2014',
        'info_url': 'http://centerforopenscience.org/aps/',
        'logo_url': '/static/img/2014_Convention_banner-with-APS_700px.jpg',
        'active': False,
        'admins': [],
        'public_projects': True,
    },
    'annopeer2014': {
        'name': '#annopeer',
        'info_url': None,
        'logo_url': None,
        'active': False,
        'admins': [],
        'public_projects': True,
    },
    'cpa2014': {
        'name': 'Canadian Psychological Association 2014',
        'info_url': None,
        'logo_url': None,
        'active': False,
        'admins': [],
        'public_projects': True,
    },
    'filaments2014': {
        'name': 'National Radio Astronomy Observatory Filaments 2014',
        'info_url': None,
        'logo_url': 'https://science.nrao.edu/science/meetings/2014/'
                    'filamentary-structure/images/filaments2014_660x178.png',
        'active': False,
        'admins': [
            'lvonschi@nrao.edu',
            'presentations@osf.io',
            # 'Dkim@nrao.edu',
        ],
        'public_projects': True,
    },
    'bitss2014': {
        'name': 'Berkeley Initiative for Transparency in the Social Sciences Research Transparency Forum 2014',
        'info_url': None,
        'logo_url': os.path.join(
            settings.STATIC_URL_PATH,
            'img',
            'conferences',
            'bitss.jpg',
        ),
        'active': False,
        'admins': [
            'gkroll@berkeley.edu',
            'presentations@osf.io',
            'awais@berkeley.edu',
        ],
        'public_projects': True,
    },
    'spsp2015': {
        'name': 'Society for Personality and Social Psychology 2015',
        'info_url': None,
        'logo_url': 'http://spspmeeting.org/CMSPages/SPSPimages/spsp2015banner.jpg',
        'active': True,
        'admins': [
            'meetings@spsp.org',
            'presentations@osf.io',
        ],
    },
    'aps2015': {
        'name': 'Association for Psychological Science 2015',
        'info_url': None,
        'logo_url': 'http://www.psychologicalscience.org/images/APS_2015_Banner_990x157.jpg',
        'active': True,
        'admins': [
            'presentations@osf.io',
        ],
        'public_projects': True,
    },
    'icps2015': {
        'name': 'International Convention of Psychological Science 2015',
        'info_url': None,
        'logo_url': 'http://icps.psychologicalscience.org/wp-content/themes/deepblue/images/ICPS_Website-header_990px.jpg',
        'active': True,
        'admins': [
            'presentations@osf.io',
        ],
        'public_projects': True,
    },
    'mpa2015': {
        'name': 'Midwestern Psychological Association 2015',
        'info_url': None,
        'logo_url': 'http://www.midwesternpsych.org/resources/Pictures/MPA%20logo.jpg',
        'active': True,
        'admins': [
            'mpa@kent.edu',
            'presentations@osf.io',
        ],
        'public_projects': True,
    },
    'NCCC2015': {
        'name': 'North Carolina Cognition Conference 2015',
        'info_url': None,
        'logo_url': None,
        'active': True,
        'admins': [
            'aoverman@elon.edu',
            'presentations@osf.io',
        ],
        'public_projects': True,
    },
    'VPRSF2015': {
        'name': 'Virginia Piedmont Regional Science Fair 2015',
        'info_url': None,
        'logo_url': 'http://vprsf.org/wp-content/themes/VPRSF/images/logo.png',
        'active': True,
        'admins': [
            'director@vprsf.org',
            'presentations@osf.io',
        ],
        'public_projects': True,
    },
    'APRS2015': {
        'name': 'UVA Annual Postdoctoral Research Symposium 2015',
        'info_url': None,
        'logo_url': 'http://s1.postimg.org/50qj9u6i7/GPA_Logo.jpg',
        'active': True,
        'admins': [
            'mhurst@virginia.edu',
            'presentations@osf.io',
        ],
        'public_projects': True,
    },
    'ASB2015': {
        'name': 'Association of Southeastern Biologists 2015',
        'info_url': None,
        'logo_url': 'http://www.sebiologists.org/wp/wp-content/uploads/2014/09/banner_image_Large.png',
        'active': True,
        'admins': [
            'amorris.mtsu@gmail.com',
            'presentations@osf.io',
        ],
        'public_projects': True,
    },
    'TeaP2015': {
        'name': 'Tagung experimentell arbeitender Psychologen 2015',
        'info_url': None,
        'logo_url': None,
        'active': True,
        'admins': [
            'presentations@osf.io',
        ],
        'public_projects': True,
    },
    'VSSEF2015': {
        'name': 'Virginia State Science and Engineering Fair 2015',
        'info_url': 'http://www.vmi.edu/conferences/vssef/vssef_home/',
        'logo_url': 'http://www.vmi.edu/uploadedImages/Images/Headers/vssef4.jpg',
        'active': True,
        'admins': [],
        'public_projects': True,
    },
    'RMPA2015': {
        'name': 'Rocky Mountain Psychological Association 2015',
        'info_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/85th_annual_rmpa_conference_program_hr.pdf',
        'logo_url': 'http://www.rockymountainpsych.org/uploads/7/4/2/6/7426961/header_images/1397234084.jpg',
        'active': True,
        'admins': [],
        'public_projects': True,
    },
    'ARP2015': {
        'name': 'Association for Research in Personality 2015',
        'info_url': 'http://www.personality-arp.org/conference/',
        'logo_url': 'http://www.personality-arp.org/wp-content/uploads/conference/st-louis-arp.jpg',
        'active': True,
        'admins': [],
        'public_projects': True,
    },
    'SEP2015': {
        'name': 'Society of Experimental Psychologists Meeting 2015',
        'info_url': 'http://faculty.virginia.edu/Society_of_Experimental_Psychologists/',
        'logo_url': 'http://www.sepsych.org/nav/images/SEP-header.gif',
        'active': True,
        'admins': [],
        'public_projects': True,
    },
    'Reid2015': {
        'name': 'L. Starling Reid Undergraduate Psychology Conference 2015',
        'info_url': 'http://avillage.web.virginia.edu/Psych/Conference',
        'logo_url': None,
        'active': True,
        'admins': [],
        'public_projects': True,
    },
    'NEEPS2015': {
        'name': 'Northeastern Evolutionary Psychology Conference 2015',
        'info_url': 'http://neeps2015.weebly.com/',
        'logo_url': None,
        'active': True,
        'admins': [],
        'public_projects': True,
    },
    'VaACS2015': {
        'name': 'Virginia Section American Chemical Society Student Poster Session 2015',
        'info_url': 'http://virginia.sites.acs.org/',
        'logo_url': 'http://virginia.sites.acs.org/Bulletin/15/UVA.jpg',
        'active': True,
        'admins': [],
        'public_projects': True,
    },
    'MADSSCi2015': {
        'name': 'Mid-Atlantic Directors and Staff of Scientific Cores & Southeastern Association of Shared Services 2015',
        'info_url': 'http://madssci.abrf.org',
        'logo_url': 'http://s24.postimg.org/qtc3baefp/2015madssci_seasr.png',
        'active': True,
        'admins': [],
        'public_projects': True,
    },
}
def populate_conferences():
    """Create a Conference record per MEETING_DATA entry, updating existing ones.

    Raises:
        RuntimeError: if any listed admin email has no registered User.
    """
    # Use items() (Py2/Py3 compatible) instead of the Py2-only iteritems().
    for endpoint, attrs in MEETING_DATA.items():
        endpoint = endpoint.strip()
        # Read the admin list non-destructively. The original code popped
        # 'admins' out of the module-level MEETING_DATA dicts, which made a
        # second invocation of this function fail with a KeyError.
        admin_emails = attrs.get('admins', [])
        conf_attrs = {key: value for key, value in attrs.items()
                      if key != 'admins'}
        admin_objs = []
        for email in admin_emails:
            try:
                user = User.find_one(Q('username', 'iexact', email))
                admin_objs.append(user)
            except ModularOdmException:
                # Admin accounts must pre-exist; fail loudly rather than
                # silently creating a conference with missing admins.
                raise RuntimeError('Username {0!r} is not registered.'.format(email))
        conf = Conference(
            endpoint=endpoint, admins=admin_objs, **conf_attrs
        )
        try:
            conf.save()
        except ModularOdmException:
            # Endpoint already taken: refresh the existing record in place.
            print('{0} Conference already exists. Updating existing record...'.format(endpoint))
            conf = Conference.find_one(Q('endpoint', 'eq', endpoint))
            for key, value in conf_attrs.items():
                setattr(conf, key, value)
            conf.admins = admin_objs
            changed_fields = conf.save()
            if changed_fields:
                print('Changed: {}'.format(changed_fields))
# Allow running this module directly as a one-off population script.
if __name__ == '__main__':
    main()
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.map_and_batch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class MapAndBatchTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Tests for the fused `tf.data.experimental.map_and_batch()` transformation.

  Most cases are parameterized over a `numa_aware` flag so each scenario runs
  both with and without the NUMA-aware implementation enabled via
  `Options.experimental_numa_aware`.
  """

  @parameterized.named_parameters(
      ("Default", None, None, False),
      ("SequentialCalls", 1, None, False),
      ("ParallelCalls", 2, None, False),
      ("ParallelBatches", None, 10, False),
      ("DefaultNUMA", None, None, True),
      ("SequentialCallsNUMA", 1, None, True),
      ("ParallelCallsNUMA", 2, None, True),
      ("ParallelBatchesNUMA", None, 10, True),
  )
  def testMapAndBatch(self, num_parallel_calls, num_parallel_batches,
                      numa_aware):
    """Test a dataset that maps a TF function across its input elements."""
    # The pipeline is TensorSliceDataset ->
    # RepeatDataset(count) -> MapAndBatchDataset(square_3, batch_size).
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))

    def _map_fn(x, y, z):
      # Element-wise square of each component.
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    def dataset_fn(batch_size, count, numa_aware=numa_aware):
      # Build the repeat + map_and_batch pipeline under test.
      dataset = dataset_ops.Dataset.from_tensor_slices(components).repeat(
          count).apply(
              batching.map_and_batch(
                  map_func=_map_fn,
                  batch_size=batch_size,
                  num_parallel_calls=num_parallel_calls,
                  num_parallel_batches=num_parallel_batches))
      if numa_aware:
        options = dataset_ops.Options()
        options.experimental_numa_aware = True
        dataset = dataset.with_options(options)
      return dataset

    # Batch of a finite input, where the batch_size divides the
    # total number of elements.
    dataset = dataset_fn(14, 28)
    get_next = self.getNext(dataset)
    # Leading (batch) dimension is statically unknown.
    self.assertEqual(
        [[None] + list(c.shape[1:]) for c in components],
        [shape.as_list()
         for shape in dataset_ops.get_legacy_output_shapes(dataset)])
    num_batches = (28 * 7) // 14
    for i in range(num_batches):
      result = self.evaluate(get_next())
      for component, result_component in zip(components, result):
        for j in range(14):
          self.assertAllEqual(component[(i * 14 + j) % 7]**2,
                              result_component[j])
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())

    # Batch of a finite input, where the batch_size does not
    # divide the total number of elements.
    get_next = self.getNext(dataset_fn(8, 14))
    # We expect (num_batches - 1) full-sized batches.
    num_batches = int(math.ceil((14 * 7) / 8))
    for i in range(num_batches - 1):
      result = self.evaluate(get_next())
      for component, result_component in zip(components, result):
        for j in range(8):
          self.assertAllEqual(component[(i * 8 + j) % 7]**2,
                              result_component[j])
    # The final, partial batch holds the remaining (14 * 7) % 8 elements.
    result = self.evaluate(get_next())
    for component, result_component in zip(components, result):
      for j in range((14 * 7) % 8):
        self.assertAllEqual(component[((num_batches - 1) * 8 + j) % 7]**2,
                            result_component[j])
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())

    # Batch of an empty input should fail straight away.
    self.assertDatasetProduces(dataset_fn(8, 0), expected_output=[])

    # Empty batch should be an initialization time error.
    with self.assertRaises(errors.InvalidArgumentError):
      self.assertDatasetProduces(dataset_fn(0, 14), expected_output=[])

  @parameterized.named_parameters(
      ("Even", False, False),
      ("Uneven", True, False),
      ("EvenNUMA", False, True),
      ("UnevenNUMA", True, True),
  )
  def testMapAndBatchPartialBatch(self, drop_remainder, numa_aware):
    """Checks static shapes and outputs with/without `drop_remainder`."""
    dataset = (
        dataset_ops.Dataset.range(10).apply(
            batching.map_and_batch(
                lambda x: array_ops.reshape(x * x, [1]),
                batch_size=4,
                drop_remainder=drop_remainder)))
    if numa_aware:
      options = dataset_ops.Options()
      options.experimental_numa_aware = True
      dataset = dataset.with_options(options)
    if drop_remainder:
      # Batch dimension is statically known when the remainder is dropped.
      self.assertEqual(
          [4, 1], dataset_ops.get_legacy_output_shapes(dataset).as_list())
    else:
      self.assertEqual(
          [None, 1], dataset_ops.get_legacy_output_shapes(dataset).as_list())
    expected_output = [[[0], [1], [4], [9]], [[16], [25], [36], [49]]]
    if not drop_remainder:
      expected_output.append([[64], [81]])
    self.assertDatasetProduces(dataset, expected_output=expected_output)

  @parameterized.named_parameters(
      ("Normal", False),
      ("NUMA", True),
  )
  def testMapAndBatchYieldsPartialBatch(self, numa_aware):
    """The default (no drop_remainder) yields a trailing partial batch."""
    dataset = (
        dataset_ops.Dataset.range(10).apply(
            batching.map_and_batch(lambda x: array_ops.reshape(x * x, [1]), 4)))
    if numa_aware:
      options = dataset_ops.Options()
      options.experimental_numa_aware = True
      dataset = dataset.with_options(options)
    self.assertEqual(
        [None, 1], dataset_ops.get_legacy_output_shapes(dataset).as_list())
    expected_output = [[[0], [1], [4], [9]], [[16], [25], [36], [49]],
                       [[64], [81]]]
    self.assertDatasetProduces(dataset, expected_output=expected_output)

  @parameterized.named_parameters(
      ("Normal", False),
      ("NUMA", True),
  )
  def testMapAndBatchParallelGetNext(self, numa_aware):
    """Multiple outstanding get_next calls produce disjoint, complete batches."""
    dataset = dataset_ops.Dataset.range(50000).apply(
        batching.map_and_batch(lambda x: x, batch_size=100))
    if numa_aware:
      options = dataset_ops.Options()
      options.experimental_numa_aware = True
      dataset = dataset.with_options(options)
    if context.executing_eagerly():
      iterator = iter(dataset)
      get_next = iterator._next_internal  # pylint: disable=protected-access
    else:
      iterator = dataset_ops.make_one_shot_iterator(dataset)
      get_next = iterator.get_next
    elements = []
    for _ in range(100):
      elements.append(get_next)
    for i in range(5):
      got = self.evaluate([element() for element in elements])
      # Batches may complete out of order; sort to compare against range.
      got.sort(key=lambda x: x[0])
      expected = []
      for j in range(100):
        expected.append(range(i * 10000 + j * 100, i * 10000 + (j + 1) * 100))
      self.assertAllEqual(got, expected)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate([element() for element in elements])

  @parameterized.named_parameters(
      ("Normal", False),
      ("NUMA", True),
  )
  def testMapAndBatchParallelGetNextDropRemainder(self, numa_aware):
    """As above, but with drop_remainder=True on a non-divisible cardinality."""
    dataset = dataset_ops.Dataset.range(49999).apply(
        batching.map_and_batch(
            lambda x: x, batch_size=100, drop_remainder=True))
    if numa_aware:
      options = dataset_ops.Options()
      options.experimental_numa_aware = True
      dataset = dataset.with_options(options)
    if context.executing_eagerly():
      iterator = iter(dataset)
      get_next = iterator._next_internal  # pylint: disable=protected-access
    else:
      iterator = dataset_ops.make_one_shot_iterator(dataset)
      get_next = iterator.get_next
    elements = []
    for _ in range(100):
      elements.append(get_next)
    # Only 4 rounds: the 499th (partial) batch is dropped.
    for i in range(4):
      got = self.evaluate([element() for element in elements])
      got.sort(key=lambda x: x[0])
      expected = []
      for j in range(100):
        expected.append(range(i * 10000 + j * 100, i * 10000 + (j + 1) * 100))
      self.assertAllEqual(got, expected)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate([element() for element in elements])

  @parameterized.named_parameters(
      ("Normal", False),
      ("NUMA", True),
  )
  def testMapAndBatchSparse(self, numa_aware):
    """map_and_batch supports sparse tensor outputs from the map function."""

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0]], values=(i * [1]), dense_shape=[1])

    dataset = dataset_ops.Dataset.range(10).apply(
        batching.map_and_batch(_sparse, 5))
    if numa_aware:
      options = dataset_ops.Options()
      options.experimental_numa_aware = True
      dataset = dataset.with_options(options)
    self.assertDatasetProduces(
        dataset,
        expected_output=[
            sparse_tensor.SparseTensorValue(
                indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
                values=[i * 5, i * 5 + 1, i * 5 + 2, i * 5 + 3, i * 5 + 4],
                dense_shape=[5, 1]) for i in range(2)
        ])

  @parameterized.named_parameters(
      ("Normal", False),
      ("NUMA", True),
  )
  def testMapAndBatchFails(self, numa_aware):
    """Test a dataset that maps a TF function across its input elements."""
    with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
      # check_numerics on 1.0/0.0 raises with message "oops" when evaluated.
      dataset = dataset_ops.Dataset.from_tensors(
          array_ops.check_numerics(
              constant_op.constant(1.0) / constant_op.constant(0.0), "oops"))
      dataset = dataset.apply(batching.map_and_batch(lambda x: x, 14))
      if numa_aware:
        options = dataset_ops.Options()
        options.experimental_numa_aware = True
        dataset = dataset.with_options(options)
      get_next = self.getNext(dataset)
      self.evaluate(get_next())

  @parameterized.named_parameters(
      ("Normal", False),
      ("NUMA", True),
  )
  def testMapAndBatchShapeMismatch(self, numa_aware):
    """Test a dataset that maps a TF function across its input elements."""

    def generator():
      # Final element has a different shape, so batching must fail.
      yield [1]
      yield [2]
      yield [3]
      yield [[4, 5, 6]]

    dataset = dataset_ops.Dataset.from_generator(
        generator, output_types=dtypes.int32)
    batch_size = 4
    dataset = dataset.apply(batching.map_and_batch(lambda x: x, batch_size))
    if numa_aware:
      options = dataset_ops.Options()
      options.experimental_numa_aware = True
      dataset = dataset.with_options(options)
    self.assertDatasetProduces(
        dataset,
        expected_error=(errors.InvalidArgumentError,
                        "number of elements does not match"))

  @parameterized.named_parameters(
      ("Normal", False),
      ("NUMA", True),
  )
  def testMapAndBatchImplicitDispose(self, numa_aware):
    # Tests whether a map and batch dataset will be cleaned up correctly when
    # the pipeline does not run it until exhaustion.
    # The pipeline is TensorSliceDataset -> RepeatDataset(1000) ->
    # MapAndBatchDataset(f=square_3, batch_size=100).
    components = (np.arange(1000),
                  np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
                  np.array(37.0) * np.arange(1000))

    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    dataset = dataset_ops.Dataset.from_tensor_slices(components).repeat(
        1000).apply(batching.map_and_batch(_map_fn, batch_size=100))
    dataset = dataset.prefetch(5)
    if numa_aware:
      options = dataset_ops.Options()
      options.experimental_numa_aware = True
      dataset = dataset.with_options(options)
    get_next = self.getNext(dataset)
    # Only consume a few batches; cleanup on dispose is the point of the test.
    for _ in range(3):
      self.evaluate(get_next())

  @parameterized.named_parameters(
      ("1", 0, False),
      ("2", 5, False),
      ("3", 10, False),
      ("4", 90, False),
      ("5", 95, False),
      ("6", 99, False),
      ("1NUMA", 0, True),
      ("2NUMA", 5, True),
      ("3NUMA", 10, True),
      ("4NUMA", 90, True),
      ("5NUMA", 95, True),
      ("6NUMA", 99, True),
  )
  def testMapAndBatchMapError(self, threshold, numa_aware):
    """Errors raised by the map function surface at the failing batch."""

    def raising_py_fn(i):
      # Fails for every element at or beyond `threshold`.
      if i >= threshold:
        raise StopIteration()
      else:
        return i

    dataset = dataset_ops.Dataset.range(100).apply(
        batching.map_and_batch(
            lambda x: script_ops.py_func(raising_py_fn, [x], dtypes.int64),
            batch_size=10))
    if numa_aware:
      options = dataset_ops.Options()
      options.experimental_numa_aware = True
      dataset = dataset.with_options(options)
    get_next = self.getNext(dataset)

    # Full batches before the threshold succeed in both implementations.
    for i in range(threshold // 10):
      self.assertAllEqual([i * 10 + j for j in range(10)],
                          self.evaluate(get_next()))
    # NOTE: the two implementations report the failure differently: the
    # NUMA-aware path yields the partial batch before the failing element
    # and then ends; the default path raises InvalidArgumentError for each
    # remaining batch.
    if numa_aware:
      if threshold % 10 != 0:
        self.assertAllEqual(
            [threshold // 10 * 10 + j for j in range(threshold % 10)],
            self.evaluate(get_next()))
    else:
      for i in range(threshold // 10, 10):
        with self.assertRaises(errors.InvalidArgumentError):
          self.evaluate(get_next())
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())

  @parameterized.named_parameters(
      ("1", False, dtypes.bool, False),
      ("2", -42, dtypes.int8, False),
      ("3", -42, dtypes.int16, False),
      ("4", -42, dtypes.int32, False),
      ("5", -42, dtypes.int64, False),
      ("6", 42, dtypes.uint8, False),
      ("7", 42, dtypes.uint16, False),
      ("8", 42.0, dtypes.float16, False),
      ("9", 42.0, dtypes.float32, False),
      ("10", 42.0, dtypes.float64, False),
      ("11", b"hello", dtypes.string, False),
      ("1NUMA", False, dtypes.bool, True),
      ("2NUMA", -42, dtypes.int8, True),
      ("3NUMA", -42, dtypes.int16, True),
      ("4NUMA", -42, dtypes.int32, True),
      ("5NUMA", -42, dtypes.int64, True),
      ("6NUMA", 42, dtypes.uint8, True),
      ("7NUMA", 42, dtypes.uint16, True),
      ("8NUMA", 42.0, dtypes.float16, True),
      ("9NUMA", 42.0, dtypes.float32, True),
      ("10NUMA", 42.0, dtypes.float64, True),
      ("11NUMA", b"hello", dtypes.string, True),
  )
  def testMapAndBatchTypes(self, element, dtype, numa_aware):
    """Identity map_and_batch round-trips every supported element dtype."""

    def gen():
      yield element

    dataset = dataset_ops.Dataset.from_generator(gen, dtype).repeat(100).apply(
        batching.map_and_batch(lambda x: x, batch_size=10))
    if numa_aware:
      options = dataset_ops.Options()
      options.experimental_numa_aware = True
      dataset = dataset.with_options(options)
    get_next = self.getNext(dataset)
    for _ in range(10):
      self.assertAllEqual([element for _ in range(10)],
                          self.evaluate(get_next()))

  @parameterized.named_parameters(
      ("Identity", None, lambda x: x, None),
      ("Replicate", None, lambda x: (x, x), None),
      ("Swap", (None, None), lambda x, y: (y, x), None),
      ("Project", (None, None), lambda x, y: x, None),
  )
  def testShortCircuit(self, structure, map_fn, num_parallel_calls):
    """Structural map functions (identity/permute/project) batch correctly."""
    dataset = self.structuredDataset(structure).repeat().apply(
        batching.map_and_batch(map_fn, batch_size=10))
    get_next = self.getNext(dataset)
    if isinstance(structure, tuple):
      expected = map_fn(
          *self.evaluate(self.structuredElement(structure, shape=[10])))
    else:
      expected = map_fn(
          self.evaluate(self.structuredElement(structure, shape=[10])))
    self.assertAllEqual(expected, self.evaluate(get_next()))

  def testShortCircuitCapturedInput(self):
    """A map function returning only a captured variable batches correctly."""
    captured_t = variables.Variable(42)
    dataset = self.structuredDataset(None).repeat().apply(
        batching.map_and_batch(lambda x: captured_t, batch_size=10))
    self.evaluate(variables.global_variables_initializer())
    get_next = self.getNext(dataset, requires_initialization=True)
    self.assertAllEqual([42] * 10, self.evaluate(get_next()))

  @parameterized.named_parameters(
      ("Normal", False),
      ("NUMA", True),
  )
  def testMapAndBatchControlFlow(self, numa_aware):
    """A map function containing a control-flow-v2 cond batches correctly."""

    def map_fn(x):
      # Temporarily force control flow v2 for the cond, then restore.
      previous_control_flow_v2_value = control_flow_util.ENABLE_CONTROL_FLOW_V2
      control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
      return_value = control_flow_ops.cond(x < 50, lambda: x + 1, lambda: x * x)
      control_flow_util.ENABLE_CONTROL_FLOW_V2 = previous_control_flow_v2_value
      return return_value

    dataset = dataset_ops.Dataset.range(100).apply(
        batching.map_and_batch(map_fn, batch_size=10))
    if numa_aware:
      options = dataset_ops.Options()
      options.experimental_numa_aware = True
      dataset = dataset.with_options(options)
    get_next = self.getNext(dataset)
    for i in range(10):
      if i < 5:
        # Elements below 50 take the x + 1 branch.
        self.assertAllEqual([i * 10 + j + 1 for j in range(10)],
                            self.evaluate(get_next()))
      else:
        # Elements at or above 50 take the x * x branch.
        self.assertAllEqual(
            [((i * 10) + j) * ((i * 10) + j) for j in range(10)],
            self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run off-policy policy evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
import dual_dice.algos.dual_dice as dual_dice
import dual_dice.algos.neural_dual_dice as neural_dual_dice
import dual_dice.gridworld.environments as gridworld_envs
import dual_dice.gridworld.policies as gridworld_policies
import dual_dice.transition_data as transition_data
# Command-line configuration for the off-policy evaluation experiment.
FLAGS = flags.FLAGS
flags.DEFINE_integer('seed', 1, 'Initial NumPy random seed.')
flags.DEFINE_integer('num_seeds', 1, 'How many seeds to run.')
flags.DEFINE_integer('num_trajectories', 200,
                     'Number of trajectories to collect.')
flags.DEFINE_integer('max_trajectory_length', 400,
                     'Cutoff trajectory at this step.')
flags.DEFINE_float('alpha', 0.0,
                   'Higher alpha corresponds to using '
                   'behavior policy that is closer to target policy.')
flags.DEFINE_float('gamma', 0.995, 'Discount factor.')
flags.DEFINE_bool('tabular_obs', True, 'Use tabular observations?')
flags.DEFINE_bool('tabular_solver', True, 'Use tabular solver?')
flags.DEFINE_string('env_name', 'grid', 'Environment to evaluate on.')
flags.DEFINE_string('solver_name', 'dice', 'Type of solver to use.')
flags.DEFINE_string('save_dir', None, 'Directory to save results to.')
flags.DEFINE_float('function_exponent', 1.5,
                   'Exponent for f function in DualDICE.')
flags.DEFINE_bool('deterministic_env', False, 'assume deterministic env.')
flags.DEFINE_integer('batch_size', 512,
                     'batch_size for training models.')
flags.DEFINE_integer('num_steps', 200000,
                     'num_steps for training models.')
flags.DEFINE_integer('log_every', 500, 'log after certain number of steps.')
flags.DEFINE_float('nu_learning_rate', 0.0001, 'nu lr')
flags.DEFINE_float('zeta_learning_rate', 0.001, 'z lr')
# Validators reject unsupported configurations at flag-parse time.
flags.register_validator(
    'solver_name',
    lambda value: value in ['dice'],
    message='Unknown solver.')
flags.register_validator(
    'env_name',
    lambda value: value in ['grid'],
    message='Unknown environment.')
flags.register_validator(
    'alpha', lambda value: 0 <= value <= 1, message='Invalid value.')
def get_env_and_policies(env_name, tabular_obs, alpha):
  """Get environment and policies.

  Args:
    env_name: Name of the environment; only 'grid' is supported.
    tabular_obs: Whether the environment uses tabular observations.
    alpha: Behavior-policy interpolation weight in [0, 1]; higher values make
      the behavior policy closer to the target policy.

  Returns:
    A tuple (env, behavior_policy, target_policy).

  Raises:
    ValueError: If `env_name` is not supported.
  """
  if env_name == 'grid':
    length = 10
    env = gridworld_envs.GridWalk(length, tabular_obs)
    policy0 = gridworld_policies.get_behavior_gridwalk_policy(
        env, tabular_obs, alpha)
    policy1 = gridworld_policies.get_target_gridwalk_policy(env, tabular_obs)
    env.discrete_actions = True
  else:
    # Bug fix: the original constructed the ValueError without raising it,
    # so an unknown env_name fell through to a NameError on `env` below.
    raise ValueError('Environment is not supported.')
  return env, policy0, policy1
def get_solver(solver_name, env, gamma, tabular_solver,
               summary_writer, summary_prefix):
  """Create solver object."""
  if tabular_solver:
    # Tabular path: exact per-state/action solver, no neural networks.
    if solver_name != 'dice':
      raise ValueError('Solver is not supported.')
    return dual_dice.TabularDualDice(env.num_states, env.num_actions, gamma)

  # Function-approximation path: bundle network/training hyperparameters,
  # most of which come from command-line flags.
  neural_solver_params = neural_dual_dice.NeuralSolverParameters(
      env.state_dim,
      env.action_dim,
      gamma,
      discrete_actions=env.discrete_actions,
      deterministic_env=FLAGS.deterministic_env,
      nu_learning_rate=FLAGS.nu_learning_rate,
      zeta_learning_rate=FLAGS.zeta_learning_rate,
      batch_size=FLAGS.batch_size,
      num_steps=FLAGS.num_steps,
      log_every=FLAGS.log_every,
      summary_writer=summary_writer,
      summary_prefix=summary_prefix)
  if solver_name != 'dice':
    raise ValueError('Solver is not supported.')
  return neural_dual_dice.NeuralDualDice(
      parameters=neural_solver_params,
      function_exponent=FLAGS.function_exponent)
def count_state_frequency(data, num_states, gamma):
  """Return the discounted, normalized visitation frequency of each state.

  Each transition contributes gamma ** time_step to the count of its state;
  the resulting counts are normalized to sum to one.
  """
  discounted_counts = np.zeros([num_states])
  for transition in data.iterate_once():
    discounted_counts[transition.state] += gamma ** transition.time_step
  return discounted_counts / discounted_counts.sum()
def main(argv):
  """Run DualDICE off-policy evaluation for one or more random seeds."""
  del argv
  # Pull all experiment configuration from command-line flags.
  start_seed = FLAGS.seed
  num_seeds = FLAGS.num_seeds
  num_trajectories = FLAGS.num_trajectories
  max_trajectory_length = FLAGS.max_trajectory_length
  alpha = FLAGS.alpha
  gamma = FLAGS.gamma
  nu_learning_rate = FLAGS.nu_learning_rate
  zeta_learning_rate = FLAGS.zeta_learning_rate
  tabular_obs = FLAGS.tabular_obs
  tabular_solver = FLAGS.tabular_solver
  if tabular_solver and not tabular_obs:
    raise ValueError('Tabular solver can only be used with tabular obs.')
  env_name = FLAGS.env_name
  solver_name = FLAGS.solver_name
  save_dir = FLAGS.save_dir

  # Encode the hyperparameters into a single string used for both the
  # summary directory and the results filename.
  hparam_format = ('{ENV}_{ALPHA}_{NUM_TRAJ}_{TRAJ_LEN}_'
                   '{N_LR}_{Z_LR}_{GAM}_{SOLVER}')
  solver_str = (solver_name + tabular_solver * '-tab' +
                '-%.1f' % FLAGS.function_exponent)
  hparam_str = hparam_format.format(
      ENV=env_name + tabular_obs * '-tab',
      ALPHA=alpha,
      NUM_TRAJ=num_trajectories,
      TRAJ_LEN=max_trajectory_length,
      GAM=gamma,
      N_LR=nu_learning_rate,
      Z_LR=zeta_learning_rate,
      SOLVER=solver_str)
  if save_dir:
    summary_dir = os.path.join(save_dir, hparam_str)
    if num_seeds == 1:
      summary_dir = os.path.join(summary_dir, 'seed%d' % start_seed)
    summary_writer = tf.summary.FileWriter(summary_dir)
  else:
    summary_writer = None

  # policy0 is the behavior policy; policy1 is the target policy.
  env, policy0, policy1 = get_env_and_policies(env_name, tabular_obs, alpha)

  results = []
  for seed in range(start_seed, start_seed + num_seeds):
    print('Seed', seed)
    if num_seeds == 1:
      summary_prefix = ''
    else:
      summary_prefix = 'seed%d/' % seed
    np.random.seed(seed)
    # Off-policy data.
    (behavior_data, behavior_avg_episode_rewards,
     behavior_avg_step_rewards) = transition_data.collect_data(
         env,
         policy0,
         num_trajectories,
         max_trajectory_length,
         gamma=gamma)
    print('Behavior average episode rewards', behavior_avg_episode_rewards)
    print('Behavior average step rewards', behavior_avg_step_rewards)
    # Oracle on-policy data.
    (target_data, target_avg_episode_rewards,
     target_avg_step_rewards) = transition_data.collect_data(
         env,
         policy1,
         num_trajectories,
         max_trajectory_length,
         gamma=gamma)
    print('Target (oracle) average episode rewards', target_avg_episode_rewards)
    print('Target (oracle) average step rewards', target_avg_step_rewards)
    if tabular_obs:
      # Diagnostic only: empirical state-density ratio from the two datasets.
      behavior_state_frequency = count_state_frequency(behavior_data,
                                                       env.num_states, gamma)
      target_state_frequency = count_state_frequency(target_data,
                                                     env.num_states, gamma)
      empirical_density_ratio = (
          target_state_frequency / (1e-8 + behavior_state_frequency))
      print('Empirical state density ratio', empirical_density_ratio[:4], '...')
    del target_data  # Don't use oracle in later code.
    # Get solver.
    density_estimator = get_solver(solver_name, env, gamma, tabular_solver,
                                   summary_writer, summary_prefix)
    # Solve for estimated density ratios.
    est_avg_rewards = density_estimator.solve(behavior_data, policy1)
    # Close estimator properly.
    density_estimator.close()
    print('Estimated (solver: %s) average step reward' % solver_name,
          est_avg_rewards)
    results.append(
        [behavior_avg_step_rewards, target_avg_step_rewards, est_avg_rewards])

  if save_dir is not None:
    # Persist [behavior, target, estimated] rewards for every seed.
    filename = os.path.join(save_dir, '%s.npy' % hparam_str)
    print('Saving results to %s' % filename)
    if not tf.gfile.IsDirectory(save_dir):
      tf.gfile.MkDir(save_dir)
    with tf.gfile.GFile(filename, 'w') as f:
      np.save(f, np.array(results))
  print('Done!')
# absl entry point: parses flags, then calls main(argv).
if __name__ == '__main__':
  app.run(main)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import mock
from oslo_serialization import jsonutils
from oslo_utils import units
from nova.compute import claims
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import vm_states
from nova import exception as exc
from nova import objects
from nova import test
# Canonical resource view returned by the fake virt driver's
# get_available_resource().  Tests deep-copy this (see setup_rt) and
# tweak individual fields; never mutate it in place.
_VIRT_DRIVER_AVAIL_RESOURCES = {
    'vcpus': 4,
    'memory_mb': 512,
    'local_gb': 6,
    'vcpus_used': 0,
    'memory_mb_used': 0,
    'local_gb_used': 0,
    'hypervisor_type': 'fake',
    'hypervisor_version': 0,
    'hypervisor_hostname': 'fakehost',
    'cpu_info': '',
    'numa_topology': None,
}
# Compute-node DB records matching _VIRT_DRIVER_AVAIL_RESOURCES; the
# free_* fields are derived from the totals minus the used amounts so
# the fixture stays internally consistent.
_COMPUTE_NODE_FIXTURES = [
    {
        'id': 1,
        # NOTE(jaypipes): Will be removed with the
        # detach-compute-node-from-service blueprint
        # implementation.
        'service_id': 1,
        'host': 'fake-host',
        'service': None,
        'vcpus': _VIRT_DRIVER_AVAIL_RESOURCES['vcpus'],
        'memory_mb': _VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'],
        'local_gb': _VIRT_DRIVER_AVAIL_RESOURCES['local_gb'],
        'vcpus_used': _VIRT_DRIVER_AVAIL_RESOURCES['vcpus_used'],
        'memory_mb_used': _VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used'],
        'local_gb_used': _VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used'],
        'hypervisor_type': 'fake',
        'hypervisor_version': 0,
        'hypervisor_hostname': 'fake-host',
        'free_ram_mb': (_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'] -
                        _VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used']),
        'free_disk_gb': (_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'] -
                         _VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used']),
        'current_workload': 0,
        'running_vms': 0,
        'cpu_info': '{}',
        'disk_available_least': 0,
        'host_ip': 'fake-ip',
        'supported_instances': None,
        'metrics': None,
        'pci_stats': None,
        'extra_resources': None,
        'stats': '{}',
        'numa_topology': None
    },
]
# Enabled nova-compute service record for 'fake-host'; returned by the
# mocked Service.get_by_compute_host in the tests below.
_SERVICE_FIXTURE = objects.Service(
    id=1,
    host='fake-host',
    binary='nova-compute',
    topic='compute',
    report_count=1,
    disabled=False,
    disabled_reason='')
# Two flavors keyed by instance_type_id: a small one (128MB/1 vCPU/1GB
# root) and a medium one (256MB/2 vCPU/5GB root).  Migration fixtures
# resize from type 1 to type 2.
_INSTANCE_TYPE_FIXTURES = {
    1: {
        'id': 1,
        'flavorid': 'fakeid-1',
        'name': 'fake1.small',
        'memory_mb': 128,
        'vcpus': 1,
        'root_gb': 1,
        'ephemeral_gb': 0,
        'swap': 0,
        'rxtx_factor': 0,
        'vcpu_weight': 1,
        'extra_specs': {},
    },
    2: {
        'id': 2,
        'flavorid': 'fakeid-2',
        'name': 'fake1.medium',
        'memory_mb': 256,
        'vcpus': 2,
        'root_gb': 5,
        'ephemeral_gb': 0,
        'swap': 0,
        'rxtx_factor': 0,
        'vcpu_weight': 1,
        'extra_specs': {},
    },
}
# A collection of system_metadata attributes that would exist in instances
# that have the instance type ID matching the dictionary key.
_INSTANCE_TYPE_SYS_META = {
    1: flavors.save_flavor_info({}, _INSTANCE_TYPE_FIXTURES[1]),
    2: flavors.save_flavor_info({}, _INSTANCE_TYPE_FIXTURES[2]),
}
# Resize-style system_metadata: built up in two steps so it carries both
# the 'old_'-prefixed (type 1) and 'new_'-prefixed (type 2) flavor info,
# as a resizing instance's metadata would.
_MIGRATION_SYS_META = flavors.save_flavor_info(
    {}, _INSTANCE_TYPE_FIXTURES[1], 'old_')
_MIGRATION_SYS_META = flavors.save_flavor_info(
    _MIGRATION_SYS_META, _INSTANCE_TYPE_FIXTURES[2], 'new_')
# 2 * Mi / Ki == 2048; used as the per-NUMA-cell memory amount below.
_2MB = 2 * units.Mi / units.Ki
# Guest NUMA topology: two single-CPU cells with _2MB memory each.
_INSTANCE_NUMA_TOPOLOGIES = {
    '2mb': objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(
            id=0, cpuset=set([1]), memory=_2MB, pagesize=0),
        objects.InstanceNUMACell(
            id=1, cpuset=set([3]), memory=_2MB, pagesize=0)]),
}
# Claim limits with 1.0 allocation ratios (i.e. no overcommit).
_NUMA_LIMIT_TOPOLOGIES = {
    '2mb': objects.NUMATopologyLimits(id=0,
                                      cpu_allocation_ratio=1.0,
                                      ram_allocation_ratio=1.0),
}
_NUMA_PAGE_TOPOLOGIES = {
    '2kb*8': objects.NUMAPagesTopology(size_kb=2, total=8, used=0)
}
# Host NUMA topology: two two-CPU cells, nothing used or pinned yet.
_NUMA_HOST_TOPOLOGIES = {
    '2mb': objects.NUMATopology(cells=[
        objects.NUMACell(id=0, cpuset=set([1, 2]), memory=_2MB,
                         cpu_usage=0, memory_usage=0,
                         mempages=[_NUMA_PAGE_TOPOLOGIES['2kb*8']],
                         siblings=[], pinned_cpus=set([])),
        objects.NUMACell(id=1, cpuset=set([3, 4]), memory=_2MB,
                         cpu_usage=0, memory_usage=0,
                         mempages=[_NUMA_PAGE_TOPOLOGIES['2kb*8']],
                         siblings=[], pinned_cpus=set([]))]),
}
# Two instances assigned to the tracked node: one ACTIVE (type 1, with a
# NUMA topology) and one DELETED (type 2) which should not consume
# resources in the tests below.
_INSTANCE_FIXTURES = [
    objects.Instance(
        id=1,
        host=None,  # prevent RT trying to lazy-load this
        node=None,
        uuid='c17741a5-6f3d-44a8-ade8-773dc8c29124',
        memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'],
        numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
        instance_type_id=1,
        vm_state=vm_states.ACTIVE,
        power_state=power_state.RUNNING,
        task_state=None,
        os_type='fake-os',  # Used by the stats collector.
        project_id='fake-project',  # Used by the stats collector.
    ),
    objects.Instance(
        id=2,
        host=None,
        node=None,
        uuid='33805b54-dea6-47b8-acb2-22aeb1b57919',
        memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
        numa_topology=None,
        instance_type_id=2,
        vm_state=vm_states.DELETED,
        power_state=power_state.SHUTDOWN,
        task_state=None,
        os_type='fake-os',
        project_id='fake-project-2',
    ),
]
# In-progress resize migrations, keyed by how 'fake-host' participates.
# All three resize from instance type 1 (old) to type 2 (new).
_MIGRATION_FIXTURES = {
    # A migration that has only this compute node as the source host
    'source-only': objects.Migration(
        id=1,
        instance_uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
        source_compute='fake-host',
        dest_compute='other-host',
        source_node='fake-node',
        dest_node='other-node',
        old_instance_type_id=1,
        new_instance_type_id=2,
        status='migrating'
    ),
    # A migration that has only this compute node as the dest host
    'dest-only': objects.Migration(
        id=2,
        instance_uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
        source_compute='other-host',
        dest_compute='fake-host',
        source_node='other-node',
        dest_node='fake-node',
        old_instance_type_id=1,
        new_instance_type_id=2,
        status='migrating'
    ),
    # A migration that has this compute node as both the source and dest host
    'source-and-dest': objects.Migration(
        id=3,
        instance_uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
        source_compute='fake-host',
        dest_compute='fake-host',
        source_node='fake-node',
        dest_node='fake-node',
        old_instance_type_id=1,
        new_instance_type_id=2,
        status='migrating'
    ),
}
# The instances behind the migrations above, keyed by instance_uuid so a
# mocked Instance.get_by_uuid can return them.  Each is mid-resize
# (task_state RESIZE_MIGRATING) and carries _MIGRATION_SYS_META with
# both old_ and new_ flavor info.
_MIGRATION_INSTANCE_FIXTURES = {
    # source-only
    'f15ecfb0-9bf6-42db-9837-706eb2c4bf08': objects.Instance(
        id=101,
        host=None,  # prevent RT trying to lazy-load this
        node=None,
        uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
        memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'],
        numa_topology=None,
        instance_type_id=1,
        vm_state=vm_states.ACTIVE,
        power_state=power_state.RUNNING,
        task_state=task_states.RESIZE_MIGRATING,
        system_metadata=_MIGRATION_SYS_META,
        os_type='fake-os',
        project_id='fake-project',
    ),
    # dest-only
    'f6ed631a-8645-4b12-8e1e-2fff55795765': objects.Instance(
        id=102,
        host=None,  # prevent RT trying to lazy-load this
        node=None,
        uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
        memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
        numa_topology=None,
        instance_type_id=2,
        vm_state=vm_states.ACTIVE,
        power_state=power_state.RUNNING,
        task_state=task_states.RESIZE_MIGRATING,
        system_metadata=_MIGRATION_SYS_META,
        os_type='fake-os',
        project_id='fake-project',
    ),
    # source-and-dest
    'f4f0bfea-fe7e-4264-b598-01cb13ef1997': objects.Instance(
        id=3,
        host=None,  # prevent RT trying to lazy-load this
        node=None,
        uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
        memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
        numa_topology=None,
        instance_type_id=2,
        vm_state=vm_states.ACTIVE,
        power_state=power_state.RUNNING,
        task_state=task_states.RESIZE_MIGRATING,
        system_metadata=_MIGRATION_SYS_META,
        os_type='fake-os',
        project_id='fake-project',
    ),
}
def overhead_zero(instance):
    """Fake overhead estimator: report zero memory overhead.

    Emulates a virt driver whose instances consume exactly their
    flavor's memory, with no per-instance hypervisor overhead.
    """
    return dict(memory_mb=0)
def setup_rt(hostname, nodename, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES,
             estimate_overhead=overhead_zero):
    """Sets up the resource tracker instance with mock fixtures.

    :param hostname: Host name given to the ResourceTracker.
    :param nodename: Node name given to the ResourceTracker.
    :param virt_resources: Optional override of the resource representation
                           returned by the virt driver's
                           `get_available_resource()` method.
    :param estimate_overhead: Optional override of a function that should
                              return overhead of memory given an instance
                              object. Defaults to returning zero overhead.
    :returns: Tuple of (resource tracker, scheduler client mock,
              virt driver mock).
    """
    cond_api_mock = mock.MagicMock()
    sched_client_mock = mock.MagicMock()
    notifier_mock = mock.MagicMock()
    vd = mock.MagicMock()
    # Make sure we don't change any global fixtures during tests
    virt_resources = copy.deepcopy(virt_resources)
    vd.get_available_resource.return_value = virt_resources
    vd.estimate_instance_overhead.side_effect = estimate_overhead
    # NOTE: use a single multi-manager `with` statement instead of the
    # deprecated contextlib.nested() (removed in Python 3); behavior is
    # identical for these argument-less patch context managers.
    with mock.patch('nova.conductor.API', return_value=cond_api_mock), \
            mock.patch('nova.scheduler.client.SchedulerClient',
                       return_value=sched_client_mock), \
            mock.patch('nova.rpc.get_notifier', return_value=notifier_mock):
        rt = resource_tracker.ResourceTracker(hostname, vd, nodename)
    return (rt, sched_client_mock, vd)
class BaseTestCase(test.NoDBTestCase):
    """Common scaffolding for the resource tracker test cases.

    Provides a lazily-built ResourceTracker (via _setup_rt) wired to
    mocked conductor, scheduler client and virt driver.
    """

    def setUp(self):
        super(BaseTestCase, self).setUp()
        self.rt = None
        self.flags(my_ip='fake-ip')

    def _setup_rt(self, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES,
                  estimate_overhead=overhead_zero):
        # Build the tracker for the canonical fake host/node and keep
        # handles to the mocks that tests assert against.
        rt, sched_client, driver = setup_rt(
            'fake-host', 'fake-node', virt_resources, estimate_overhead)
        self.rt = rt
        self.sched_client_mock = sched_client
        self.driver_mock = driver
        self.cond_api_mock = self.rt.conductor_api
class TestUpdateAvailableResources(BaseTestCase):
    """Verifies the resources dict that update_available_resource() builds
    and hands to RT._update() under various mixes of instances and
    in-progress migrations on the tracked compute node.
    """

    def _update_available_resources(self):
        # We test RT._update separately, since the complexity
        # of the update_available_resource() function is high enough as
        # it is, we just want to focus here on testing the resources
        # parameter that update_available_resource() eventually passes
        # to _update().
        with mock.patch.object(self.rt, '_update') as update_mock:
            self.rt.update_available_resource(mock.sentinel.ctx)
        return update_mock

    # NOTE: mock.patch decorators apply bottom-up, so the mock arguments
    # arrive in reverse order of the decorator list.
    @mock.patch('nova.objects.Service.get_by_compute_host')
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_no_instances_no_migrations_no_reserved(self, get_mock, migr_mock,
                                                    get_cn_mock, service_mock):
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        self._setup_rt()

        get_mock.return_value = []
        migr_mock.return_value = []
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        service_mock.return_value = _SERVICE_FIXTURE

        update_mock = self._update_available_resources()

        vd = self.driver_mock
        vd.get_available_resource.assert_called_once_with('fake-node')
        get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                         'fake-node',
                                         expected_attrs=[
                                             'system_metadata',
                                             'numa_topology'])
        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        migr_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                          'fake-node')

        expected_resources = {
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': 'fake-ip',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 6,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 512,
            'memory_mb_used': 0,
            'pci_device_pools': [],
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 0,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0
        }
        update_mock.assert_called_once_with(mock.sentinel.ctx,
                                            expected_resources)

    @mock.patch('nova.objects.Service.get_by_compute_host')
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_no_instances_no_migrations_reserved_disk_and_ram(
            self, get_mock, migr_mock, get_cn_mock, service_mock):
        # Reserved host disk/RAM must show up as "used" and reduce the
        # free amounts even with no instances present.
        self.flags(reserved_host_disk_mb=1024,
                   reserved_host_memory_mb=512)
        self._setup_rt()

        get_mock.return_value = []
        migr_mock.return_value = []
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        service_mock.return_value = _SERVICE_FIXTURE

        update_mock = self._update_available_resources()

        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')

        expected_resources = {
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': 'fake-ip',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 5,  # 6GB avail - 1 GB reserved
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 0,  # 512MB avail - 512MB reserved
            'memory_mb_used': 512,  # 0MB used + 512MB reserved
            'pci_device_pools': [],
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 1,  # 0GB used + 1 GB reserved
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0
        }
        update_mock.assert_called_once_with(mock.sentinel.ctx,
                                            expected_resources)

    @mock.patch('nova.objects.Service.get_by_compute_host')
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_some_instances_no_migrations(self, get_mock, migr_mock,
                                          get_cn_mock, service_mock):
        # Only the ACTIVE instance fixture (type 1) should consume
        # resources; the DELETED one must be ignored.
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        self._setup_rt()

        get_mock.return_value = _INSTANCE_FIXTURES
        migr_mock.return_value = []
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        service_mock.return_value = _SERVICE_FIXTURE

        update_mock = self._update_available_resources()

        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')

        expected_resources = {
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': 'fake-ip',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 5,  # 6 - 1 used
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 384,  # 512 - 128 used
            'memory_mb_used': 128,
            'pci_device_pools': [],
            # NOTE(jaypipes): Due to the design of the ERT, which now is used
            #                 track VCPUs, the actual used VCPUs isn't
            #                 "written" to the resources dictionary that is
            #                 passed to _update() like all the other
            #                 resources are. Instead, _update()
            #                 calls the ERT's write_resources() method, which
            #                 then queries each resource handler plugin for the
            #                 changes in its resource usage and the plugin
            #                 writes changes to the supplied "values" dict. For
            #                 this reason, all other resources except VCPUs
            #                 are accurate here. :(
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 1,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 1  # One active instance
        }
        update_mock.assert_called_once_with(mock.sentinel.ctx,
                                            expected_resources)

    @mock.patch('nova.objects.Service.get_by_compute_host')
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_orphaned_instances_no_migrations(self, get_mock, migr_mock,
                                              get_cn_mock, service_mock):
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        self._setup_rt()

        get_mock.return_value = []
        migr_mock.return_value = []
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        service_mock.return_value = _SERVICE_FIXTURE

        # Orphaned instances are those that the virt driver has on
        # record as consuming resources on the compute node, but the
        # Nova database has no record of the instance being active
        # on the host. For some reason, the resource tracker only
        # considers orphaned instance's memory usage in its calculations
        # of free resources...
        orphaned_usages = {
            '71ed7ef6-9d2e-4c65-9f4e-90bb6b76261d': {
                # Yes, the return result format of get_per_instance_usage
                # is indeed this stupid and redundant. Also note that the
                # libvirt driver just returns an empty dict always for this
                # method and so who the heck knows whether this stuff
                # actually works.
                'uuid': '71ed7ef6-9d2e-4c65-9f4e-90bb6b76261d',
                'memory_mb': 64
            }
        }
        vd = self.driver_mock
        vd.get_per_instance_usage.return_value = orphaned_usages

        update_mock = self._update_available_resources()

        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')

        expected_resources = {
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': 'fake-ip',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 6,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 448,  # 512 - 64 orphaned usage
            'memory_mb_used': 64,
            'pci_device_pools': [],
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 0,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            # Yep, for some reason, orphaned instances are not counted
            # as running VMs...
            'running_vms': 0
        }
        update_mock.assert_called_once_with(mock.sentinel.ctx,
                                            expected_resources)

    @mock.patch('nova.objects.Service.get_by_compute_host')
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.Instance.get_by_uuid')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_no_instances_source_migration(self, get_mock, get_inst_mock,
                                           migr_mock, get_cn_mock,
                                           service_mock):
        # We test the behavior of update_available_resource() when
        # there is an active migration that involves this compute node
        # as the source host not the destination host, and the resource
        # tracker does not have any instances assigned to it. This is
        # the case when a migration from this compute host to another
        # has been completed, but the user has not confirmed the resize
        # yet, so the resource tracker must continue to keep the resources
        # for the original instance type available on the source compute
        # node in case of a revert of the resize.
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        self._setup_rt()

        get_mock.return_value = []
        migr_obj = _MIGRATION_FIXTURES['source-only']
        migr_mock.return_value = [migr_obj]
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        service_mock.return_value = _SERVICE_FIXTURE
        # Migration.instance property is accessed in the migration
        # processing code, and this property calls
        # objects.Instance.get_by_uuid, so we have the migration return
        inst_uuid = migr_obj.instance_uuid
        get_inst_mock.return_value = _MIGRATION_INSTANCE_FIXTURES[inst_uuid]

        update_mock = self._update_available_resources()

        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')

        expected_resources = {
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': 'fake-ip',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 5,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 384,  # 512 total - 128 for possible revert of orig
            'memory_mb_used': 128,  # 128 possible revert amount
            'pci_device_pools': [],
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 1,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0
        }
        update_mock.assert_called_once_with(mock.sentinel.ctx,
                                            expected_resources)

    @mock.patch('nova.objects.Service.get_by_compute_host')
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.Instance.get_by_uuid')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_no_instances_dest_migration(self, get_mock, get_inst_mock,
                                         migr_mock, get_cn_mock, service_mock):
        # We test the behavior of update_available_resource() when
        # there is an active migration that involves this compute node
        # as the destination host not the source host, and the resource
        # tracker does not yet have any instances assigned to it. This is
        # the case when a migration to this compute host from another host
        # is in progress, but the user has not confirmed the resize
        # yet, so the resource tracker must reserve the resources
        # for the possibly-to-be-confirmed instance's instance type
        # node in case of a confirm of the resize.
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        self._setup_rt()

        get_mock.return_value = []
        migr_obj = _MIGRATION_FIXTURES['dest-only']
        migr_mock.return_value = [migr_obj]
        inst_uuid = migr_obj.instance_uuid
        get_inst_mock.return_value = _MIGRATION_INSTANCE_FIXTURES[inst_uuid]
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        service_mock.return_value = _SERVICE_FIXTURE

        update_mock = self._update_available_resources()

        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')

        expected_resources = {
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': 'fake-ip',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 1,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 256,  # 512 total - 256 for possible confirm of new
            'memory_mb_used': 256,  # 256 possible confirmed amount
            'pci_device_pools': [],
            'vcpus_used': 0,  # See NOTE(jaypipes) above about why this is 0
            'hypervisor_type': 'fake',
            'local_gb_used': 5,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0
        }
        update_mock.assert_called_once_with(mock.sentinel.ctx,
                                            expected_resources)

    @mock.patch('nova.objects.Service.get_by_compute_host')
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.Instance.get_by_uuid')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_some_instances_source_and_dest_migration(self, get_mock,
                                                      get_inst_mock, migr_mock,
                                                      get_cn_mock,
                                                      service_mock):
        # We test the behavior of update_available_resource() when
        # there is an active migration that involves this compute node
        # as the destination host AND the source host, and the resource
        # tracker has a few instances assigned to it, including the
        # instance that is resizing to this same compute node. The tracking
        # of resource amounts takes into account both the old and new
        # resize instance types as taking up space on the node.
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        self._setup_rt()

        migr_obj = _MIGRATION_FIXTURES['source-and-dest']
        migr_mock.return_value = [migr_obj]
        service_mock.return_value = _SERVICE_FIXTURE
        inst_uuid = migr_obj.instance_uuid
        # The resizing instance has already had its instance type
        # changed to the *new* instance type (the bigger one, instance type 2)
        resizing_instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid]
        all_instances = _INSTANCE_FIXTURES + [resizing_instance]
        get_mock.return_value = all_instances
        get_inst_mock.return_value = resizing_instance
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]

        update_mock = self._update_available_resources()

        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')

        expected_resources = {
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': 'fake-ip',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            # 6 total - 1G existing - 5G new flav - 1G old flav
            'free_disk_gb': -1,
            'hypervisor_version': 0,
            'local_gb': 6,
            # 512 total - 128 existing - 256 new flav - 128 old flav
            'free_ram_mb': 0,
            'memory_mb_used': 512,  # 128 exist + 256 new flav + 128 old flav
            'pci_device_pools': [],
            # See NOTE(jaypipes) above for reason why this isn't accurate until
            # _update() is called.
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 7,  # 1G existing, 5G new flav + 1 old flav
            'memory_mb': 512,
            'current_workload': 1,  # One migrating instance...
            'vcpus': 4,
            'running_vms': 2
        }
        update_mock.assert_called_once_with(mock.sentinel.ctx,
                                            expected_resources)
class TestInitComputeNode(BaseTestCase):
    """Covers RT._init_compute_node(): no-op when the node is already
    cached, disabling the tracker when the service is missing, loading
    an existing compute node record, and creating one from scratch.
    """

    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    def test_no_op_init_compute_node(self, get_mock):
        # With rt.compute_node already set, no service lookup, node
        # fetch or create should happen.
        self._setup_rt()

        capi = self.cond_api_mock
        service_mock = capi.service_get_by_compute_host
        create_mock = capi.compute_node_create

        resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
        compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        self.rt.compute_node = compute_node
        self.rt._init_compute_node(mock.sentinel.ctx, resources)

        self.assertFalse(service_mock.called)
        self.assertFalse(get_mock.called)
        self.assertFalse(create_mock.called)
        self.assertFalse(self.rt.disabled)

    @mock.patch('nova.objects.Service.get_by_compute_host')
    def test_no_found_service_disabled(self, service_mock):
        # A missing nova-compute service record disables the tracker.
        self._setup_rt()

        service_mock.side_effect = exc.NotFound

        resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
        self.rt._init_compute_node(mock.sentinel.ctx, resources)

        self.assertTrue(self.rt.disabled)
        self.assertIsNone(self.rt.compute_node)

    @mock.patch('nova.objects.Service.get_by_compute_host')
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    def test_compute_node_loaded(self, get_mock, service_mock):
        # An existing compute node record is loaded; no create call.
        self._setup_rt()

        def fake_get_node(_ctx, host, node):
            res = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
            return res

        capi = self.cond_api_mock
        service_mock.return_value = _SERVICE_FIXTURE
        get_mock.side_effect = fake_get_node
        create_mock = capi.compute_node_create

        resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
        self.rt._init_compute_node(mock.sentinel.ctx, resources)

        service_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host')
        get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                         'fake-node')
        self.assertFalse(create_mock.called)
        self.assertFalse(self.rt.disabled)

    @mock.patch('nova.objects.Service.get_by_compute_host')
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    def test_compute_node_created_on_empty(self, get_mock, service_mock):
        # No existing node record (get raises NotFound): a new compute
        # node is created from the supplied resources dict.
        self._setup_rt()

        def fake_create_node(_ctx, resources):
            res = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
            res.update(resources)
            return res

        capi = self.cond_api_mock
        create_node_mock = capi.compute_node_create
        create_node_mock.side_effect = fake_create_node
        service_obj = _SERVICE_FIXTURE
        service_mock.return_value = service_obj
        get_mock.side_effect = exc.NotFound

        resources = {
            'host_ip': 'fake-ip',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 6,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 512,
            'memory_mb_used': 0,
            'pci_device_pools': [],
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 0,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0,
            'pci_passthrough_devices': '[]'
        }
        # We need to do this because _update() actually modifies
        # the supplied dictionary :(
        expected_resources = copy.deepcopy(resources)
        # NOTE(pmurray): This will go away when the ComputeNode object is used
        expected_resources['stats'] = '{}'
        # NOTE(pmurray): no intial values are calculated before the initial
        # creation. vcpus is derived from ERT resources, so this means its
        # value will be 0
        expected_resources['vcpus'] = 0
        # NOTE(jaypipes): This will go away once
        # detach-compute-node-from-service blueprint is done
        expected_resources['service_id'] = 1
        # NOTE(sbauza): ResourceTracker adds host field
        expected_resources['host'] = 'fake-host'
        # pci_passthrough_devices should is not held in compute nodes
        del expected_resources['pci_passthrough_devices']

        self.rt._init_compute_node(mock.sentinel.ctx, resources)

        self.assertFalse(self.rt.disabled)
        service_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host')
        get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                         'fake-node')
        create_node_mock.assert_called_once_with(mock.sentinel.ctx,
                                                 expected_resources)
class TestUpdateComputeNode(BaseTestCase):
    """Covers RT._update(): the scheduler client is only notified when
    the resources actually differ from the previously reported set.
    """

    @mock.patch('nova.objects.Service.get_by_compute_host')
    def test_existing_compute_node_updated_same_resources(self, service_mock):
        self._setup_rt()
        self.rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])

        capi = self.cond_api_mock
        create_node_mock = capi.compute_node_create

        # This is the same set of resources as the fixture, deliberately. We
        # are checking below to see that update_resource_stats() is not
        # needlessly called when the resources don't actually change.
        resources = {
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': 'fake-ip',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 6,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 512,
            'memory_mb_used': 0,
            'pci_device_pools': [],
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 0,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0
        }
        orig_resources = copy.deepcopy(resources)
        self.rt._update(mock.sentinel.ctx, resources)

        self.assertFalse(self.rt.disabled)
        self.assertFalse(service_mock.called)
        self.assertFalse(create_node_mock.called)

        # The above call to _update() will populate the
        # RT.old_resources collection with the resources. Here, we check that
        # if we call _update() again with the same resources, that
        # the scheduler client won't be called again to update those
        # (unchanged) resources for the compute node
        self.sched_client_mock.reset_mock()
        urs_mock = self.sched_client_mock.update_resource_stats
        self.rt._update(mock.sentinel.ctx, orig_resources)
        self.assertFalse(urs_mock.called)

    @mock.patch('nova.objects.Service.get_by_compute_host')
    def test_existing_compute_node_updated_new_resources(self, service_mock):
        self._setup_rt()
        self.rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])

        capi = self.cond_api_mock
        create_node_mock = capi.compute_node_create

        # Deliberately changing local_gb_used, vcpus_used, and memory_mb_used
        # below to be different from the compute node fixture's base usages.
        # We want to check that the code paths update the stored compute node
        # usage records with what is supplied to _update().
        resources = {
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': 'fake-ip',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 2,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 384,
            'memory_mb_used': 128,
            'pci_device_pools': [],
            'vcpus_used': 2,
            'hypervisor_type': 'fake',
            'local_gb_used': 4,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0
        }
        expected_resources = copy.deepcopy(resources)
        expected_resources['id'] = 1
        expected_resources['stats'] = '{}'

        self.rt.ext_resources_handler.reset_resources(resources,
                                                      self.rt.driver)
        # This emulates the behavior that occurs in the
        # RT.update_available_resource() method, which updates resource
        # information in the ERT differently than all other resources.
        self.rt.ext_resources_handler.update_from_instance(dict(vcpus=2))
        self.rt._update(mock.sentinel.ctx, resources)

        self.assertFalse(self.rt.disabled)
        self.assertFalse(service_mock.called)
        self.assertFalse(create_node_mock.called)
        urs_mock = self.sched_client_mock.update_resource_stats
        urs_mock.assert_called_once_with(mock.sentinel.ctx,
                                         ('fake-host', 'fake-node'),
                                         expected_resources)
class TestInstanceClaim(BaseTestCase):
    """Tests for ResourceTracker.instance_claim().

    Verifies that claiming resources for an instance updates the tracked
    compute-node resource usage (disk, RAM, NUMA) as expected, and that a
    disabled tracker returns a no-op claim.
    """
    def setUp(self):
        """Build a tracker with a deep-copied compute-node fixture and a
        mock context whose elevated() returns a distinct mock."""
        super(TestInstanceClaim, self).setUp()
        self._setup_rt()
        self.rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        # not using mock.sentinel.ctx because instance_claim calls #elevated
        self.ctx = mock.MagicMock()
        self.elevated = mock.MagicMock()
        self.ctx.elevated.return_value = self.elevated
        self.instance = copy.deepcopy(_INSTANCE_FIXTURES[0])
    def assertEqualNUMAHostTopology(self, expected, got):
        """Assert two NUMA host topologies match cell-by-cell.

        Compares only the attributes in ``attrs``; raises AssertionError on
        any mismatch (including one side being None while the other is not,
        or differing cell counts).
        """
        attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
        if None in (expected, got):
            if expected != got:
                raise AssertionError("Topologies don't match. Expected: "
                                     "%(expected)s, but got: %(got)s" %
                                     {'expected': expected, 'got': got})
            else:
                # Both None: trivially equal.
                return
        if len(expected) != len(got):
            raise AssertionError("Topologies don't match due to different "
                                 "number of cells. Expected: "
                                 "%(expected)s, but got: %(got)s" %
                                 {'expected': expected, 'got': got})
        for exp_cell, got_cell in zip(expected.cells, got.cells):
            for attr in attrs:
                if getattr(exp_cell, attr) != getattr(got_cell, attr):
                    raise AssertionError("Topologies don't match. Expected: "
                                         "%(expected)s, but got: %(got)s" %
                                         {'expected': expected, 'got': got})
    def test_claim_disabled(self):
        """A tracker without a compute node is disabled and must hand back a
        NopClaim while still stamping host/node onto the instance."""
        self.rt.compute_node = None
        self.assertTrue(self.rt.disabled)
        claim = self.rt.instance_claim(mock.sentinel.ctx, self.instance, None)
        self.assertEqual(self.rt.host, self.instance.host)
        self.assertEqual(self.rt.host, self.instance.launched_on)
        self.assertEqual(self.rt.nodename, self.instance.node)
        self.assertIsInstance(claim, claims.NopClaim)
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    def test_claim(self, migr_mock, pci_mock):
        """Claiming an instance must charge its disk and RAM against the
        compute node and push the updated resources via _update()."""
        self.assertFalse(self.rt.disabled)
        pci_mock.return_value = objects.InstancePCIRequests(requests=[])
        disk_used = self.instance.root_gb + self.instance.ephemeral_gb
        expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected.update({
            'local_gb_used': disk_used,
            'memory_mb_used': self.instance.memory_mb,
            'free_disk_gb': expected['local_gb'] - disk_used,
            "free_ram_mb": expected['memory_mb'] - self.instance.memory_mb,
            'running_vms': 1,
            # 'vcpus_used': 0,  # vcpus are not claimed
            'pci_device_pools': [],
        })
        with mock.patch.object(self.rt, '_update') as update_mock:
            self.rt.instance_claim(self.ctx, self.instance, None)
            update_mock.assert_called_once_with(self.elevated, expected)
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    def test_claim_limits(self, migr_mock, pci_mock):
        """Zeroing any single limit (memory, disk, vcpu) must make the claim
        fail with ComputeResourcesUnavailable."""
        self.assertFalse(self.rt.disabled)
        pci_mock.return_value = objects.InstancePCIRequests(requests=[])
        good_limits = {
            'memory_mb': _COMPUTE_NODE_FIXTURES[0]['memory_mb'],
            'disk_gb': _COMPUTE_NODE_FIXTURES[0]['local_gb'],
            'vcpu': _COMPUTE_NODE_FIXTURES[0]['vcpus'],
        }
        # Break one limit at a time so each resource check is exercised.
        for key in good_limits.keys():
            bad_limits = copy.deepcopy(good_limits)
            bad_limits[key] = 0
            self.assertRaises(exc.ComputeResourcesUnavailable,
                              self.rt.instance_claim,
                              self.ctx, self.instance, bad_limits)
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    def test_claim_numa(self, migr_mock, pci_mock):
        """Claiming an instance with a NUMA topology must bump per-cell
        memory_usage and cpu_usage on the host topology."""
        self.assertFalse(self.rt.disabled)
        pci_mock.return_value = objects.InstancePCIRequests(requests=[])
        self.instance.numa_topology = _INSTANCE_NUMA_TOPOLOGIES['2mb']
        host_topology = _NUMA_HOST_TOPOLOGIES['2mb']
        self.rt.compute_node['numa_topology'] = host_topology._to_json()
        limits = {'numa_topology': _NUMA_LIMIT_TOPOLOGIES['2mb']}
        expected_numa = copy.deepcopy(host_topology)
        for cell in expected_numa.cells:
            cell.memory_usage += _2MB
            cell.cpu_usage += 1
        with mock.patch.object(self.rt, '_update') as update_mock:
            self.rt.instance_claim(self.ctx, self.instance, limits)
            self.assertTrue(update_mock.called)
            updated_compute_node = update_mock.call_args[0][1]
            new_numa = updated_compute_node['numa_topology']
            new_numa = objects.NUMATopology.obj_from_db_obj(new_numa)
            self.assertEqualNUMAHostTopology(expected_numa, new_numa)
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
class TestResizeClaim(BaseTestCase):
    """Tests for ResourceTracker.resize_claim().

    The class-level patches are injected into every test method in reverse
    decorator order: (pci_mock, inst_list_mock, inst_by_uuid, migr_mock).
    """
    def setUp(self):
        """Prime the tracker with a compute-node fixture and run one
        update_available_resource() pass so extensible resource trackers
        are initialised before the individual tests run."""
        super(TestResizeClaim, self).setUp()
        self._setup_rt()
        self.rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        self.instance = copy.deepcopy(_INSTANCE_FIXTURES[0])
        self.instance.system_metadata = _INSTANCE_TYPE_SYS_META[1]
        self.flavor = _INSTANCE_TYPE_FIXTURES[1]
        self.limits = {}
        # not using mock.sentinel.ctx because resize_claim calls #elevated
        self.ctx = mock.MagicMock()
        self.elevated = mock.MagicMock()
        self.ctx.elevated.return_value = self.elevated
        # Initialise extensible resource trackers
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        # NOTE(review): contextlib.nested only exists on Python 2; this file
        # predates a Python 3 port.
        with contextlib.nested(
            mock.patch('nova.objects.InstanceList.get_by_host_and_node'),
            mock.patch('nova.objects.MigrationList.'
                       'get_in_progress_by_host_and_node')
        ) as (inst_list_mock, migr_mock):
            inst_list_mock.return_value = objects.InstanceList(objects=[])
            migr_mock.return_value = objects.MigrationList(objects=[])
            self.rt.update_available_resource(self.ctx)
    def register_mocks(self, pci_mock, inst_list_mock, inst_by_uuid,
                       migr_mock):
        """Stash the class-level patch mocks on self so audit() can drive
        them with per-test return values."""
        pci_mock.return_value = objects.InstancePCIRequests(requests=[])
        self.inst_list_mock = inst_list_mock
        self.inst_by_uuid = inst_by_uuid
        self.migr_mock = migr_mock
    def audit(self, rt, instances, migrations, migr_inst):
        """Feed the given instances/migrations to the tracker and run a
        resource audit (update_available_resource)."""
        self.inst_list_mock.return_value = \
                objects.InstanceList(objects=instances)
        self.migr_mock.return_value = \
                objects.MigrationList(objects=migrations)
        self.inst_by_uuid.return_value = migr_inst
        rt.update_available_resource(self.ctx)
    def assertEqual(self, expected, actual):
        """Dict-aware assertEqual that prints every differing key before
        failing, to ease debugging of large resource dicts."""
        # NOTE(review): type(...) != dict would miss dict subclasses;
        # isinstance() would be the safer check here.
        if type(expected) != dict or type(actual) != dict:
            super(TestResizeClaim, self).assertEqual(expected, actual)
            return
        fail = False
        for k, e in expected.items():
            a = actual[k]
            if e != a:
                print("%s: %s != %s" % (k, e, a))
                fail = True
        if fail:
            self.fail()
    def adjust_expected(self, expected, flavor):
        """Charge one flavor's worth of disk/RAM/vcpus against the expected
        resource dict (mutates ``expected`` in place)."""
        disk_used = flavor['root_gb'] + flavor['ephemeral_gb']
        expected['free_disk_gb'] -= disk_used
        expected['local_gb_used'] += disk_used
        expected['free_ram_mb'] -= flavor['memory_mb']
        expected['memory_mb_used'] += flavor['memory_mb']
        expected['vcpus_used'] += flavor['vcpus']
    @mock.patch('nova.objects.Flavor.get_by_id')
    def test_claim(self, flavor_mock, pci_mock, inst_list_mock, inst_by_uuid,
                   migr_mock):
        """Resize self.instance and check that the expected quantities of each
        resource have been consumed.
        """
        self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock)
        self.driver_mock.get_host_ip_addr.return_value = "fake-ip"
        flavor_mock.return_value = objects.Flavor(**self.flavor)
        expected = copy.deepcopy(self.rt.compute_node)
        self.adjust_expected(expected, self.flavor)
        with mock.patch.object(self.rt, '_create_migration') as migr_mock:
            migr_mock.return_value = _MIGRATION_FIXTURES['source-only']
            claim = self.rt.resize_claim(
                self.ctx, self.instance, self.flavor, None)
        self.assertIsInstance(claim, claims.ResizeClaim)
        self.assertEqual(expected, self.rt.compute_node)
    def test_same_host(self, pci_mock, inst_list_mock, inst_by_uuid,
                       migr_mock):
        """Resize self.instance to the same host but with a different flavor.
        Then abort the claim. Check that the same amount of resources are
        available afterwards as we started with.
        """
        self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock)
        migr_obj = _MIGRATION_FIXTURES['source-and-dest']
        self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']]
        self.rt.instance_claim(self.ctx, self.instance, None)
        expected = copy.deepcopy(self.rt.compute_node)
        with mock.patch.object(self.rt, '_create_migration') as migr_mock:
            migr_mock.return_value = migr_obj
            claim = self.rt.resize_claim(self.ctx, self.instance,
                                         _INSTANCE_TYPE_FIXTURES[1], None)
        self.audit(self.rt, [self.instance], [migr_obj], self.instance)
        self.assertNotEqual(expected, self.rt.compute_node)
        claim.abort()
        self.assertEqual(expected, self.rt.compute_node)
    def test_revert_reserve_source(
            self, pci_mock, inst_list_mock, inst_by_uuid, migr_mock):
        """Check that the source node of an instance migration reserves
        resources until the migration has completed, even if the migration is
        reverted.
        """
        self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock)
        # Get our migrations, instances and itypes in a row
        src_migr = _MIGRATION_FIXTURES['source-only']
        src_instance = _MIGRATION_INSTANCE_FIXTURES[src_migr['instance_uuid']]
        old_itype = _INSTANCE_TYPE_FIXTURES[src_migr['old_instance_type_id']]
        dst_migr = _MIGRATION_FIXTURES['dest-only']
        dst_instance = _MIGRATION_INSTANCE_FIXTURES[dst_migr['instance_uuid']]
        new_itype = _INSTANCE_TYPE_FIXTURES[dst_migr['new_instance_type_id']]
        # Set up the destination resource tracker
        # update_available_resource to initialise extensible resource trackers
        src_rt = self.rt
        (dst_rt, _, _) = setup_rt("other-host", "other-node")
        dst_rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        inst_list_mock.return_value = objects.InstanceList(objects=[])
        dst_rt.update_available_resource(self.ctx)
        # Register the instance with dst_rt
        expected = copy.deepcopy(dst_rt.compute_node)
        del expected['stats']
        dst_rt.instance_claim(self.ctx, dst_instance)
        self.adjust_expected(expected, new_itype)
        expected_stats = {'num_task_resize_migrating': 1,
                          'io_workload': 1,
                          'num_instances': 1,
                          'num_proj_fake-project': 1,
                          'num_vm_active': 1,
                          'num_os_type_fake-os': 1}
        expected['current_workload'] = 1
        expected['running_vms'] = 1
        # stats is stored as a JSON blob; compare it separately.
        actual_stats = dst_rt.compute_node.pop('stats')
        actual_stats = jsonutils.loads(actual_stats)
        self.assertEqual(expected_stats, actual_stats)
        self.assertEqual(expected, dst_rt.compute_node)
        # Provide the migration via a mock, then audit dst_rt to check that
        # the instance + migration resources are not double-counted
        self.audit(dst_rt, [dst_instance], [dst_migr], dst_instance)
        actual_stats = dst_rt.compute_node.pop('stats')
        actual_stats = jsonutils.loads(actual_stats)
        self.assertEqual(expected_stats, actual_stats)
        self.assertEqual(expected, dst_rt.compute_node)
        # Audit src_rt with src_migr
        expected = copy.deepcopy(src_rt.compute_node)
        self.adjust_expected(expected, old_itype)
        self.audit(src_rt, [], [src_migr], src_instance)
        self.assertEqual(expected, src_rt.compute_node)
        # Flag the instance as reverting and re-audit
        src_instance['vm_state'] = vm_states.RESIZED
        src_instance['task_state'] = task_states.RESIZE_REVERTING
        self.audit(src_rt, [], [src_migr], src_instance)
        self.assertEqual(expected, src_rt.compute_node)
    def test_dupe_filter(self, pci_mock, inst_list_mock, inst_by_uuid,
                         migr_mock):
        """A migration reported twice must only be tracked once."""
        self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock)
        migr_obj = _MIGRATION_FIXTURES['source-and-dest']
        # This is good enough to prevent a lazy-load; value is unimportant
        migr_obj['updated_at'] = None
        self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']]
        self.audit(self.rt, [], [migr_obj, migr_obj], self.instance)
        self.assertEqual(1, len(self.rt.tracked_migrations))
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions.
@@assert_same_float_dtype
@@assert_scalar_int
@@convert_to_tensor_or_sparse_tensor
@@local_variable
@@reduce_sum_n
@@with_shape
@@with_same_shape
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
__all__ = [
'assert_same_float_dtype', 'assert_scalar_int',
'convert_to_tensor_or_sparse_tensor', 'local_variable', 'reduce_sum_n',
'with_shape', 'with_same_shape',
]
def _assert_same_base_type(items, expected_type=None):
"""Asserts all items are of the same base type.
Args:
items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,
`Operation`, or `IndexedSlices`). Can include `None` elements, which
will be ignored.
expected_type: Expected type. If not specified, assert all items are
of the same base type.
Returns:
Validated type, or none if neither expected_type nor items provided.
Raises:
ValueError: If any types do not match.
"""
original_item_str = None
for item in items:
if item is not None:
item_type = item.dtype.base_dtype
if not expected_type:
expected_type = item_type
original_item_str = item.name if hasattr(item, 'name') else str(item)
elif expected_type != item_type:
raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (
item.name if hasattr(item, 'name') else str(item),
item_type, expected_type,
(' as %s' % original_item_str) if original_item_str else ''))
return expected_type
def assert_same_float_dtype(tensors=None, dtype=None):
  """Validate and return a float dtype consistent with `tensors` and `dtype`.

  For ops such as matrix multiplication, inputs and weights must share a
  float type. This checks all `tensors` agree, checks that type against
  `dtype` when given, and returns it. Defaults to `dtypes.float32` when
  neither `tensors` nor `dtype` is supplied.

  Args:
    tensors: Tensors of input values; `None` entries are ignored.
    dtype: Expected type.

  Returns:
    Validated type.

  Raises:
    ValueError: If neither `tensors` nor `dtype` is supplied, or the result
        is not a floating-point type.
  """
  if tensors:
    dtype = _assert_same_base_type(tensors, dtype)
  if not dtype:
    return dtypes.float32
  if not dtype.is_floating:
    raise ValueError('Expected float, got %s.' % dtype)
  return dtype
def assert_scalar_int(tensor):
  """Assert `tensor` is 0-D and of type `tf.int32` or `tf.int64`.

  Args:
    tensor: Tensor to test.

  Returns:
    `tensor`, unchanged, so calls can be chained.

  Raises:
    ValueError: If `tensor` is not 0-D or not of integer dtype.
  """
  tensor_dtype = tensor.dtype
  if tensor_dtype.base_dtype not in (dtypes.int32, dtypes.int64):
    raise ValueError('Unexpected type %s for %s.' % (tensor_dtype,
                                                     tensor.name))
  tensor_shape = tensor.get_shape()
  if tensor_shape.ndims != 0:
    raise ValueError('Unexpected shape %s for %s.' % (tensor_shape,
                                                      tensor.name))
  return tensor
# TODO(ptucker): Move to tf.variables?
def local_variable(initial_value, validate_shape=True, name=None):
  """Create a non-trainable variable in `GraphKeys.LOCAL_VARIABLES`.

  Args:
    initial_value: See variables.Variable.__init__.
    validate_shape: See variables.Variable.__init__.
    name: See variables.Variable.__init__.

  Returns:
    The newly created variable.
  """
  return variables.Variable(
      initial_value,
      trainable=False,
      collections=[ops.GraphKeys.LOCAL_VARIABLES],
      validate_shape=validate_shape,
      name=name)
def reduce_sum_n(tensors, name=None):
  """Reduce each tensor to a scalar and sum the results.

  Every tensor in `tensors` is collapsed via `tf.reduce_sum`; the scalars
  are then combined with `tf.add_n` (skipped when there is only one).

  Args:
    tensors: List of tensors, all of the same numeric type.
    name: Tensor name, and scope for all other ops.

  Returns:
    Scalar sum tensor.

  Raises:
    ValueError: If `tensors` is missing or empty.
  """
  if not tensors:
    raise ValueError('No tensors provided.')
  summed = [math_ops.reduce_sum(t, name='%s/sum' % t.op.name)
            for t in tensors]
  if len(summed) == 1:
    return summed[0]
  with ops.op_scope(summed, name, 'reduce_sum_n') as scope:
    return math_ops.add_n(summed, name=scope)
def _all_equal(tensor0, tensor1):
  """Return a scalar bool tensor: True iff the two tensors are
  elementwise equal."""
  with ops.op_scope([tensor0, tensor1], 'all_equal') as scope:
    elementwise = math_ops.equal(tensor0, tensor1, name='equal')
    return math_ops.reduce_all(elementwise, name=scope)
def _is_rank(expected_rank, actual_tensor):
  """Return a bool tensor: True iff `actual_tensor` has rank `expected_rank`.

  Args:
    expected_rank: Integer defining the expected rank, or tensor of same.
    actual_tensor: Tensor to test.

  Returns:
    New bool tensor.
  """
  with ops.op_scope([actual_tensor], 'is_rank') as scope:
    rank_wanted = ops.convert_to_tensor(expected_rank, name='expected')
    rank_found = array_ops.rank(actual_tensor, name='actual')
    return math_ops.equal(rank_wanted, rank_found, name=scope)
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
  """Return a bool tensor: True iff `actual_tensor` has shape
  `expected_shape`.

  Args:
    expected_shape: Integer list defining the expected shape, or tensor of
        same.
    actual_tensor: Tensor to test.
    actual_shape: Shape of actual_tensor, if already computed.

  Returns:
    New bool tensor.
  """
  with ops.op_scope([actual_tensor], 'is_shape') as scope:
    # Rank must match first; otherwise the elementwise compare is invalid.
    rank_ok = _is_rank(array_ops.size(expected_shape), actual_tensor)
    if actual_shape is None:
      actual_shape = array_ops.shape(actual_tensor, name='actual')
    dims_ok = _all_equal(
        ops.convert_to_tensor(expected_shape, name='expected'),
        actual_shape)
    return math_ops.logical_and(rank_ok, dims_ok, name=scope)
def _assert_shape_op(expected_shape, actual_tensor):
  """Build an Assert op checking `actual_tensor` has shape `expected_shape`.

  Args:
    expected_shape: List of integers defining the expected shape, or tensor
        of same.
    actual_tensor: Tensor to test.

  Returns:
    New assert tensor.
  """
  with ops.op_scope([actual_tensor], 'assert_shape') as scope:
    actual_shape = array_ops.shape(actual_tensor, name='actual')
    shape_matches = _is_shape(expected_shape, actual_tensor, actual_shape)
    return logging_ops.Assert(
        shape_matches,
        ['Wrong shape for %s [expected] [actual].' % actual_tensor.name,
         expected_shape,
         actual_shape],
        name=scope)
def with_same_shape(expected_tensor, tensor):
  """Assert `tensor` has the same shape as `expected_tensor`.

  Args:
    expected_tensor: Tensor with expected shape.
    tensor: Tensor of actual values.

  Returns:
    `tensor`, possibly with assert ops added (see `with_shape`).
  """
  with ops.op_scope([expected_tensor, tensor], '%s/' % tensor.op.name):
    ref_shape = expected_tensor.get_shape()
    if ref_shape.is_fully_defined():
      # Static shape known: pass a plain int list for a graph-build check.
      target_shape = ref_shape.as_list()
    else:
      # Fall back to a runtime shape tensor.
      target_shape = array_ops.shape(expected_tensor, name='expected_shape')
    return with_shape(target_shape, tensor)
def _is_tensor(t):
  """Return True when `t` is a graph tensor-like object (Tensor,
  SparseTensor, or Variable)."""
  tensor_like_types = (ops.Tensor, ops.SparseTensor, variables.Variable)
  return isinstance(t, tensor_like_types)
def with_shape(expected_shape, tensor):
  """Asserts tensor has expected shape.

  If tensor shape and expected_shape are fully defined, assert they match.
  Otherwise, add an assert op that will validate the shape when tensor is
  evaluated, and set the static shape on tensor.

  Args:
    expected_shape: Expected shape to assert, as a 1D array of ints, or tensor
        of same.
    tensor: Tensor whose shape we're validating.
  Returns:
    tensor, perhaps with a dependent assert operation.
  Raises:
    ValueError: if tensor has an invalid shape.
  """
  if isinstance(tensor, ops.SparseTensor):
    raise ValueError('SparseTensor not supported.')
  # Shape type must be 1D int32.
  if _is_tensor(expected_shape):
    if expected_shape.dtype.base_dtype != dtypes.int32:
      raise ValueError(
          'Invalid dtype %s for shape %s expected of tensor %s.' % (
              expected_shape.dtype, expected_shape, tensor.name))
  if isinstance(expected_shape, (list, tuple)):
    if not expected_shape:
      # Empty list/tuple means a scalar shape.
      expected_shape = np.asarray([], dtype=np.int32)
    else:
      np_expected_shape = np.asarray(expected_shape)
      # Downcast int64 to int32; any other dtype passes through and is
      # rejected by the ndarray checks below if it isn't int32.
      expected_shape = (
          np.asarray(expected_shape, dtype=np.int32)
          if np_expected_shape.dtype == np.int64 else np_expected_shape)
  if isinstance(expected_shape, np.ndarray):
    if expected_shape.ndim > 1:
      raise ValueError(
          'Invalid rank %s for shape %s expected of tensor %s.' % (
              expected_shape.ndim, expected_shape, tensor.name))
    if expected_shape.dtype != np.int32:
      raise ValueError(
          'Invalid dtype %s for shape %s expected of tensor %s.' % (
              expected_shape.dtype, expected_shape, tensor.name))
  actual_shape = tensor.get_shape()
  # Dynamic case: static shape unknown, or expected shape only known at
  # runtime -> attach a runtime assert.
  if not actual_shape.is_fully_defined() or _is_tensor(expected_shape):
    with ops.op_scope([tensor], '%s/' % tensor.op.name):
      if not _is_tensor(expected_shape) and (len(expected_shape) < 1):
        # TODO(irving): Remove scalar special case
        return array_ops.reshape(tensor, [])
      with ops.control_dependencies([_assert_shape_op(expected_shape, tensor)]):
        result = array_ops.identity(tensor)
      if not _is_tensor(expected_shape):
        result.set_shape(expected_shape)
      return result
  # Static case: both shapes fully known, so mismatches fail at graph
  # construction time.
  if (not _is_tensor(expected_shape) and
      not actual_shape.is_compatible_with(expected_shape)):
    if (len(expected_shape) < 1) and actual_shape.is_compatible_with([1]):
      # TODO(irving): Remove scalar special case.
      with ops.op_scope([tensor], '%s/' % tensor.op.name):
        return array_ops.reshape(tensor, [])
    raise ValueError('Invalid shape for tensor %s, expected %s, got %s.' % (
        tensor.name, expected_shape, actual_shape))
  return tensor
def convert_to_tensor_or_sparse_tensor(
    value, dtype=None, name=None, as_ref=False):
  """Convert `value` to a `SparseTensor` or `Tensor`.

  Args:
    value: A `SparseTensor`, `SparseTensorValue`, or an object whose type
        has a registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing, the
        type is inferred from the type of `value`.
    name: Optional name to use if a new `Tensor` is created.
    as_ref: True if we want the result as a ref tensor. Only used if a new
        `Tensor` is created.

  Returns:
    A `SparseTensor` or `Tensor` based on `value`.

  Raises:
    RuntimeError: If result type is incompatible with `dtype`.
  """
  if dtype is not None:
    dtype = dtypes.as_dtype(dtype)
  if isinstance(value, ops.SparseTensorValue):
    value = ops.SparseTensor.from_value(value)
  if not isinstance(value, ops.SparseTensor):
    # Dense path: delegate to the registered conversion function.
    return ops.convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref)
  # Sparse path: we can only check dtype compatibility, not convert.
  if dtype and not dtype.is_compatible_with(value.dtype):
    raise RuntimeError(
        'Sparse dtype: requested = %s, actual = %s' % (
            dtype.name, value.dtype.name))
  return value
|
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
#############################################################
# (C) Krzysztof Solek, Mielec 2012
#############################################################
import bpy
import os
###########################################################
#
# Global variables
#
###########################################################
# Module-level accumulators, filled by buildData() and consumed by save().
# export() resets them all before each run.
obj_names=[]      # names of meshes in "C-suitable" format
vtx = []          # per-mesh dict mapping (pos, normal, uv) -> vertex index
faces = []        # per-mesh list of triangle index triples
vl = []           # per-mesh list of vertex positions
nl = []           # per-mesh list of normals
uvl = []          # per-mesh list of UV coords
obj_mtx=[]        # per-object local transformation matrix
obj_cnt = 0       # object count
max_vcnt= 0       # qty of vertices for biggest mesh
###########################################################
#
# Round values of the 3D vector
#
###########################################################
def r3d(v):
    """Round the first three components of *v* to 6 decimals; return a tuple."""
    x, y, z = v[0], v[1], v[2]
    return round(x, 6), round(y, 6), round(z, 6)
###########################################################
#
# Round values of the 2D vector
#
###########################################################
def r2d(v):
    """Round the first two components of *v* to 6 decimals; return a tuple."""
    u, w = v[0], v[1]
    return round(u, 6), round(w, 6)
###########################################################
#
# Convert object name to be suitable for C definition
#
###########################################################
def clearName(name):
    """Return *name* with characters unusable in a C identifier replaced by '_'.

    Used so Blender object names can become C macro / variable names.

    Args:
        name: original object/mesh name.

    Returns:
        The sanitized name; empty input yields an empty string.
    """
    # NOTE: the original used the string " ./\-+#$%^!@" where "\-" is an
    # invalid escape sequence (deprecated in Python 3); the backslash is
    # intended to be one of the replaced characters, so escape it properly.
    unsafe = " ./\\-+#$%^!@"
    # str.join avoids the quadratic "ret = ret + ch" accumulation.
    return "".join("_" if ch in unsafe else ch for ch in name)
###########################################################
#
# Build data for each object (MESH)
#
###########################################################
def buildData (obj, msh, name):
    """Collect deduplicated vertex/normal/UV/face data for one mesh.

    Builds per-mesh lists keyed on (position, normal, uv) so identical
    corners share one vertex, triangulates quads, and appends everything to
    the module-level accumulators used later by save().

    Args:
        obj: Blender object (matrix_local is recorded).
        msh: evaluated Blender mesh (tessfaces must be available).
        name: object name, sanitized via clearName() before storing.
    """
    global obj_cnt
    global obj_names     # names of meshes in "C-suitable" format
    global vtx           # list of dictionaries for each mesh
    global faces         # list of lists
    global vl            # list of vertices for each mesh
    global nl            # list of normals for each mesh
    global uvl           # list of UV coords for each mesh
    global obj_mtx       # list of local transformations for each object
    lvdic = {}           # local dictionary: (pos, normal, uv) -> index
    lfl = []             # local faces index list
    lvl = []             # local vertex list
    lnl = []             # local normal list
    luvl = []            # local uv list
    lvcnt = 0            # local vertices count
    isSmooth = False
    hasUV = True         # true by default, it will be verified below
    print("Building for: %s\n"%obj.name)
    # UV data is only usable when a tessface UV layer exists AND is active.
    if (len(msh.tessface_uv_textures)>0):
        if (msh.tessface_uv_textures.active is None):
            hasUV=False
    else:
        hasUV = False
    if (hasUV):
        activeUV = msh.tessface_uv_textures.active.data
    obj_names.append(clearName(name))
    obj_cnt+=1
    for i,f in enumerate(msh.tessfaces):
        isSmooth = f.use_smooth
        tmpfaces = []
        for j,v in enumerate(f.vertices):
            vec = msh.vertices[v].co
            vec = r3d(vec)
            if (isSmooth):  # use vertex normal
                nor = msh.vertices[v].normal
            else:           # use face normal
                nor = f.normal
            nor = r3d(nor)
            if (hasUV):
                co = activeUV[i].uv[j]
                co = r2d(co)
            else:
                co = (0.0, 0.0)
            # Deduplicate: a corner is unique by (position, normal, uv).
            key = vec, nor, co
            vinx = lvdic.get(key)
            if (vinx is None):  # vertex not found, register a new one
                lvdic[key] = lvcnt
                lvl.append(vec)
                lnl.append(nor)
                luvl.append(co)
                tmpfaces.append(lvcnt)
                lvcnt+=1
            else:
                inx = lvdic[key]
                tmpfaces.append(inx)
        # Triangles pass through; quads are split into two triangles.
        if (len(tmpfaces)==3):
            lfl.append(tmpfaces)
        else:
            lfl.append([tmpfaces[0], tmpfaces[1], tmpfaces[2]])
            lfl.append([tmpfaces[0], tmpfaces[2], tmpfaces[3]])
    #update global lists and dictionaries
    vtx.append(lvdic)
    faces.append(lfl)
    vl.append(lvl)
    nl.append(lnl)
    uvl.append(luvl)
    obj_mtx.append(obj.matrix_local)
###########################################################
#
# Save data to C header file
#
###########################################################
def save(filename,scale_to=0):
    """Write all collected mesh data to a C header file.

    Emits, per object: vertex/normal/UV/index arrays, the local transform
    matrix, and a BlenderExportedObject aggregate, all wrapped in an
    include guard. Output text is identical to the previous implementation.

    Args:
        filename: path of the header to create (overwritten).
        scale_to: if non-zero, uniformly scale each mesh so its largest
            axis extent equals this value; 0 disables scaling.

    Relies on the module-level lists (obj_names, vl, nl, uvl, faces,
    obj_mtx, obj_cnt) populated by buildData().
    """
    defName = "_" + clearName( filename ).upper() + "_BLENDER_EXPORT_H_"
    structDefinition = """
#ifndef _BLENDER_EXPORT_OBJECT_STRUCT_
#define _BLENDER_EXPORT_OBJECT_STRUCT_
struct BlenderExportedObject{
    unsigned int numVertices;
    const float *vertices;
    const float *normals;
    const float *texCoords;
    unsigned int numIndices;
    const unsigned short *indices;
    const float *transform;
};
#endif
"""
    # Context manager guarantees the handle is closed even on error
    # (the previous version leaked it on exceptions).
    with open(filename, "w", newline="\n") as out:
        out.write("#ifndef %s\n" % defName)
        out.write("#define %s\n" % defName)
        out.write("\n")
        out.write(structDefinition)
        out.write("#define NUM_OBJECTS %d\n" % obj_cnt)
        for index, name in enumerate(obj_names):
            camelName = name[0].lower() + name[1:]
            upperName = name.upper()
            v = vl[index]
            f = faces[index]
            uv = uvl[index]
            n = nl[index]
            o = obj_mtx[index]
            numberOfVerticesConstantName = "NUM_" + upperName + "_OBJECT_VERTEX"
            out.write("#define %s %d\n" % (numberOfVerticesConstantName, len(v)))
            numberOfIndicesConstantName = "NUM_" + upperName + "_OBJECT_INDEX"
            # Each face contributes 3 indices; keep the "N * 3" form so the
            # C preprocessor does the multiplication.
            out.write("#define %s %d * 3\n\n" % (numberOfIndicesConstantName, len(f)))
            # Per-axis (min, max) extents, used for optional rescaling.
            # Use distinct names instead of shadowing builtins min()/max().
            spans = [(0, 0), (0, 0), (0, 0)]
            for j, vv in enumerate(v):
                for axisIndex, point in enumerate(vv):
                    if j == 0:
                        spans[axisIndex] = (point, point)
                    else:
                        lo, hi = spans[axisIndex]
                        spans[axisIndex] = (min(lo, point), max(hi, point))
            maxDiff = max(hi - lo for (lo, hi) in spans)
            if scale_to != 0 and maxDiff > 0:
                scale = scale_to / maxDiff
                print("scale: %f -> %f" % (scale, scale_to))
            else:
                # maxDiff == 0 (empty or degenerate mesh) previously raised
                # ZeroDivisionError; fall back to unscaled output instead.
                scale = 1
            verticesConstantName = camelName + "Vertices"
            out.write("static const float " + verticesConstantName + "[ " + numberOfVerticesConstantName + " * 3 ] = {\n\t")
            for vv in v:
                for point in vv:
                    out.write("%ff, " % (point * scale))
            out.write("\n};\n\n")
            textureCoordinatesConstantName = camelName + "TexCoords"
            out.write("static const float " + textureCoordinatesConstantName + "[ " + numberOfVerticesConstantName + " * 2 ] = {\n\t")
            for coord in uv:
                out.write("%ff, %ff, " % tuple(coord))
            out.write("\n};\n\n")
            normalsConstantName = camelName + "Normals"
            out.write("static const float " + normalsConstantName + "[ " + numberOfVerticesConstantName + " * 3 ] = {\n\t")
            for nor in n:
                out.write("%ff, %ff, %ff, " % tuple(nor))
            out.write("\n};\n\n")
            indicesConstantName = camelName + "Indices"
            out.write("static const unsigned short " + indicesConstantName + "[ " + numberOfIndicesConstantName + " ] = {\n\t")
            for tri in f:
                out.write("%d, %d, %d, " % tuple(tri))
            out.write("\n};\n\n")
            transformConstantName = camelName + "Transform"
            out.write("static const float " + transformConstantName + "[ 16 ] = {\n")
            # Write the 4x4 local transform column by column.
            for colIndex in range(4):
                out.write("\t%ff, %ff, %ff, %ff,\n" % tuple(o.col[colIndex]))
            out.write("\n};\n\n")
            # Aggregate struct tying all the per-object arrays together.
            out.write("static const BlenderExportedObject %sObject = {\n" % camelName)
            out.write("\t%s,\n" % numberOfVerticesConstantName)
            out.write("\t%s,\n" % verticesConstantName)
            out.write("\t%s,\n" % normalsConstantName)
            out.write("\t%s,\n" % textureCoordinatesConstantName)
            out.write("\t%s,\n" % numberOfIndicesConstantName)
            out.write("\t%s,\n" % indicesConstantName)
            out.write("\t%s,\n" % transformConstantName)
            out.write("};\n\n")
        out.write("#endif")
###########################################################
#
# Export MESH object. By default export whole scene
#
###########################################################
def export(filename="untitled.h", entire_scene=True, scale_to=0 ):
    """Export scene meshes (and curves) to a C header file.

    Resets the module-level accumulators, builds data for either every
    MESH/CURVE object in the active scene or just the active object, then
    writes the header via save().

    Args:
        filename: output header path.
        entire_scene: export all scene objects when True, else only the
            active object.
        scale_to: forwarded to save(); 0 disables rescaling.

    Returns:
        {'FINISHED'} (Blender operator convention).
    """
    global obj_cnt
    global obj_names     # names of meshes in "C-suitable" format
    global vtx           # list of dictionaries for each mesh
    global faces         # list of lists
    global vl            # list of vertices for each mesh
    global nl            # list of normals for each mesh
    global uvl           # list of UV coords for each mesh
    global obj_mtx       # list of local transformations for each object
    print("--------------------------------------------------\n")
    print("Starting script:\n")
    print(filename)
    # clear all global variables
    obj_names=[]         # names of meshes in "C-suitable" format
    vtx = []             # list of dictionaries for each mesh
    faces = []           # list of lists
    vl = []              # list of vertices for each mesh
    nl = []              # list of normals for each mesh
    uvl = []             # list of UV coords for each mesh
    obj_mtx=[]           # list of local transformations for each object
    obj_cnt = 0          # object count
    # NOTE(review): max_vcnt is not declared global above, so this
    # assignment creates an unused local and the module-level value is
    # never reset.
    max_vcnt= 0          # qty of vertices for biggest mesh
    sc = bpy.context.scene   # export MESHes from active scene
    if (entire_scene):
        for o in sc.objects:
            if (o.type=="MESH" or o.type=="CURVE"):  # export ONLY meshes. and curves, too?
                msh = o.to_mesh(sc,True,"PREVIEW")   # prepare MESH
                buildData(o, msh, o.name)
                bpy.data.meshes.remove(msh)
    else:
        o = sc.objects.active
        msh = o.to_mesh(sc,True,'PREVIEW')
        buildData(o, msh, o.name)
        bpy.data.meshes.remove(msh)
    save(filename,scale_to)
    print("Done\n")
    return {'FINISHED'}
|
|
#!/usr/bin/env python -Es
"""
Script to set up a custom genome for bcbio-nextgen
"""
import argparse
from argparse import ArgumentParser
import os
import toolz as tz
from bcbio.utils import safe_makedir, file_exists, chdir
from bcbio.pipeline import config_utils
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance import do
from bcbio.install import (REMOTES, get_cloudbiolinux, SUPPORTED_GENOMES, SUPPORTED_INDEXES,
_get_data_dir)
from bcbio.galaxy import loc
from fabric.api import *
import subprocess
import sys
import shutil
import yaml
import gffutils
from gffutils.iterators import DataIterator
import tempfile
# Sub-directory names inside a genome build directory.
SEQ_DIR = "seq"
RNASEQ_DIR = "rnaseq"
SRNASEQ_DIR = "srnaseq"
# S3 bucket prefix where the ERCC spike-in reference files are hosted.
ERCC_BUCKET = "bcbio-data.s3.amazonaws.com/"
def gff3_to_gtf(gff3_file):
    """Convert a GFF3 annotation file to GTF next to the input file.

    Builds an in-memory gffutils database, walks every exon, and rewrites
    its attributes to the two-key (gene_id, transcript_id) GTF form using
    the dialect below. Skips the conversion if the output already exists.

    NOTE: this is Python 2 code (print statement / print >>).
    """
    # GTF output dialect for gffutils' DataIterator.
    dialect = {'field separator': '; ',
               'fmt': 'gtf',
               'keyval separator': ' ',
               'leading semicolon': False,
               'multival separator': ',',
               'quoted GFF2 values': True,
               'order': ['gene_id', 'transcript_id'],
               'repeated keys': False,
               'trailing semicolon': True}
    out_file = os.path.splitext(gff3_file)[0] + ".gtf"
    if file_exists(out_file):
        return out_file
    print "Converting %s to %s." %(gff3_file, out_file)
    db = gffutils.create_db(gff3_file, ":memory:")
    # file_transaction writes to a temp path and moves it into place on
    # success, so a partial conversion never leaves a broken out_file.
    with file_transaction(out_file) as tx_out_file:
        with open(tx_out_file, "w") as out_handle:
            for feature in DataIterator(db.features_of_type("exon"), dialect=dialect):
                # GFF3 hierarchy: exon -> Parent transcript -> Parent gene.
                transcript_id = feature["Parent"][0]
                gene_id = db[transcript_id]["Parent"][0]
                attr = {"transcript_id": transcript_id, "gene_id": gene_id}
                attributes = gffutils.attributes.Attributes(attr)
                feature.attributes = attributes
                print >> out_handle, feature
    return out_file
def _index_w_command(dir_name, command, ref_file, ext=None):
    """Run a shell indexing *command* for *ref_file*.

    The index is placed in the sibling directory *dir_name* of the
    reference file's parent; the command template may reference
    ``{ref_file}`` and ``{index_name}``. Returns the index path prefix.
    """
    index_name = os.path.splitext(os.path.basename(ref_file))[0]
    if ext is not None:
        index_name += ext
    # <ref dir>/../<dir_name> -- a sibling of the seq directory
    out_dir = os.path.join(os.path.dirname(ref_file), os.pardir, dir_name)
    index_path = os.path.join(out_dir, index_name)
    if not env.safe_exists(out_dir):
        env.safe_run("mkdir %s" % out_dir)
    cmd = command.format(ref_file=ref_file, index_name=index_path)
    subprocess.check_call(cmd, shell=True)
    return index_path
def setup_base_directories(genome_dir, name, build, gtf=None):
    """Create the genome_dir/name/build directory skeleton.

    A ``seq`` subdirectory is always created and an ``rnaseq``
    subdirectory is added when a GTF is supplied. Returns the build
    directory path.
    """
    build_dir = os.path.join(genome_dir, name, build)
    for path in (os.path.join(genome_dir, name),
                 build_dir,
                 os.path.join(build_dir, SEQ_DIR)):
        safe_makedir(path)
    if gtf:
        safe_makedir(os.path.join(build_dir, RNASEQ_DIR))
    return build_dir
def install_fasta_file(build_dir, fasta, build):
    """Copy the genome FASTA into place as <build_dir>/seq/<build>.fa.

    The copy is skipped when the target already exists; the installed
    path is returned either way.
    """
    installed = os.path.join(build_dir, SEQ_DIR, "%s.fa" % build)
    if os.path.exists(installed):
        return installed
    shutil.copyfile(fasta, installed)
    return installed
def install_gtf_file(build_dir, gtf, build):
    """Copy the transcriptome GTF into place as ref-transcripts.gtf.

    Existing files are left untouched; the installed path is returned.
    """
    installed = os.path.join(build_dir, RNASEQ_DIR, "ref-transcripts.gtf")
    if not os.path.exists(installed):
        shutil.copyfile(gtf, installed)
    return installed
def install_srna(species, gtf):
    """Install small RNA-seq reference data into the srnaseq directory.

    Copies the transcript GTF and extracts the mirbase precursor
    (hairpin.fa) and miRNA (miRNA.str) entries for *species*.
    Requires the seqcluster package for the mirbase download.
    """
    out_file = os.path.join(SRNASEQ_DIR, "srna-transcripts.gtf")
    safe_makedir(SRNASEQ_DIR)
    if not os.path.exists(out_file):
        shutil.copyfile(gtf, out_file)
    try:
        from seqcluster import install
    except ImportError:
        raise ImportError("install seqcluster first, please.")
    with chdir(SRNASEQ_DIR):
        hairpin, miRNA = install._install_mirbase()
        # extract this species' precursor sequences, converting RNA (U) to DNA (T)
        cmd = ("grep -A 2 {species} {hairpin} | grep -v '\-\-$' | tr U T > hairpin.fa")
        do.run(cmd.format(**locals()), "set precursor.")
        cmd = ("grep -A 1 {species} {miRNA} > miRNA.str")
        do.run(cmd.format(**locals()), "set miRNA.")
        # the downloaded mirbase tree is no longer needed after extraction
        shutil.rmtree("mirbase")
    return out_file
def append_ercc(gtf_file, fasta_file):
ercc_fa = ERCC_BUCKET + "ERCC92.fasta.gz"
tmp_fa = tempfile.NamedTemporaryFile(delete=False, suffix=".gz").name
append_fa_cmd = "wget {ercc_fa} -O {tmp_fa}; gzip -cd {tmp_fa} >> {fasta_file}"
print append_fa_cmd.format(**locals())
subprocess.check_call(append_fa_cmd.format(**locals()), shell=True)
ercc_gtf = ERCC_BUCKET + "ERCC92.gtf.gz"
tmp_gtf = tempfile.NamedTemporaryFile(delete=False, suffix=".gz").name
append_gtf_cmd = "wget {ercc_gtf} -O {tmp_gtf}; gzip -cd {tmp_gtf} >> {gtf_file}"
print append_gtf_cmd.format(**locals())
subprocess.check_call(append_gtf_cmd.format(**locals()), shell=True)
if __name__ == "__main__":
    # Command line driver: installs the FASTA/GTF, builds the requested
    # aligner indexes via cloudbiolinux and writes the resources YAML.
    description = ("Set up a custom genome for bcbio-nextgen. This will "
                   "place the genome under name/build in the genomes "
                   "directory in your bcbio-nextgen installation.")
    parser = ArgumentParser(description=description)
    parser.add_argument("-f", "--fasta", required=True,
                        help="FASTA file of the genome.")
    parser.add_argument("--gff3", default=False, action='store_true',
                        help="File is a GFF3 file.")
    parser.add_argument("-g", "--gtf", default=None,
                        help="GTF file of the transcriptome")
    parser.add_argument("-n", "--name", required=True,
                        help="Name of organism, for example Hsapiens.")
    parser.add_argument("-b", "--build", required=True,
                        help="Build of genome, for example hg19.")
    parser.add_argument("-i", "--indexes", choices=SUPPORTED_INDEXES, nargs="*",
                        default=["seq"], help="Space separated list of indexes to make")
    parser.add_argument("--ercc", action='store_true', default=False,
                        help="Add ERCC spike-ins.")
    parser.add_argument("--mirbase", help="species in mirbase for smallRNAseq data.")
    parser.add_argument("--srna_gtf", help="gtf to use for smallRNAseq data.")
    args = parser.parse_args()
    # --mirbase and --srna_gtf only make sense together
    if not all([args.mirbase, args.srna_gtf]) and any([args.mirbase, args.srna_gtf]):
        raise ValueError("--mirbase and --srna_gtf both need a value.")
    env.hosts = ["localhost"]
    os.environ["PATH"] += os.pathsep + os.path.dirname(sys.executable)
    cbl = get_cloudbiolinux(REMOTES)
    sys.path.insert(0, cbl["dir"])
    genomemod = __import__("cloudbio.biodata", fromlist=["genomes"])
    # monkey patch cloudbiolinux to use this indexing command instead
    genomes = getattr(genomemod, 'genomes')
    genomes._index_w_command = _index_w_command
    fabmod = __import__("cloudbio", fromlist=["fabutils"])
    fabutils = getattr(fabmod, 'fabutils')
    fabutils.configure_runsudo(env)
    system_config = os.path.join(_get_data_dir(), "galaxy", "bcbio_system.yaml")
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input; this reads a local bcbio config, but confirm before reuse
    with open(system_config) as in_handle:
        config = yaml.load(in_handle)
    env.picard_home = config_utils.get_program("picard", config, ptype="dir")
    genome_dir = os.path.abspath(os.path.join(_get_data_dir(), "genomes"))
    args.fasta = os.path.abspath(args.fasta)
    args.gtf = os.path.abspath(args.gtf) if args.gtf else None
    if args.gff3:
        args.gtf = gff3_to_gtf(args.gtf)
    # always make a sequence dictionary
    if "seq" not in args.indexes:
        args.indexes.append("seq")
    env.system_install = genome_dir
    prepare_tx = os.path.join(cbl["dir"], "utils", "prepare_tx_gff.py")
    print "Creating directories using %s as the base." % (genome_dir)
    build_dir = setup_base_directories(genome_dir, args.name, args.build, args.gtf)
    os.chdir(build_dir)
    print "Genomes will be installed into %s." % (build_dir)
    fasta_file = install_fasta_file(build_dir, args.fasta, args.build)
    print "Installed genome as %s." % (fasta_file)
    if args.gtf:
        # a transcriptome implies RNA-seq use, which needs bowtie2
        if "bowtie2" not in args.indexes:
            args.indexes.append("bowtie2")
        gtf_file = install_gtf_file(build_dir, args.gtf, args.build)
        print "Installed GTF as %s." % (gtf_file)
        if args.ercc:
            print "Appending ERCC sequences to %s and %s." % (gtf_file, fasta_file)
            append_ercc(gtf_file, fasta_file)
    indexed = {}
    for index in args.indexes:
        print "Creating the %s index." % (index)
        index_fn = genomes.get_index_fn(index)
        if not index_fn:
            print "Do not know how to make the index %s, skipping." % (index)
            continue
        indexed[index] = index_fn(fasta_file)
    indexed["samtools"] = fasta_file
    if args.gtf:
        # NOTE(review): this bare string is a no-op statement; it looks like
        # it was meant to be a print
        "Preparing transcriptome."
        with chdir(os.path.join(build_dir, os.pardir)):
            cmd = ("{sys.executable} {prepare_tx} --genome-dir {genome_dir} --gtf {gtf_file} {args.name} {args.build}")
            subprocess.check_call(cmd.format(**locals()), shell=True)
    if args.mirbase:
        # NOTE(review): another no-op bare string, presumably a missing print
        "Preparing smallRNA data."
        with chdir(os.path.join(build_dir)):
            install_srna(args.mirbase, args.srna_gtf)
    base_dir = os.path.normpath(os.path.dirname(fasta_file))
    resource_file = os.path.join(base_dir, "%s-resources.yaml" % args.build)
    print "Dumping genome resources to %s." % resource_file
    resource_dict = {"version": 1}
    # the resource paths below are relative to the seq directory
    if args.gtf:
        transcripts = ["rnaseq", "transcripts"]
        mask = ["rnaseq", "transcripts_mask"]
        index = ["rnaseq", "transcriptome_index", "tophat"]
        dexseq = ["rnaseq", "dexseq"]
        refflat = ["rnaseq", "refflat"]
        rRNA_fa = ["rnaseq", "rRNA_fa"]
        resource_dict = tz.update_in(resource_dict, transcripts,
                                     lambda x: "../rnaseq/ref-transcripts.gtf")
        resource_dict = tz.update_in(resource_dict, mask,
                                     lambda x: "../rnaseq/ref-transcripts-mask.gtf")
        resource_dict = tz.update_in(resource_dict, index,
                                     lambda x: "../rnaseq/tophat/%s_transcriptome.ver" % args.build)
        resource_dict = tz.update_in(resource_dict, refflat,
                                     lambda x: "../rnaseq/ref-transcripts.refFlat")
        resource_dict = tz.update_in(resource_dict, dexseq,
                                     lambda x: "../rnaseq/ref-transcripts.dexseq.gff3")
        resource_dict = tz.update_in(resource_dict, rRNA_fa,
                                     lambda x: "../rnaseq/rRNA.fa")
    if args.mirbase:
        srna_gtf = ["srnaseq", "srna-transcripts"]
        srna_mirbase = ["srnaseq", "mirbase"]
        resource_dict = tz.update_in(resource_dict, srna_gtf,
                                     lambda x: "../srnaseq/srna-transcripts.gtf")
        resource_dict = tz.update_in(resource_dict, srna_mirbase,
                                     lambda x: "../srnaseq/hairpin.fa")
    # write out resource dictionary
    with file_transaction(resource_file) as tx_resource_file:
        with open(tx_resource_file, "w") as out_handle:
            out_handle.write(yaml.dump(resource_dict, default_flow_style=False))
    print "Updating Galaxy .loc files."
    galaxy_base = os.path.join(_get_data_dir(), "galaxy")
    for index, index_file in indexed.items():
        loc.update_loc_file(galaxy_base, index, args.build, index_file)
|
|
#!/usr/bin/env python
"""
This is an Ansible dynamic inventory for OpenStack.
It requires your OpenStack credentials to be set in clouds.yaml or your shell
environment.
"""
from __future__ import print_function
import argparse
import json
import os
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
from keystoneauth1.exceptions.catalog import EndpointNotFound
import shade
def base_openshift_inventory(cluster_hosts):
    '''Set the base openshift inventory.'''
    def names_for(predicate):
        # collect server names whose metadata matches the predicate
        return [server.name for server in cluster_hosts
                if predicate(server.metadata)]
    masters = names_for(lambda meta: meta['host-type'] == 'master')
    # etcd falls back to running on the masters when no dedicated hosts exist
    etcd = names_for(lambda meta: meta['host-type'] == 'etcd') or masters
    infra_hosts = names_for(lambda meta: meta['host-type'] == 'node' and
                            meta['sub-host-type'] == 'infra')
    app = names_for(lambda meta: meta['host-type'] == 'node' and
                    meta['sub-host-type'] == 'app')
    cns = names_for(lambda meta: meta['host-type'] == 'cns')
    load_balancers = names_for(lambda meta: meta['host-type'] == 'lb')
    # NOTE: everything that should go to the `[nodes]` group:
    nodes = list(set(masters + etcd + infra_hosts + app + cns))
    # NOTE: all OpenShift nodes, including `[lb]`, `[nfs]`, etc.:
    osev3 = list(set(nodes + load_balancers))
    return {
        'OSEv3': {'hosts': osev3, 'vars': {}},
        'openstack_nodes': {'hosts': nodes},
        'openstack_master_nodes': {'hosts': masters},
        'openstack_etcd_nodes': {'hosts': etcd},
        'openstack_infra_nodes': {'hosts': infra_hosts},
        'openstack_compute_nodes': {'hosts': app},
        'openstack_cns_nodes': {'hosts': cns},
        'lb': {'hosts': load_balancers},
        'localhost': {'ansible_connection': 'local'},
    }
def get_docker_storage_mountpoints(volumes):
    '''Check volumes to see if they're being used for docker storage.

    Returns a dict mapping each attached server ID to the list of device
    paths of volumes whose metadata ``purpose`` is
    ``openshift_docker_storage``.
    '''
    docker_storage_mountpoints = {}
    for volume in volumes:
        if volume.metadata.get('purpose') == "openshift_docker_storage":
            for attachment in volume.attachments:
                # setdefault replaces the explicit membership-test branch
                docker_storage_mountpoints.setdefault(
                    attachment.server_id, []).append(attachment.device)
    return docker_storage_mountpoints
def _get_hostvars(server, docker_storage_mountpoints):
    """Build the per-host ansible hostvars dict for one server."""
    # prefer the floating/public address for SSH, fall back to private
    ssh_ip_address = server.public_v4 or server.private_v4
    hostvars = {
        'ansible_host': ssh_ip_address
    }
    # NOTE(review): the fallback below is computed but the dict is populated
    # from server.public_v4 directly -- when only a private address exists,
    # 'public_v4' is set to the empty public address; confirm whether the
    # `public_v4` fallback variable was intended here.
    public_v4 = server.public_v4 or server.private_v4
    if public_v4:
        hostvars['public_v4'] = server.public_v4
        hostvars['openshift_public_ip'] = server.public_v4
    # TODO(shadower): what about multiple networks?
    if server.private_v4:
        hostvars['private_v4'] = server.private_v4
        hostvars['openshift_ip'] = server.private_v4
        # NOTE(shadower): Yes, we set both hostname and IP to the private
        # IP address for each node. OpenStack doesn't resolve nodes by
        # name at all, so using a hostname here would require an internal
        # DNS which would complicate the setup and potentially introduce
        # performance issues.
        hostvars['openshift_hostname'] = server.metadata.get(
            'openshift_hostname', server.private_v4)
    hostvars['openshift_public_hostname'] = server.name
    if server.metadata['host-type'] == 'cns':
        hostvars['glusterfs_devices'] = ['/dev/nvme0n1']
    group_name = server.metadata.get('openshift_node_group_name')
    hostvars['openshift_node_group_name'] = group_name
    # check for attached docker storage volumes
    if 'os-extended-volumes:volumes_attached' in server:
        if server.id in docker_storage_mountpoints:
            hostvars['docker_storage_mountpoints'] = ' '.join(
                docker_storage_mountpoints[server.id])
    return hostvars
def build_inventory():
    '''Build the dynamic inventory.'''
    cloud = shade.openstack_cloud()
    # Use an environment variable to optionally skip returning the app nodes.
    show_compute_nodes = os.environ.get('OPENSTACK_SHOW_COMPUTE_NODES', 'true').lower() == "true"
    # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
    # environment variable.
    cluster_hosts = [
        server for server in cloud.list_servers()
        if 'metadata' in server and 'clusterid' in server.metadata and
        (show_compute_nodes or server.metadata.get('sub-host-type') != 'app')]
    inventory = base_openshift_inventory(cluster_hosts)
    inventory['_meta'] = {'hostvars': {}}
    # Some clouds don't have Cinder. That's okay:
    try:
        volumes = cloud.list_volumes()
    except EndpointNotFound:
        volumes = []
    # cinder volumes used for docker storage
    docker_storage_mountpoints = get_docker_storage_mountpoints(volumes)
    for server in cluster_hosts:
        inventory['_meta']['hostvars'][server.name] = _get_hostvars(
            server,
            docker_storage_mountpoints)
    # enrich the inventory with the Heat stack outputs when a stack exists
    stout = _get_stack_outputs(cloud)
    if stout is not None:
        try:
            inventory['localhost'].update({
                'openshift_openstack_api_lb_provider':
                stout['api_lb_provider'],
                'openshift_openstack_api_lb_port_id':
                stout['api_lb_vip_port_id'],
                'openshift_openstack_api_lb_sg_id':
                stout['api_lb_sg_id']})
        except KeyError:
            pass # Not an API load balanced deployment
        try:
            inventory['OSEv3']['vars'][
                'openshift_master_cluster_hostname'] = stout['private_api_ip']
        except KeyError:
            pass # Internal LB not specified
        inventory['localhost']['openshift_openstack_private_api_ip'] = \
            stout.get('private_api_ip')
        inventory['localhost']['openshift_openstack_public_api_ip'] = \
            stout.get('public_api_ip')
        inventory['localhost']['openshift_openstack_public_router_ip'] = \
            stout.get('public_router_ip')
        # NOTE(review): this REPLACES the whole OSEv3 vars dict, discarding
        # openshift_master_cluster_hostname set above; confirm intentional.
        try:
            inventory['OSEv3']['vars'] = _get_kuryr_vars(cloud, stout)
        except KeyError:
            pass # Not a kuryr deployment
    return inventory
def _get_stack_outputs(cloud_client):
"""Returns a dictionary with the stack outputs"""
cluster_name = os.getenv('OPENSHIFT_CLUSTER', 'openshift-cluster')
stack = cloud_client.get_stack(cluster_name)
if stack is None or stack['stack_status'] not in (
'CREATE_COMPLETE', 'UPDATE_COMPLETE'):
return None
data = {}
for output in stack['outputs']:
data[output['output_key']] = output['output_value']
return data
def _get_kuryr_vars(cloud_client, data):
"""Returns a dictionary of Kuryr variables resulting of heat stacking"""
settings = {}
settings['kuryr_openstack_pod_subnet_id'] = data['pod_subnet']
if 'pod_subnet_pool' in data:
settings['kuryr_openstack_pod_subnet_pool_id'] = data[
'pod_subnet_pool']
settings['kuryr_openstack_pod_router_id'] = data['pod_router']
settings['kuryr_openstack_worker_nodes_subnet_id'] = data['vm_subnet']
settings['kuryr_openstack_service_subnet_id'] = data['service_subnet']
settings['kuryr_openstack_pod_sg_id'] = data['pod_access_sg_id']
settings['kuryr_openstack_pod_project_id'] = (
cloud_client.current_project_id)
settings['kuryr_openstack_api_lb_ip'] = data['private_api_ip']
settings['kuryr_openstack_auth_url'] = cloud_client.auth['auth_url']
settings['kuryr_openstack_username'] = cloud_client.auth['username']
settings['kuryr_openstack_password'] = cloud_client.auth['password']
if 'user_domain_id' in cloud_client.auth:
settings['kuryr_openstack_user_domain_name'] = (
cloud_client.auth['user_domain_id'])
else:
settings['kuryr_openstack_user_domain_name'] = (
cloud_client.auth['user_domain_name'])
# FIXME(apuimedo): consolidate kuryr controller credentials into the same
# vars the openstack playbook uses.
settings['kuryr_openstack_project_id'] = cloud_client.current_project_id
if 'project_domain_id' in cloud_client.auth:
settings['kuryr_openstack_project_domain_name'] = (
cloud_client.auth['project_domain_id'])
else:
settings['kuryr_openstack_project_domain_name'] = (
cloud_client.auth['project_domain_name'])
return settings
def output_inventory(inventory, output_file):
    """Outputs inventory into a file in ini format"""
    config = ConfigParser.ConfigParser(allow_no_value=True)
    host_meta_vars = _get_host_meta_vars_as_dict(inventory)
    for key in sorted(inventory.keys()):
        if key == 'localhost':
            # localhost is a flat mapping of vars, not a group dict
            config.add_section('localhost')
            config.set('localhost', 'localhost')
            config.add_section('localhost:vars')
            for var, value in inventory['localhost'].items():
                config.set('localhost:vars', var, value)
        elif key != '_meta':
            group = inventory[key]
            if 'hosts' in group:
                config.add_section(key)
                for host in group['hosts']:
                    meta = host_meta_vars.get(host)
                    # append the flattened hostvars after the host name
                    config.set(key, host if meta is None else host + " " + meta)
            if 'vars' in group:
                vars_section = key + ":vars"
                config.add_section(vars_section)
                for var, value in group['vars'].items():
                    config.set(vars_section, var, value)
    with open(output_file, 'w') as configfile:
        config.write(configfile)
def _get_host_meta_vars_as_dict(inventory):
"""parse host meta vars from inventory as dict"""
host_meta_vars = {}
if '_meta' in inventory.keys():
if 'hostvars' in inventory['_meta']:
for host in inventory['_meta']['hostvars'].keys():
host_meta_vars[host] = ' '.join(
'{}={}'.format(key, val) for key, val in inventory['_meta']['hostvars'][host].items())
return host_meta_vars
def parse_args():
    """Parse the command line arguments for the inventory script."""
    arg_parser = argparse.ArgumentParser(description="Create ansible inventory.")
    arg_parser.add_argument(
        '--static', type=str, default='',
        help='File to store a static inventory in.')
    arg_parser.add_argument(
        '--list', action="store_true", default=False,
        help='List inventory.')
    return arg_parser.parse_args()
if __name__ == '__main__':
    # parse the arguments once instead of twice (was: parse_args() called
    # both for the check and for the value)
    args = parse_args()
    if args.static:
        output_inventory(build_inventory(), args.static)
    else:
        print(json.dumps(build_inventory(), indent=4, sort_keys=True))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/mailer.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import codecs
import csv
import logging
import mimetypes
import ipaddress
import os
import random
import smtplib
import socket
import sys
import threading
import time
from king_phisher import templates
from king_phisher import utilities
from king_phisher.client import gui_utilities
from king_phisher.ssh_forward import SSHTCPForwarder
from gi.repository import GLib
from smoke_zephyr.utilities import parse_server
if sys.version_info[0] < 3:
from email import Encoders as encoders
import urllib
import urlparse
urllib.parse = urlparse
from email.MIMEBase import MIMEBase
from email.MIMEImage import MIMEImage
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
else:
from email import encoders
import urllib.parse
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
__all__ = ['format_message', 'guess_smtp_server_address', 'MailSenderThread']
def make_uid():
	"""Return a new random 16 character message unique identifier."""
	# was a lambda assignment (PEP 8 E731); a def gives a useful repr/traceback
	return utilities.random_string(16)
# shared Jinja environment used to render all message templates
template_environment = templates.MessageTemplateEnvironment()
def format_message(template, config, first_name=None, last_name=None, uid=None, target_email=None):
	"""
	Take a message from a template and format it to be sent by replacing
	variables and processing other template directives. If the *uid* parameter
	is not set, then the message is formatted to be previewed.
	:param str template: The message template.
	:param dict config: The King Phisher client configuration.
	:param str first_name: The first name of the message's recipient.
	:param str last_name: The last name of the message's recipient.
	:param str uid: The messages unique identifier.
	:param str target_email: The message's destination email address.
	:return: The formatted message.
	:rtype: str
	"""
	if uid is None:  # identity comparison for None (was ``uid == None``)
		# no uid means this render is a preview; use placeholder recipient data
		template_environment.set_mode(template_environment.MODE_PREVIEW)
		first_name = ('Alice' if not isinstance(first_name, str) else first_name)
		last_name = ('Liddle' if not isinstance(last_name, str) else last_name)
		target_email = ('aliddle@wonderland.com' if not isinstance(target_email, str) else target_email)
	uid = (uid or config['server_config'].get('server.secret_id') or make_uid())
	template = template_environment.from_string(template)
	template_vars = {}
	template_client_vars = {}
	template_client_vars['first_name'] = first_name
	template_client_vars['last_name'] = last_name
	template_client_vars['email_address'] = target_email
	template_client_vars['company_name'] = config.get('mailer.company_name', 'Wonderland Inc.')
	template_client_vars['message_id'] = uid
	template_vars['client'] = template_client_vars
	template_vars['uid'] = uid
	webserver_url = config.get('mailer.webserver_url', '')
	webserver_url = urllib.parse.urlparse(webserver_url)
	tracking_image = config['server_config']['server.tracking_image']
	template_vars['webserver'] = webserver_url.netloc
	# the tracking URL carries the uid so message opens can be attributed
	tracking_url = urllib.parse.urlunparse((webserver_url.scheme, webserver_url.netloc, tracking_image, '', 'id=' + uid, ''))
	webserver_url = urllib.parse.urlunparse((webserver_url.scheme, webserver_url.netloc, webserver_url.path, '', '', ''))
	template_vars['tracking_dot_image_tag'] = "<img src=\"{0}\" style=\"display:none\" />".format(tracking_url)
	template_vars_url = {}
	template_vars_url['rickroll'] = 'http://www.youtube.com/watch?v=oHg5SJYRHA0'
	template_vars_url['webserver'] = webserver_url + '?id=' + uid
	template_vars_url['webserver_raw'] = webserver_url
	template_vars_url['tracking_dot'] = tracking_url
	template_vars['url'] = template_vars_url
	template_vars.update(template_environment.standard_variables)
	return template.render(template_vars)
def guess_smtp_server_address(host, forward_host=None):
	"""
	Guess the IP address of the SMTP server that will be connected to given the
	SMTP host information and an optional SSH forwarding host. If a hostname is
	in use it will be resolved to an IP address, either IPv4 or IPv6 and in that
	order. If a hostname resolves to multiple IP addresses, None will be
	returned. This function is intended to guess the SMTP servers IP address
	given the client configuration so it can be used for SPF record checks.
	:param str host: The SMTP server that is being connected to.
	:param str forward_host: An optional host that is being used to tunnel the connection.
	:return: The IP address of the SMTP server.
	:rtype: None, :py:class:`ipaddress.IPv4Address`, :py:class:`ipaddress.IPv6Address`
	"""
	# strip an optional :port suffix from the host string
	host = host.rsplit(':', 1)[0]
	if utilities.is_valid_ip_address(host):
		ip = ipaddress.ip_address(host)
		if not ip.is_loopback:
			return ip
	else:
		info = None
		# resolve the hostname, preferring IPv4 over IPv6
		for family in (socket.AF_INET, socket.AF_INET6):
			try:
				info = socket.getaddrinfo(host, 1, family)
			except socket.gaierror:
				continue
			# deduplicate resolved addresses; bail out if ambiguous
			info = set(list([r[4][0] for r in info]))
			if len(info) != 1:
				return
			break
		if info:
			ip = ipaddress.ip_address(info.pop())
			if not ip.is_loopback:
				return ip
	# loopback / unresolvable implies a tunnel; try the forwarding host next
	if forward_host:
		return guess_smtp_server_address(forward_host)
	return
class MailSenderThread(threading.Thread):
"""
The King Phisher threaded email message sender. This object manages
the sending of emails for campaigns and supports pausing the sending of
messages which can later be resumed by unpausing. This object reports
its information to the GUI through an optional
:py:class:`.MailSenderSendTab` instance, these two objects
are very interdependent.
"""
	def __init__(self, config, target_file, rpc, tab=None):
		"""
		:param dict config: The King Phisher client configuration.
		:param str target_file: The CSV formatted file to read message targets from.
		:param tab: The GUI tab to report information to.
		:type tab: :py:class:`.MailSenderSendTab`
		:param rpc: The client's connected RPC instance.
		:type rpc: :py:class:`.KingPhisherRPCClient`
		"""
		super(MailSenderThread, self).__init__()
		self.daemon = True
		self.logger = logging.getLogger('KingPhisher.Client.' + self.__class__.__name__)
		self.config = config
		self.target_file = target_file
		"""The name of the target file in CSV format."""
		self.tab = tab
		"""The optional :py:class:`.MailSenderSendTab` instance for reporting status messages to the GUI."""
		self.rpc = rpc
		# set by server_ssh_connect() when SMTP traffic is tunneled over SSH
		self._ssh_forwarder = None
		self.smtp_connection = None
		"""The :py:class:`smtplib.SMTP` connection instance."""
		self.smtp_server = parse_server(self.config['smtp_server'], 25)
		self.running = threading.Event()
		"""A :py:class:`threading.Event` object indicating if emails are being sent."""
		self.paused = threading.Event()
		"""A :py:class:`threading.Event` object indicating if the email sending operation is or should be paused."""
		# signals the run() loop to stop at the next iteration
		self.should_exit = threading.Event()
		self.max_messages_per_minute = float(self.config.get('smtp_max_send_rate', 0.0))
		# populated by run(); cached MIME attachments reused by create_email()
		self._mime_attachments = None
def tab_notify_sent(self, emails_done, emails_total):
"""
Notify the tab that messages have been sent.
:param int emails_done: The number of emails that have been sent.
:param int emails_total: The total number of emails that are going to be sent.
"""
if isinstance(self.tab, gui_utilities.GladeGObject):
GLib.idle_add(lambda x: self.tab.notify_sent(*x), (emails_done, emails_total))
def tab_notify_status(self, message):
"""
Handle a status message regarding the message sending operation.
:param str message: The notification message.
"""
self.logger.info(message.lower())
if isinstance(self.tab, gui_utilities.GladeGObject):
GLib.idle_add(self.tab.notify_status, message + '\n')
def tab_notify_stopped(self):
"""
Notify the tab that the message sending operation has stopped.
"""
if isinstance(self.tab, gui_utilities.GladeGObject):
GLib.idle_add(self.tab.notify_stopped)
	def server_ssh_connect(self):
		"""
		Connect to the remote SMTP server over SSH and configure port
		forwarding with :py:class:`.SSHTCPForwarder` for tunneling SMTP
		traffic.
		:return: The connection status.
		:rtype: bool
		"""
		server = parse_server(self.config['ssh_server'], 22)
		username = self.config['ssh_username']
		password = self.config['ssh_password']
		remote_server = parse_server(self.config['smtp_server'], 25)
		# NOTE(review): the local port is chosen at random without checking
		# availability; a collision surfaces as a forwarder start failure
		local_port = random.randint(2000, 6000)
		try:
			self._ssh_forwarder = SSHTCPForwarder(server, username, password, local_port, remote_server, preferred_private_key=self.config.get('ssh_preferred_key'))
			self._ssh_forwarder.start()
			# give the forwarding thread a moment to establish the tunnel
			time.sleep(0.5)
		except Exception:
			self.logger.warning('failed to connect to remote ssh server', exc_info=True)
			return False
		# subsequent SMTP connections now go through the local tunnel endpoint
		self.smtp_server = ('localhost', local_port)
		return True
def server_smtp_connect(self):
"""
Connect to the configured SMTP server.
:return: The connection status.
:rtype: bool
"""
if self.config.get('smtp_ssl_enable', False):
SmtpClass = smtplib.SMTP_SSL
else:
SmtpClass = smtplib.SMTP
try:
self.smtp_connection = SmtpClass(*self.smtp_server, timeout=10)
except socket.error:
self.logger.warning('received a socket.error while connecting to the SMTP server')
except smtplib.smtplib.SMTPException:
self.logger.warning('received an SMTPException while connecting to the SMTP server')
else:
return True
return False
def server_smtp_disconnect(self):
"""Clean up and close the connection to the remote SMTP server."""
if self.smtp_connection:
try:
self.smtp_connection.quit()
except smtplib.SMTPServerDisconnected:
pass
self.smtp_connection = None
self.tab_notify_status('Disconnected from the SMTP server')
def server_smtp_reconnect(self):
"""
Disconnect from the remote SMTP server and then attempt to open
a new connection to it.
:return: The reconnection status.
:rtype: bool
"""
if self.smtp_connection:
try:
self.smtp_connection.quit()
except smtplib.SMTPServerDisconnected:
pass
self.smtp_connection = None
while not self.server_smtp_connect():
self.tab_notify_status('Failed to reconnect to the SMTP server')
if not self.process_pause(True):
return False
return True
def count_emails(self):
"""
Count the emails contained in the target CSV file.
:return: The number of targets in the file.
:rtype: int
"""
targets = 0
target_file_h = open(self.target_file, 'rU')
csv_reader = csv.DictReader(target_file_h, ['first_name', 'last_name', 'email_address'])
for target in csv_reader:
if not utilities.is_valid_email_address(target['email_address']):
continue
targets += 1
target_file_h.close()
return targets
	def run(self):
		"""Thread entry point: send an email to every valid target in the CSV file."""
		emails_done = 0
		emails_total = self.count_emails()
		max_messages_per_connection = self.config.get('mailer.max_messages_per_connection', 5)
		self.running.set()
		self.should_exit.clear()
		self.paused.clear()
		self._prepare_env()
		# cache the attachments once so create_email() can reuse them per message
		self._mime_attachments = self._get_mime_attachments()
		self.logger.debug("loaded {0:,} MIME attachments".format(len(self._mime_attachments)))
		target_file_h = open(self.target_file, 'rU')
		csv_reader = csv.DictReader(target_file_h, ['first_name', 'last_name', 'email_address', 'department'])
		for target in csv_reader:
			if not utilities.is_valid_email_address(target['email_address']):
				if target['email_address']:
					self.logger.warning('skipping invalid email address: ' + target['email_address'])
				else:
					self.logger.warning('skipping blank email address')
				continue
			iteration_time = time.time()
			if self.should_exit.is_set():
				self.tab_notify_status('Sending emails cancelled')
				break
			if not self.process_pause():
				break
			# NOTE(review): this reconnects whenever the count is NOT a
			# multiple of max_messages_per_connection; confirm the modulo
			# condition isn't inverted (expected: ``% ... == 0``)
			if emails_done > 0 and (emails_done % max_messages_per_connection):
				self.server_smtp_reconnect()
			uid = make_uid()
			emails_done += 1
			self.tab_notify_status("Sending email {0:,} of {1:,} to {2} with UID: {3}".format(emails_done, emails_total, target['email_address'], uid))
			msg = self.create_email(target['first_name'], target['last_name'], target['email_address'], uid)
			if not self._try_send_email(target['email_address'], msg):
				break
			self.tab_notify_sent(emails_done, emails_total)
			campaign_id = self.config['campaign_id']
			company_name = self.config.get('mailer.company_name', '')
			department = target['department']
			if department is not None:
				# normalize a blank department column to None for the server
				department = department.strip()
				if department == '':
					department = None
			self.rpc('campaign/message/new', campaign_id, uid, target['email_address'], company_name, target['first_name'], target['last_name'], department)
			if self.max_messages_per_minute:
				# throttle to the configured send rate, sleeping in short
				# chunks so a cancellation request is honored promptly
				iteration_time = (time.time() - iteration_time)
				sleep_time = (60.0 / float(self.max_messages_per_minute)) - iteration_time
				while sleep_time > 0:
					sleep_chunk = min(sleep_time, 0.5)
					time.sleep(sleep_chunk)
					if self.should_exit.is_set():
						break
					sleep_time -= sleep_chunk
		target_file_h.close()
		self._mime_attachments = None
		self.tab_notify_status("Finished sending emails, successfully sent {0:,} emails".format(emails_done))
		self.server_smtp_disconnect()
		if self._ssh_forwarder:
			self._ssh_forwarder.stop()
			self._ssh_forwarder = None
			self.tab_notify_status('Disconnected from the SSH server')
		self.tab_notify_stopped()
		return
	def process_pause(self, set_pause=False):
		"""
		Pause sending emails if a pause request has been set.
		:param bool set_pause: Whether to request a pause before processing it.
		:return: Whether or not the sending operation was cancelled during the pause.
		:rtype: bool
		"""
		if set_pause:
			if isinstance(self.tab, gui_utilities.GladeGObject):
				# toggle the GUI pause button and wait for its handler to run
				gui_utilities.glib_idle_add_wait(lambda: self.tab.pause_button.set_property('active', True))
			else:
				self.pause()
		if self.paused.is_set():
			self.tab_notify_status('Paused sending emails, waiting to resume')
			# block until the resume action sets the running event again
			self.running.wait()
			self.paused.clear()
			if self.should_exit.is_set():
				self.tab_notify_status('Sending emails cancelled')
				return False
			self.tab_notify_status('Resuming sending emails')
			# re-read the send rate in case it was changed while paused
			self.max_messages_per_minute = float(self.config.get('smtp_max_send_rate', 0.0))
		return True
def create_email(self, first_name, last_name, target_email, uid):
    """
    Create a MIME email to be sent from a set of parameters.

    :param str first_name: The first name of the message's recipient.
    :param str last_name: The last name of the message's recipient.
    :param str target_email: The message's destination email address.
    :param str uid: The message's unique identifier.
    :return: The new MIME message.
    :rtype: :py:class:`email.MIMEMultipart.MIMEMultipart`
    """
    msg = MIMEMultipart()
    # switch the container type so inline images can be referenced by Content-ID
    msg.replace_header('Content-Type', 'multipart/related')
    msg['Subject'] = self.config['mailer.subject']
    if self.config.get('mailer.reply_to_email'):
        msg.add_header('reply-to', self.config['mailer.reply_to_email'])
    if self.config.get('mailer.source_email_alias'):
        # present the alias as the display name while keeping the real source address
        msg['From'] = "\"{0}\" <{1}>".format(self.config['mailer.source_email_alias'], self.config['mailer.source_email'])
    else:
        msg['From'] = self.config['mailer.source_email']
    msg['To'] = target_email
    # only add the optional headers when they differ from the default
    importance = self.config.get('mailer.importance', 'Normal')
    if importance != 'Normal':
        msg['Importance'] = importance
    sensitivity = self.config.get('mailer.sensitivity', 'Normal')
    if sensitivity != 'Normal':
        msg['Sensitivity'] = sensitivity
    msg.preamble = 'This is a multi-part message in MIME format.'
    msg_alt = MIMEMultipart('alternative')
    msg.attach(msg_alt)
    with codecs.open(self.config['mailer.html_file'], 'r', encoding='utf-8') as file_h:
        msg_template = file_h.read()
    # render the template with the per-target variables substituted in
    formatted_msg = format_message(msg_template, self.config, first_name=first_name, last_name=last_name, uid=uid, target_email=target_email)
    msg_body = MIMEText(formatted_msg, 'html', 'utf-8')
    msg_alt.attach(msg_body)
    # process attachments
    if isinstance(self._mime_attachments, (list, tuple)):
        # reuse the cached attachment parts when they were built earlier
        attachfiles = self._mime_attachments
    else:
        attachfiles = self._get_mime_attachments()
    for attachfile in attachfiles:
        msg.attach(attachfile)
    return msg
def _get_mime_attachments(self):
    """
    Build the list of MIME parts for the message's attachments.

    This includes the optional user-specified attachment file as well as
    the inline images referenced by the message template.

    :return: The MIME attachment parts to add to the message.
    :rtype: list
    """
    attachments = []
    if self.config.get('mailer.attachment_file'):
        attachment = self.config['mailer.attachment_file']
        # NOTE(review): guess_type returns (type, encoding); passing both to
        # MIMEBase matches the original behavior -- confirm it is intended
        attachfile = MIMEBase(*mimetypes.guess_type(attachment))
        # read via a context manager so the file handle is not leaked
        with open(attachment, 'rb') as file_h:
            attachfile.set_payload(file_h.read())
        encoders.encode_base64(attachfile)
        attachfile.add_header('Content-Disposition', "attachment; filename=\"{0}\"".format(os.path.basename(attachment)))
        attachments.append(attachfile)
    # inline images referenced by the template (file path -> content id)
    for attachment_file, attachment_name in template_environment.attachment_images.items():
        with open(attachment_file, 'rb') as file_h:
            attachfile = MIMEImage(file_h.read())
        attachfile.add_header('Content-ID', "<{0}>".format(attachment_name))
        attachfile.add_header('Content-Disposition', "inline; filename=\"{0}\"".format(attachment_name))
        attachments.append(attachfile)
    return attachments
def _prepare_env(self):
    # Render the message template once in analyze mode so the template
    # environment records the attachment images it references, then restore
    # send mode for the real run.
    with codecs.open(self.config['mailer.html_file'], 'r', encoding='utf-8') as file_h:
        msg_template = file_h.read()
    template_environment.set_mode(template_environment.MODE_ANALYZE)
    format_message(msg_template, self.config, uid=make_uid())
    template_environment.set_mode(template_environment.MODE_SEND)
def _try_send_email(self, *args, **kwargs):
    """
    Attempt to send an email, retrying on SMTP failures.

    Up to three attempts are made per connection; if all fail, the SMTP
    connection is torn down and, after a pause (which the user may cancel),
    sending is retried on a fresh connection.

    :return: Whether the message was eventually sent; False if cancelled.
    :rtype: bool
    """
    message_sent = False
    while not message_sent:
        # make up to three attempts on the current connection
        for _ in range(0, 3):
            try:
                self.send_email(*args, **kwargs)
                message_sent = True
                break
            except smtplib.SMTPException:
                self.tab_notify_status('Failed to send message')
                time.sleep(1)
        if not message_sent:
            # all attempts failed; reset the connection before retrying
            self.server_smtp_disconnect()
            if not self.process_pause(True):
                # the user cancelled the operation while paused
                return False
            self.server_smtp_reconnect()
    return True
def send_email(self, target_email, msg):
    """
    Send an email using the connected SMTP server.

    :param str target_email: The email address to send the message to.
    :param msg: The formatted message to be sent.
    :type msg: :py:class:`email.MIMEMultipart.MIMEMultipart`
    """
    self.smtp_connection.sendmail(
        self.config['mailer.source_email_smtp'],
        target_email,
        msg.as_string()
    )
def pause(self):
    """Mark this sender as paused.

    Clears the ``running`` event (so the worker loop blocks) and then sets
    the ``paused`` event to advertise the paused state.
    """
    self.running.clear()
    self.paused.set()
def unpause(self):
    """Resume a paused sender.

    Sets the ``running`` event so a worker blocked in
    :py:meth:`.process_pause` wakes up and continues.
    """
    self.running.set()
def stop(self):
    """Request that the email sending operation stop.

    The operation can not be resumed from the same position. This call
    blocks until the stop request has been processed and the worker thread
    has exited.
    """
    self.should_exit.set()
    self.unpause()
    if not self.is_alive():
        return
    self.join()
def missing_files(self):
    """
    Return a list of all missing or unreadable files which are referenced by
    the message template.

    :return: The list of unusable files.
    :rtype: list
    """
    missing = []
    attachment = self.config.get('mailer.attachment_file')
    if attachment and not os.access(attachment, os.R_OK):
        missing.append(attachment)
    msg_template = self.config['mailer.html_file']
    if not os.access(msg_template, os.R_OK):
        missing.append(msg_template)
        # the template can not be analyzed for referenced images if it can
        # not be read, so stop here (in the flattened original this return
        # sat at statement level, making the checks below unreachable)
        return missing
    self._prepare_env()
    for attachment in template_environment.attachment_images.keys():
        if not os.access(attachment, os.R_OK):
            missing.append(attachment)
    return missing
|
|
from __future__ import print_function
import MimecastV2
from CommonServerPython import *
# Parameters for Get arguments test
# Raw policy data as returned by the Mimecast API (includes the option and
# policy_id fields that the command strips out).
policy_data = {
    'description': 'new',
    'fromPart': 'bla bla',
    'fromType': 'free_mail_domains',
    'fromValue': 'gmail.com',
    'toType': 'email_domain',
    'toValue': 'gmail.com',
    'option': 'no_action',
    'policy_id': 'IDFROMMIMECAST'
}
# The policy arguments expected after extraction (option and policy_id removed).
policy_args = {
    'description': 'new',
    'fromPart': 'bla bla',
    'fromType': 'free_mail_domains',
    'fromValue': 'gmail.com',
    'toType': 'email_domain',
    'toValue': 'gmail.com'
}
# Expected (args, option) tuple from get_arguments_for_policy_command.
get_args_response = (policy_args, 'no_action')
# Parameters for Update policy test
# Policy object in the shape the API uses after an update.
policy_obj = {
    'description': 'new new',
    'from': {
        'emailDomain': 'gmail.com',
        'type': 'free_mail_domains'
    },
    'to': {
        'emailDomain': 'gmail.com',
        'type': 'email_domain'
    }
}
# Argument sets for the partial-update and full-update scenarios.
update_two_args = {'fromType': 'free_mail_domains', 'description': 'new new'}
update_all_args = {'fromType': 'free_mail_domains', 'fromValue': 'gmail.com', 'toType': 'email_domain',
                   'toValue': 'gmail.com', 'description': 'new new'}
# Mocked response of create_or_update_policy_request.
update_policy_req_response = {
    'policy': policy_obj,
    'option': 'no_action',
    'id': 'IDFROMMIMECAST'
}
# Mocked returns of set_empty_value_args_policy_update for each scenario.
set_empty_value_args_res_list = [update_two_args, 'no_action', 'IDFROMMIMECAST']
set_empty_value_args_res_list_all = [update_all_args, 'no_action', 'IDFROMMIMECAST']
demisto_args = {'policy_id': 'IDFROMMIMECAST'}
def test_get_arguments_for_policy_command():
    # The raw API policy data should be reduced to the (policy_args, option) tuple.
    res = MimecastV2.get_arguments_for_policy_command(policy_data)
    assert get_args_response == res
def test_update_policy(mocker):
    """Run MimecastV2.update_policy twice -- once with a partial argument set
    and once with all arguments -- with every API interaction mocked, and
    verify the returned entry contents."""
    # scenario 1: only two arguments are supplied for the update
    mocker.patch.object(MimecastV2, 'get_arguments_for_policy_command', return_value=get_args_response)
    mocker.patch.object(MimecastV2, 'set_empty_value_args_policy_update', return_value=set_empty_value_args_res_list)
    mocker.patch.object(MimecastV2, 'create_or_update_policy_request', return_value=update_policy_req_response)
    mocker.patch.object(demisto, 'args', return_value=demisto_args)
    res = MimecastV2.update_policy()
    assert res['Contents']['Description'] == 'new new'
    assert res['Contents']['Sender']['Type'] == 'free_mail_domains'
    # scenario 2: all arguments are supplied for the update
    mocker.patch.object(MimecastV2, 'get_arguments_for_policy_command', return_value=get_args_response)
    mocker.patch.object(MimecastV2, 'set_empty_value_args_policy_update',
                        return_value=set_empty_value_args_res_list_all)
    mocker.patch.object(MimecastV2, 'create_or_update_policy_request', return_value=update_policy_req_response)
    mocker.patch.object(demisto, 'args', return_value=demisto_args)
    res = MimecastV2.update_policy()
    assert res['Contents']['Description'] == 'new new'
    assert res['Contents']['Sender']['Type'] == 'free_mail_domains'
    assert res['Contents']['Sender']['Domain'] == 'gmail.com'
    assert res['Contents']['Receiver']['Type'] == 'email_domain'
    assert res['Contents']['Receiver']['Domain'] == 'gmail.com'
# Raw API response for an incident creation request.
INCIDENT_API_RESPONSE = {
    'fail': [
    ],
    'meta': {
        'status': 200
    },
    'data': [
        {
            'code': 'TR-CSND1A7780-00045-M',
            'successful': 0,
            'create': '2020-05-25T10:01:53+0000',
            'modified': '2020-05-25T10:01:53+0000',
            'identified': 3,
            'failed': 0,
            'reason': 'test',
            'id': 'test-id',
            'type': 'manual',
            'searchCriteria': {
                'start': '2020-04-25T10:01:53+0000',
                'end': '2020-05-25T22:01:53+0000',
                'messageId': 'test message id'
            },
            'restored': 0
        }
    ]
}
# Expected human-readable markdown for the response above (runtime string --
# compared byte-for-byte against the command output).
EXPECTED_MARKDOWN_RESPONSE = """### Incident test-id has been created
#### Code: TR-CSND1A7780-00045-M
#### Type: manual
#### Reason: test
#### The number of messages identified based on the search criteria: 3
#### The number successfully remediated messages: 0
#### The number of messages that failed to remediate: 0
#### The number of messages that were restored from the incident: 0
|End date|Message ID|
|---|---|
| 2020-05-25T22:01:53+0000 | test message id |
"""


def test_mimecast_incident_api_response_to_markdown():
    # The incident response should render to the exact markdown above.
    actual_response = MimecastV2.mimecast_incident_api_response_to_markdown(INCIDENT_API_RESPONSE, 'create')
    assert actual_response == EXPECTED_MARKDOWN_RESPONSE
# Expected context entry produced from INCIDENT_API_RESPONSE.
EXPECTED_CONTEXT_RESPONSE = {
    'Mimecast.Incident(val.ID && val.ID == obj.ID)': {
        'Reason': 'test',
        'Code': 'TR-CSND1A7780-00045-M',
        'FailedRemediatedMessages': 0,
        'IdentifiedMessages': 3,
        'MessagesRestored': 0,
        'LastModified': '2020-05-25T10:01:53+0000',
        'SearchCriteria': {
            'StartDate': '2020-04-25T10:01:53+0000',
            'EndDate': '2020-05-25T22:01:53+0000',
            'FileHash': None,
            'To': None,
            'MessageID': 'test message id',
            'From': None
        },
        'Type': 'manual',
        'ID': 'test-id',
        'SuccessfullyRemediatedMessages': 0
    }
}


def test_mimecast_incident_api_response_to_context():
    # The incident response should map onto the expected context structure.
    actual_response = MimecastV2.mimecast_incident_api_response_to_context(INCIDENT_API_RESPONSE)
    assert actual_response == EXPECTED_CONTEXT_RESPONSE
# Mocked API responses for the group membership tests.
add_member_req_response = {'data': [{'emailAddress': 'test@gmail.com', 'folderId': 'folder_id'}]}
get_group_members_req_response = {'data': [{'groupMembers': {}}]}


def test_mimecast_add_remove_member_to_group_with_email(mocker):
    """Unit test
    Given
        - add_remove_member_to_group command
        - command args - email and group id.
        - command raw response
    When
        - mock the server response to create_add_remove_group_member_request.
        - mock the server response to create_get_group_members_request
    Then
        - Validate the content of the HumanReadable.
    """
    mocker.patch.object(demisto, 'args', return_value={'group_id': '1234', 'email_address': 'test@gmail.com'})
    mocker.patch.object(MimecastV2, 'create_add_remove_group_member_request', return_value=add_member_req_response)
    mocker.patch.object(MimecastV2, 'create_get_group_members_request', return_value=get_group_members_req_response)
    readable, _, _ = MimecastV2.add_remove_member_to_group('add')
    assert readable == 'test@gmail.com had been added to group ID folder_id'
# Response without an emailAddress field, as returned for domain members.
add_member_req_response_no_email = {'data': [{'folderId': 'folder_id'}]}


def test_mimecast_add_remove_member_to_group_with_domain(mocker):
    """Unit test
    Given
        - add_remove_member_to_group command
        - command args - domain and group id.
        - command raw response
    When
        - mock the server response to create_add_remove_group_member_request.
        - mock the server response to create_get_group_members_request
    Then
        - Validate the content of the HumanReadable.
    """
    mocker.patch.object(demisto, 'args', return_value={'group_id': '1234', 'domain': 'test.com'})
    mocker.patch.object(MimecastV2, 'create_add_remove_group_member_request',
                        return_value=add_member_req_response_no_email)
    mocker.patch.object(MimecastV2, 'create_get_group_members_request', return_value=get_group_members_req_response)
    readable, _, _ = MimecastV2.add_remove_member_to_group('add')
    assert readable == 'Address had been added to group ID folder_id'
# Successful managed-URL creation response as returned by the Mimecast API.
CREATE_MANAGED_URL_SUCCESSFUL_MOCK = {
    "fail": [],
    "meta": {
        "status": 200
    },
    "data": [
        {
            "comment": "None",
            "domain": "www.test.com",
            "queryString": "",
            "disableRewrite": False,
            "port": -1,
            "disableUserAwareness": False,
            "disableLogClick": False,
            "action": "permit",
            "path": "",
            "matchType": "explicit",
            "scheme": "https",
            "id": "fake_id"
        }
    ]
}


def test_create_managed_url(mocker):
    """Unit test
    Given
        - create_managed_url command
        - the url does not exist
        - command args - url, action, matchType, disableRewrite, disableUserAwareness, disableLogClick
        - command raw response
    When
        - mock the server response to create_managed_url_request.
    Then
        - Validate the content of the command result.
    """
    args = {
        'url': 'https://www.test.com',
        'action': 'permit',
        'matchType': 'explicit',
        'disableRewrite': 'false',
        'disableUserAwareness': 'false',
        'disableLogClick': 'false'
    }
    # expected context entry keyed by the managed URL's ID
    expected_context = {
        'Mimecast.URL(val.ID && val.ID == obj.ID)':
            [{'Domain': 'www.test.com',
              'disableRewrite': False,
              'disableLogClick': False,
              'Action': 'permit',
              'Path': '',
              'matchType': 'explicit',
              'ID': 'fake_id'}]}
    mocker.patch.object(demisto, 'args', return_value=args)
    mocker.patch.object(MimecastV2, 'create_managed_url_request',
                        return_value=CREATE_MANAGED_URL_SUCCESSFUL_MOCK['data'][0])
    results = MimecastV2.create_managed_url()
    assert 'Managed URL https://www.test.com created successfully!' in results.get('HumanReadable')
    assert expected_context == results.get('EntryContext')
def test_add_users_under_group_in_context_dict__dict(mocker):
    """
    Given
        - Users list
        - Group id
        - Integration context with `group` key with a single group in it
    When
        - adding users under group in context dict as part of `mimecast-get-group-members` command
    Then
        - Returns valid outputs
    """
    context = {'Mimecast': {'Group': {'ID': 'groupID', 'Users': []}}}
    users_list = [
        {'Domain': u'demistodev.com', 'Name': u'', 'EmailAddress': u'testing@demistodev.com', 'InternalUser': True,
         'Type': u'created_manually', 'IsRemoved': False}]
    expected = [{'ID': 'groupID', 'Users': [
        {'Domain': u'demistodev.com', 'Name': u'', 'EmailAddress': u'testing@demistodev.com', 'InternalUser': True,
         'Type': u'created_manually', 'IsRemoved': False}]}]
    mocker.patch.object(demisto, 'context', return_value=context)
    result = MimecastV2.add_users_under_group_in_context_dict(users_list, 'groupID')
    assert result == expected
def test_add_users_under_group_in_context_dict__list(mocker):
    """
    Given
        - Users list
        - Group id
        - Integration context with `group` key with a list of groups in it
    When
        - adding users under group in context dict as part of `mimecast-get-group-members` command
    Then
        - Returns valid outputs (only the matching group gains the users)
    """
    context = {'Mimecast': {'Group': [{
        'ID': 'groupID',
        'Users': []},
        {'ID': 'groupID2',
         'Users': []}]}
    }
    users_list = [
        {'Domain': u'demistodev.com', 'Name': u'', 'EmailAddress': u'testing@demistodev.com', 'InternalUser': True,
         'Type': u'created_manually', 'IsRemoved': False}]
    expected = [{'ID': 'groupID', 'Users': [
        {'Domain': u'demistodev.com', 'Name': u'', 'EmailAddress': u'testing@demistodev.com', 'InternalUser': True,
         'Type': u'created_manually', 'IsRemoved': False}]}, {'ID': 'groupID2', 'Users': []}]
    mocker.patch.object(demisto, 'context', return_value=context)
    result = MimecastV2.add_users_under_group_in_context_dict(users_list, 'groupID')
    assert result == expected
|
|
import distutils.sysconfig
import logging as log
import platform
import zipfile
import sys
from distutils.command.build import build as _build
from distutils.command.build_py import build_py as _build_py
from distutils.command.install_data import install_data as _install_data
from distutils.errors import DistutilsSetupError
from distutils.spawn import find_executable
from distutils.sysconfig import get_python_lib
from multiprocessing import cpu_count
from subprocess import Popen
import os
import re
from Cython.Distutils import build_ext as _build_ext
from setuptools import setup
from setuptools.extension import Extension
from shutil import rmtree, copytree, copy
# urlretrieve has a different location in Python 2 and Python 3
import urllib
if hasattr(urllib, "urlretrieve"):
    # Python 2: urllib.urlretrieve exists directly
    urlretrieve = urllib.urlretrieve
else:
    # Python 3: it moved to urllib.request
    import urllib.request
    urlretrieve = urllib.request.urlretrieve
def run_process(cmds):
    """Execute *cmds* in a subprocess and block until it exits.

    :param cmds: The argument vector to execute.
    :return: The subprocess's exit code.
    """
    # Popen.wait() both sets and returns the process's returncode
    return Popen(cmds).wait()
def append_cmake_list(l, var):
    """Append the entries of the semicolon-delimited CMake list *var* to *l*.

    ``None`` or an empty string leaves *l* unmodified.
    """
    if not var:
        return
    l.extend(var.split(";"))
def append_cmake_lib_list(l, var):
    """Append the entries of the CMake list *var* to *l*, stripping library
    prefixes/suffixes from each entry via :func:`strip_lib`."""
    if var:
        for entry in var.split(";"):
            l.append(strip_lib(entry))
# Strip library prefixes and suffixes to prevent linker confusion
def strip_lib(filename):
    """Return *filename* with any ``lib`` prefix and any shared/static
    library suffix (``.so``/``.a``/``.dylib``/``.lib``) removed."""
    without_unix = re.sub(r"^(?:lib)?(.*)\.(?:so|a|dylib)$", r"\1", filename)
    return re.sub(r"^(.*)\.lib$", r"\1", without_unix)
def get_env(build_dir):
    """Collect configuration variables for the build.

    Values are gathered, in increasing priority, from the process
    environment, from ``CMakeCache.txt`` in *build_dir* (if present), and
    from leading ``KEY=VALUE`` command-line arguments.  The consumed
    ``KEY=VALUE`` arguments are removed from ``sys.argv``.

    :param build_dir: Directory that may contain a ``CMakeCache.txt``.
    :return: A dict mapping variable names to their values.
    """
    # Get environmental variables first
    ENV = dict(os.environ)
    # Get values listed in the CMakeCache.txt file (if existent); cache
    # entries have the form NAME:TYPE=VALUE and the TYPE is ignored
    var_regex = r"^([^:]+):([^=]+)=(.*)$"
    cache_path = os.path.join(build_dir, "CMakeCache.txt")
    try:
        with open(cache_path, "r") as cache_file:
            for line in cache_file:
                m = re.match(var_regex, line.strip())
                if m:
                    ENV[m.group(1)] = m.group(3)
    except (IOError, OSError):
        # no cache file (or it is unreadable); the other sources still apply
        pass
    # Get values passed on the command line, stopping at the first argument
    # that is not of the form KEY=VALUE
    consumed = 0
    for arg in sys.argv[1:]:
        try:
            key, value = arg.split("=", 1)
        except ValueError:
            break
        ENV[key] = value
        consumed += 1
    # remove exactly the consumed arguments (the original `del sys.argv[1:i+1]`
    # left the final KEY=VALUE argument behind when every argument parsed)
    del sys.argv[1:consumed + 1]
    return ENV
log.basicConfig(stream=sys.stdout, level=log.INFO)

# Find the current directory
try:
    this_file = __file__
except NameError:
    # running under an interpreter without __file__ (e.g. exec); fall back
    this_file = sys.argv[0]
ORIG_DIR = os.getcwd()
SCRIPT_DIR = os.path.dirname(os.path.abspath(this_file))
if ORIG_DIR.rstrip('/').endswith('python'):
    # NOTE(review): rstrip('python') strips *characters* from the set
    # {p,y,t,h,o,n}, not the suffix "python"; it works here because a '/'
    # precedes the directory name, but it leaves a trailing slash -- confirm
    BUILD_DIR = ORIG_DIR.rstrip('/').rstrip('python')
    PYTHON_DIR = ORIG_DIR
else:
    BUILD_DIR = ORIG_DIR
    PYTHON_DIR = ORIG_DIR + '/python'
ENV = get_env(BUILD_DIR)

# Find the paths
BUILT_EXTENSIONS = False
CMAKE_PATH = ENV.get("CMAKE", find_executable("cmake"))
MAKE_PATH = ENV.get("MAKE", find_executable("make"))
MAKE_FLAGS = ENV.get("MAKE_FLAGS", "-j %d" % cpu_count()).split()
CC_PATH = ENV.get("CC", find_executable("gcc"))
CXX_PATH = ENV.get("CXX", find_executable("g++"))
# three levels up from site-packages, i.e. the environment prefix
INSTALL_PREFIX = os.path.join(get_python_lib(), os.pardir, os.pardir, os.pardir)
PYTHON = sys.executable

# Try to find Eigen
EIGEN3_INCLUDE_DIR = ENV.get("EIGEN3_INCLUDE_DIR")  # directory where eigen is saved
# The cmake directory and Python directory are different in manual install, so
# will break if relative path is specified. Try moving up if path is specified
# but not found
if (EIGEN3_INCLUDE_DIR is not None and
        not os.path.isdir(EIGEN3_INCLUDE_DIR) and
        os.path.isdir(os.path.join(os.pardir, EIGEN3_INCLUDE_DIR))):
    EIGEN3_INCLUDE_DIR = os.path.join(os.pardir, EIGEN3_INCLUDE_DIR)
EIGEN3_DOWNLOAD_URL = ENV.get("EIGEN3_DOWNLOAD_URL", "https://bitbucket.org/eigen/eigen/get/b2e267dc99d4.zip")
# EIGEN3_DOWNLOAD_URL = ENV.get("EIGEN3_DOWNLOAD_URL", "https://bitbucket.org/eigen/eigen/get/3.3.4.tar.bz2")

# Remove the "-Wstrict-prototypes" compiler option, which isn't valid for C++.
cfg_vars = distutils.sysconfig.get_config_vars()
CFLAGS = cfg_vars.get("CFLAGS")
if CFLAGS is not None:
    cfg_vars["CFLAGS"] = CFLAGS.replace("-Wstrict-prototypes", "")

# For Cython extensions
LIBRARIES = ["dynet"]
LIBRARY_DIRS = ["."]
COMPILER_ARGS = []
EXTRA_LINK_ARGS = []
RUNTIME_LIB_DIRS = []
INCLUDE_DIRS = []
DATA_FILES = []

# Add all environment variables from CMake for Cython extensions
append_cmake_lib_list(LIBRARIES, ENV.get("CUDA_CUBLAS_FILES"))
append_cmake_list(LIBRARY_DIRS, ENV.get("CUDA_CUBLAS_DIRS"))
CMAKE_INSTALL_PREFIX = ENV.get("CMAKE_INSTALL_PREFIX", INSTALL_PREFIX)
LIBS_INSTALL_DIR = CMAKE_INSTALL_PREFIX + "/lib/"
PROJECT_SOURCE_DIR = ENV.get("PROJECT_SOURCE_DIR", SCRIPT_DIR)  # location of the main dynet directory
PROJECT_BINARY_DIR = ENV.get("PROJECT_BINARY_DIR", BUILD_DIR)  # path where dynet is built
DYNET_LIB_DIR = PROJECT_BINARY_DIR + "/dynet/"
if ENV.get("MSVC") == "1":
    COMPILER_ARGS[:] = ["-DNOMINMAX", "/EHsc"]
    DYNET_LIB_DIR += "/Release/"
    # For MSVC, we compile dynet as a static lib, so we need to also link in the
    # other libraries it depends on:
    append_cmake_lib_list(LIBRARIES, ENV.get("LIBS"))
    append_cmake_list(LIBRARY_DIRS, ENV.get("MKL_LINK_DIRS"))  # Add the MKL dirs, if MKL is being used
    append_cmake_lib_list(LIBRARIES, ENV.get("CUDA_RT_FILES"))
    append_cmake_list(LIBRARY_DIRS, ENV.get("CUDA_RT_DIRS"))
    DATA_FILES += [DYNET_LIB_DIR + lib + ".lib" for lib in LIBRARIES]
else:
    COMPILER_ARGS[:] = ["-std=c++11", "-Wno-unused-function"]
    RUNTIME_LIB_DIRS.extend([DYNET_LIB_DIR, LIBS_INSTALL_DIR])
    # in some OSX systems, the following extra flags are needed:
    if platform.system() == "Darwin":
        COMPILER_ARGS.extend(["-stdlib=libc++", "-mmacosx-version-min=10.7"])
        EXTRA_LINK_ARGS.append("-Wl,-rpath," + LIBS_INSTALL_DIR)
        if "--skip-build" not in sys.argv:  # Include libdynet.dylib unless doing manual install
            DATA_FILES += [os.path.join(LIBS_INSTALL_DIR, "lib%s.dylib" % lib) for lib in LIBRARIES]
    else:
        # NOTE(review): %r wraps the path in quotes inside the linker flag --
        # presumably intentional upstream, but worth confirming
        EXTRA_LINK_ARGS.append("-Wl,-rpath=%r" % LIBS_INSTALL_DIR + ",--no-as-needed")
LIBRARY_DIRS.insert(0, DYNET_LIB_DIR)
# drop any None entries (e.g. when EIGEN3_INCLUDE_DIR is unset)
INCLUDE_DIRS[:] = filter(None, [PROJECT_SOURCE_DIR, EIGEN3_INCLUDE_DIR])
# The Cython extension module that links against the dynet C++ library.
TARGET = [Extension(
    "_dynet",  # name of extension
    [PYTHON_DIR + "/_dynet.pyx"],  # filename of our Pyrex/Cython source
    language="c++",  # this causes Pyrex/Cython to create C++ source
    include_dirs=INCLUDE_DIRS,
    libraries=LIBRARIES,
    library_dirs=LIBRARY_DIRS,
    extra_link_args=EXTRA_LINK_ARGS,
    extra_compile_args=COMPILER_ARGS,
    runtime_library_dirs=RUNTIME_LIB_DIRS,
)]
class build(_build):
    """Custom distutils ``build`` command that first builds the DyNet C++
    library via cmake/make (downloading Eigen if necessary) before running
    the standard build steps."""

    user_options = [
        ("build-dir=", None, "New or existing DyNet build directory."),
        ("skip-build", None, "Assume DyNet C++ library is already built."),
    ]

    def __init__(self, *args, **kwargs):
        # set here (in addition to initialize_options) so the attributes
        # exist before distutils option parsing runs
        self.build_dir = None
        self.skip_build = False
        _build.__init__(self, *args, **kwargs)

    def initialize_options(self):
        # the default build dir encodes Python version/unicode width/arch so
        # builds for different interpreters do not collide
        py_version = "%s.%s" % (sys.version_info[0], sys.version_info[1])
        unicode_suffix = "u" if sys.version_info[0] == 2 and sys.maxunicode > 65536 else ""
        build_name = "py%s%s-%s" % (py_version, unicode_suffix, platform.architecture()[0])
        self.build_dir = os.path.join(SCRIPT_DIR, "build", build_name)
        _build.initialize_options(self)

    def run(self):
        """Configure and compile the DyNet C++ library, then run the normal
        distutils build."""
        global BUILD_DIR, BUILT_EXTENSIONS, EIGEN3_INCLUDE_DIR
        BUILD_DIR = os.path.abspath(self.build_dir)
        if EIGEN3_INCLUDE_DIR is None:
            EIGEN3_INCLUDE_DIR = os.path.join(BUILD_DIR, "eigen")
        EIGEN3_INCLUDE_DIR = os.path.abspath(EIGEN3_INCLUDE_DIR)
        # log the effective configuration for debugging build reports
        log.info("CMAKE_PATH=%r" % CMAKE_PATH)
        log.info("MAKE_PATH=%r" % MAKE_PATH)
        log.info("MAKE_FLAGS=%r" % " ".join(MAKE_FLAGS))
        log.info("EIGEN3_INCLUDE_DIR=%r" % EIGEN3_INCLUDE_DIR)
        log.info("EIGEN3_DOWNLOAD_URL=%r" % EIGEN3_DOWNLOAD_URL)
        log.info("CC_PATH=%r" % CC_PATH)
        log.info("CXX_PATH=%r" % CXX_PATH)
        log.info("SCRIPT_DIR=%r" % SCRIPT_DIR)
        log.info("BUILD_DIR=%r" % BUILD_DIR)
        log.info("INSTALL_PREFIX=%r" % INSTALL_PREFIX)
        log.info("PYTHON=%r" % PYTHON)
        if CMAKE_PATH is not None:
            run_process([CMAKE_PATH, "--version"])
        if CXX_PATH is not None:
            run_process([CXX_PATH, "--version"])
        # This will generally be called by the pip install
        if not self.skip_build:
            if CMAKE_PATH is None:
                raise DistutilsSetupError("`cmake` not found, and `CMAKE` is not set.")
            if MAKE_PATH is None:
                raise DistutilsSetupError("`make` not found, and `MAKE` is not set.")
            if CC_PATH is None:
                raise DistutilsSetupError("`gcc` not found, and `CC` is not set.")
            if CXX_PATH is None:
                raise DistutilsSetupError("`g++` not found, and `CXX` is not set.")
            # Prepare folders
            if not os.path.isdir(BUILD_DIR):
                log.info("Creating build directory " + BUILD_DIR)
                os.makedirs(BUILD_DIR)
            os.chdir(BUILD_DIR)
            if os.path.isdir(EIGEN3_INCLUDE_DIR):
                log.info("Found eigen in " + EIGEN3_INCLUDE_DIR)
            else:
                try:
                    # Can use BZ2 or zip, right now using zip
                    # log.info("Fetching Eigen...")
                    # urlretrieve(EIGEN3_DOWNLOAD_URL, "eigen.tar.bz2")
                    # log.info("Unpacking Eigen...")
                    # tfile = tarfile.open("eigen.tar.bz2", 'r')
                    # tfile.extractall('eigen')
                    log.info("Fetching Eigen...")
                    urlretrieve(EIGEN3_DOWNLOAD_URL, "eigen.zip")
                    log.info("Unpacking Eigen...")
                    # BitBucket packages everything in a tarball with a changing root directory, so grab the only child
                    with zipfile.ZipFile("eigen.zip") as zfile:
                        for zipinfo in zfile.infolist():
                            try:
                                # strip the changing top-level directory from each member
                                i = zipinfo.filename.index("/")
                                zipinfo.filename = zipinfo.filename[i+1:]
                                zfile.extract(zipinfo, "eigen")
                            except ValueError:
                                # no "/" in the name: the archive root entry; skip it
                                pass
                    EIGEN3_INCLUDE_DIR = os.path.join(BUILD_DIR, "eigen")
                except:
                    # NOTE(review): bare except also masks KeyboardInterrupt here
                    raise DistutilsSetupError("Could not download Eigen from %r" % EIGEN3_DOWNLOAD_URL)
            os.environ["CXX"] = CXX_PATH
            os.environ["CC"] = CC_PATH
            # Build module
            cmake_cmd = [
                CMAKE_PATH,
                SCRIPT_DIR,
                "-DCMAKE_INSTALL_PREFIX=%r" % INSTALL_PREFIX,
                "-DEIGEN3_INCLUDE_DIR=%r" % EIGEN3_INCLUDE_DIR,
                "-DPYTHON=%r" % PYTHON,
            ]
            # forward selected configuration variables to cmake
            for env_var in ("BACKEND", "CUDNN_ROOT"):
                value = ENV.get(env_var)
                if value is not None:
                    cmake_cmd.append("-D" + env_var + "=%r" % value)
            log.info("Configuring...")
            if run_process(cmake_cmd) != 0:
                raise DistutilsSetupError(" ".join(cmake_cmd))
            make_cmd = [MAKE_PATH] + MAKE_FLAGS
            log.info("Compiling...")
            if run_process(make_cmd) != 0:
                raise DistutilsSetupError(" ".join(make_cmd))
            make_cmd = [MAKE_PATH, "install"]
            log.info("Installing...")
            if run_process(make_cmd) != 0:
                raise DistutilsSetupError(" ".join(make_cmd))
            if platform.system() == "Darwin":  # macOS
                # point each dylib's install name at @loader_path so the
                # installed package is relocatable
                for filename in DATA_FILES:
                    new_install_name = "@loader_path/" + os.path.basename(filename)
                    install_name_tool_cmd = ["install_name_tool", "-id", new_install_name, filename]
                    log.info("fixing install_name for %s to %r" % (filename, new_install_name))
                    if run_process(install_name_tool_cmd) != 0:
                        raise DistutilsSetupError(" ".join(install_name_tool_cmd))
        # This will generally be called by the manual install
        elif not os.path.isdir(EIGEN3_INCLUDE_DIR):
            raise RuntimeError("Could not find Eigen in EIGEN3_INCLUDE_DIR={}. If doing manual install, please set the EIGEN3_INCLUDE_DIR variable with the absolute path to Eigen manually. If doing install via pip, please file an issue on github.com/clab/dynet".format(EIGEN3_INCLUDE_DIR))
        BUILT_EXTENSIONS = True  # because make calls build_ext
        _build.run(self)
class build_py(_build_py):
    """``build_py`` that runs from the cmake build tree's ``python``
    directory, where the generated Python sources live."""

    def run(self):
        os.chdir(os.path.join(BUILD_DIR, "python"))
        log.info("Building Python files...")
        _build_py.run(self)
class install_data(_install_data):
    """``install_data`` that rewrites data-file destinations depending on the
    install layout (wheel build vs. direct install)."""

    def run(self):
        # Keep wheel-relative destinations as-is; on macOS redirect the
        # libraries into site-packages; otherwise keep the directory entry
        # but install no files.
        # NOTE(review): the `(p, [])` branch intentionally installs nothing
        # on non-wheel, non-macOS installs -- confirm that is the intent.
        self.data_files = [(p, f) if self.is_wheel(p) else
                           (get_python_lib(), f) if platform.system() == "Darwin" else
                           (p, []) for p, f in self.data_files]
        _install_data.run(self)

    def is_wheel(self, path):
        # True when the resolved install target is inside a "wheel" directory
        return os.path.basename(os.path.abspath(os.path.join(self.install_dir, path))) == "wheel"
class build_ext(_build_ext):
    """Cython ``build_ext`` that picks up the paths determined by the custom
    ``build`` command and mirrors the build products back into the source
    tree when building out-of-tree."""

    def run(self):
        if BUILT_EXTENSIONS:
            # the custom build command ran first; use the directories it set
            INCLUDE_DIRS.append(EIGEN3_INCLUDE_DIR)
            LIBRARY_DIRS.append(BUILD_DIR + "/dynet/")
        log.info("Building Cython extensions...")
        log.info("INCLUDE_DIRS=%r" % " ".join(INCLUDE_DIRS))
        log.info("LIBRARIES=%r" % " ".join(LIBRARIES))
        log.info("LIBRARY_DIRS=%r" % " ".join(LIBRARY_DIRS))
        log.info("COMPILER_ARGS=%r" % " ".join(COMPILER_ARGS))
        log.info("EXTRA_LINK_ARGS=%r" % " ".join(EXTRA_LINK_ARGS))
        log.info("RUNTIME_LIB_DIRS=%r" % " ".join(RUNTIME_LIB_DIRS))
        _build_ext.run(self)
        if os.path.abspath(".") != SCRIPT_DIR:
            # building out-of-tree: copy the build products back next to setup.py
            log.info("Copying built extensions...")
            for d in os.listdir("build"):
                target_dir = os.path.join(SCRIPT_DIR, "build", d)
                rmtree(target_dir, ignore_errors=True)
                try:
                    copytree(os.path.join("build", d), target_dir)
                except OSError as e:
                    log.info("Cannot copy %s %s" % (os.path.join("build", d), e))
# Build a long description from the README when pypandoc is available.
try:
    import pypandoc
    long_description = pypandoc.convert("README.md", "rst")
    # drop lines with intra-document links, which render poorly on PyPI
    long_description = "\n".join(line for line in long_description.splitlines() if "<#" not in line)
except:
    # pypandoc missing or conversion failed; fall back to an empty description
    long_description = ""
# Package metadata and build wiring; the custom cmdclass entries hook the
# cmake/Cython build into the standard distutils flow.
setup(
    name="dyNET",
    # version="0.0.0",
    install_requires=["cython", "numpy"],
    description="The Dynamic Neural Network Toolkit",
    long_description=long_description,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Environment :: MacOS X",
        "Environment :: Win32 (MS Windows)",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX",
        "Operating System :: POSIX :: Linux",
        "Operating System :: Microsoft",
        "Operating System :: Microsoft :: Windows",
        "Programming Language :: C++",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    author="Graham Neubig",
    author_email="dynet-users@googlegroups.com",
    url="https://github.com/clab/dynet",
    download_url="https://github.com/clab/dynet/releases",
    license="Apache 2.0",
    cmdclass={"build": build, "build_py": build_py, "install_data": install_data, "build_ext": build_ext},
    ext_modules=TARGET,
    py_modules=["dynet", "dynet_viz", "dynet_config"],
    # destination is rewritten by the install_data command defined above
    data_files=[(os.path.join("..", ".."), DATA_FILES)],
)
|
|
# -*- coding: utf-8 -*-
from __future__ import division
from math import sqrt
from dataloader import loadJsonObjectToDict
import pickle
import os
import json
# Weight applied to the Tanimoto score when blending metrics in sim_combine().
PENALTY_RATIO = 9
def sim_tanimoto(prefs, personA, personB):
    """Tanimoto (Jaccard) similarity of the two people's rated item sets.

    :param prefs: Mapping of person -> {item: rating}.
    :param personA: First person's key in *prefs*.
    :param personB: Second person's key in *prefs*.
    :return: |shared items| / |union of items|; 0 when both sets are empty
        (the original raised ZeroDivisionError in that case).
    """
    keys_a = set(prefs[personA])
    keys_b = set(prefs[personB])
    union = keys_a | keys_b
    # guard: two empty preference sets would otherwise divide by zero
    if not union:
        return 0
    return len(keys_a & keys_b) / len(union)
def sim_euclid(prefs, personA, personB):
    """Euclidean-distance based similarity in (0, 1]; 0 when no items are shared."""
    shared = [item for item in prefs[personA] if item in prefs[personB]]
    # zero shared items -> not similar at all
    if not shared:
        return 0
    squared_distance = sum(pow(prefs[personA][item] - prefs[personB][item], 2) for item in shared)
    return 1 / (1 + sqrt(squared_distance))
def sim_pearson(prefs, personA, personB):
    """Pearson correlation of the ratings the two people share.

    Returns 0 when there are no shared items or the denominator degenerates
    (e.g. zero variance in one person's shared ratings).
    """
    shared = [item for item in prefs[personA] if item in prefs[personB]]
    n = len(shared)
    if n == 0:
        return 0
    ratings_a = [prefs[personA][item] for item in shared]
    ratings_b = [prefs[personB][item] for item in shared]
    sum_a = sum(ratings_a)
    sum_b = sum(ratings_b)
    sum_a_sq = sum(r * r for r in ratings_a)
    sum_b_sq = sum(r * r for r in ratings_b)
    product_sum = sum(ra * rb for ra, rb in zip(ratings_a, ratings_b))
    # Pearson formula
    numerator = product_sum - (sum_a * sum_b / n)
    denominator = sqrt((sum_a_sq - pow(sum_a, 2) / n) * (sum_b_sq - pow(sum_b, 2) / n))
    if denominator == 0:
        return 0
    return numerator / denominator
def sim_combine(prefs, personA, personB):
    """Blend Euclidean and Tanimoto similarity, weighting the Tanimoto score
    by PENALTY_RATIO."""
    euclid_score = sim_euclid(prefs, personA, personB)
    tanimoto_score = sim_tanimoto(prefs, personA, personB)
    return (euclid_score + tanimoto_score * PENALTY_RATIO) / (PENALTY_RATIO + 1)
def topMatches(prefs, person, n=5, similarity=sim_pearson):
    """Return the *n* people most similar to *person* as (score, other)
    pairs, best match first."""
    scores = [(similarity(prefs, person, other), other) for other in prefs if other != person]
    scores.sort(reverse=True)
    return scores[:n]
def getRecommandations(prefs, person, similarity=sim_pearson):
    """Recommend unseen items for *person* via similarity-weighted averages
    of the other people's ratings.

    :return: List of (predicted score, item) pairs, best first.
    """
    totals = {}
    simSums = {}
    for other in prefs:
        # skip the person themselves and anyone with non-positive similarity
        if other == person:
            continue
        sim = similarity(prefs, person, other)
        if sim <= 0:
            continue
        for item, rating in prefs[other].items():
            # only score items the person has not already rated
            if item in prefs[person] and prefs[person][item] != 0:
                continue
            totals[item] = totals.get(item, 0) + rating * sim
            simSums[item] = simSums.get(item, 0) + sim
    rankings = [(total / simSums[item], item) for item, total in totals.items()]
    rankings.sort(reverse=True)
    return rankings
def transformPrefs(prefs):
    """Invert a nested preference mapping: person->item->rating becomes
    item->person->rating."""
    flipped = {}
    for person, ratings in prefs.items():
        for item, rating in ratings.items():
            flipped.setdefault(item, {})[person] = rating
    return flipped
def calculationSimilarItem(prefs, simFunction, dumpedfilePath, n=10):
    """
    Compute (or load a cached copy of) the top-n most similar items for
    every item.

    :param prefs: person -> {item: rating} preference mapping.
    :param simFunction: similarity function taking (prefs, a, b).
    :param dumpedfilePath: pickle cache path; if it exists it is loaded
        instead of recomputing.
    :param n: number of similar items to keep per item.
    :return: dict mapping each item to its [(score, other_item), ...] list.
    """
    result = {}
    if os.path.exists(dumpedfilePath):
        print('find preprocessed data, loading directly...')
        # NOTE(review): pickle.load executes arbitrary code if the cache file
        # is untrusted -- only load locally generated caches.
        with open(dumpedfilePath, 'rb') as f:
            result = pickle.load(f)
        return result
    # flip to item -> {person: rating} so items can be compared to each other
    itemPrefs = transformPrefs(prefs)
    c = 0
    for item in itemPrefs:
        # progress indicator for long computations
        c += 1
        if c % 100 == 0:
            print('%d/%d' % (c, len(itemPrefs)))
        scores = topMatches(itemPrefs, item, n=n, similarity=simFunction)
        result[item] = scores
    # cache the result for the next run
    with open(dumpedfilePath, 'wb') as f:
        pickle.dump(result, f)
    return result
def getRecommandedItems(prefs, itemMatch, userRating):
    """Recommend items using a precomputed item-similarity table.

    :param prefs: unused; kept for interface compatibility.
    :param itemMatch: item -> [(similarity, other_item), ...] table.
    :param userRating: item -> rating for the active user.
    :return: [(predicted score, item), ...] sorted best first.
    """
    scores = {}
    totalSim = {}
    for item, rating in userRating.items():
        for similarity, itemSim in itemMatch[item]:
            # skip items the user already rated and non-positive similarities
            if itemSim in userRating or similarity <= 0:
                continue
            scores[itemSim] = scores.get(itemSim, 0) + similarity * rating
            totalSim[itemSim] = totalSim.get(itemSim, 0) + similarity
    rankings = [(score / totalSim[item], item) for item, score in scores.items()]
    rankings.sort(reverse=True)
    return rankings
def readUserPrefs(userRatingPath):
    """Load the target user's ratings from a whitespace-separated file.

    Each line has the form ``<item> <rating>``.  Returns an empty dict
    when the file does not exist.
    Fix: the original opened the file without ever closing it; ``with``
    guarantees the handle is released.
    """
    userRating = {}
    if os.path.exists(userRatingPath):
        with open(userRatingPath, 'r') as f:
            for line in f:
                txtSeg = line.split()
                userRating[txtSeg[0]] = float(txtSeg[1])
    return userRating
#TestCode
def ItemBasedReco():
    """Run item-based recommendation with each similarity measure.

    Loads the scraped {user: {book: rating}} data and the target user's
    ratings, then for each similarity function builds (or loads) the
    item-similarity cache, computes recommendations and prints the top 15.
    Fix: the four copy-pasted stanzas are collapsed into one data-driven
    loop; the printed output and cache file names are unchanged.
    """
    #Load scrapy data into {User -> Book -> Note} Dict
    loadedData = loadJsonObjectToDict("./data/test.json")
    # Read User prefs
    userRatingPath = "./UserPrefs.txt"
    userRating = readUserPrefs(userRatingPath)
    # (similarity function, cache-file tag, banner label)
    configurations = [
        (sim_euclid, "Euclid", "Sim Euclid"),
        (sim_tanimoto, "Tanimoto", "Sim Tanimoto"),
        (sim_pearson, "Pearson", "Sim Pearson"),
        (sim_combine, "Combine", "Sim Tanimoto * 10 + Sim Euclid"),
    ]
    for simFunction, tag, label in configurations:
        #Calculate Top10 matching books for each book with similarity point
        li = calculationSimilarItem(
            loadedData, simFunction, "./data/CalculatedItemSim" + tag + ".pkl")
        #Get the Recommandations
        re = getRecommandedItems(loadedData, li, userRating)
        #Print recommandation
        print("------------------ Item Based: " + label + " --------------------")
        for tl in re[0:15]:
            print (str(tl[0]) + ":" + tl[1])
def UserBasedReco():
    """Run user-based recommendation with each similarity measure.

    Injects the target user's ratings as pseudo-user 'Me' and prints the
    top 15 recommendations for each similarity function.
    Fix: the four copy-pasted stanzas are collapsed into one loop; the
    printed output is unchanged.
    """
    #Load scrapy data into {User -> Book -> Note} Dict
    loadedData = loadJsonObjectToDict("./data/test.json")
    # Read User prefs
    userRatingPath = "./UserPrefs.txt"
    userRating = readUserPrefs(userRatingPath)
    loadedData['Me'] = userRating
    for similarity, label in [
            (sim_euclid, "Sim Euclid"),
            (sim_pearson, "Sim Pearson"),
            (sim_tanimoto, "Sim Tanimoto"),
            (sim_combine, "Sim Tanimoto * 10 + Sim Euclid")]:
        re = getRecommandations(loadedData, 'Me', similarity)
        print("------------------ User Based: " + label + " --------------------")
        for tl in re[0:15]:
            print (str(tl[0]) + ":" + tl[1])
# Script entry point: run both recommendation strategies back to back.
if __name__ == '__main__':
    UserBasedReco()
    ItemBasedReco()
|
|
# detached process management (used in Phenix GUI)
# This is intended to imitate running a process using libtbx.thread_utils,
# but split across truly independent processes (potentially on different
# systems). I use something like this to start a server:
#
# easy_run.call("libtbx.start_process run.pkl &")
# FIXME this duplicates code in libtbx.thread_utils; it is also not especially
# well tested except to the extent it is used daily in the Phenix GUI
from __future__ import division
from libtbx.utils import Sorry, Abort, multi_out, host_and_user
from libtbx import easy_pickle
from libtbx import adopt_init_args, group_args
import libtbx.load_env
import libtbx.phil
import cStringIO
import traceback
import signal
import stat
import time
import os
import sys
process_master_phil = libtbx.phil.parse("""
run_file = None
.type = path
prefix = None
.type = str
output_dir = None
.type = path
tmp_dir = None
.type = path
debug = False
.type = bool
timeout = 200
.type = int
buffer_stdout = False
.type = bool
fsync = True
.type = bool
""")
class simple_target (object) :
  """Minimal pickleable target: stores its args and resolves output_dir
  to the current directory when none is given."""
  def __init__ (self, args, output_dir=None) :
    adopt_init_args(self, locals())
    if output_dir is None :
      self.output_dir = os.getcwd()
  def __call__ (self) :
    # Trivial body; subclass-free smoke-test target.
    return True
class target_with_save_result (object) :
  """Callable target that runs self.run(), pickles the result to
  *file_name*, and optionally tees stdout into *log_file*.

  Subclasses must implement run(); __call__ handles logging and result
  persistence around it."""
  def __init__ (self, args, file_name, output_dir=None, log_file=None,
      job_title=None) :
    assert (isinstance(file_name, str))
    assert (isinstance(args, list) or isinstance(args, tuple))
    assert (output_dir is None) or (isinstance(output_dir, str))
    assert (log_file is None) or (isinstance(log_file, str))
    adopt_init_args(self, locals())
    if (output_dir is None) :
      self.output_dir = os.getcwd()
    self._out = None
  def __call__ (self) :
    if (self.log_file is not None) :
      # Mirror stdout into the log file via a multi_out tee.
      log = open(self.log_file, "w")
      new_out = multi_out()
      new_out.register("log", log)
      new_out.register("stdout", sys.stdout)
      sys.stdout = new_out
      self._out = new_out
    result = self.run()
    # Persist the result where the client side expects to find it.
    easy_pickle.dump(self.file_name, result)
    if (self._out is not None) and (not getattr(self._out, "closed", False)) :
      self._out.flush()
      # FIXME
      #self._out.close()
    return result
  def run (self) :
    # Abstract hook: concrete targets implement the actual work here.
    raise NotImplementedError()
class detached_process_driver (object) :
  """Wraps a target callable so it can be pickled and run detached."""
  def __init__ (self, target) :
    adopt_init_args(self, locals())
  def __call__ (self) :
    result = self.target()
    return result
class detached_process_driver_mp (detached_process_driver) :
  """Multiprocessing-style variant: accepts the (args, kwds, child_conn)
  signature expected by that API, but ignores the extra arguments."""
  def __call__ (self, args, kwds, child_conn) :
    result = self.target()
    return result
class detached_base (object) :
  """Shared state for the server/client pair: derives the set of status
  file paths from params and declares the callback interface."""
  def __init__ (self, params) :
    adopt_init_args(self, locals())
    self._accumulated_callbacks = []
    if params.prefix is None :
      params.prefix = ""
    # Preference order for the status-file directory: tmp_dir, then
    # output_dir, then the current working directory.
    if params.tmp_dir is not None :
      self.set_file_names(params.tmp_dir)
    elif params.output_dir is not None :
      self.set_file_names(params.output_dir)
    else :
      self.set_file_names(os.getcwd())
  def set_file_names (self, tmp_dir) :
    # One file per communication channel; the client polls these paths.
    prefix = os.path.join(tmp_dir, self.params.prefix)
    self.start_file = os.path.join(tmp_dir, prefix + ".libtbx_start")
    self.stdout_file = prefix + ".libtbx_stdout"
    self.error_file = prefix + ".libtbx_error"
    self.stop_file = prefix + ".libtbx_STOP"
    self.abort_file = prefix + ".libtbx_abort"
    self.result_file = prefix + ".libtbx_result"
    self.info_file = prefix + ".libtbx_info"
    self.state_file = prefix + ".libtbx_state"
    # .LOCK files guard concurrent reads of info/state pickles.
    self.info_lock = self.info_file + ".LOCK"
    self.state_lock = self.state_file + ".LOCK"
    self.prefix = prefix
  def isAlive (self) :
    # Overridden by the client; the base object is never "alive".
    return False
  # Callback interface: no-ops here, overridden by server/client/GUI.
  def callback_start (self, data) :
    pass
  def callback_stdout (self, data) :
    pass
  def callback_error (self, error, traceback_info) :
    pass
  def callback_abort (self) :
    pass
  def callback_final (self, result) :
    pass
  def callback_other (self, status) :
    pass
class stdout_redirect (object) :
  """File-like shim that forwards write() calls to a handler's
  callback_stdout; used to capture sys.stdout in the server."""
  def __init__ (self, handler) :
    adopt_init_args(self, locals())
  def write (self, data) :
    self.handler.callback_stdout(data)
  def flush (self) :
    pass
  def close (self) :
    pass
class detached_process_server (detached_base) :
  """Runs the target callable in this process while writing the status
  files (start/stdout/error/abort/result/info/state) that a
  detached_process_client polls from elsewhere."""
  def __init__ (self, target, *args, **kwds) :
    detached_base.__init__(self, *args, **kwds)
    self.target = target
    assert hasattr(self.target, "__call__")
  # XXX support for libtbx.queueing_system_utils.generic.Job
  def __call__ (self, *args, **kwds) :
    return self.run()
  def run (self) :
    # Announce the host/pid, redirect stdout through callback_stdout,
    # run the target, and translate the outcome into a status file.
    self.callback_start()
    self._stdout = multi_out()
    self._tmp_stdout = open(self.stdout_file, "w")
    self._stdout.register("Communication log", self._tmp_stdout)
    old_stdout = sys.stdout
    sys.stdout = stdout_redirect(self)
    import libtbx.callbacks
    libtbx.call_back.register_handler(self.callback_wrapper)
    try :
      return_value = self.target()
    except Abort : # FIXME why is this not working properly?
      self.callback_abort()
    except Exception, e :
      print >> sys.stderr, type(e).__name__
      # Second chance to catch Abort by name (see FIXME above).
      if (type(e).__name__ == "Abort") :
        self.callback_abort()
      else :
        # Boost.Python exceptions may not pickle; re-wrap as RuntimeError.
        if e.__class__.__module__ == "Boost.Python" :
          e = RuntimeError("Boost.Python.%s: %s" % (e.__class__.__name__,
            str(e)))
        elif hasattr(e, "reset_module") :
          e.reset_module()
        traceback_str = "\n".join(traceback.format_tb(sys.exc_info()[2]))
        self.callback_error(e, traceback_str)
    else :
      #time.sleep(1)
      self.callback_final(return_value)
    sys.stdout = old_stdout
  def callback_wrapper (self, message, data, accumulate=True, cached=True) :
    if cached :
      self.callback_other(data=group_args(
        message=message,
        data=data,
        accumulate=accumulate,
        cached=cached))
  def callback_start (self, data=None) :
    # Record "<host> <pid>" so the client can identify (and kill) us.
    info = host_and_user()
    assert (info.pid is not None)
    f = open(self.start_file, "w")
    host_name = info.get_host_name()
    if (host_name is None) and (sys.platform == "darwin") :
      host_name = os.uname()[1]
    f.write("%s %d" % (host_name, info.pid))
    f.close()
  def callback_stdout (self, data) :
    self._stdout.write(data)
    self._stdout.flush()
    if self.params.fsync :
      os.fsync(self._tmp_stdout.fileno())
    # The stop file doubles as a cooperative kill switch: raising Abort
    # here unwinds out of the target on its next print.
    if os.path.isfile(self.stop_file) :
      raise Abort()
  def callback_error (self, error, traceback_info) :
    self.cleanup()
    easy_pickle.dump(self.error_file, (error, traceback_info))
  def callback_abort (self) :
    self.cleanup()
    easy_pickle.dump(self.abort_file, True)
  def callback_final (self, result) :
    self.cleanup()
    easy_pickle.dump(self.result_file, result)
  def callback_pause (self) : # TODO
    pass
  def callback_resume (self) : # TODO
    pass
  def callback_other (self, data) :
    if not data.cached :
      return
    # .LOCK files keep the client from reading a half-written pickle.
    if data.accumulate :
      self._accumulated_callbacks.append(data)
      touch_file(self.info_lock)
      easy_pickle.dump(self.info_file, self._accumulated_callbacks)
      os.remove(self.info_lock)
    else :
      touch_file(self.state_lock)
      easy_pickle.dump(self.state_file, data)
      os.remove(self.state_lock)
  def cleanup (self) :
    self._stdout.flush()
    self._stdout.close()
# TODO pause/resume?
class detached_process_client (detached_base) :
def __init__ (self, *args, **kwds) :
detached_base.__init__(self, *args, **kwds)
self._logfile = None
self._info_mtime = 0.0 # time.time()
self._state_mtime = 0.0 # time.time()
self.running = False
self.finished = False
self._process_host = None
self._process_pid = None
self.update_progress = True
def isAlive (self) :
return (not self.finished)
def is_started (self) :
return self.running
def run (self) :
timeout = self.params.timeout
while True :
self.update()
if self.finished :
break
else :
time.sleep(timeout * 0.001)
return True
def update (self) :
if not self.running and os.path.exists(self.start_file) :
self.running = True
data = open(self.start_file, "r").read()
try :
host, pid = data.split()
self._process_host = host
self._process_pid = int(pid)
except Exception, e :
print "Error acquiring runtime info:"
print e
self.callback_start(data)
if self.update_progress :
self.check_stdout()
self.check_status()
if os.path.exists(self.error_file) :
try :
(error, traceback_info) = easy_pickle.load(self.error_file)
except EOFError :
pass
else :
self.callback_error(error, traceback_info)
elif os.path.exists(self.abort_file) :
self.callback_abort()
elif os.path.exists(self.result_file) :
try :
result = easy_pickle.load(self.result_file)
except EOFError :
print "EOFError trying to load result file!"
else :
time.sleep(1)
self.check_stdout()
self.check_status()
self.callback_final(result)
else :
self.finished = False
return
self.finished = True
def check_stdout (self) :
if self._logfile is None and os.path.exists(self.stdout_file) :
self._logfile = open(self.stdout_file, "r", 0)
if self._logfile is not None :
last = self._logfile.tell()
data = self._logfile.read()
if data == '' :
self._logfile.seek(last)
else :
self.callback_stdout(data)
def reset_logfile (self) :
if self._logfile is not None :
self._logfile.seek(0)
def check_status (self) :
if os.path.exists(self.info_file) :
mtime = os.path.getmtime(self.info_file)
if mtime > self._info_mtime and not os.path.isfile(self.info_lock) :
self._info_mtime = mtime
try :
accumulated_status = easy_pickle.load(self.info_file)
except KeyboardInterrupt :
raise
except EOFError :
pass
except Exception, e :
print e
else :
n_cb = len(accumulated_status)
n_cb_old = len(self._accumulated_callbacks)
for i in range(n_cb_old, n_cb) :
new_cb = accumulated_status[i]
self._accumulated_callbacks.append(new_cb)
self.callback_other(new_cb)
if os.path.exists(self.state_file) :
mtime = os.path.getmtime(self.state_file)
if mtime > self._state_mtime and not os.path.isfile(self.state_lock) :
self._state_mtime = mtime
try :
current_status = easy_pickle.load(self.state_file)
except KeyboardInterrupt :
raise
except EOFError :
pass
except Exception, e :
print e
else :
self.callback_other(current_status)
# XXX in practice the phenix GUI only sets force=True for jobs running on the
# same machine; qdel is pretty thorough anyway.
def abort (self, force=None) :
touch_file(self.stop_file)
if (force) and (not None in [self._process_host, self._process_pid]) :
info = host_and_user()
if (info.get_host_name() == self._process_host) :
os.kill(self._process_pid)
self.running = False
self.callback_abort()
# XXX See also libtbx.thread_utils implementation
def send_signal (self, signal_number) : # XXX experimental
"""
Signals the process using os.kill, which despite the name, can also
pause or resume processes on Unix.
"""
assert (self._process_pid is not None) and (sys.platform != "win32")
try :
os.kill(self._process_pid, signal_number)
except OSError, e :
print e
#self.callback_abort()
return False
else :
return True
def pause (self) : # XXX experimental, Unix only
if (self.send_signal(signal.SIGSTOP)) :
self.callback_pause()
def resume (self) : # XXX experimental, Unix only
if (self.send_signal(signal.SIGCONT)) :
self.callback_resume()
def callback_pause (self) : # TODO
pass
def callback_resume (self) : # TODO
pass
def purge_files (self) :
files = ["start","stdout","error","stop","abort","result","info","state"]
for fn in files :
file_name = getattr(self, "%s_file" % fn)
if os.path.exists(file_name) :
try :
os.remove(file_name)
except Exception, e :
print e
def touch_file (file_name) :
  """Create *file_name* (or truncate it) so an empty marker file exists.

  Fix: the original bound the None returned by close() to a local
  variable, which was dead code; open-and-close is all that is needed.
  """
  open(file_name, "w").close()
def write_params (params, file_name) :
  """Serialize an extracted parameter object back to PHIL text in
  *file_name*, using the module's master phil for formatting."""
  param_phil = process_master_phil.format(python_object=params)
  f = open(file_name, "w")
  param_phil.show(out=f)
  f.close()
def write_run_script (file_name, cmds) :
  """Write an executable /bin/sh script that sources the Phenix or cctbx
  environment and then runs the given command list."""
  f = open(file_name, "w")
  # rwx for owner, read-only for group/other.
  os.fchmod(f.fileno(),
    stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR|stat.S_IRGRP|stat.S_IROTH)
  f.write("#!/bin/sh\n\n")
  use_cctbx_setpaths = True
  # Prefer the Phenix environment script when installed (unless a custom
  # environment is explicitly requested); otherwise fall back to the
  # cctbx build's setpaths.sh.
  if "PHENIX" in os.environ and not "PHENIX_CUSTOM_ENV" in os.environ :
    env_file = os.path.join(os.environ["PHENIX"], "phenix_env.sh")
    if os.path.isfile(env_file) :
      f.write("source %s\n" % env_file)
      use_cctbx_setpaths = False
  if use_cctbx_setpaths :
    f.write("source %s\n" % libtbx.env.under_build("setpaths.sh"))
  f.write("%s" % " ".join(cmds))
  f.close()
# XXX command-line launcher
def run (args) :
  """Command-line entry point: parse PHIL arguments, unpickle the target
  named by run_file, and run it inside a detached_process_server."""
  user_phil = []
  for arg in args :
    if os.path.isfile(arg) :
      file_name = os.path.abspath(arg)
      base, ext = os.path.splitext(file_name)
      if ext in [".params", ".eff", ".def", ".phil"] :
        user_phil.append(libtbx.phil.parse(file_name=file_name))
      elif ext in [".pkl", ".pickle"] :
        # A pickle on the command line is taken as the run_file itself.
        input_string = "run_file = %s" % arg
        user_phil.append(libtbx.phil.parse(input_string))
    else :
      # Non-file arguments are treated as inline PHIL assignments.
      try :
        arg_phil = libtbx.phil.parse(arg)
      except RuntimeError, e :
        print e
      else :
        user_phil.append(arg_phil)
  working_phil = process_master_phil.fetch(sources=user_phil)
  params = working_phil.extract()
  if params.run_file is None :
    working_phil.show()
    raise Sorry("Pickled target function run_file not defined.")
  target = easy_pickle.load(params.run_file)
  server = detached_process_server(target, params=params)
  server.run()
########################################################################
# testing classes (see tst_runtime_utils.py for usage)
class simple_client (detached_process_client) :
  """Test client: buffers stdout in memory, counts status callbacks, and
  re-raises server-side errors directly (see tst_runtime_utils.py)."""
  def __init__ (self, *args, **kwds) :
    self.n_cb = 0
    self.out = cStringIO.StringIO()
    detached_process_client.__init__(self, *args, **kwds)
  def callback_error (self, error, traceback_info) :
    raise error
  def callback_aborted (self) :
    raise Sorry("aborted as planned.")
  def callback_stdout (self, data) :
    self.out.write(data)
    #for line in data.splitlines() :
  def callback_other (self, data) :
    self.n_cb += 1
  def callback_final (self, result) :
    self.result = result
class simple_run (object) :
  """Test target: burns CPU on a polynomial-evaluation loop, emitting
  accumulating and non-accumulating callbacks plus stdout along the way,
  and returns the numeric total."""
  def __init__ (self, output_dir) :
    adopt_init_args(self, locals())
  def __call__ (self) :
    pu_total = 0
    for run in range(0, 4) :
      (x, n) = (0.1 * (run+1) , 20000)
      mu = 10.0
      pu = 0.0
      pol =[0] * 100
      r = range(0,100)
      # Accumulating callback: one entry per outer iteration.
      libtbx.call_back("run %d" % run, None, accumulate=True)
      time.sleep(1)
      for i in range(0,n):
        for j in r:
          pol[j] = mu = (mu + 2.0) / 2.0
        su = 0.0
        for j in r:
          su = x * su + pol[j]
        pu = pu + su
      pu_total += pu
      # Non-accumulating callback: overwrites the previous state entry.
      libtbx.call_back("current_total", pu, accumulate=False)
      print "current is %f" % pu
    return pu_total
class simple_func (object) :
  """Trivial pickleable target: prints its stored value when called."""
  def __init__ (self, x) :
    self.x = x
  def __call__ (self) :
    print self.x
|
|
"""
forms for django-form-utils
Time-stamp: <2010-04-28 02:57:16 carljm forms.py>
"""
from copy import deepcopy
from django import forms
from django.forms.util import flatatt, ErrorDict
from django.utils.safestring import mark_safe
class Fieldset(object):
    """
    An iterable Fieldset with a legend and a set of BoundFields.
    """
    def __init__(self, form, name, boundfields, legend='', classes='', description=''):
        self.form = form
        self.boundfields = boundfields
        # A legend of None falls back to the fieldset's name; an empty
        # string stays empty (the `and` keeps '' un-marked).
        if legend is None: legend = name
        self.legend = legend and mark_safe(legend)
        self.classes = classes
        self.description = mark_safe(description)
        self.name = name
    def _errors(self):
        # Only the form errors belonging to this fieldset's fields.
        return ErrorDict(((k, v) for (k, v) in self.form.errors.iteritems()
                          if k in [f.name for f in self.boundfields]))
    errors = property(_errors)
    def __iter__(self):
        # Yield BoundFields with their row_attrs attached.
        for bf in self.boundfields:
            yield _mark_row_attrs(bf, self.form)
    def __repr__(self):
        return "%s('%s', %s, legend='%s', classes='%s', description='%s')" % (
            self.__class__.__name__, self.name,
            [f.name for f in self.boundfields], self.legend, self.classes, self.description)
class FieldsetCollection(object):
    """Lazy container of Fieldset objects for a form; iterable and
    indexable by fieldset name."""
    def __init__(self, form, fieldsets):
        self.form = form
        self.fieldsets = fieldsets
        self._cached_fieldsets = []
    def __len__(self):
        # At least 1: with no explicit fieldsets a default 'main' one is built.
        return len(self.fieldsets) or 1
    def __iter__(self):
        if not self._cached_fieldsets:
            self._gather_fieldsets()
        for field in self._cached_fieldsets:
            yield field
    def __getitem__(self, key):
        if not self._cached_fieldsets:
            self._gather_fieldsets()
        for field in self._cached_fieldsets:
            if field.name == key:
                return field
        raise KeyError
    def _gather_fieldsets(self):
        # Build and cache the Fieldset objects from the raw definition.
        if not self.fieldsets:
            self.fieldsets = (('main', {'fields': self.form.fields.keys(),
                                        'legend': ''}),)
        for name, options in self.fieldsets:
            try:
                field_names = [n for n in options['fields']
                               if n in self.form.fields]
            except KeyError:
                raise ValueError("Fieldset definition must include 'fields' option." )
            boundfields = [forms.forms.BoundField(self.form, self.form.fields[n], n)
                           for n in field_names]
            self._cached_fieldsets.append(Fieldset(self.form, name,
                boundfields, options.get('legend', None),
                ' '.join(options.get('classes', ())),
                options.get('description', '')))
def _get_meta_attr(attrs, attr, default):
try:
ret = getattr(attrs['Meta'], attr)
except (KeyError, AttributeError):
ret = default
return ret
def _set_meta_attr(attrs, attr, value):
try:
setattr(attrs['Meta'], attr, value)
return True
except KeyError:
return False
def get_fieldsets(bases, attrs):
    """
    Get the fieldsets definition from the inner Meta class.

    Falls back to the first base class carrying ``base_fieldsets``,
    and finally to an empty list.
    """
    fieldsets = _get_meta_attr(attrs, 'fieldsets', None)
    if fieldsets is None:
        #grab the fieldsets from the first base class that has them
        inherited = (getattr(base, 'base_fieldsets', None) for base in bases)
        fieldsets = next((fs for fs in inherited if fs is not None), None)
    return fieldsets or []
def get_fields_from_fieldsets(fieldsets):
    """
    Get a list of all fields included in a fieldsets definition.
    """
    fields = []
    try:
        for _name, options in fieldsets:
            fields += options['fields']
    except (TypeError, KeyError):
        raise ValueError('"fieldsets" must be an iterable of two-tuples, '
                         'and the second tuple must be a dictionary '
                         'with a "fields" key')
    return fields
def get_row_attrs(bases, attrs):
    """
    Get the row_attrs definition from the inner Meta class.
    """
    row_attrs = _get_meta_attr(attrs, 'row_attrs', {})
    return row_attrs
def _mark_row_attrs(bf, form):
    """Attach a flattened, HTML-safe ``row_attrs`` string to the
    BoundField, tagging the row with a 'required' or 'optional' class."""
    attrs = deepcopy(form._row_attrs.get(bf.name, {}))
    req_class = 'required' if bf.field.required else 'optional'
    if 'class' in attrs:
        attrs['class'] = '%s %s' % (attrs['class'], req_class)
    else:
        attrs['class'] = req_class
    bf.row_attrs = mark_safe(flatatt(attrs))
    return bf
class BetterFormBaseMetaclass(type):
    """Metaclass that lifts Meta.fieldsets / Meta.row_attrs onto the
    class as base_fieldsets / base_row_attrs at class-creation time."""
    def __new__(cls, name, bases, attrs):
        attrs['base_fieldsets'] = get_fieldsets(bases, attrs)
        fields = get_fields_from_fieldsets(attrs['base_fieldsets'])
        # Only infer Meta.fields from the fieldsets when neither fields
        # nor exclude was given explicitly.
        if (_get_meta_attr(attrs, 'fields', None) is None and
                _get_meta_attr(attrs, 'exclude', None) is None):
            _set_meta_attr(attrs, 'fields', fields)
        attrs['base_row_attrs'] = get_row_attrs(bases, attrs)
        new_class = super(BetterFormBaseMetaclass,
                     cls).__new__(cls, name, bases, attrs)
        return new_class
class BetterFormMetaclass(BetterFormBaseMetaclass,
                          forms.forms.DeclarativeFieldsMetaclass):
    """Combines fieldset handling with Django's declarative fields."""
    pass
class BetterModelFormMetaclass(BetterFormBaseMetaclass,
                               forms.models.ModelFormMetaclass):
    """Combines fieldset handling with Django's ModelForm machinery."""
    pass
class BetterBaseForm(object):
    """
    ``BetterForm`` and ``BetterModelForm`` are subclasses of Form
    and ModelForm that allow for declarative definition of fieldsets
    and row_attrs in an inner Meta class.

    The row_attrs declaration is a dictionary mapping field names to
    dictionaries of attribute/value pairs. The attribute/value
    dictionaries will be flattened into HTML-style attribute/values
    (i.e. {'style': 'display: none'} will become ``style="display:
    none"``), and will be available as the ``row_attrs`` attribute of
    the ``BoundField``. Also, a CSS class of "required" or "optional"
    will automatically be added to the row_attrs of each
    ``BoundField``, depending on whether the field is required.

    There is no automatic inheritance of ``row_attrs``.

    The fieldsets declaration is a list of two-tuples very similar to
    the ``fieldsets`` option on a ModelAdmin class in
    ``django.contrib.admin``.

    The first item in each two-tuple is a name for the fieldset, and
    the second is a dictionary of fieldset options.

    Valid fieldset options in the dictionary include:

    ``fields`` (required): A tuple of field names to display in this
    fieldset.

    ``classes``: A list of extra CSS classes to apply to the fieldset.

    ``legend``: This value, if present, will be the contents of a ``legend``
    tag to open the fieldset.

    ``description``: A string of optional extra text to be displayed
    under the ``legend`` of the fieldset.

    When iterated over, the ``fieldsets`` attribute of a
    ``BetterForm`` (or ``BetterModelForm``) yields ``Fieldset``s.
    Each ``Fieldset`` has a ``name`` attribute, a ``legend``
    attribute, a ``classes`` attribute (the ``classes`` tuple
    collapsed into a space-separated string), and a description
    attribute, and when iterated over yields its ``BoundField``s.

    Subclasses of a ``BetterForm`` will inherit their parent's
    fieldsets unless they define their own.

    A ``BetterForm`` or ``BetterModelForm`` can still be iterated over
    directly to yield all of its ``BoundField``s, regardless of
    fieldsets.
    """
    def __init__(self, *args, **kwargs):
        # Per-instance copies so one form instance cannot mutate the
        # class-level definitions.
        self._fieldsets = deepcopy(self.base_fieldsets)
        self._row_attrs = deepcopy(self.base_row_attrs)
        self._fieldset_collection = None
        super(BetterBaseForm, self).__init__(*args, **kwargs)
    @property
    def fieldsets(self):
        # Lazily built and cached per instance.
        if not self._fieldset_collection:
            self._fieldset_collection = FieldsetCollection(self,
                                                           self._fieldsets)
        return self._fieldset_collection
    def __iter__(self):
        for bf in super(BetterBaseForm, self).__iter__():
            yield _mark_row_attrs(bf, self)
    def __getitem__(self, name):
        bf = super(BetterBaseForm, self).__getitem__(name)
        return _mark_row_attrs(bf, self)
class BetterForm(BetterBaseForm, forms.Form):
    # Python 2 metaclass declaration; fieldsets/row_attrs come from Meta.
    __metaclass__ = BetterFormMetaclass
    __doc__ = BetterBaseForm.__doc__
class BetterModelForm(BetterBaseForm, forms.ModelForm):
    # Python 2 metaclass declaration; fieldsets/row_attrs come from Meta.
    __metaclass__ = BetterModelFormMetaclass
    __doc__ = BetterBaseForm.__doc__
class BasePreviewForm (object):
    """
    Mixin to add preview functionality to a form. If the form is submitted with
    the following k/v pair in its ``data`` dictionary:

        'submit': 'preview' (value string is case insensitive)

    Then ``PreviewForm.preview`` will be marked ``True`` and the form will
    be marked invalid (though this invalidation will not put an error in
    its ``errors`` dictionary).
    """
    def __init__(self, *args, **kwargs):
        super(BasePreviewForm, self).__init__(*args, **kwargs)
        # NOTE(review): only the 'data' keyword is inspected here — data
        # passed positionally is not checked; confirm against callers.
        self.preview = self.check_preview(kwargs.get('data', None))
    def check_preview(self, data):
        # Case-insensitive match on the 'submit' value.
        if data and data.get('submit', '').lower() == u'preview':
            return True
        return False
    def is_valid(self, *args, **kwargs):
        # A preview submission is never "valid", but adds no errors.
        if self.preview:
            return False
        return super(BasePreviewForm, self).is_valid()
class PreviewModelForm(BasePreviewForm, BetterModelForm):
    """BetterModelForm with preview-submit support."""
    pass
class PreviewForm(BasePreviewForm, BetterForm):
    """BetterForm with preview-submit support."""
    pass
|
|
"""A multi-producer, multi-consumer queue."""
from time import time as _time
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
from collections import deque
import heapq
__all__ = ['Empty', 'Full', 'Queue', 'PriorityQueue', 'LifoQueue']
class Empty(Exception):
    """Exception raised by Queue.get(block=0)/get_nowait()."""
class Full(Exception):
    """Exception raised by Queue.put(block=0)/put_nowait()."""
class Queue:
    """Create a queue object with a given maximum size.

    If maxsize is <= 0, the queue size is infinite.

    Thread-safe: all public methods serialize on a single mutex shared
    by three condition variables (not_empty, not_full, all_tasks_done).
    """

    def __init__(self, maxsize=0):
        self.maxsize = maxsize
        self._init(maxsize)
        # mutex must be held whenever the queue is mutating.  All methods
        # that acquire mutex must release it before returning.  mutex
        # is shared between the three conditions, so acquiring and
        # releasing the conditions also acquires and releases mutex.
        self.mutex = _threading.Lock()
        # Notify not_empty whenever an item is added to the queue; a
        # thread waiting to get is notified then.
        self.not_empty = _threading.Condition(self.mutex)
        # Notify not_full whenever an item is removed from the queue;
        # a thread waiting to put is notified then.
        self.not_full = _threading.Condition(self.mutex)
        # Notify all_tasks_done whenever the number of unfinished tasks
        # drops to zero; thread waiting to join() is notified to resume
        self.all_tasks_done = _threading.Condition(self.mutex)
        self.unfinished_tasks = 0

    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by Queue consumer threads.  For each get() used to fetch a task,
        a subsequent call to task_done() tells the queue that the processing
        on the task is complete.

        If a join() is currently blocking, it will resume when all items
        have been processed (meaning that a task_done() call was received
        for every item that had been put() into the queue).

        Raises a ValueError if called more times than there were items
        placed in the queue.
        """
        self.all_tasks_done.acquire()
        try:
            unfinished = self.unfinished_tasks - 1
            if unfinished <= 0:
                if unfinished < 0:
                    raise ValueError('task_done() called too many times')
                self.all_tasks_done.notify_all()
            self.unfinished_tasks = unfinished
        finally:
            self.all_tasks_done.release()

    def join(self):
        """Blocks until all items in the Queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the
        queue. The count goes down whenever a consumer thread calls task_done()
        to indicate the item was retrieved and all work on it is complete.

        When the count of unfinished tasks drops to zero, join() unblocks.
        """
        self.all_tasks_done.acquire()
        try:
            while self.unfinished_tasks:
                self.all_tasks_done.wait()
        finally:
            self.all_tasks_done.release()

    def qsize(self):
        """Return the approximate size of the queue (not reliable!)."""
        self.mutex.acquire()
        n = self._qsize()
        self.mutex.release()
        return n

    def empty(self):
        """Return True if the queue is empty, False otherwise (not reliable!)."""
        self.mutex.acquire()
        n = not self._qsize()
        self.mutex.release()
        return n

    def full(self):
        """Return True if the queue is full, False otherwise (not reliable!)."""
        self.mutex.acquire()
        # Chained comparison: maxsize must be positive AND equal the size.
        n = 0 < self.maxsize == self._qsize()
        self.mutex.release()
        return n

    def put(self, item, block=True, timeout=None):
        """Put an item into the queue.

        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until a free slot is available. If 'timeout' is
        a non-negative number, it blocks at most 'timeout' seconds and raises
        the Full exception if no free slot was available within that time.
        Otherwise ('block' is false), put an item on the queue if a free slot
        is immediately available, else raise the Full exception ('timeout'
        is ignored in that case).
        """
        self.not_full.acquire()
        try:
            if self.maxsize > 0:
                if not block:
                    if self._qsize() == self.maxsize:
                        raise Full
                elif timeout is None:
                    while self._qsize() == self.maxsize:
                        self.not_full.wait()
                elif timeout < 0:
                    raise ValueError("'timeout' must be a non-negative number")
                else:
                    # Recompute the remaining wait each time we wake up,
                    # since Condition.wait can return spuriously.
                    endtime = _time() + timeout
                    while self._qsize() == self.maxsize:
                        remaining = endtime - _time()
                        if remaining <= 0.0:
                            raise Full
                        self.not_full.wait(remaining)
            self._put(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()
        finally:
            self.not_full.release()

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        Only enqueue the item if a free slot is immediately available.
        Otherwise raise the Full exception.
        """
        return self.put(item, False)

    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.

        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until an item is available. If 'timeout' is
        a non-negative number, it blocks at most 'timeout' seconds and raises
        the Empty exception if no item was available within that time.
        Otherwise ('block' is false), return an item if one is immediately
        available, else raise the Empty exception ('timeout' is ignored
        in that case).
        """
        self.not_empty.acquire()
        try:
            if not block:
                if not self._qsize():
                    raise Empty
            elif timeout is None:
                while not self._qsize():
                    self.not_empty.wait()
            elif timeout < 0:
                raise ValueError("'timeout' must be a non-negative number")
            else:
                # Same spurious-wakeup-safe deadline loop as put().
                endtime = _time() + timeout
                while not self._qsize():
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise Empty
                    self.not_empty.wait(remaining)
            item = self._get()
            self.not_full.notify()
            return item
        finally:
            self.not_empty.release()

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        Only get an item if one is immediately available. Otherwise
        raise the Empty exception.
        """
        return self.get(False)

    # Override these methods to implement other queue organizations
    # (e.g. stack or priority queue).
    # These will only be called with appropriate locks held

    # Initialize the queue representation
    def _init(self, maxsize):
        self.queue = deque()

    def _qsize(self, len=len):
        return len(self.queue)

    # Put a new item in the queue
    def _put(self, item):
        self.queue.append(item)

    # Get an item from the queue
    def _get(self):
        return self.queue.popleft()
class PriorityQueue(Queue):
    '''Variant of Queue that retrieves open entries in priority order (lowest first).

    Entries are typically tuples of the form:  (priority number, data).
    '''
    # Only the representation hooks are overridden; Queue supplies all
    # locking.  heappush/heappop are bound as default args so the lookup
    # is done once at definition time.

    def _init(self, maxsize):
        self.queue = []

    def _qsize(self, len=len):
        return len(self.queue)

    def _put(self, item, heappush=heapq.heappush):
        heappush(self.queue, item)

    def _get(self, heappop=heapq.heappop):
        return heappop(self.queue)
class LifoQueue(Queue):
    '''Variant of Queue that retrieves most recently added entries first.'''
    # A plain list used as a stack; Queue supplies all locking.

    def _init(self, maxsize):
        self.queue = []

    def _qsize(self, len=len):
        return len(self.queue)

    def _put(self, item):
        self.queue.append(item)

    def _get(self):
        return self.queue.pop()
|
|
import simpy
import functools
import random
import time
from enum import Enum
DEBUG = False
simlog = open("simlog.log", "w")
def dprint(*text):
    """Write one timestamped debug line to the simulation log.

    No-op unless the module-level DEBUG flag is set.
    """
    if not DEBUG:
        return
    print("[", time.strftime("%H:%M:%S"), "]:", end="", file=simlog)
    for item in text:
        print("", item, end="", file=simlog)
    print("", file=simlog)
# Default attributes

# default traffic generator size (bytes per generated packet):
tg_default_size = lambda x: 50
# default traffic generator distribution (inter-arrival time, seconds):
tg_default_dist = lambda x: random.expovariate(10)
# default DBA bandwidth:
DBA_IPACT_default_bandwidth = 1250000 # 1.25 Gb/s, bandwidth for each frequency/vpon
# default Antenna consumption (callable, rate per unit of on-time):
Ant_consumption = lambda x: 0
# default ONU consumption:
ONU_consumption = lambda x: 0
# default Processing Node consumption:
PN_consumption = lambda x: 0
# default LineCard consumption:
LC_consumption = lambda x: 0
# default Digital Unit consumption:
DU_consumption = lambda x: 0
# default ONU threshold (buffered bytes that trigger a bandwidth request):
ONU_threshold = 0
# default ONU bit rate downstreaming (bit/s; 0 means instantaneous):
ONU_bitRate_down = 0
# default ONU bit rate upstreaming:
ONU_bitRate_up = 0
# default Processing Node downstreaming:
PN_bitRate_down = 0
# default Processing Node upstreaming:
PN_bitRate_up = 0
# Constants
# Light Speed (m/s, used for fiber propagation delay):
Light_Speed = 300000000
# Radio Speed (m/s, used for antenna propagation delay):
Antenna_Speed = 300000000
# interactions delay (to not overload the simulator):
foo_delay = 0.00005 # arbitrary
# Statistics (module-global counters updated by the DBA classes)
total_lost = 0
total_duplicated = 0
total_requests = 0
bandwidth_used = 0
output_files = []
# writer class
class Writer(object):
    """Simple output sink: opens a timestamped .dat file, records its name
    in the module-level output_files list, and writes a header line."""
    def __init__(self, start="#\n"):
        fname = time.strftime("%d%m%Y_%H%M%S_output.dat")
        dprint("Opening file", fname, "to write.")
        output_files.append(fname)
        self.file = open(fname, 'w')
        self.write(start)
    def write(self, text):
        """Append raw text to the output file."""
        self.file.write(text)
    def close(self):
        """Close the underlying file handle."""
        self.file.close()
packet_w = None
# topology function
def create_topology(env, qnty_ant, qnty_onu, qnty_pn, qnty_splt, matrix, max_frequency):
    """Build and wire the simulated PON topology; return the node list.

    Creates qnty_ant Antennas, qnty_onu ONUs, qnty_pn Processing_Nodes
    (each with one always-on control LineCard, max_frequency sleeping
    LineCards, a control Digital_Unit hosting a DBA_Assigner and a stub
    baseband Digital_Unit) and qnty_splt Splitters. 'matrix' is a list of
    (index_one, index_two, distance) triples; node index_one is attached
    below node index_two. Node indices follow creation order:
    antennas, then ONUs, then PNs, then splitters.
    """
    id_onu = 0
    id_pn = 0
    id_ant = 0
    id_splt = 0
    nodes = []
    # create nodes
    for i in range(qnty_ant):
        dprint("Creating Antenna #", id_ant)
        nodes.append(Antenna(env, id_ant, None, Ant_consumption, 0, 0))
        id_ant += 1
    for i in range(qnty_onu):
        dprint("Creating ONU #", id_onu)
        nodes.append(ONU(env, id_onu, None, None, ONU_consumption, None, ONU_bitRate_up, ONU_bitRate_down, 0, threshold=ONU_threshold))
        id_onu += 1
    for i in range(qnty_pn):
        dprint("Creating Processing Node #", id_pn)
        # create lcs and put them to sleep
        pn_lcs = []
        pn_lcs.append(LineCard(env, -1, enabled=True, consumption=lambda x: 0)) # control's LC
        for j in range(max_frequency):
            pn_lcs.append(LineCard(env, j))
        # create DUs
        pn_dus = []
        # attach LCs and DUs
        pn_node = Processing_Node(env, id_pn, None, None, PN_consumption, PN_bitRate_up, PN_bitRate_down, 0, LC=pn_lcs, DU=pn_dus)
        # add a Digital Unit with DBA
        control_du = Digital_Unit(env, 0, 0, pn_node, pn_node, vms=[DBA_Assigner(env, pn_node, 0, max_frequency)], enabled=True)
        pn_node.append_DU(control_du)
        pn_node.attach_DU(0, 0) # attach DU 0 to LC 0 (-1)
        # add a Digital Unit to BB processing (not real BB processing)
        bb_du = Digital_Unit(env, 1, 0, pn_node, pn_node, vms=[Foo_BB_VM(env)])
        pn_node.append_DU(bb_du)
        nodes.append(pn_node)
        id_pn += 1
    for i in range(qnty_splt):
        dprint("Creating Splitter #", id_splt)
        nodes.append(Splitter(env, id_splt, None, None, 0))
        id_splt += 1
    dprint("Total nodes:", len(nodes))
    # connect nodes: wire parent/child pointers and per-link delays
    for m in matrix:
        n_one = nodes[m[0]]
        n_two = nodes[m[1]]
        dist = m[2]
        dprint("Attaching", str(n_one), "to", str(n_two), "with a distance of", str(dist))
        n_one.target_up = n_two
        # ONUs and Splitters keep a list of children; other nodes a single one
        if(type(n_two) is ONU or type(n_two) is Splitter):
            n_two.target_down.append(n_one)
        else:
            n_two.target_down = n_one
        if(type(n_one) is Antenna):
            n_one.delay = dist / float(Antenna_Speed)
        else:
            n_one.delay_up = dist / float(Light_Speed)
    def set_local_nodes(node):
        # Collect the Processing_Nodes reachable below 'node' (through splitters).
        if(isinstance(node, Splitter)):
            arr = []
            for t in node.target_down:
                arr += set_local_nodes(t)
            return arr
        elif(isinstance(node, Processing_Node)):
            dprint(str(node), "is a local node")
            return [node]
        else:
            return []
    # set local nodes
    for n in nodes:
        if(isinstance(n, Processing_Node)):
            dprint("Setting local nodes to", str(n), "...")
            n.local_nodes = set_local_nodes(n.target_down)
    return nodes
def create_topology_from_nodes(env, matrix, nodes):
    """Re-wire an existing node list according to 'matrix' and strip stale VPONs.

    'matrix' is a list of (index_one, index_two, distance) triples; node
    index_one is attached below node index_two. Any DBA_IPACT entries left
    in a Processing_Node's DU list from a previous topology are removed.
    Returns the (mutated) node list.
    """
    for m in matrix:
        n_one = nodes[m[0]]
        n_two = nodes[m[1]]
        dist = m[2]
        dprint("Attaching", str(n_one), "to", str(n_two), "with a distance of", str(dist))
        n_one.target_up = n_two
        # ONUs and Splitters keep a list of children; other nodes a single one
        if(type(n_two) is ONU or type(n_two) is Splitter):
            n_two.target_down.append(n_one)
        else:
            n_two.target_down = n_one
        if(type(n_one) is Antenna):
            n_one.delay = dist / float(Antenna_Speed)
        else:
            n_one.delay_up = dist / float(Light_Speed)
    # remove all DBAs/VPONs
    # BUGFIX: the original removed items from n.DU while indexing it with a
    # range computed up front, which skips elements and can raise IndexError
    # ('i = i - 1' on a for-loop variable has no effect in Python). Rebuild
    # the list in place instead.
    for n in nodes:
        if(type(n) is Processing_Node):
            kept = []
            for du in n.DU:
                if(type(du) is DBA_IPACT):
                    dprint("Removing DBA IPACT from a DU")
                else:
                    kept.append(du)
            n.DU[:] = kept
    return nodes
# abstract class
class Traffic_Generator(object):
    """Abstract traffic source: periodically emits Packets into self.hold.

    'distribution' and 'size' are callables (receiving this generator) that
    return the inter-arrival time in seconds and the packet size in bytes.
    Setting self.hold to None (see Antenna.end) pauses generation.
    """
    def __init__(self, env, id, distribution, size):
        self.env = env
        self.id = id
        self.dist = distribution  # callable -> inter-arrival time (s)
        self.size = size  # callable -> packet size (bytes)
        self.hold = simpy.Store(self.env)  # generated packets; None while asleep
        self.trafic_action = env.process(self.trafic_run())
        self.packets_sent = 0
    def trafic_run(self):
        """Generation loop: wait dist(), then store one new Packet."""
        # BUGFIX(idiom): identity comparisons with None now use 'is' (PEP 8);
        # behavior is unchanged since simpy.Store does not define __eq__.
        while True:
            # coarse busy-wait while the node sleeps and has no store
            while(self.hold is None):
                yield self.env.timeout(foo_delay)
            yield self.env.timeout(self.dist(self)) # distribution time (wait time between calls)
            if(self.hold is None):
                continue  # node went to sleep during the wait; drop this packet
            p = Packet(self.packets_sent, self.size(self), self.id, -1, self.env.now)
            self.hold.put(p)
            self.packets_sent += 1
# abstract class
class Active_Node(object):
    """Mixin tracking enabled state, accumulated on-time and energy use.

    'consumption_rate' is a callable (receiving this node) giving the power
    rate; consumption() multiplies it by total on-time. 'objs' lists child
    Active_Nodes whose state follows this node's start()/end().
    """
    def __init__(self, env, enabled, consumption_rate, objs, start_time):
        self.env = env
        self.enabled = enabled
        self.consumption_rate = consumption_rate
        self.start_time = start_time
        self.elapsed_time = 0    # on-time of the current awake interval
        self.total_time = 0.0    # on-time accumulated over past intervals
        self.an_action = env.process(self.an_run())
        self.obj_sleeping = [] # sleeping objects
        self.objs = objs # active nodes inside
    def start(self):
        # Wake this node and every child that end() put to sleep.
        self.start_time = self.env.now
        self.enabled = True
        for o in self.obj_sleeping:
            o.start()
        self.obj_sleeping = []
    def end(self):
        # Suspend this node; remember which children were awake so start()
        # restores exactly that set.
        self.total_time += self.elapsed_time
        self.elapsed_time = 0
        self.enabled = False
        for o in self.objs:
            if(o.enabled is True):
                self.obj_sleeping.append(o)
            o.end()
    def consumption(self):
        """Total energy: children's consumption plus own rate x on-time."""
        total = 0
        for o in self.objs:
            total += o.consumption()
        return total + self.consumption_rate(self) * (self.total_time + self.elapsed_time)
    def an_run(self):
        # count time: refresh elapsed_time while awake, every foo_delay tick
        while(True):
            if(self.enabled):
                self.elapsed_time = self.env.now - self.start_time
            yield self.env.timeout(foo_delay)
# traffic gen implemented
class Antenna(Traffic_Generator, Active_Node):
    """Radio head: generates traffic (Traffic_Generator) and forwards each
    packet to its upstream target (typically an ONU) after the radio
    propagation delay."""
    def __init__(self, env, id, target_up, consumption_rate, bitRate, distance, enabled=True):
        self.env = env
        self.id = id
        self.bitRate = bitRate  # transmission rate in bit/s (0 = instantaneous)
        self.target_up = target_up
        self.delay = distance / float(Antenna_Speed)  # propagation delay (s)
        Traffic_Generator.__init__(self, self.env, self.id, tg_default_dist, tg_default_size)
        Active_Node.__init__(self, self.env, enabled, consumption_rate, [], self.env.now)
        self.action = env.process(self.run())
    def start(self):
        # Wake up: recreate the store so trafic_run resumes generating.
        self.start_time = self.env.now
        self.enabled = True
        self.hold = simpy.Store(self.env)
    def end(self):
        # Sleep: dropping the store to None pauses generation and forwarding.
        self.total_time += self.elapsed_time
        self.elapsed_time = 0
        self.enabled = False
        self.hold = None
    def run(self):
        # Forwarding loop: take each generated packet and push it upstream.
        while(True):
            if(self.enabled):
                pkt = yield self.hold.get() # wait data
                dprint(str(self), "took", str(pkt), "at", self.env.now)
                if(self.target_up != None):
                    if(self.bitRate > 0):
                        yield self.env.timeout(pkt.size / (self.bitRate / 8)) # transmission
                    yield self.env.timeout(self.delay) # propagation
                    dprint(str(self), "delivered to", str(self.target_up), "at", self.env.now)
                    self.env.process(self.target_up.put(pkt, up=True))
            yield self.env.timeout(foo_delay)
    def __repr__(self):
        return "Antenna #{}".\
        format(self.id)
# data
class Packet(object):
    """Unit of simulated traffic.

    Records source/destination ids, payload size (bytes), creation time,
    time spent waiting for a grant, and the wavelength it was sent on
    (freq, -1 while untuned).
    """
    def __init__(self, id, size, src, dst, init_time, freq=-1):
        self.id = id
        self.size = size
        self.src = src
        self.dst = dst
        self.init_time = init_time
        self.waited_time = 0
        self.freq = freq
    def __repr__(self):
        summary = "Packet [id:{},src:{},size:{},freq:{},init_time:{}]"
        return summary.format(self.id, self.src, self.size, self.freq, self.init_time)
# abstract class
class Virtual_Machine(object):
    """Abstract virtualized function hosted on a Digital_Unit.

    Subclasses override func(); the base implementation is an identity
    pass-through so unhandled objects continue down the chain.
    """
    def func(self, r):
        """Process r and return what should continue along the chain."""
        return r
# test VM (writing test)
class Foo_BB_VM(Virtual_Machine):
    """Stub baseband-processing VM.

    Logs each delivered Packet (or list of Packets) to the module-level
    packet_w writer, then consumes the object (returns None so nothing
    propagates further down the chain).
    """
    def __init__(self, env, delay=0):
        self.env = env
        self.delay = delay  # simulated processing time per call (s)
    def func(self, o):
        # Log only when a writer is installed; columns are:
        # id src init_time waited_time freq size now
        if(packet_w != None):
            if(type(o) is Packet):
                packet_w.write("{} {} {} {} {} {} {}\n".format(o.id, o.src, o.init_time, o.waited_time, repr(o.freq).replace(" ", ""), o.size, self.env.now))
            if(type(o) is list and type(o[0]) is Packet):
                for p in o:
                    packet_w.write("{} {} {} {} {} {} {}\n".format(p.id, p.src, p.init_time, p.waited_time, repr(p.freq).replace(" ", ""), p.size, self.env.now))
        yield self.env.timeout(self.delay)
        return None
    def __repr__(self):
        return "Foo BB VM - (n/a)"
# DBA Request
class Request(Packet):
    """Bandwidth request sent upstream by an ONU to its DBA.

    NOTE(review): Packet.__init__ runs after self.freq is assigned and its
    default freq=-1 overwrites it, so a Request's freq always ends up -1.
    Callers currently always pass freq=-1, so this is harmless — confirm
    before relying on the freq argument.
    """
    def __init__(self, id, id_sender, freq, bandwidth, ack):
        self.id_sender = id_sender  # requesting ONU's id
        self.freq = freq
        self.bandwidth = bandwidth  # bytes currently buffered at the ONU
        self.ack = ack  # ONU-side ack counter, for duplicate detection
        Packet.__init__(self, id, 0, id_sender, -1, -1)
    def __repr__(self):
        return "Request [id:{},id_sender:{},freq:{},bandwidth:{},ack:{}]".\
        format(self.id, self.id_sender, self.freq, self.bandwidth, self.ack)
# DBA Grant
class Grant(Packet):
    """Transmission grant sent downstream by a DBA to one ONU.

    'init_time' is when the ONU may start sending, 'size' the granted byte
    budget, 'freq' the wavelength to tune to, 'ack' the DBA-side ack counter.
    """
    def __init__(self, onu, init_time, size, freq, ack):
        self.onu = onu  # id of the target ONU
        self.ack = ack
        Packet.__init__(self, -1, size, -1, -1, init_time, freq=freq)
    def __repr__(self):
        return "Grant [onu:{},init_time:{},size:{},freq:{},ack:{}]".\
        format(self.onu, self.init_time, self.size, self.freq, self.ack)
# passive Splitter
class Splitter(object):
    """Passive optical splitter.

    Replicates downstream traffic to every attached child (nearest first)
    and forwards upstream traffic to its single parent. Purely passive:
    no power model, no bit-rate limits.
    """
    def __init__(self, env, id, target_up, target_down, distance_up):
        self.env = env
        self.id = id
        self.target_up = target_up
        # BUGFIX: the original assigned target_down and then unconditionally
        # clobbered it with []; honor a caller-supplied list, default to [].
        self.target_down = [] if target_down is None else target_down
        self.delay_up = distance_up / float(Light_Speed)
    def put(self, pkt, down=False, up=False):
        """Forward pkt downstream (replicated to all children) and/or upstream."""
        dprint(str(self), "receveid obj", str(pkt), "at", self.env.now)
        if(down):
            # visit children nearest-first, waiting only the incremental delay
            self.target_down.sort(key=lambda target: target.delay_up)
            counted = 0
            for t in self.target_down:
                yield self.env.timeout(t.delay_up - counted)
                counted = t.delay_up
                self.env.process(t.put(pkt, down=True))
        if(up):
            yield self.env.timeout(self.delay_up)
            self.env.process(self.target_up.put(pkt, up=True))
    def __repr__(self):
        return "Splitter #{}".format(self.id)
# OLT or local node (fog)
class Processing_Node(Active_Node):
    """OLT / fog processing node.

    Relays traffic up- and downstream and hosts LineCards (per-wavelength
    receivers) and Digital_Units (chains of virtualized functions).
    Incoming upstream traffic is dispatched to the LineCard whose freq
    matches; downstream traffic is forwarded toward the ONUs.
    """
    def __init__(self, env, id, target_up, target_down, consumption_rate, bitRate_up, bitRate_down, distance, enabled=True, DU=None, LC=None):
        # BUGFIX: DU=[] / LC=[] were mutable default arguments shared by all
        # instances constructed without explicit lists; use None sentinels.
        self.env = env
        self.id = id
        self.DU = [] if DU is None else DU
        self.LC = [] if LC is None else LC
        self.bitRate_up = bitRate_up      # bit/s; 0 means instantaneous
        self.bitRate_down = bitRate_down  # bit/s; 0 means instantaneous
        self.res_hold_up = simpy.Resource(self.env, capacity=1)    # guards hold_up
        self.res_hold_down = simpy.Resource(self.env, capacity=1)  # guards hold_down
        self.hold_up = []
        self.hold_down = []
        self.target_up = target_up
        self.target_down = target_down
        self.delay_up = distance / float(Light_Speed)
        Active_Node.__init__(self, env, enabled, consumption_rate, self.DU + self.LC, self.env.now)
        self.action = self.env.process(self.run())
    # calculate time required to transfer size in bytes to onu from node
    def time_to_onu(self, size, id_onu, target=None):
        """Recursive downstream delay estimate (propagation + transmission)
        from this node to ONU id_onu; returns 0 if the ONU is unreachable."""
        if(target is None):
            target = self
        if(type(target) is Splitter):
            for t in target.target_down:
                delay_acc = self.time_to_onu(size, id_onu, target=t)
                if(delay_acc > 0):
                    return delay_acc + target.delay_up
        elif(type(target) is ONU):
            if(target.id == id_onu):
                return target.delay_up
        else:
            delay_acc = self.time_to_onu(size, id_onu, target=target.target_down)
            if(delay_acc > 0):
                if(target.bitRate_down > 0):
                    delay_acc += (size / (target.bitRate_down / 8))
                if(self != target):
                    delay_acc += target.delay_up
                return delay_acc
        return 0
    # calculate time required to transfer size in bytes from onu to node
    def time_from_onu(self, size, id_onu, target=None):
        """Recursive upstream delay estimate from ONU id_onu to this node;
        returns 0 if the ONU is unreachable."""
        if(target is None):
            target = self.target_down # first time
        if(type(target) is Splitter):
            for t in target.target_down:
                delay_acc = self.time_from_onu(size, id_onu, target=t)
                if(delay_acc > 0):
                    return delay_acc + target.delay_up
        elif(type(target) is ONU):
            if(target.id == id_onu):
                if(target.bitRate_up > 0):
                    return target.delay_up + (size / (target.bitRate_up / 8))
                else:
                    return target.delay_up
        else:
            delay_acc = self.time_from_onu(size, id_onu, target=target.target_down)
            if(delay_acc > 0):
                if(target.bitRate_up > 0):
                    delay_acc += (size / (target.bitRate_up / 8))
                return delay_acc + target.delay_up
        return 0
    def attach_DU(self, du, lc):
        """Wake LineCard 'lc' (index) and point its output at DU 'du' (index)."""
        if(self.LC[lc].enabled is False):
            self.LC[lc].start()
        self.LC[lc].out = self.DU[du]
    def append_DU(self, du):
        """Register another Digital_Unit on this node."""
        self.DU.append(du)
    # upstreaming
    def send_up(self, o):
        """Transmit o (one object or a list) to the upstream target."""
        if(self.target_up is not None):
            if(self.bitRate_up > 0):
                total_size = 0
                if(type(o) is list):
                    for k in o:
                        total_size += k.size
                else:
                    total_size = o.size
                yield self.env.timeout(total_size / (self.bitRate_up / 8)) # transmission
            yield self.env.timeout(self.delay_up) # propagation
            dprint(str(self), "finished sending (upstream) obj at", self.env.now)
            self.env.process(self.target_up.put(o, up=True))
    # downstreaming
    def send_down(self, o):
        """Transmit o (one object or a list) to the downstream target."""
        if(self.target_down is not None):
            if(self.bitRate_down > 0):
                total_size = 0
                if(type(o) is list):
                    for k in o:
                        total_size += k.size
                else:
                    total_size = o.size
                yield self.env.timeout(total_size / (self.bitRate_down / 8)) # transmission
            yield self.env.timeout(self.target_down.delay_up) # propagation
            dprint(str(self), "finished sending (downstream) obj at", self.env.now)
            self.env.process(self.target_down.put(o, down=True))
    def put(self, pkt, down=False, up=False):
        """Accept traffic; buffer it when enabled, else forward transparently."""
        dprint(str(self), "receveid obj", str(pkt), "at", self.env.now)
        if(self.enabled):
            if(down):
                with self.res_hold_down.request() as req:
                    yield req
                    self.hold_down.append(pkt)
            if(up):
                with self.res_hold_up.request() as req:
                    yield req
                    self.hold_up.append(pkt)
        else:
            # disabled nodes act as pass-through repeaters
            dprint(str(self), "is not enabled at", self.env.now)
            if(down):
                self.env.process(self.send_down(pkt))
            if(up):
                self.env.process(self.send_up(pkt))
    def run(self):
        """Main loop: dispatch buffered upstream traffic to the matching
        LineCard and flush buffered downstream traffic toward the ONUs."""
        while(True):
            if(self.enabled):
                # if any data received from down
                while(len(self.hold_up) > 0):
                    with self.res_hold_up.request() as req:
                        yield req
                        o = self.hold_up.pop(0)
                        target_lc = None
                        # search correct lc by matching wavelength
                        if(len(self.LC) > 0):
                            for l in self.LC:
                                true_object = o
                                if(type(o) is list):
                                    true_object = o[0]
                                if(true_object.freq == l.freq):
                                    target_lc = l
                                    break
                                elif type(true_object.freq) is list:
                                    if(true_object.freq[0] == l.freq):
                                        target_lc = l
                                        break
                        if(target_lc is not None):
                            self.env.process(target_lc.put(o))
                # if any data received from up
                while(len(self.hold_down) > 0):
                    with self.res_hold_down.request() as req:
                        yield req
                        dprint(str(self), "is going to send (downstream) at", self.env.now)
                        self.env.process(self.send_down(self.hold_down.pop(0)))
            yield self.env.timeout(foo_delay)
    def __repr__(self):
        return "Processing Node #{}".\
        format(self.id)
# chain of virtualized functions
class Digital_Unit(Active_Node):
    """Chain of virtualized functions (VMs) hosted on a Processing_Node.

    Objects enter via execute_functions(), pass through each VM's func()
    in order, and whatever survives is forwarded to self.out (another
    Digital_Unit or a Processing_Node).
    """
    def __init__(self, env, id, consumption_rate, node, out, vms=None, enabled=False):
        self.id = id
        self.env = env
        self.node = node  # hosting Processing_Node
        self.res_vms = simpy.Resource(self.env, capacity=1)  # guards self.vms
        self.vms = vms
        self.out = out  # next hop for surviving objects
        Active_Node.__init__(self, env, enabled, consumption_rate, vms, self.env.now)
    def config(node, DU, config):
        """Chain DUs pairwise: for each (src_id, dst_id) in 'config', point
        the src DU's out at the dst DU. Stops at the first pair whose ids
        cannot both be resolved. (Called unbound; 'node' is unused.)
        """
        for tp in config:
            d1 = None
            d2 = None
            for d in DU:
                if(d.id == tp[0]):
                    d1 = d
                if(d.id == tp[1]):
                    d2 = d
            if(d1 is None or d2 is None):
                break
            # BUGFIX: the original assigned 'd1.out = d' (the last DU scanned)
            # instead of the looked-up destination, and executed even after a
            # failed lookup (AttributeError on None).
            d1.out = d2
    def append_vm(self, vm): # append vms
        """Append a VM to the chain (serialized via res_vms)."""
        dprint(str(self), "is appending VM", vm)
        with self.res_vms.request() as req:
            yield req
            self.vms.append(vm)
    def execute_functions(self, o):
        """Run o through every VM in order; forward the survivor to self.out."""
        dprint(str(self), "will execute functions at", self.env.now)
        yield self.env.timeout(0)
        if(self.vms is None):
            self.env.process(self.out.send_up(o))
        else:
            for v in self.vms:
                dprint(str(self), "is using VM", str(v), "on", str(o), "at", self.env.now)
                o = yield self.env.process(v.func(o))
                dprint(str(self), "returned", str(o), "from execute functions at", self.env.now)
                if(o is None):
                    return  # a VM consumed the object; stop here
            dprint(str(self), "is sending the left data to", str(self.out), "at", self.env.now)
            if(type(self.out) is Digital_Unit):
                self.env.process(self.out.execute_functions(o))
            elif(type(self.out) is Processing_Node):
                self.env.process(self.out.send_up(o))
    def __repr__(self):
        return "Digital Unit #{}{}".\
        format(self.node.id, self.id)
# linecard attuned to a frequency
class LineCard(Active_Node):
    """Receiver attuned to one wavelength.

    Forwards traffic whose freq matches into its Digital_Unit chain
    (self.out). freq -1 is the always-on control card.
    """
    def __init__(self, env, freq, delay=0, out=None, enabled=False, consumption=LC_consumption):
        self.env = env
        self.delay = delay  # per-object forwarding latency (s)
        self.freq = freq  # wavelength this card listens on (-1 = control)
        self.out = out  # Digital_Unit receiving matched traffic
        Active_Node.__init__(self, env, enabled, consumption, [], self.env.now)
    def put(self, p):
        # Only forward when attached and awake; otherwise p is silently dropped.
        if(self.out != None and self.enabled == True):
            dprint(str(self), "is pushing", p, "to a DU at", self.env.now)
            yield self.env.timeout(self.delay)
            self.env.process(self.out.execute_functions(p))
    def __repr__(self):
        return "LineCard freq:{}".\
        format(self.freq)
# ONU
class ONU(Active_Node):
    """Optical Network Unit.

    Buffers upstream traffic arriving from its antennas, issues bandwidth
    Requests to the DBA once the buffer crosses 'threshold', and transmits
    buffered packets during the windows described by received Grants.
    """
    def __init__(self, env, id, target_up, target_down, consumption, cellsite, bitRate_up, bitRate_down, distance, enabled=True, freq=-1, threshold=0):
        self.env = env
        self.id = id
        self.freq = freq  # wavelength the ONU is currently tuned to
        self.target_up = target_up
        if(target_down is None):
            self.target_down = []
        else:
            self.target_down = target_down
        self.cellsite = cellsite # id cellsite
        self.delay_up = distance / float(Light_Speed)
        self.total_hold_size = 0  # bytes buffered for upstream
        self.ack = 0  # ack counter kept aligned with the DBA's copy
        self.res_hold_up = simpy.Resource(self.env, capacity=1)
        self.res_hold_down = simpy.Resource(self.env, capacity=1)
        self.res_grants = simpy.Resource(self.env, capacity=1)
        self.res_requests = simpy.Resource(self.env, capacity=1)
        self.hold_up = []
        self.hold_down = []
        self.grants = []
        self.requests = []
        self.timer = []  # pending resend timeouts (seconds)
        self.waiting = False  # True while a resend timer is armed
        self.reset_timer = False  # set when a grant arrives, cancels resend
        self.request_counting = 0  # id for the next Request
        self.bitRate_up = bitRate_up
        self.bitRate_down = bitRate_down
        self.threshold = threshold  # buffered bytes that trigger a request
        Active_Node.__init__(self, env, enabled, consumption, [], self.env.now)
        self.action = env.process(self.run())
    def round_trip_time(self):
        """Propagation RTT to the nearest enabled Processing_Node above."""
        total = 0
        target = self
        while(not (isinstance(target, Processing_Node) and target.enabled) ):
            total += target.delay_up
            target = target.target_up
        total += target.time_to_onu(0, self.id)
        dprint(str(self), "calculated RTT:", total)
        return total
    def end(self):
        # Suspend and drop all buffered state.
        self.total_time += self.elapsed_time
        self.elapsed_time = 0
        self.enabled = False
        self.hold_up = []
        self.hold_down = []
        self.grants = []
        self.requests = []
        self.timer = []
    # receive new data to upstream/downstream it
    def put(self, pkt, down=False, up=False):
        """Accept grants / downstream packets (down=True) or antenna traffic
        (up=True). Crossing 'threshold' triggers a bandwidth request."""
        dprint(str(self), "receveid obj", str(pkt), "at", self.env.now)
        if(self.enabled):
            if(down):
                # one grant
                if(type(pkt) is Grant and pkt.onu == self.id):
                    self.reset_timer = True
                    with self.res_grants.request() as req:
                        yield req
                        self.grants.append(pkt)
                # many grants
                # BUGFIX: compared against self.id_sender, which does not
                # exist on ONU (AttributeError); the ONU's own id was meant.
                elif(type(pkt) is list and type(pkt[0]) is Grant and pkt[0].onu == self.id):
                    self.reset_timer = True
                    with self.res_grants.request() as req:
                        yield req
                        for g in pkt:
                            self.grants.append(g)
                # data
                elif(type(pkt) is Packet):
                    with self.res_hold_down.request() as req:
                        yield req
                        self.hold_down.append(pkt)
            elif(up):
                with self.res_hold_up.request() as req:
                    yield req
                    self.hold_up.append(pkt)
                    self.total_hold_size += pkt.size
                    if(self.total_hold_size > self.threshold):
                        self.env.process(self.gen_request())
    # generate a request
    def gen_request(self):
        """Queue a Request for the current buffer size; arm a resend timer."""
        dprint(str(self), "is generating a request at", self.env.now)
        with self.res_requests.request() as req:
            yield req
            self.requests.append(Request(self.request_counting, self.id, -1, self.total_hold_size, self.ack))
            if(not self.waiting):
                self.timer.append(self.round_trip_time() * 2) # 2 x RTT
            self.reset_timer = False
            self.request_counting += 1
    # upstreaming
    def send_up(self, o):
        """Transmit o (object or list) to the upstream target."""
        if(self.target_up is not None):
            if(self.bitRate_up > 0):
                total_size = 0
                if(type(o) is list):
                    for k in o:
                        total_size += k.size
                else:
                    total_size = o.size
                yield self.env.timeout(total_size / (self.bitRate_up / 8)) # transmission
            yield self.env.timeout(self.delay_up) # propagation
            dprint(str(self), "finished sending (upstream) obj at", self.env.now)
            self.env.process(self.target_up.put(o, up=True))
    # downstreaming
    def send_down(self, o):
        """Deliver o to every attached downstream target (e.g. antennas),
        nearest first."""
        if(self.target_down is not None):
            # BUGFIX: sorted() result was discarded; sort in place instead.
            self.target_down.sort(key=lambda target: target.delay_up)
            counted = 0
            for t in self.target_down:
                additional_time = 0
                # BUGFIX: referenced bare 'bitRate_down' (NameError); the
                # instance attribute was intended.
                if(self.bitRate_down > 0):
                    total_size = 0
                    if(type(o) is list):
                        for k in o:
                            total_size += k.size
                    else:
                        total_size = o.size
                    additional_time = total_size / (self.bitRate_down / 8)
                yield self.env.timeout(additional_time + t.delay_up - counted)
                counted = additional_time + t.delay_up
                dprint(str(self), "finished sending (downstream) obj at", self.env.now)
                self.env.process(t.put(o, down=True))
            # BUGFIX: a trailing duplicate send used self.target_down.delay_up,
            # but target_down is a list (AttributeError); removed.
    # use the grant(s) you received
    def use_grant(self, grant):
        """Drain up to grant.size bytes from the buffer and transmit them at
        grant.init_time on grant.freq."""
        if(self.ack < grant.ack):
            # update ack
            self.ack = grant.ack
        to_wait = grant.init_time - self.env.now
        if(to_wait < 0):
            # grant window already passed: discard and re-request
            dprint(str(self), "is going to discard grant, reason: negative wait time; at", self.env.now)
            self.env.process(self.gen_request())
            return
        data_to_transfer = []
        with self.res_hold_up.request() as req:
            yield req
            total = 0
            while(len(self.hold_up) > 0):
                p = self.hold_up.pop(0)
                if(total + p.size > grant.size):
                    self.hold_up.insert(0, p)
                    break
                data_to_transfer.append(p)
                total += p.size
            self.total_hold_size -= total
        dprint(str(self), "plans to send", str(data_to_transfer), "with a hold of", str(self.hold_up), "and grant of", str(grant) ,"at", self.env.now)
        if(len(data_to_transfer) < 1):
            # data is empty! return grant and data
            # NOTE(review): these 'with' blocks never yield their requests, so
            # they do not actually hold the resources — confirm intent.
            with self.res_hold_up.request() as req:
                for d in reversed(data_to_transfer):
                    self.hold_up.insert(0, d)
            with self.res_grants.request() as req:
                self.grants.insert(0, grant)
            return
        for d in data_to_transfer: # (self, id, size, src, dst, init_time):
            d.src = self.id
            d.waited_time = self.env.now - d.init_time
            d.freq = grant.freq
        self.freq = grant.freq # tune ONU to freq
        dprint(str(self), "is going to wait", str(to_wait), "at", self.env.now)
        yield self.env.timeout(to_wait)
        yield self.env.process(self.send_up(data_to_transfer))
        dprint(str(self), "sent data at", self.env.now)
    # in case grant hasn't come
    def set_timer(self):
        """Wait the queued timeout; re-request unless a grant arrived."""
        to_wait = self.timer.pop(0)
        self.waiting = True
        yield self.env.timeout(to_wait)
        if(self.reset_timer):
            dprint(str(self), "Discarding timer: Grant received already at", self.env.now)
        else:
            dprint(str(self), "Resending request... at", self.env.now)
            self.env.process(self.gen_request())
        self.waiting = False
    # actions
    def run(self):
        """Main loop: flush requests, consume grants, forward downstream
        data, and arm resend timers."""
        while True:
            if(self.enabled):
                if(len(self.requests) > 0): # if you have requests to send
                    with self.res_requests.request() as req:
                        yield req
                        dprint(str(self), "is sending a request at", self.env.now)
                        self.env.process(self.send_up(self.requests.pop(0)))
                if(len(self.grants) > 0 and len(self.hold_up) > 0): # if you got grants
                    with self.res_grants.request() as req:
                        yield req
                        dprint(str(self), "is going to use a grant at", self.env.now)
                        # BUGFIX: sorted() result was discarded; sort in place
                        # (earliest start time first).
                        self.grants.sort(key=lambda grant: grant.init_time)
                        self.env.process(self.use_grant(self.grants.pop(0)))
                if(len(self.hold_down) > 0): # if you got downstreaming data
                    # BUGFIX: 'with self.res_hold_down as req' skipped
                    # .request(), which is not a context manager in simpy.
                    with self.res_hold_down.request() as req:
                        yield req
                        dprint(str(self), "is going to send (downstream) at", self.env.now)
                        self.env.process(self.send_down(self.hold_down.pop(0)))
                if(len(self.timer) > 0):
                    if(self.reset_timer):
                        self.timer = []
                    else:
                        dprint(str(self), "is setting timer to resend request at", self.env.now)
                        self.env.process(self.set_timer())
            yield self.env.timeout(foo_delay)
    def __repr__(self):
        return "ONU #{}".\
        format(self.id)
# VDBA IPACT
class DBA_IPACT(Active_Node, Virtual_Machine):
    """IPACT-style dynamic bandwidth allocator for one VPON/wavelength.

    Runs as a VM on a control Digital_Unit: consumes Requests from its
    associated ONUs, emits Grants, tracks per-second bandwidth usage, and
    hibernates itself (and its LineCard) when no ONU remains associated.
    """
    def __init__(self, env, node, consumption_rate, freq, bandwidth, delay=0, enabled=True):
        self.env = env
        self.node = node  # hosting Processing_Node
        self.delay = delay # delay to execute
        self.freq = freq  # wavelength this VPON serves
        self.bandwidth = bandwidth  # capacity of this VPON (bytes/s window)
        self.counting = False  # True once the hibernation timer is armed
        self.discarded_requests = 0
        self.duplicated_requests = 0
        self.busy = simpy.Resource(self.env, capacity=1)  # serializes func()
        self.onus = [] # "connected" onus
        self.acks = {}  # per-ONU ack counters for duplicate detection
        self.bandwidth_used = []  # (onu, size, grant_start, grant_end) tuples
        self.free_time = self.env.now  # earliest instant the channel is free
        Active_Node.__init__(self, env, enabled, consumption_rate, [], self.env.now)
        self.action = self.env.process(self.run())
        ### timeout:
        self.timeout = False
        # BUGFIX: kill_me was set by timer() but never initialized.
        self.kill_me = False
    def update_bandwidth(self):
        """Expire usage records older than 1s and refresh the ONU list."""
        # update bandwidth used
        while(len(self.bandwidth_used) > 0 and self.env.now - self.bandwidth_used[0][2] > 1):
            self.bandwidth_used.pop(0)
        # update onus connected
        self.onus = []
        for b in self.bandwidth_used:
            self.onus.append(b[0])
    def bandwidth_available(self):
        """Return the bytes still grantable in the current usage window."""
        self.update_bandwidth()
        # check bandwidth
        bandwidth_really_used = 0
        for b in self.bandwidth_used:
            bandwidth_really_used += b[1]
        return self.bandwidth - bandwidth_really_used
    # override function
    def end(self):
        self.total_time += self.elapsed_time
        self.elapsed_time = 0
        self.enabled = False
        self.onus = []
        self.acks = {}
    def associate_onu(self, r):
        """Bind the requesting ONU to this VPON and record its ack counter."""
        self.onus.append(r.id_sender)
        self.acks[r.id_sender] = r.ack
    def desassociate_onu(self, onu):
        """Unbind an ONU from this VPON."""
        self.onus.remove(onu)
        del self.acks[onu]
    ### timer:
    def timer(self, time):
        """After 'time' with no newer grant issued, shut this VPON down."""
        yield self.env.timeout(time)
        # no requests arrived after the one that armed this timer
        if(self.free_time < self.env.now):
            self.kill_me = True
            self.end()
    def func(self, r):
        """VM entry point: serve a Request from an associated ONU with a
        Grant, or pass the object along to the next VM."""
        global total_duplicated
        global total_lost
        global total_requests
        with self.busy.request() as req: # semaphore
            yield req
            if(type(r) is Request and r.id_sender in self.onus):
                # process request
                dprint(str(self), "is receiving", str(r), "at", str(self.env.now))
                total_requests += 1
                if(r.ack != self.acks[r.id_sender]): # not aligned acks!
                    dprint(str(self), "received duplicated request at", str(self.env.now))
                    total_duplicated += 1
                    return None
                # aligned acks
                time_to = self.node.time_to_onu(0, r.id_sender)
                time_from = self.node.time_from_onu(0, r.id_sender)
                time_from += r.bandwidth / (self.bandwidth) # updated in case EON
                available_band = self.bandwidth_available()
                if(available_band > 0):
                    # there is bandwidth
                    g = None
                    # generate grant(s)
                    self.acks[r.id_sender] += 1
                    send_time = 0
                    if(self.env.now + time_to > self.free_time):
                        # (possibly) first case
                        send_time = self.env.now + time_to + foo_delay
                    else:
                        # normal case
                        send_time = self.free_time + foo_delay
                    send_size = 0
                    if(available_band >= r.bandwidth):
                        dprint(str(self), "has enough bandwidth for request at", self.env.now)
                        send_size = r.bandwidth
                    else:
                        dprint(str(self), "is discarding request: not enough bandwidth available at", self.env.now)
                        total_lost += 1
                        return
                    g = Grant(r.id_sender, send_time, send_size, self.freq, self.acks[r.id_sender])
                    dprint(str(self), "generated", str(g), "at", self.env.now)
                    self.free_time = send_time + time_from
                    self.env.process(self.node.send_down(g))
                    self.bandwidth_used.append((g.onu, g.size, g.init_time, g.init_time + time_from))
                    dprint("Bandwidth available:", self.bandwidth_available(), "at", self.env.now)
                    ### set the timer:
                    yield self.env.timeout(self.delay)
                    self.counting = True
                    self.env.process(self.timer(time_to + time_from + 2*foo_delay))
                    return None # return none
                else:
                    # no bandwidth
                    # activate random local PN
                    dprint(str(self), "has no bandwidth at", self.env.now)
                    if(len(self.node.local_nodes) > 0):
                        # activate more-local PN
                        dprint(str(self), "is activating a more local node randomly at", self.env.now)
                        total_lost += 1
                        node = self.node.local_nodes.pop()
                        node.start()
                    else:
                        # no more local nodes!
                        dprint(str(self), "is discarding request: no bandwidth available at", self.env.now)
                        total_lost += 1
            else:
                # pass along to another dba
                dprint(str(self),"is passing along object", str(r), "at", str(self.env.now))
                return r
    def run(self):
        """Hibernate the VPON (and its LineCard) once no ONU is associated."""
        while True:
            if(self.enabled and self.counting):
                self.update_bandwidth()
                # BUGFIX: the original tested len(self.onus) < 0, which is
                # never true, so the VPON could never hibernate.
                if(len(self.onus) == 0):
                    dprint(str(self), "is going to hibernate at", self.env.now)
                    self.counting = False
                    self.node.LC[self.freq+1].end() # suspend LC linked to this VPON
                    self.end() # suspend this VPON
            yield self.env.timeout(foo_delay)
    def __repr__(self):
        return "DBA IPACT [freq:{},free_time:{}]".\
        format(self.freq, self.free_time)
# assign VPON/DBA to requests
class DBA_Assigner(Active_Node, Virtual_Machine):
    """Control-plane VM: routes each incoming Request to a DBA_IPACT VPON,
    creating a new VPON (and waking its LineCard) when the ONU has none."""
    def __init__(self, env, node, consumption_rate, max_frequency, enabled=True, delay=0):
        self.env = env
        self.node = node  # hosting Processing_Node
        self.max_frequency = max_frequency
        self.delay = delay  # processing latency per request (s)
        self.available_freq = 0  # next frequency to hand to a new VPON
        self.dbas = []  # DBA_IPACT instances created so far
        Active_Node.__init__(self, env, enabled, consumption_rate, [], self.env.now)
        # BUGFIX: the original started self.run(), but no run() exists in this
        # class or its bases (Active_Node only defines an_run), raising
        # AttributeError at construction. Provide an idle loop instead.
        self.action = self.env.process(self.run())
    def run(self):
        """Idle keep-alive loop (state tracking is done by Active_Node.an_run)."""
        while True:
            yield self.env.timeout(foo_delay)
    def func(self, o):
        """VM entry point: pass Requests through, ensuring the sending ONU is
        associated with some VPON/DBA first."""
        global total_lost
        global total_requests
        if(type(o) is Request):
            dprint(str(self), "received", str(o), "at", self.env.now)
            # search request's dba (if possible)
            target_dba = None
            yield self.env.timeout(self.delay)
            for d in self.dbas:
                if(o.id_sender in d.onus): # found!
                    dprint(str(self) + ": this ONU has already a DBA")
                    return o
            # not found! create/assign new VPON/DBA
            dprint(str(self) + ": this ONU hasn't a DBA")
            if(target_dba is None):
                if(len(self.node.LC) > self.available_freq+1):
                    # create, if possible
                    dprint(str(self) + ": Creating DBA at", self.env.now)
                    target_dba = DBA_IPACT(self.env, self.node, 0, self.available_freq, DBA_IPACT_default_bandwidth) # DBA_IPACT_default_bandwidth
                    lc = self.node.LC[self.available_freq+1]
                    if(lc.enabled is False):
                        lc.start()
                    if(lc.out is None):
                        lc.out = self.node.DU[1] # guessed baseband DU
                    self.available_freq += 1
                    target_dba.associate_onu(o)
                    yield self.env.process(self.node.DU[0].append_vm(target_dba))
                    self.dbas.append(target_dba)
                else:
                    # no free LineCard for another VPON
                    dprint(str(self.node), "has no bandwidth at", self.env.now)
                    pass
            else:
                # NOTE(review): unreachable — target_dba is always None here
                # (the found case returns inside the loop above).
                dprint(str(self) + ": Assigning DBA")
                # assign
                if(target_dba.enabled is False):
                    target_dba.start()
                target_dba.associate_onu(o)
        return o
    def __repr__(self):
        return "DBA Assigner #{}".\
        format(self.node.id)
|
|
from __future__ import division, absolute_import, print_function
import sys
import warnings
from numpy.testing import *
from numpy.compat import asbytes, asunicode
import numpy as np
# This is the structure of the table used for plain objects:
#
# +-+-+-+
# |x|y|z|
# +-+-+-+
# Structure of a plain array description:
Pdescr = [
('x', 'i4', (2,)),
('y', 'f8', (2, 2)),
('z', 'u1')]
# A plain list of tuples with values for testing:
PbufferT = [
# x y z
([3, 2], [[6., 4.], [6., 4.]], 8),
([4, 3], [[7., 5.], [7., 5.]], 9),
]
# This is the structure of the table used for nested objects (DON'T PANIC!):
#
# +-+---------------------------------+-----+----------+-+-+
# |x|Info |color|info |y|z|
# | +-----+--+----------------+----+--+ +----+-----+ | |
# | |value|y2|Info2 |name|z2| |Name|Value| | |
# | | | +----+-----+--+--+ | | | | | | |
# | | | |name|value|y3|z3| | | | | | | |
# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+
#
# The corresponding nested array description:
# The corresponding nested array description (see diagram above):
Ndescr = [
    ('x', 'i4', (2,)),
    ('Info', [
        ('value', 'c16'),
        ('y2', 'f8'),
        ('Info2', [
            ('name', 'S2'),
            ('value', 'c16', (2,)),
            ('y3', 'f8', (2,)),
            ('z3', 'u4', (2,))]),
        ('name', 'S2'),
        ('z2', 'b1')]),
    ('color', 'S2'),
    ('info', [
        ('Name', 'U8'),
        ('Value', 'c16')]),
    ('y', 'f8', (2, 2)),
    ('z', 'u1')]
# Two rows of values matching Ndescr, for testing:
NbufferT = [
    # x Info color info y z
    # value y2 Info2 name z2 Name Value
    # name value y3 z3
    ([3, 2], (6j, 6., (asbytes('nn'), [6j, 4j], [6., 4.], [1, 2]), asbytes('NN'), True), asbytes('cc'), (asunicode('NN'), 6j), [[6., 4.], [6., 4.]], 8),
    ([4, 3], (7j, 7., (asbytes('oo'), [7j, 5j], [7., 5.], [2, 1]), asbytes('OO'), False), asbytes('dd'), (asunicode('OO'), 7j), [[7., 5.], [7., 5.]], 9),
    ]
byteorder = {'little':'<', 'big':'>'}[sys.byteorder]
def normalize_descr(descr):
    """Normalize a dtype description, adding the platform byte order.

    Multi-byte types without an explicit order prefix get the platform
    prefix ('<' or '>'); one-byte and S/V/b types get '|'. Shape entries
    are kept only when they describe more than one element. Nested list
    descriptions are normalized recursively.

    Raises ValueError when a field's type is neither str nor list.
    """
    out = []
    for item in descr:
        dtype = item[1]
        if isinstance(dtype, str):
            if dtype[0] not in ['|', '<', '>']:
                onebyte = dtype[1:] == "1"
                if onebyte or dtype[0] in ['S', 'V', 'b']:
                    dtype = "|" + dtype
                else:
                    dtype = byteorder + dtype
            if len(item) > 2 and np.prod(item[2]) > 1:
                nitem = (item[0], dtype, item[2])
            else:
                nitem = (item[0], dtype)
            out.append(nitem)
        elif isinstance(item[1], list):
            # recurse into nested record descriptions
            out.append((item[0], normalize_descr(item[1])))
        else:
            # BUGFIX: the error message previously reported type(item) (always
            # a tuple) instead of the offending field type.
            raise ValueError("Expected a str or list and got %s" %
                             type(item[1]))
    return out
############################################################
# Creation tests
############################################################
class create_zeros(object):
    """Mixin checking np.zeros with the heterogeneous dtype ``_descr``."""
    def test_zeros0D(self):
        """A 0-d zeroed array keeps the normalized descr and void fields."""
        arr = np.zeros((), dtype=self._descr)
        self.assertTrue(normalize_descr(self._descr) == arr.dtype.descr)
        self.assertTrue(arr.dtype.fields['x'][0].name[:4] == 'void')
        self.assertTrue(arr.dtype.fields['x'][0].char == 'V')
        self.assertTrue(arr.dtype.fields['x'][0].type == np.void)
        # Spot-check that the data really is zeroed.
        assert_equal(arr['z'], np.zeros((), dtype='u1'))
    def test_zerosSD(self):
        """A 1-d zeroed array keeps the normalized descr and void fields."""
        arr = np.zeros((2,), dtype=self._descr)
        self.assertTrue(normalize_descr(self._descr) == arr.dtype.descr)
        self.assertTrue(arr.dtype['y'].name[:4] == 'void')
        self.assertTrue(arr.dtype['y'].char == 'V')
        self.assertTrue(arr.dtype['y'].type == np.void)
        # Spot-check that the data really is zeroed.
        assert_equal(arr['z'], np.zeros((2,), dtype='u1'))
    def test_zerosMD(self):
        """A 2-d zeroed array keeps the normalized descr and scalar field."""
        arr = np.zeros((2, 3), dtype=self._descr)
        self.assertTrue(normalize_descr(self._descr) == arr.dtype.descr)
        self.assertTrue(arr.dtype['z'].name == 'uint8')
        self.assertTrue(arr.dtype['z'].char == 'B')
        self.assertTrue(arr.dtype['z'].type == np.uint8)
        # Spot-check that the data really is zeroed.
        assert_equal(arr['z'], np.zeros((2, 3), dtype='u1'))
class test_create_zeros_plain(create_zeros, TestCase):
    """Check the creation of heterogeneous arrays zero-valued (plain)"""
    # Plain (non-nested) description exercised by the create_zeros mixin.
    _descr = Pdescr
class test_create_zeros_nested(create_zeros, TestCase):
    """Check the creation of heterogeneous arrays zero-valued (nested)"""
    # Nested (record-in-record) description exercised by the mixin.
    _descr = Ndescr
class create_values(object):
    """Mixin checking np.array construction from tuple buffers.

    Subclasses provide ``_descr``, ``_buffer`` and ``multiple_rows``.
    """
    def test_tuple(self):
        """Building straight from the buffer yields a 0-d or 1-d array."""
        arr = np.array(self._buffer, dtype=self._descr)
        self.assertTrue(normalize_descr(self._descr) == arr.dtype.descr)
        expected = (2,) if self.multiple_rows else ()
        self.assertTrue(arr.shape == expected)
    def test_list_of_tuple(self):
        """Wrapping the buffer in a list adds one leading dimension."""
        arr = np.array([self._buffer], dtype=self._descr)
        self.assertTrue(normalize_descr(self._descr) == arr.dtype.descr)
        expected = (1, 2) if self.multiple_rows else (1,)
        self.assertTrue(arr.shape == expected)
    def test_list_of_list_of_tuple(self):
        """Wrapping the buffer twice adds two leading dimensions."""
        arr = np.array([[self._buffer]], dtype=self._descr)
        self.assertTrue(normalize_descr(self._descr) == arr.dtype.descr)
        expected = (1, 1, 2) if self.multiple_rows else (1, 1)
        self.assertTrue(arr.shape == expected)
class test_create_values_plain_single(create_values, TestCase):
    """Check the creation of heterogeneous arrays (plain, single row)"""
    _descr = Pdescr
    # Single row: _buffer is one tuple, so created arrays are 0-d.
    multiple_rows = 0
    _buffer = PbufferT[0]
class test_create_values_plain_multiple(create_values, TestCase):
    """Check the creation of heterogeneous arrays (plain, multiple rows)"""
    _descr = Pdescr
    # Multiple rows: _buffer is the full two-row list.
    multiple_rows = 1
    _buffer = PbufferT
class test_create_values_nested_single(create_values, TestCase):
    """Check the creation of heterogeneous arrays (nested, single row)"""
    _descr = Ndescr
    # Single row of the nested buffer, so created arrays are 0-d.
    multiple_rows = 0
    _buffer = NbufferT[0]
class test_create_values_nested_multiple(create_values, TestCase):
    """Check the creation of heterogeneous arrays (nested, multiple rows)"""
    _descr = Ndescr
    # Multiple rows: _buffer is the full two-row nested list.
    multiple_rows = 1
    _buffer = NbufferT
############################################################
# Reading tests
############################################################
class read_values_plain(object):
    """Mixin checking field reads from plain heterogeneous arrays."""
    @dec.skipif('__pypy__' in sys.builtin_module_names)
    def test_access_fields(self):
        """Each top-level field compares equal to its source column."""
        arr = np.array(self._buffer, dtype=self._descr)
        if self.multiple_rows:
            self.assertTrue(len(arr) == 2)
            rows = self._buffer
            assert_equal(arr['x'],
                         np.array([rows[0][0], rows[1][0]], dtype='i4'))
            assert_equal(arr['y'],
                         np.array([rows[0][1], rows[1][1]], dtype='f8'))
            assert_equal(arr['z'],
                         np.array([rows[0][2], rows[1][2]], dtype='u1'))
        else:
            self.assertTrue(arr.shape == ())
            assert_equal(arr['x'], np.array(self._buffer[0], dtype='i4'))
            assert_equal(arr['y'], np.array(self._buffer[1], dtype='f8'))
            assert_equal(arr['z'], np.array(self._buffer[2], dtype='u1'))
class test_read_values_plain_single(read_values_plain, TestCase):
    """Check the creation of heterogeneous arrays (plain, single row)"""
    _descr = Pdescr
    # Single row: reads come from a 0-d array.
    multiple_rows = 0
    _buffer = PbufferT[0]
class test_read_values_plain_multiple(read_values_plain, TestCase):
    """Check the values of heterogeneous arrays (plain, multiple rows)"""
    _descr = Pdescr
    # Multiple rows: reads come from a length-2 array.
    multiple_rows = 1
    _buffer = PbufferT
class read_values_nested(object):
    """Check the reading of values in heterogeneous arrays (nested)"""
    # Subclasses provide _descr (Ndescr), _buffer (NbufferT or one of its
    # rows) and multiple_rows.  Buffer tuple positions mirror the layout
    # documented next to NbufferT: 0=x, 1=Info, 2=color, 3=info, 4=y, 5=z.
    def test_access_top_fields(self):
        """Check reading the top fields of a nested array"""
        h = np.array(self._buffer, dtype=self._descr)
        if not self.multiple_rows:
            self.assertTrue(h.shape == ())
            assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
            assert_equal(h['y'], np.array(self._buffer[4], dtype='f8'))
            assert_equal(h['z'], np.array(self._buffer[5], dtype='u1'))
        else:
            self.assertTrue(len(h) == 2)
            assert_equal(h['x'], np.array([self._buffer[0][0],
                                           self._buffer[1][0]], dtype='i4'))
            assert_equal(h['y'], np.array([self._buffer[0][4],
                                           self._buffer[1][4]], dtype='f8'))
            assert_equal(h['z'], np.array([self._buffer[0][5],
                                           self._buffer[1][5]], dtype='u1'))
    def test_nested1_acessors(self):
        """Check reading the nested fields of a nested array (1st level)"""
        h = np.array(self._buffer, dtype=self._descr)
        if not self.multiple_rows:
            assert_equal(h['Info']['value'],
                         np.array(self._buffer[1][0], dtype='c16'))
            assert_equal(h['Info']['y2'],
                         np.array(self._buffer[1][1], dtype='f8'))
            assert_equal(h['info']['Name'],
                         np.array(self._buffer[3][0], dtype='U2'))
            assert_equal(h['info']['Value'],
                         np.array(self._buffer[3][1], dtype='c16'))
        else:
            assert_equal(h['Info']['value'],
                         np.array([self._buffer[0][1][0],
                                   self._buffer[1][1][0]],
                                  dtype='c16'))
            assert_equal(h['Info']['y2'],
                         np.array([self._buffer[0][1][1],
                                   self._buffer[1][1][1]],
                                  dtype='f8'))
            assert_equal(h['info']['Name'],
                         np.array([self._buffer[0][3][0],
                                   self._buffer[1][3][0]],
                                  dtype='U2'))
            assert_equal(h['info']['Value'],
                         np.array([self._buffer[0][3][1],
                                   self._buffer[1][3][1]],
                                  dtype='c16'))
    def test_nested2_acessors(self):
        """Check reading the nested fields of a nested array (2nd level)"""
        h = np.array(self._buffer, dtype=self._descr)
        if not self.multiple_rows:
            assert_equal(h['Info']['Info2']['value'],
                         np.array(self._buffer[1][2][1], dtype='c16'))
            assert_equal(h['Info']['Info2']['z3'],
                         np.array(self._buffer[1][2][3], dtype='u4'))
        else:
            assert_equal(h['Info']['Info2']['value'],
                         np.array([self._buffer[0][1][2][1],
                                   self._buffer[1][1][2][1]],
                                  dtype='c16'))
            assert_equal(h['Info']['Info2']['z3'],
                         np.array([self._buffer[0][1][2][3],
                                   self._buffer[1][1][2][3]],
                                  dtype='u4'))
    def test_nested1_descriptor(self):
        """Check access nested descriptors of a nested array (1st level)"""
        h = np.array(self._buffer, dtype=self._descr)
        self.assertTrue(h.dtype['Info']['value'].name == 'complex128')
        self.assertTrue(h.dtype['Info']['y2'].name == 'float64')
        # The unicode dtype is reported as 'str...' on Python 3 and
        # 'unicode...' on Python 2.
        if sys.version_info[0] >= 3:
            self.assertTrue(h.dtype['info']['Name'].name == 'str256')
        else:
            self.assertTrue(h.dtype['info']['Name'].name == 'unicode256')
        self.assertTrue(h.dtype['info']['Value'].name == 'complex128')
    def test_nested2_descriptor(self):
        """Check access nested descriptors of a nested array (2nd level)"""
        h = np.array(self._buffer, dtype=self._descr)
        # Sub-arrays inside a record show up as void dtypes of their
        # total bit width.
        self.assertTrue(h.dtype['Info']['Info2']['value'].name == 'void256')
        self.assertTrue(h.dtype['Info']['Info2']['z3'].name == 'void64')
class test_read_values_nested_single(read_values_nested, TestCase):
    """Check the values of heterogeneous arrays (nested, single row)"""
    _descr = Ndescr
    # Single row: reads come from a 0-d array.
    multiple_rows = False
    _buffer = NbufferT[0]
class test_read_values_nested_multiple(read_values_nested, TestCase):
    """Check the values of heterogeneous arrays (nested, multiple rows)"""
    _descr = Ndescr
    # Multiple rows: reads come from a length-2 array.
    multiple_rows = True
    _buffer = NbufferT
class TestEmptyField(TestCase):
    """Reinterpreting an array with a zero-sized field keeps shapes sane."""
    def test_assign(self):
        data = np.arange(10, dtype=np.float32)
        # View the 10 floats as 5 records of (0 ints, 2 floats) each.
        data.dtype = [("int", "<0i4"), ("float", "<2f4")]
        assert_(data['int'].shape == (5, 0))
        assert_(data['float'].shape == (5, 2))
class TestCommonType(TestCase):
    # Promotion rules of np.find_common_type: array dtypes win over scalar
    # dtypes of the same kind; a scalar of a higher kind forces promotion.
    # NOTE(review): np.find_common_type was deprecated in NumPy 1.25 and
    # removed in 2.0 -- these tests only run against older NumPy releases.
    def test_scalar_loses1(self):
        # Same kind (float): the widest array dtype wins, scalar ignored.
        res = np.find_common_type(['f4', 'f4', 'i2'], ['f8'])
        assert_(res == 'f4')
    def test_scalar_loses2(self):
        # Integer scalar does not upcast float arrays.
        res = np.find_common_type(['f4', 'f4'], ['i8'])
        assert_(res == 'f4')
    def test_scalar_wins(self):
        # Complex scalar outranks float/int arrays: promote to complex.
        res = np.find_common_type(['f4', 'f4', 'i2'], ['c8'])
        assert_(res == 'c8')
    def test_scalar_wins2(self):
        # Float scalar over mixed u4/i4 arrays promotes to f8.
        res = np.find_common_type(['u4', 'i4', 'i4'], ['f4'])
        assert_(res == 'f8')
    def test_scalar_wins3(self):  # doesn't go up to 'f16' on purpose
        res = np.find_common_type(['u8', 'i8', 'i8'], ['f8'])
        assert_(res == 'f8')
class TestMultipleFields(TestCase):
    """Indexing a structured array with several field names at once."""
    def setUp(self):
        self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
    def _bad_call(self):
        # Indexing with a bare tuple of field names (not a list) is invalid.
        with warnings.catch_warnings(record=True):
            warnings.filterwarnings('always', '', DeprecationWarning)
            return self.ary['f0', 'f1']
    def test_no_tuple(self):
        self.assertRaises(IndexError, self._bad_call)
    def test_return(self):
        selected = self.ary[['f0', 'f2']].tolist()
        assert_(selected == [(1, 3), (5, 7)])
if __name__ == "__main__":
    # Delegate to numpy.testing's module-level test runner when executed
    # as a script.
    run_module_suite()
|
|
from django.core.management.base import BaseCommand, CommandError
from django.utils.dateparse import parse_datetime, parse_date
from django.utils.text import slugify
from django.db.utils import IntegrityError, DataError
from core.models import Person, Bill, Organization, Action, ActionRelatedEntity, \
Post, Membership, Sponsorship, LegislativeSession, \
Document, BillDocument, Event, EventParticipant, EventDocument, \
EventAgendaItem, AgendaItemBill
from councilmatic.settings import HEADSHOT_PATH
from councilmatic.settings_local import DEBUG
from councilmatic.city_config import OCD_JURISDICTION_ID, OCD_CITY_COUNCIL_ID, TIMEZONE
import requests
import json
import pytz
import os.path
import re
# Timezone used to localize datetimes pulled from the OCD API.
app_timezone = pytz.timezone(TIMEZONE)
# Root URL of the Open Civic Data API instance this command scrapes.
base_url = 'http://ocd.datamade.us'
class Command(BaseCommand):
    help = 'loads in data from the open civic data API'
    def add_arguments(self, parser):
        # --endpoint restricts the run to one loader (organizations, bills,
        # people or events); --delete wipes existing rows before importing.
        parser.add_argument('--endpoint', help="a specific endpoint to load data from")
        parser.add_argument('--delete',
                            action='store_true',
                            default=False,
                            help='delete data before loading')
def handle(self, *args, **options):
if options['endpoint'] == 'organizations':
print("\nLOADING ORGANIZATIONS\n")
self.grab_organizations(delete=options['delete'])
print("\ndone!")
elif options['endpoint'] == 'bills':
print("\nLOADING BILLS\n")
self.grab_bills(delete=options['delete'])
print("\ndone!")
elif options['endpoint'] == 'people':
print("\nLOADING PEOPLE\n")
self.grab_people(delete=options['delete'])
print("\ndone!")
elif options['endpoint'] == 'events':
print("\nLOADING EVENTS\n")
self.grab_events(delete=options['delete'])
else:
print("\nLOADING EVERYTHING\n")
self.grab_organizations(delete=options['delete'])
self.grab_bills(delete=options['delete'])
self.grab_people(delete=options['delete'])
self.grab_events(delete=options['delete'])
print("\ndone!")
def grab_organizations(self, delete=False):
if delete:
print("deleting all organizations and posts")
Organization.objects.all().delete()
Post.objects.all().delete()
# first grab ny city council root
self.grab_organization_posts(OCD_CITY_COUNCIL_ID)
# this grabs a paginated listing of all organizations within a jurisdiction
orgs_url = base_url+'/organizations/?jurisdiction_id='+OCD_JURISDICTION_ID
r = requests.get(orgs_url)
page_json = json.loads(r.text)
for i in range(page_json['meta']['max_page']):
r = requests.get(orgs_url+'&page='+str(i+1))
page_json = json.loads(r.text)
for result in page_json['results']:
self.grab_organization_posts(result['id'])
def grab_organization_posts(self, organization_ocd_id, parent=None):
url = base_url+'/'+organization_ocd_id
r = requests.get(url)
page_json = json.loads(r.text)
source_url = ''
if page_json['sources']:
source_url = page_json['sources'][0]['url']
if parent:
try:
org_obj, created = Organization.objects.get_or_create(
ocd_id=organization_ocd_id,
name=page_json['name'],
classification=page_json['classification'],
source_url=source_url,
slug=slugify(page_json['name']),
parent=parent,
)
except IntegrityError:
ocd_id_part = organization_ocd_id.rsplit('-',1)[1]
org_obj, created = Organization.objects.get_or_create(
ocd_id=organization_ocd_id,
name=page_json['name'],
classification=page_json['classification'],
source_url=source_url,
slug=slugify(page_json['name'])+ocd_id_part,
parent=parent,
)
else:
try:
org_obj, created = Organization.objects.get_or_create(
ocd_id=organization_ocd_id,
name=page_json['name'],
classification=page_json['classification'],
source_url=source_url,
slug=slugify(page_json['name']),
)
except IntegrityError:
ocd_id_part = organization_ocd_id.rsplit('-',1)[1]
org_obj, created = Organization.objects.get_or_create(
ocd_id=organization_ocd_id,
name=page_json['name'],
classification=page_json['classification'],
source_url=source_url,
slug=slugify(page_json['name'])+ocd_id_part,
)
if created and DEBUG:
print(' adding organization: %s' % org_obj.name )
for post_json in page_json['posts']:
obj, created = Post.objects.get_or_create(
ocd_id = post_json['id'],
label = post_json['label'],
role = post_json['role'],
organization = org_obj,
)
if created and DEBUG:
print(' adding post: %s %s' %(post_json['role'], post_json['label']))
for child in page_json['children']:
self.grab_organization_posts(child['id'], org_obj)
def grab_people(self, delete=False):
# find people associated with existing organizations & bills
if delete:
print("deleting all people, memberships, sponsorships")
Person.objects.all().delete()
Membership.objects.all().delete()
Sponsorship.objects.all().delete()
# grab people associated with all existing organizations
orgs = Organization.objects.exclude(name='Democratic').exclude(name='Republican').all()
for organization in orgs:
url = base_url+'/'+organization.ocd_id
r = requests.get(url)
page_json = json.loads(r.text)
for membership_json in page_json['memberships']:
self.grab_person_memberships(membership_json['person']['id'])
# add sponsorships for all existing bills
bills = Bill.objects.all()
for bill in bills:
url = base_url+'/'+bill.ocd_id
r = requests.get(url)
page_json = json.loads(r.text)
for sponsor_json in page_json['sponsorships']:
sponsor=Person.objects.filter(ocd_id=sponsor_json['entity_id']).first()
if sponsor:
obj, created = Sponsorship.objects.get_or_create(
bill=bill,
person=sponsor,
classification=sponsor_json['classification'],
is_primary=sponsor_json['primary'],
)
if created and DEBUG:
print(' adding sponsorship: %s %s' % (obj.bill, obj.person))
def grab_bills(self, delete=False):
# this grabs all bills & associated actions, documents from city council
# organizations need to be populated before bills & actions are populated
if delete:
print("deleting all bills, actions, legislative sessions")
Bill.objects.all().delete()
Action.objects.all().delete()
ActionRelatedEntity.objects.all().delete()
LegislativeSession.objects.all().delete()
Document.objects.all().delete()
BillDocument.objects.all().delete()
# get legislative sessions
self.grab_legislative_sessions()
bill_url = base_url+'/bills/?from_organization_id='+OCD_CITY_COUNCIL_ID
r = requests.get(bill_url)
page_json = json.loads(r.text)
for i in range(page_json['meta']['max_page']):
r = requests.get(bill_url+'&page='+str(i+1))
page_json = json.loads(r.text)
for result in page_json['results']:
self.grab_bill(result['id'])
def grab_legislative_sessions(self):
# TO-DO: update this when ocd data is fixed
obj, created = LegislativeSession.objects.get_or_create(
identifier='2014',
jurisdiction_ocd_id=OCD_JURISDICTION_ID,
name='2014 Legislative Session',
)
if created and DEBUG:
print('adding legislative session: %s' %obj.name)
def grab_bill(self, bill_id):
bill_url = base_url+'/'+bill_id
r = requests.get(bill_url)
page_json = json.loads(r.text)
from_org = Organization.objects.filter(ocd_id=page_json['from_organization']['id']).first()
legislative_session = LegislativeSession.objects.filter(identifier=page_json['legislative_session']['identifier']).first()
# THIS IF STATEMENT IS A NYC CUSTOMIZATION
# only load bills that have a local classification
# this is a temporary fix - remove when outdated bills are no longer in ocd
if 'local_classification' in page_json['extras']:
bill_type = page_json['extras']['local_classification']
# skip bill types that will get loaded as events
if bill_type not in ['Town Hall Meeting', 'Oversight', 'Tour', 'Local Laws 2015']:
if 'full_text' in page_json['extras']:
full_text = page_json['extras']['full_text']
else:
full_text = ''
if page_json['abstracts']:
abstract = page_json['abstracts'][0]['abstract']
else:
abstract = ''
try:
obj, created = Bill.objects.get_or_create(
ocd_id=bill_id,
description=page_json['title'],
identifier=page_json['identifier'],
classification=page_json['classification'][0],
date_created=page_json['created_at'],
date_updated=page_json['updated_at'],
source_url=page_json['sources'][0]['url'],
source_note=page_json['sources'][0]['note'],
from_organization=from_org,
full_text=full_text,
abstract=abstract,
legislative_session=legislative_session,
bill_type=bill_type,
slug=slugify(page_json['identifier']),
)
except IntegrityError:
ocd_id_part = bill_id.rsplit('-',1)[1]
obj, created = Bill.objects.get_or_create(
ocd_id=bill_id,
description=page_json['title'],
identifier=page_json['identifier'],
classification=page_json['classification'][0],
date_created=page_json['created_at'],
date_updated=page_json['updated_at'],
source_url=page_json['sources'][0]['url'],
source_note=page_json['sources'][0]['note'],
from_organization=from_org,
full_text=full_text,
abstract=abstract,
legislative_session=legislative_session,
bill_type=bill_type,
slug=slugify(page_json['identifier'])+ocd_id_part,
)
if created and DEBUG:
print(' adding %s' % bill_id)
action_order = 0
for action_json in page_json['actions']:
self.load_action(action_json, obj, action_order)
action_order+=1
# update bill last_action_date with most recent action
obj.last_action_date = obj.get_last_action_date()
obj.save()
# update documents associated with a bill
for document_json in page_json['documents']:
self.load_bill_document(document_json, obj)
# if bills don't have local classification, don't load them
else:
print("*"*60)
print("SKIPPING BILL %s" %bill_id)
print("bill data looks incomplete")
print("*"*60)
def load_action(self, action_json, bill, action_order):
org = Organization.objects.filter(ocd_id=action_json['organization']['id']).first()
classification = ""
if action_json['classification']:
classification = action_json['classification'][0]
action_obj, created = Action.objects.get_or_create(
date=action_json['date'],
classification=classification,
description=action_json['description'],
organization=org,
bill=bill,
order=action_order,
)
if created and DEBUG:
print(' adding action: %s' %action_json['description'])
for related_entity_json in action_json['related_entities']:
obj, created = ActionRelatedEntity.objects.get_or_create(
action = action_obj,
entity_type = related_entity_json['entity_type'],
entity_name = related_entity_json['name'],
organization_ocd_id = related_entity_json['organization_id'] if related_entity_json['organization_id'] else "",
person_ocd_id = related_entity_json['person_id'] if related_entity_json['person_id'] else ""
)
if created and DEBUG:
print(' adding related entity: %s' %obj.entity_name)
def load_bill_document(self, document_json, bill):
doc_obj, created = Document.objects.get_or_create(
note=document_json['note'],
url=document_json['links'][0]['url'],
)
obj, created = BillDocument.objects.get_or_create(
bill = bill,
document = doc_obj,
)
if created:
print(' adding document: %s' % doc_obj.note)
def grab_person_memberships(self, person_id):
# this grabs a person and all their memberships
url = base_url+'/'+person_id
r = requests.get(url)
page_json = json.loads(r.text)
# TO DO: handle updating people & memberships
person = Person.objects.filter(ocd_id=person_id).first()
if not person:
# save image to disk
if page_json['image']:
print(" saving image for %s" % page_json['name'])
r = requests.get(page_json['image'])
if r.status_code == 200:
with open((HEADSHOT_PATH + page_json['id'] + ".jpg"), 'wb') as f:
for chunk in r.iter_content(1000):
f.write(chunk)
email = ''
for contact_detail in page_json['contact_details']:
if contact_detail['type'] == 'email':
if contact_detail['value'] != 'mailto:':
email = contact_detail['value']
website_url = ''
for link in page_json['links']:
if link['note'] == "web site":
website_url = link['url']
try:
person = Person.objects.create(
ocd_id=page_json['id'],
name=page_json['name'],
headshot=page_json['image'],
source_url=page_json['sources'][0]['url'],
source_note=page_json['sources'][0]['note'],
website_url = website_url,
email = email,
slug=slugify(page_json['name']),
)
except IntegrityError:
ocd_id_part=page_json['id'].rsplit('-',1)[1]
person = Person.objects.create(
ocd_id=page_json['id'],
name=page_json['name'],
headshot=page_json['image'],
source_url=page_json['sources'][0]['url'],
source_note=page_json['sources'][0]['note'],
website_url = '',
email = email,
slug=slugify(page_json['name'])+ocd_id_part,
)
if DEBUG:
print(' adding person: %s' % person.name)
for membership_json in page_json['memberships']:
if membership_json['post']:
post = Post.objects.filter(ocd_id=membership_json['post']['id']).first()
else:
post = None
organization = Organization.objects.filter(ocd_id=membership_json['organization']['id']).first()
# adding republican or democratic party when encountered
# b/c parties are not added when organizations are loaded (in grab_organizations)
if not organization and membership_json['organization']['name'] in ['Republican', 'Democratic']:
self.grab_organization_posts(membership_json['organization']['id'])
organization = Organization.objects.filter(ocd_id=membership_json['organization']['id']).first()
try:
end_date = parse_date(membership_json['end_date'])
except:
end_date = None
try:
start_date = parse_date(membership_json['start_date'])
except:
start_date = None
obj, created = Membership.objects.get_or_create(
organization = organization,
person = person,
post = post,
label = membership_json['label'],
role = membership_json['role'],
start_date = start_date,
end_date = end_date
)
if created and DEBUG:
print(' adding membership: %s' % obj.role)
def grab_events(self, delete=False):
if delete:
print("deleting all events")
Event.objects.all().delete()
EventParticipant.objects.all().delete()
EventDocument.objects.all().delete()
EventAgendaItem.objects.all().delete()
AgendaItemBill.objects.all().delete()
# this grabs a paginated listing of all events within a jurisdiction
events_url = base_url+'/events/?jurisdiction_id='+OCD_JURISDICTION_ID
r = requests.get(events_url)
page_json = json.loads(r.text)
for i in range(page_json['meta']['max_page']):
r = requests.get(events_url+'&page='+str(i+1))
page_json = json.loads(r.text)
for result in page_json['results']:
self.grab_event(result['id'])
    def grab_event(self, event_ocd_id):
        # Mirror one event locally: the Event row itself plus its
        # participants, documents and agenda items.  Any DataError during
        # the whole load skips the event (see TEMPORARY note below).
        event_url = base_url+'/'+event_ocd_id
        r = requests.get(event_url)
        if r.status_code == 200:
            page_json = json.loads(r.text)
            try:
                # The slug is the Legistar numeric ID embedded in the
                # source URL's query string.
                legistar_id = re.findall('ID=(.*)&GUID', page_json['sources'][0]['url'])[0]
                event_obj, created = Event.objects.get_or_create(
                        ocd_id = event_ocd_id,
                        name = page_json['name'],
                        description = page_json['description'],
                        classification = page_json['classification'],
                        start_time = parse_datetime(page_json['start_time']),
                        end_time = parse_datetime(page_json['end_time']) if page_json['end_time'] else None,
                        all_day = page_json['all_day'],
                        status = page_json['status'],
                        location_name = page_json['location']['name'],
                        location_url = page_json['location']['url'],
                        source_url = page_json['sources'][0]['url'],
                        source_note = page_json['sources'][0]['note'],
                        slug = legistar_id,
                    )
                if created and DEBUG:
                    print(' adding event: %s' % event_ocd_id)
                for participant_json in page_json['participants']:
                    obj, created = EventParticipant.objects.get_or_create(
                            event = event_obj,
                            note = participant_json['note'],
                            entity_name = participant_json['entity_name'],
                            entity_type = participant_json['entity_type']
                        )
                    if created and DEBUG:
                        print(' adding participant: %s' %obj.entity_name)
                for document_json in page_json['documents']:
                    self.load_eventdocument(document_json, event_obj)
                for agenda_item_json in page_json['agenda']:
                    self.load_eventagendaitem(agenda_item_json, event_obj)
            # TEMPORARY - skip events w/ names that are too long
            # this will be fixed when names no longer have descriptions appended
            except DataError:
                print("*"*60)
                print("SKIPPING EVENT %s" %event_ocd_id)
                print("error loading event data")
                print("*"*60)
        else:
            # Non-200 response: skip the event rather than crash the run.
            print("*"*60)
            print("SKIPPING EVENT %s" %event_ocd_id)
            print("cannot retrieve event data")
            print("*"*60)
def load_eventagendaitem(self, agenda_item_json, event):
agendaitem_obj, created = EventAgendaItem.objects.get_or_create(
event = event,
order = agenda_item_json['order'],
description = agenda_item_json['description'],
)
if created and DEBUG:
print(' adding agenda item: %s' %agendaitem_obj.order)
related_entity_json = agenda_item_json['related_entities'][0]
clean_bill_identifier = re.sub(' 0', ' ', related_entity_json['entity_name'])
related_bill = Bill.objects.filter(identifier = clean_bill_identifier).first()
if related_bill:
obj, created = AgendaItemBill.objects.get_or_create(
agenda_item = agendaitem_obj,
bill = related_bill,
note = related_entity_json['note'],
)
if created and DEBUG:
print(' adding related bill: %s' %related_bill.identifier)
def load_eventdocument(self, document_json, event):
doc_obj, created = Document.objects.get_or_create(
note=document_json['note'],
url=document_json['links'][0]['url'],
)
obj, created = EventDocument.objects.get_or_create(
event = event,
document = doc_obj,
)
if created and DEBUG:
print(' adding document: %s' % doc_obj.note)
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The security groups extension."""
import contextlib
from xml.dom import minidom
from oslo_serialization import jsonutils
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.i18n import _
from nova.network.security_group import openstack_driver
from nova.openstack.common import log as logging
from nova.virt import netutils
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'security_groups')
softauth = extensions.soft_extension_authorizer('compute', 'security_groups')
def _authorize_context(req):
    """Authorize the request's nova context and return it."""
    ctxt = req.environ['nova.context']
    authorize(ctxt)
    return ctxt
@contextlib.contextmanager
def translate_exceptions():
    """Translate nova exceptions to http exceptions."""
    try:
        yield
    except exception.Invalid as exp:
        raise exc.HTTPBadRequest(explanation=exp.format_message())
    except (exception.SecurityGroupNotFound,
            exception.InstanceNotFound) as exp:
        # Both "not found" variants map to the same HTTP status.
        raise exc.HTTPNotFound(explanation=exp.format_message())
    except exception.SecurityGroupLimitExceeded as exp:
        raise exc.HTTPForbidden(explanation=exp.format_message())
    except exception.NoUniqueMatch as exp:
        raise exc.HTTPConflict(explanation=exp.format_message())
class SecurityGroupControllerBase(object):
    """Base class for Security Group controllers."""
    def __init__(self):
        # Security-group driver (nova-network or neutron) plus a compute
        # API bound to that same driver.
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())
        self.compute_api = compute.API(
            security_group_api=self.security_group_api)
    def _format_security_group_rule(self, context, rule, group_rule_data=None):
        """Return a security group rule in desired API response format.
        If group_rule_data is passed in that is used rather than querying
        for it.  Returns None when the rule's source group has vanished
        (see the race-condition note below); callers should skip the rule.
        """
        sg_rule = {}
        sg_rule['id'] = rule['id']
        sg_rule['parent_group_id'] = rule['parent_group_id']
        sg_rule['ip_protocol'] = rule['protocol']
        sg_rule['from_port'] = rule['from_port']
        sg_rule['to_port'] = rule['to_port']
        sg_rule['group'] = {}
        sg_rule['ip_range'] = {}
        if rule['group_id']:
            with translate_exceptions():
                try:
                    source_group = self.security_group_api.get(
                        context, id=rule['group_id'])
                except exception.SecurityGroupNotFound:
                    # NOTE(arosen): There is a possible race condition that can
                    # occur here if two api calls occur concurrently: one that
                    # lists the security groups and another one that deletes a
                    # security group rule that has a group_id before the
                    # group_id is fetched. To handle this if
                    # SecurityGroupNotFound is raised we return None instead
                    # of the rule and the caller should ignore the rule.
                    LOG.debug("Security Group ID %s does not exist",
                              rule['group_id'])
                    return
            sg_rule['group'] = {'name': source_group.get('name'),
                                'tenant_id': source_group.get('project_id')}
        elif group_rule_data:
            sg_rule['group'] = group_rule_data
        else:
            sg_rule['ip_range'] = {'cidr': rule['cidr']}
        return sg_rule
    def _format_security_group(self, context, group):
        # Serialize one security group dict for the API response; rules
        # whose source group vanished mid-request (formatter returned
        # None) are dropped.
        security_group = {}
        security_group['id'] = group['id']
        security_group['description'] = group['description']
        security_group['name'] = group['name']
        security_group['tenant_id'] = group['project_id']
        security_group['rules'] = []
        for rule in group['rules']:
            formatted_rule = self._format_security_group_rule(context, rule)
            if formatted_rule:
                security_group['rules'] += [formatted_rule]
        return security_group
    def _from_body(self, body, key):
        # Extract a required top-level key from the request body,
        # translating a missing body or key into HTTP 400.
        if not body:
            raise exc.HTTPBadRequest(
                explanation=_("The request body can't be empty"))
        value = body.get(key, None)
        if value is None:
            raise exc.HTTPBadRequest(
                explanation=_("Missing parameter %s") % key)
        return value
class SecurityGroupController(SecurityGroupControllerBase):
    """The Security group API controller for the OpenStack API."""
    def _get_validated_group(self, context, id):
        # Shared lookup used by show/delete/update: validate the id, then
        # fetch the group with nova exceptions mapped for the API layer.
        group_id = self.security_group_api.validate_id(id)
        return self.security_group_api.get(context, None, group_id,
                                           map_exception=True)
    def show(self, req, id):
        """Return data about the given security group."""
        context = _authorize_context(req)
        with translate_exceptions():
            security_group = self._get_validated_group(context, id)
        return {'security_group': self._format_security_group(context,
                                                              security_group)}
    def delete(self, req, id):
        """Delete a security group."""
        context = _authorize_context(req)
        with translate_exceptions():
            security_group = self._get_validated_group(context, id)
            self.security_group_api.destroy(context, security_group)
        return webob.Response(status_int=202)
    def index(self, req):
        """Returns a list of security groups."""
        context = _authorize_context(req)
        search_opts = {}
        search_opts.update(req.GET)
        with translate_exceptions():
            raw_groups = self.security_group_api.list(
                context,
                project=context.project_id,
                search_opts=search_opts)
        limited = common.limited(raw_groups, req)
        formatted = [self._format_security_group(context, group)
                     for group in limited]
        formatted.sort(key=lambda g: (g['tenant_id'], g['name']))
        return {'security_groups': formatted}
    def create(self, req, body):
        """Creates a new security group."""
        context = _authorize_context(req)
        group_data = self._from_body(body, 'security_group')
        name = group_data.get('name', None)
        description = group_data.get('description', None)
        with translate_exceptions():
            self.security_group_api.validate_property(name, 'name', None)
            self.security_group_api.validate_property(description,
                                                      'description', None)
            group_ref = self.security_group_api.create_security_group(
                context, name, description)
        return {'security_group': self._format_security_group(context,
                                                              group_ref)}
    def update(self, req, id, body):
        """Update a security group."""
        context = _authorize_context(req)
        with translate_exceptions():
            security_group = self._get_validated_group(context, id)
        new_data = self._from_body(body, 'security_group')
        name = new_data.get('name', None)
        description = new_data.get('description', None)
        with translate_exceptions():
            self.security_group_api.validate_property(name, 'name', None)
            self.security_group_api.validate_property(description,
                                                      'description', None)
            group_ref = self.security_group_api.update_security_group(
                context, security_group, name, description)
        return {'security_group': self._format_security_group(context,
                                                              group_ref)}
class SecurityGroupRulesController(SecurityGroupControllerBase):
    """Controller for the os-security-group-rules resource."""

    def create(self, req, body):
        """Create an ingress rule on an existing security group.

        The rule may target either a CIDR or a source security group
        (group_id); group_id takes precedence — see _rule_args_to_dict.
        """
        context = _authorize_context(req)

        sg_rule = self._from_body(body, 'security_group_rule')

        with translate_exceptions():
            parent_group_id = self.security_group_api.validate_id(
                sg_rule.get('parent_group_id', None))
            security_group = self.security_group_api.get(context, None,
                                                         parent_group_id,
                                                         map_exception=True)
        try:
            # Build the rule dict from the request arguments.
            new_rule = self._rule_args_to_dict(context,
                to_port=sg_rule.get('to_port'),
                from_port=sg_rule.get('from_port'),
                ip_protocol=sg_rule.get('ip_protocol'),
                cidr=sg_rule.get('cidr'),
                group_id=sg_rule.get('group_id'))
        except exception.SecurityGroupNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except Exception as exp:
            # Any other construction failure is treated as bad user input.
            raise exc.HTTPBadRequest(explanation=six.text_type(exp))

        if new_rule is None:
            msg = _("Not enough parameters to build a valid rule.")
            raise exc.HTTPBadRequest(explanation=msg)

        new_rule['parent_group_id'] = security_group['id']

        if 'cidr' in new_rule:
            net, prefixlen = netutils.get_net_and_prefixlen(new_rule['cidr'])
            # A /0 prefix is only acceptable for the all-zero wildcard
            # address (0.0.0.0 or ::); reject it for anything else.
            if net not in ('0.0.0.0', '::') and prefixlen == '0':
                msg = _("Bad prefix for network in cidr %s") % new_rule['cidr']
                raise exc.HTTPBadRequest(explanation=msg)

        group_rule_data = None
        with translate_exceptions():
            if sg_rule.get('group_id'):
                source_group = self.security_group_api.get(
                    context, id=sg_rule['group_id'])
                group_rule_data = {'name': source_group.get('name'),
                                   'tenant_id': source_group.get('project_id')}

            security_group_rule = (
                self.security_group_api.create_security_group_rule(
                    context, security_group, new_rule))

        formatted_rule = self._format_security_group_rule(context,
                                                          security_group_rule,
                                                          group_rule_data)
        return {"security_group_rule": formatted_rule}

    def _rule_args_to_dict(self, context, to_port=None, from_port=None,
                           ip_protocol=None, cidr=None, group_id=None):
        """Build a backend rule dict; group_id takes precedence over cidr."""
        if group_id is not None:
            group_id = self.security_group_api.validate_id(group_id)
            # check if groupId exists
            self.security_group_api.get(context, id=group_id)
            return self.security_group_api.new_group_ingress_rule(
                group_id, ip_protocol, from_port, to_port)
        else:
            cidr = self.security_group_api.parse_cidr(cidr)
            return self.security_group_api.new_cidr_ingress_rule(
                cidr, ip_protocol, from_port, to_port)

    def delete(self, req, id):
        """Delete a security group rule by its id."""
        context = _authorize_context(req)

        with translate_exceptions():
            id = self.security_group_api.validate_id(id)
            rule = self.security_group_api.get_rule(context, id)
            group_id = rule['parent_group_id']
            # The parent group is looked up so removal is applied to it.
            security_group = self.security_group_api.get(context, None,
                                                         group_id,
                                                         map_exception=True)
            self.security_group_api.remove_rules(context, security_group,
                                                 [rule['id']])

        return webob.Response(status_int=202)
class ServerSecurityGroupController(SecurityGroupControllerBase):
    """Read-only view of the security groups attached to one server."""

    def index(self, req, server_id):
        """Returns a list of security groups for the given instance."""
        context = _authorize_context(req)

        self.security_group_api.ensure_default(context)

        with translate_exceptions():
            instance = self.compute_api.get(context, server_id)
            groups = self.security_group_api.get_instance_security_groups(
                context, instance['uuid'], True)

        formatted = [self._format_security_group(context, group)
                     for group in groups]
        formatted.sort(key=lambda group: (group['tenant_id'], group['name']))
        return {'security_groups': formatted}
class SecurityGroupActionController(wsgi.Controller):
    """Adds the addSecurityGroup/removeSecurityGroup server actions."""

    def __init__(self, *args, **kwargs):
        super(SecurityGroupActionController, self).__init__(*args, **kwargs)
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())
        self.compute_api = compute.API(
            security_group_api=self.security_group_api)

    def _parse(self, body, action):
        """Extract and validate the security group name for *action*.

        Raises HTTPBadRequest on a malformed body, a missing group entry,
        or a missing/empty/non-string name.
        """
        try:
            body = body[action]
            group_name = body['name']
        except TypeError:
            msg = _("Missing parameter dict")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except KeyError:
            msg = _("Security group not specified")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        # Reject non-string names up front: calling .strip() on e.g. an int
        # would otherwise escape as an AttributeError (HTTP 500) instead of
        # a clean 400 for bad user input.
        if not isinstance(group_name, six.string_types):
            msg = _("Security group name cannot be empty")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        if not group_name or group_name.strip() == '':
            msg = _("Security group name cannot be empty")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        return group_name

    def _invoke(self, method, context, id, group_name):
        """Look up the instance and apply *method* with the group name."""
        with translate_exceptions():
            instance = self.compute_api.get(context, id)
            method(context, instance, group_name)

        return webob.Response(status_int=202)

    @wsgi.action('addSecurityGroup')
    def _addSecurityGroup(self, req, id, body):
        context = req.environ['nova.context']
        authorize(context)

        group_name = self._parse(body, 'addSecurityGroup')
        return self._invoke(self.security_group_api.add_to_instance,
                            context, id, group_name)

    @wsgi.action('removeSecurityGroup')
    def _removeSecurityGroup(self, req, id, body):
        context = req.environ['nova.context']
        authorize(context)

        group_name = self._parse(body, 'removeSecurityGroup')
        return self._invoke(self.security_group_api.remove_from_instance,
                            context, id, group_name)
class SecurityGroupsOutputController(wsgi.Controller):
    """Response extension that injects 'security_groups' into server views."""

    def __init__(self, *args, **kwargs):
        super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())

    def _extend_servers(self, req, servers):
        # TODO(arosen) this function should be refactored to reduce duplicate
        # code and use get_instance_security_groups instead of get_db_instance.
        if not servers:
            # Idiomatic emptiness test (was `if not len(servers)`).
            return
        key = "security_groups"
        context = _authorize_context(req)
        if not openstack_driver.is_neutron_security_groups():
            # nova-network case: groups are already loaded on the cached
            # DB instance attached to the request.
            for server in servers:
                instance = req.get_db_instance(server['id'])
                groups = instance.get(key)
                if groups:
                    server[key] = [{"name": group["name"]} for group in groups]
        else:
            # If method is a POST we get the security groups intended for an
            # instance from the request. The reason for this is if using
            # neutron security groups the requested security groups for the
            # instance are not in the db and have not been sent to neutron yet.
            if req.method != 'POST':
                sg_instance_bindings = (
                    self.security_group_api
                    .get_instances_security_groups_bindings(context,
                                                            servers))
                for server in servers:
                    groups = sg_instance_bindings.get(server['id'])
                    if groups:
                        server[key] = groups

            # In this section of code len(servers) == 1 as you can only POST
            # one server in an API request.
            else:
                try:
                    # try converting to json
                    req_obj = jsonutils.loads(req.body)
                    # Add security group to server, if no security group was in
                    # request add default since that is the group it is part of
                    servers[0][key] = req_obj['server'].get(
                        key, [{'name': 'default'}])
                except ValueError:
                    # Body was not JSON; fall back to parsing it as XML.
                    root = minidom.parseString(req.body)
                    sg_root = root.getElementsByTagName(key)
                    groups = []
                    if sg_root:
                        security_groups = sg_root[0].getElementsByTagName(
                            'security_group')
                        for security_group in security_groups:
                            groups.append(
                                {'name': security_group.getAttribute('name')})
                    if not groups:
                        groups = [{'name': 'default'}]
                    servers[0][key] = groups

    def _show(self, req, resp_obj):
        """Extend a single-server response when soft-authorized."""
        if not softauth(req.environ['nova.context']):
            return
        if 'server' in resp_obj.obj:
            self._extend_servers(req, [resp_obj.obj['server']])

    @wsgi.extends
    def show(self, req, resp_obj, id):
        return self._show(req, resp_obj)

    @wsgi.extends
    def create(self, req, resp_obj, body):
        return self._show(req, resp_obj)

    @wsgi.extends
    def detail(self, req, resp_obj):
        if not softauth(req.environ['nova.context']):
            return
        self._extend_servers(req, list(resp_obj.obj['servers']))
class Security_groups(extensions.ExtensionDescriptor):
    """Security group support."""
    name = "SecurityGroups"
    alias = "os-security-groups"
    namespace = "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1"
    updated = "2013-05-28T00:00:00Z"

    def get_controller_extensions(self):
        """Attach the action and output controllers to /servers."""
        return [
            extensions.ControllerExtension(
                self, 'servers', SecurityGroupActionController()),
            extensions.ControllerExtension(
                self, 'servers', SecurityGroupsOutputController()),
        ]

    def get_resources(self):
        """Expose the group, rule, and per-server group collections."""
        return [
            extensions.ResourceExtension(
                'os-security-groups',
                controller=SecurityGroupController()),
            extensions.ResourceExtension(
                'os-security-group-rules',
                controller=SecurityGroupRulesController()),
            extensions.ResourceExtension(
                'os-security-groups',
                controller=ServerSecurityGroupController(),
                parent=dict(member_name='server',
                            collection_name='servers')),
        ]
|
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import struct
import socket
import logging
import netaddr
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_2_parser
from ryu.lib import hub
from ryu.lib import mac
# Module-level logger for this ofctl helper module.
LOG = logging.getLogger('ryu.lib.ofctl_v1_2')

# Seconds to wait for a stats reply before giving up
# (used by send_stats_request below).
DEFAULT_TIMEOUT = 1.0
def str_to_int(src):
    """Coerce *src* to int, accepting decimal or 0x-prefixed hex strings.

    Non-string inputs are returned unchanged.
    """
    if not isinstance(src, str):
        return src
    base = 16 if src[:2] in ('0x', '0X') else 10
    return int(src, base)
def to_action(dp, dic):
    """Build a single OFPAction from its JSON dictionary form *dic*.

    Returns None when dic['type'] is not a recognized action type.
    """
    ofp = dp.ofproto
    parser = dp.ofproto_parser

    # Lazy constructors keyed by action type; each lambda reads its own
    # arguments from *dic* only when the type actually matches.
    builders = {
        'OUTPUT': lambda: parser.OFPActionOutput(
            int(dic.get('port', ofp.OFPP_ANY)),
            int(dic.get('max_len', ofp.OFPCML_MAX))),
        'COPY_TTL_OUT': lambda: parser.OFPActionCopyTtlOut(),
        'COPY_TTL_IN': lambda: parser.OFPActionCopyTtlIn(),
        'SET_MPLS_TTL': lambda: parser.OFPActionSetMplsTtl(
            int(dic.get('mpls_ttl'))),
        'DEC_MPLS_TTL': lambda: parser.OFPActionDecMplsTtl(),
        'PUSH_VLAN': lambda: parser.OFPActionPushVlan(
            int(dic.get('ethertype'))),
        'POP_VLAN': lambda: parser.OFPActionPopVlan(),
        'PUSH_MPLS': lambda: parser.OFPActionPushMpls(
            int(dic.get('ethertype'))),
        'POP_MPLS': lambda: parser.OFPActionPopMpls(
            int(dic.get('ethertype'))),
        'SET_QUEUE': lambda: parser.OFPActionSetQueue(
            int(dic.get('queue_id'))),
        'GROUP': lambda: parser.OFPActionGroup(
            int(dic.get('group_id'))),
        'SET_NW_TTL': lambda: parser.OFPActionSetNwTtl(
            int(dic.get('nw_ttl'))),
        'DEC_NW_TTL': lambda: parser.OFPActionDecNwTtl(),
        'SET_FIELD': lambda: parser.OFPActionSetField(
            **{dic.get('field'): dic.get('value')}),
    }

    builder = builders.get(dic.get('type'))
    return builder() if builder is not None else None
def to_actions(dp, acts):
    """Convert a JSON action list into OpenFlow 1.2 instructions.

    Entries recognized by to_action() are collected into a single
    APPLY_ACTIONS instruction; GOTO_TABLE and WRITE_METADATA entries become
    their own instructions.  Unknown types are logged and dropped.
    """
    inst = []
    actions = []
    ofp = dp.ofproto
    parser = dp.ofproto_parser
    for a in acts:
        action = to_action(dp, a)
        if action is not None:
            actions.append(action)
        else:
            action_type = a.get('type')
            if action_type == 'GOTO_TABLE':
                table_id = int(a.get('table_id'))
                inst.append(parser.OFPInstructionGotoTable(table_id))
            elif action_type == 'WRITE_METADATA':
                metadata = str_to_int(a.get('metadata'))
                # A missing mask defaults to "write all 64 bits".
                metadata_mask = (str_to_int(a['metadata_mask'])
                                 if 'metadata_mask' in a
                                 else parser.UINT64_MAX)
                inst.append(
                    parser.OFPInstructionWriteMetadata(
                        metadata, metadata_mask))
            else:
                LOG.debug('Unknown action type: %s' % action_type)
    # NOTE(review): an APPLY_ACTIONS instruction is appended even when the
    # 'actions' list is empty — preserved as-is; confirm this is intended.
    inst.append(parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
                                             actions))
    return inst
def action_to_str(act):
    """Render an OFPAction as the 'TYPE[:arg]' string used by ofctl output."""
    # Action types with no argument.
    bare = {
        ofproto_v1_2.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT',
        ofproto_v1_2.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN',
        ofproto_v1_2.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL',
        ofproto_v1_2.OFPAT_POP_VLAN: 'POP_VLAN',
        ofproto_v1_2.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL',
    }
    # Action types rendered as 'PREFIX:<attribute>'.
    with_arg = {
        ofproto_v1_2.OFPAT_OUTPUT:
            ('OUTPUT:', lambda a: a.port),
        ofproto_v1_2.OFPAT_SET_MPLS_TTL:
            ('SET_MPLS_TTL:', lambda a: a.mpls_ttl),
        ofproto_v1_2.OFPAT_PUSH_VLAN:
            ('PUSH_VLAN:', lambda a: a.ethertype),
        ofproto_v1_2.OFPAT_PUSH_MPLS:
            ('PUSH_MPLS:', lambda a: a.ethertype),
        ofproto_v1_2.OFPAT_POP_MPLS:
            ('POP_MPLS:', lambda a: a.ethertype),
        ofproto_v1_2.OFPAT_SET_QUEUE:
            ('SET_QUEUE:', lambda a: a.queue_id),
        ofproto_v1_2.OFPAT_GROUP:
            ('GROUP:', lambda a: a.group_id),
        ofproto_v1_2.OFPAT_SET_NW_TTL:
            ('SET_NW_TTL:', lambda a: a.nw_ttl),
    }

    action_type = act.cls_action_type
    if action_type in bare:
        return bare[action_type]
    if action_type in with_arg:
        prefix, getter = with_arg[action_type]
        return prefix + str(getter(act))
    if action_type == ofproto_v1_2.OFPAT_SET_FIELD:
        return 'SET_FIELD: {%s:%s}' % (act.key, act.value)
    return 'UNKNOWN'
def actions_to_str(instructions):
    """Render a list of OF 1.2 instructions as ofctl action strings.

    APPLY/WRITE-actions instructions are flattened to one string per
    contained action; GOTO_TABLE and WRITE_METADATA get single summary
    strings; any other instruction type is silently skipped.
    """
    actions = []
    for instruction in instructions:
        if isinstance(instruction,
                      ofproto_v1_2_parser.OFPInstructionActions):
            for a in instruction.actions:
                actions.append(action_to_str(a))

        elif isinstance(instruction,
                        ofproto_v1_2_parser.OFPInstructionGotoTable):
            buf = 'GOTO_TABLE:' + str(instruction.table_id)
            actions.append(buf)

        elif isinstance(instruction,
                        ofproto_v1_2_parser.OFPInstructionWriteMetadata):
            # The '/mask' part is omitted when the mask is falsy (0/None).
            buf = ('WRITE_METADATA:0x%x/0x%x' % (instruction.metadata,
                                                 instruction.metadata_mask)
                   if instruction.metadata_mask
                   else 'WRITE_METADATA:0x%x' % instruction.metadata)
            actions.append(buf)
        else:
            continue
    return actions
def to_match(dp, attrs):
    """Build an OFPMatch from a dict of ofctl match-field names.

    Accepts both OXM field names and the legacy dl_*/nw_*/tp_* aliases;
    values are run through per-field converters before being passed to
    OFPMatch as keyword arguments.

    NOTE(review): mutates *attrs* in place for the ARP rewrite below.
    """
    # Per-field value converters (ints, MAC/IP masks, VLAN encoding, ...).
    convert = {'in_port': int,
               'in_phy_port': int,
               'metadata': to_match_metadata,
               'dl_dst': to_match_eth,
               'dl_src': to_match_eth,
               'eth_dst': to_match_eth,
               'eth_src': to_match_eth,
               'dl_type': int,
               'eth_type': int,
               'dl_vlan': to_match_vid,
               'vlan_vid': to_match_vid,
               'vlan_pcp': int,
               'ip_dscp': int,
               'ip_ecn': int,
               'nw_proto': int,
               'ip_proto': int,
               'nw_src': to_match_ip,
               'nw_dst': to_match_ip,
               'ipv4_src': to_match_ip,
               'ipv4_dst': to_match_ip,
               'tp_src': int,
               'tp_dst': int,
               'tcp_src': int,
               'tcp_dst': int,
               'udp_src': int,
               'udp_dst': int,
               'sctp_src': int,
               'sctp_dst': int,
               'icmpv4_type': int,
               'icmpv4_code': int,
               'arp_op': int,
               'arp_spa': to_match_ip,
               'arp_tpa': to_match_ip,
               'arp_sha': to_match_eth,
               'arp_tha': to_match_eth,
               'ipv6_src': to_match_ip,
               'ipv6_dst': to_match_ip,
               'ipv6_flabel': int,
               'icmpv6_type': int,
               'icmpv6_code': int,
               'ipv6_nd_target': to_match_ip,
               'ipv6_nd_sll': to_match_eth,
               'ipv6_nd_tll': to_match_eth,
               'mpls_label': int,
               'mpls_tc': int}

    # Legacy field aliases translated to their OXM names.
    keys = {'dl_dst': 'eth_dst',
            'dl_src': 'eth_src',
            'dl_type': 'eth_type',
            'dl_vlan': 'vlan_vid',
            'nw_src': 'ipv4_src',
            'nw_dst': 'ipv4_dst',
            'nw_proto': 'ip_proto'}

    # For ARP matches the legacy nw_src/nw_dst fields actually refer to the
    # ARP sender/target protocol addresses, so rewrite them up front.
    if attrs.get('dl_type') == ether.ETH_TYPE_ARP or \
            attrs.get('eth_type') == ether.ETH_TYPE_ARP:
        if 'nw_src' in attrs and 'arp_spa' not in attrs:
            attrs['arp_spa'] = attrs['nw_src']
            del attrs['nw_src']
        if 'nw_dst' in attrs and 'arp_tpa' not in attrs:
            attrs['arp_tpa'] = attrs['nw_dst']
            del attrs['nw_dst']

    kwargs = {}
    for key, value in attrs.items():
        if key in convert:
            value = convert[key](value)
        if key in keys:
            # For old field name
            key = keys[key]
        if key == 'tp_src' or key == 'tp_dst':
            # TCP/UDP port
            conv = {inet.IPPROTO_TCP: {'tp_src': 'tcp_src',
                                       'tp_dst': 'tcp_dst'},
                    inet.IPPROTO_UDP: {'tp_src': 'udp_src',
                                       'tp_dst': 'udp_dst'}}
            ip_proto = attrs.get('nw_proto', attrs.get('ip_proto', 0))
            # NOTE(review): a tp_src/tp_dst with an ip_proto other than
            # TCP/UDP raises KeyError here — confirm callers pre-validate.
            key = conv[ip_proto][key]
            kwargs[key] = value
        else:
            # others
            kwargs[key] = value

    return dp.ofproto_parser.OFPMatch(**kwargs)
def to_match_eth(value):
    """Parse an Ethernet address match value.

    'addr/mask' strings become an (addr, mask) tuple; a plain address is
    returned unchanged.
    """
    if '/' not in value:
        return value
    parts = value.split('/')
    return parts[0], parts[1]
def to_match_ip(value):
    """Parse an IP match value.

    CIDR 'addr/prefix' strings become an (addr, netmask) string pair; a
    plain address is returned unchanged.
    """
    if '/' not in value:
        return value
    network = netaddr.ip.IPNetwork(value)
    return str(network.ip), str(network.netmask)
def to_match_vid(value):
    """Convert a VLAN id match value to its OXM form.

    Decimal ints and decimal strings are VLAN tag numbers and get the
    OFPVID_PRESENT (0x1000) bit ORed in automatically; hexadecimal strings
    are raw oxm_values used as-is; 'value/mask' strings become an
    (oxm_value, mask) tuple with both parts parsed via int(x, 0).
    """
    if isinstance(value, int):
        # described as decimal int value
        return value | ofproto_v1_2.OFPVID_PRESENT

    if '/' in value:
        parts = value.split('/')
        return int(parts[0], 0), int(parts[1], 0)

    if value.isdigit():
        # described as decimal string value
        return int(value, 10) | ofproto_v1_2.OFPVID_PRESENT

    return int(value, 0)
def to_match_metadata(value):
    """Parse a metadata match: 'value/mask' -> int pair, else a single int.

    Both halves accept decimal or 0x-prefixed hex via str_to_int.
    """
    parts = value.split('/')
    if len(parts) == 1:
        return str_to_int(value)
    return str_to_int(parts[0]), str_to_int(parts[1])
def match_to_str(ofmatch):
    """Convert an OFPMatch back into a dict keyed by legacy field names."""
    # OXM names translated back to the old-style aliases ofctl reports.
    legacy = {'eth_src': 'dl_src',
              'eth_dst': 'dl_dst',
              'eth_type': 'dl_type',
              'vlan_vid': 'dl_vlan',
              'ipv4_src': 'nw_src',
              'ipv4_dst': 'nw_dst',
              'ip_proto': 'nw_proto',
              'tcp_src': 'tp_src',
              'tcp_dst': 'tp_dst',
              'udp_src': 'tp_src',
              'udp_dst': 'tp_dst'}

    match = {}
    oxm_fields = ofmatch.to_jsondict()['OFPMatch']['oxm_fields']
    for field in oxm_fields:
        tlv = field['OXMTlv']
        key = tlv['field']
        if key in legacy:
            key = legacy[key]
        mask = tlv['mask']
        value = tlv['value']

        if key == 'dl_vlan':
            value = match_vid_to_str(value, mask)
        elif key == 'metadata':
            value = match_metadata_to_str(value, mask)
        elif mask is not None:
            value = value + '/' + mask

        # setdefault: when tcp_* and udp_* both alias the same tp_* key,
        # the first field seen wins — matching the original behavior.
        match.setdefault(key, value)

    return match
def match_metadata_to_str(value, mask):
    """Format metadata as 'value/mask', omitting a falsy (0/None) mask."""
    if mask:
        return '%d/%d' % (value, mask)
    return '%d' % value
def match_vid_to_str(value, mask):
    """Render a VLAN id oxm value back to its ofctl string form.

    With a mask: '0xVVVV/0xMMMM'.  Without one, a value carrying the
    OFPVID_PRESENT bit is shown as the bare decimal VLAN number; otherwise
    the raw value is shown in hex.
    """
    if mask is not None:
        return '0x%04x/0x%04x' % (value, mask)
    if value & ofproto_v1_2.OFPVID_PRESENT:
        return str(value & ~ofproto_v1_2.OFPVID_PRESENT)
    return '0x%04x' % value
def send_stats_request(dp, stats, waiters, msgs):
    """Send a stats request and block until a reply arrives or times out.

    Registers (event, msgs) in *waiters* under (dp.id, xid); presumably the
    datapath's receive path appends replies to *msgs* and sets the event
    when the reply is complete — confirm against the caller's dispatcher.
    """
    dp.set_xid(stats)
    waiters_per_dp = waiters.setdefault(dp.id, {})
    lock = hub.Event()
    waiters_per_dp[stats.xid] = (lock, msgs)
    dp.send_msg(stats)

    lock.wait(timeout=DEFAULT_TIMEOUT)
    if not lock.is_set():
        # Timed out: remove our registration so stale xid entries do not
        # accumulate in the waiters table.
        del waiters_per_dp[stats.xid]
def get_desc_stats(dp, waiters):
    """Query the switch description; returns {dpid: desc_dict}."""
    req = dp.ofproto_parser.OFPDescStatsRequest(dp)
    msgs = []
    send_stats_request(dp, req, waiters, msgs)

    body = {}
    for msg in msgs:
        reply = msg.body
        body = {'mfr_desc': reply.mfr_desc,
                'hw_desc': reply.hw_desc,
                'sw_desc': reply.sw_desc,
                'serial_num': reply.serial_num,
                'dp_desc': reply.dp_desc}
    return {str(dp.id): body}
def get_queue_stats(dp, waiters):
    """Query per-queue tx counters; returns {dpid: [queue_dict, ...]}."""
    ofp = dp.ofproto
    req = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, ofp.OFPP_ANY,
                                                 ofp.OFPQ_ALL)
    msgs = []
    send_stats_request(dp, req, waiters, msgs)

    queues = []
    for msg in msgs:
        for stat in msg.body:
            queues.append({'port_no': stat.port_no,
                           'queue_id': stat.queue_id,
                           'tx_bytes': stat.tx_bytes,
                           'tx_errors': stat.tx_errors,
                           'tx_packets': stat.tx_packets})
    return {str(dp.id): queues}
def get_flow_stats(dp, waiters, flow=None):
    """Query flow entries matching the JSON-style filter dict *flow*.

    Returns {dpid: [flow_dict, ...]} with actions and match rendered as
    ofctl strings.  Missing filter fields default to "match everything".
    """
    # Avoid the mutable-default-argument anti-pattern ({} would be shared
    # across calls); None/empty both mean "no filter", so behavior for
    # existing callers is unchanged.
    flow = flow or {}
    table_id = int(flow.get('table_id', dp.ofproto.OFPTT_ALL))
    out_port = int(flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = int(flow.get('out_group', dp.ofproto.OFPG_ANY))
    cookie = int(flow.get('cookie', 0))
    cookie_mask = int(flow.get('cookie_mask', 0))
    match = to_match(dp, flow.get('match', {}))

    stats = dp.ofproto_parser.OFPFlowStatsRequest(
        dp, table_id, out_port, out_group, cookie, cookie_mask, match)

    msgs = []
    send_stats_request(dp, stats, waiters, msgs)

    flows = []
    for msg in msgs:
        for stats in msg.body:
            actions = actions_to_str(stats.instructions)
            match = match_to_str(stats.match)
            s = {'priority': stats.priority,
                 'cookie': stats.cookie,
                 'idle_timeout': stats.idle_timeout,
                 'hard_timeout': stats.hard_timeout,
                 'actions': actions,
                 'match': match,
                 'byte_count': stats.byte_count,
                 'duration_sec': stats.duration_sec,
                 'duration_nsec': stats.duration_nsec,
                 'packet_count': stats.packet_count,
                 'table_id': stats.table_id,
                 'length': stats.length}
            flows.append(s)
    flows = {str(dp.id): flows}
    return flows
def get_port_stats(dp, waiters):
    """Query per-port counters; returns {dpid: [port_dict, ...]}."""
    req = dp.ofproto_parser.OFPPortStatsRequest(
        dp, dp.ofproto.OFPP_ANY, 0)
    msgs = []
    send_stats_request(dp, req, waiters, msgs)

    ports = []
    for msg in msgs:
        for stat in msg.body:
            ports.append({'port_no': stat.port_no,
                          'rx_packets': stat.rx_packets,
                          'tx_packets': stat.tx_packets,
                          'rx_bytes': stat.rx_bytes,
                          'tx_bytes': stat.tx_bytes,
                          'rx_dropped': stat.rx_dropped,
                          'tx_dropped': stat.tx_dropped,
                          'rx_errors': stat.rx_errors,
                          'tx_errors': stat.tx_errors,
                          'rx_frame_err': stat.rx_frame_err,
                          'rx_over_err': stat.rx_over_err,
                          'rx_crc_err': stat.rx_crc_err,
                          'collisions': stat.collisions})
    return {str(dp.id): ports}
def get_group_stats(dp, waiters):
    """Query group counters; returns {dpid: [group_dict, ...]}."""
    req = dp.ofproto_parser.OFPGroupStatsRequest(
        dp, dp.ofproto.OFPG_ALL, 0)
    msgs = []
    send_stats_request(dp, req, waiters, msgs)

    groups = []
    for msg in msgs:
        for stat in msg.body:
            bucket_stats = [{'packet_count': counter.packet_count,
                             'byte_count': counter.byte_count}
                            for counter in stat.bucket_counters]
            groups.append({'length': stat.length,
                           'group_id': stat.group_id,
                           'ref_count': stat.ref_count,
                           'packet_count': stat.packet_count,
                           'byte_count': stat.byte_count,
                           'bucket_stats': bucket_stats})
    return {str(dp.id): groups}
def get_group_features(dp, waiters):
    """Query group feature support; returns {dpid: [features_dict]}."""
    ofp = dp.ofproto
    type_names = {ofp.OFPGT_ALL: 'ALL',
                  ofp.OFPGT_SELECT: 'SELECT',
                  ofp.OFPGT_INDIRECT: 'INDIRECT',
                  ofp.OFPGT_FF: 'FF'}
    cap_names = {ofp.OFPGFC_SELECT_WEIGHT: 'SELECT_WEIGHT',
                 ofp.OFPGFC_SELECT_LIVENESS: 'SELECT_LIVENESS',
                 ofp.OFPGFC_CHAINING: 'CHAINING',
                 ofp.OFPGFC_CHAINING_CHECKS: 'CHAINING_CHECKS'}
    act_names = {ofp.OFPAT_OUTPUT: 'OUTPUT',
                 ofp.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT',
                 ofp.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN',
                 ofp.OFPAT_SET_MPLS_TTL: 'SET_MPLS_TTL',
                 ofp.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL',
                 ofp.OFPAT_PUSH_VLAN: 'PUSH_VLAN',
                 ofp.OFPAT_POP_VLAN: 'POP_VLAN',
                 ofp.OFPAT_PUSH_MPLS: 'PUSH_MPLS',
                 ofp.OFPAT_POP_MPLS: 'POP_MPLS',
                 ofp.OFPAT_SET_QUEUE: 'SET_QUEUE',
                 ofp.OFPAT_GROUP: 'GROUP',
                 ofp.OFPAT_SET_NW_TTL: 'SET_NW_TTL',
                 ofp.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL',
                 ofp.OFPAT_SET_FIELD: 'SET_FIELD'}

    req = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0)
    msgs = []
    send_stats_request(dp, req, waiters, msgs)

    features = []
    for msg in msgs:
        feature = msg.body
        # Group types and per-type actions are reported as bit positions
        # (1 << constant); capabilities are a plain bitmask.
        types = [name for bit, name in type_names.items()
                 if (1 << bit) & feature.types]
        capabilities = [name for bit, name in cap_names.items()
                        if bit & feature.capabilities]
        max_groups = [{name: feature.max_groups[bit]}
                      for bit, name in type_names.items()]
        actions = [{tname: [aname for abit, aname in act_names.items()
                            if (1 << abit) & feature.actions[tbit]]}
                   for tbit, tname in type_names.items()]
        features.append({'types': types,
                         'capabilities': capabilities,
                         'max_groups': max_groups,
                         'actions': actions})
    return {str(dp.id): features}
def get_group_desc(dp, waiters):
    """Query group definitions; returns {dpid: [group_desc_dict, ...]}."""
    type_names = {dp.ofproto.OFPGT_ALL: 'ALL',
                  dp.ofproto.OFPGT_SELECT: 'SELECT',
                  dp.ofproto.OFPGT_INDIRECT: 'INDIRECT',
                  dp.ofproto.OFPGT_FF: 'FF'}

    req = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0)
    msgs = []
    send_stats_request(dp, req, waiters, msgs)

    descs = []
    for msg in msgs:
        for stat in msg.body:
            buckets = [{'weight': bucket.weight,
                        'watch_port': bucket.watch_port,
                        'watch_group': bucket.watch_group,
                        'actions': [action_to_str(a)
                                    for a in bucket.actions]}
                       for bucket in stat.buckets]
            descs.append({'type': type_names.get(stat.type),
                          'group_id': stat.group_id,
                          'buckets': buckets})
    return {str(dp.id): descs}
def get_port_desc(dp, waiters):
    """Query port descriptions via a features request; {dpid: [port_dict]}."""
    req = dp.ofproto_parser.OFPFeaturesRequest(dp)
    msgs = []
    send_stats_request(dp, req, waiters, msgs)

    descs = []
    for msg in msgs:
        for port in msg.ports.values():
            descs.append({'port_no': port.port_no,
                          'hw_addr': port.hw_addr,
                          'name': port.name,
                          'config': port.config,
                          'state': port.state,
                          'curr': port.curr,
                          'advertised': port.advertised,
                          'supported': port.supported,
                          'peer': port.peer,
                          'curr_speed': port.curr_speed,
                          'max_speed': port.max_speed})
    return {str(dp.id): descs}
def mod_flow_entry(dp, flow, cmd):
    """Build and send an OFPFlowMod from the JSON-style *flow* dict.

    *cmd* is the flow-mod command constant supplied by the caller.
    Missing fields fall back to zero / wildcard defaults.
    """
    cookie = int(flow.get('cookie', 0))
    cookie_mask = int(flow.get('cookie_mask', 0))
    table_id = int(flow.get('table_id', 0))
    idle_timeout = int(flow.get('idle_timeout', 0))
    hard_timeout = int(flow.get('hard_timeout', 0))
    priority = int(flow.get('priority', 0))
    buffer_id = int(flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER))
    out_port = int(flow.get('out_port', dp.ofproto.OFPP_ANY))
    out_group = int(flow.get('out_group', dp.ofproto.OFPG_ANY))
    flags = int(flow.get('flags', 0))
    match = to_match(dp, flow.get('match', {}))
    # 'actions' is converted to an instruction list (see to_actions).
    inst = to_actions(dp, flow.get('actions', []))

    flow_mod = dp.ofproto_parser.OFPFlowMod(
        dp, cookie, cookie_mask, table_id, cmd, idle_timeout,
        hard_timeout, priority, buffer_id, out_port, out_group,
        flags, match, inst)

    dp.send_msg(flow_mod)
def mod_group_entry(dp, group, cmd):
    """Build and send an OFPGroupMod from the JSON-style *group* dict."""
    type_convert = {'ALL': dp.ofproto.OFPGT_ALL,
                    'SELECT': dp.ofproto.OFPGT_SELECT,
                    'INDIRECT': dp.ofproto.OFPGT_INDIRECT,
                    'FF': dp.ofproto.OFPGT_FF}

    type_ = type_convert.get(group.get('type', 'ALL'))
    if type_ is None:
        # NOTE(review): an unknown type is only logged; the group mod is
        # still sent with type_=None — confirm this is intended.
        LOG.debug('Unknown type: %s', group.get('type'))

    group_id = int(group.get('group_id', 0))

    buckets = []
    for bucket in group.get('buckets', []):
        weight = int(bucket.get('weight', 0))
        watch_port = int(bucket.get('watch_port', dp.ofproto.OFPP_ANY))
        watch_group = int(bucket.get('watch_group', dp.ofproto.OFPG_ANY))
        # Only actions recognized by to_action() make it into the bucket;
        # unrecognized entries are silently dropped.
        actions = []
        for dic in bucket.get('actions', []):
            action = to_action(dp, dic)
            if action is not None:
                actions.append(action)
        buckets.append(dp.ofproto_parser.OFPBucket(
            weight, watch_port, watch_group, actions))

    group_mod = dp.ofproto_parser.OFPGroupMod(
        dp, cmd, type_, group_id, buckets)

    dp.send_msg(group_mod)
def mod_port_behavior(dp, port_config):
    """Build and send an OFPPortMod from the JSON-style *port_config* dict."""
    port_no = int(port_config.get('port_no', 0))
    hw_addr = port_config.get('hw_addr')
    config = int(port_config.get('config', 0))
    mask = int(port_config.get('mask', 0))
    # NOTE(review): unlike the fields above there is no default here — a
    # missing 'advertise' key makes int(None) raise TypeError; confirm
    # callers always supply it.
    advertise = int(port_config.get('advertise'))

    port_mod = dp.ofproto_parser.OFPPortMod(
        dp, port_no, hw_addr, config, mask, advertise)

    dp.send_msg(port_mod)
def send_experimenter(dp, exp):
    """Send an OFPExperimenter message built from the JSON-style dict *exp*."""
    experimenter = exp.get('experimenter', 0)
    exp_type = exp.get('exp_type', 0)

    data_type = exp.get('data_type', 'ascii')
    if data_type not in ('ascii', 'base64'):
        # Unknown encodings are only logged; the payload is still sent
        # unmodified, mirroring the original best-effort behavior.
        LOG.debug('Unknown data type: %s', data_type)

    data = exp.get('data', '')
    if data_type == 'base64':
        data = base64.b64decode(data)

    expmsg = dp.ofproto_parser.OFPExperimenter(
        dp, experimenter, exp_type, data)
    dp.send_msg(expmsg)
|
|
import os
import sys
import collections
import glob
import configparser
import argparse
# Package identity; NAME/VERSION/APIREF appear in the startup banner.
NAME = 'LAF-Fabric'
VERSION = '4.8.4'
APIREF = 'http://laf-fabric.readthedocs.org/en/latest/texts/API-reference.html'
# Default work directory (created under the user's home) and the name of
# the main configuration file looked up there and in the current directory.
DEFAULT_DATA_DIR = 'laf-fabric-data'
MAIN_CFG = 'laf-fabric.cfg'
# The recognized data parts of a source (see the _parts table below).
ALL_PARTS = ['monad', 'section', 'lingo']
class Settings:
    ''' Stores configuration information from the main configuration file and the command line.
        Defines an extra function in order to get the items in a section as a dictionary,
        without getting the DEFAULT items as well.
    '''
_myconfig = {
'my_name': NAME,
'version': VERSION,
}
_env_def = {
'my_name': '{my_name}',
'version': '{version}',
'template_dir': '{script_dir}/templates',
'xml_dir': '{script_dir}/xml',
'source': '{source}',
'meta_info': '{data_dir}/{source}/config/main.cfg',
'feature_info': '{data_dir}/{source}/config/ObjectsFeaturesValues.txt',
'feature_plain_info': '{data_dir}/{source}/config/ObjectsFeatures.csv',
'object_info': '{data_dir}/{source}/config/Objects.txt',
'raw_emdros_dir': '{data_dir}/{source}/raw',
'source_data': '{data_dir}/{source}/mql/{source}',
'query_dst_dir': '{data_dir}/{source}/mql',
'result_dir': '{data_dir}/{source}/laf',
'annot_hdr': '{data_dir}/{source}/laf/{source}',
'primary_text': '{data_dir}/{source}/laf/{source}.txt',
'primary_hdr_txt': '{data_dir}/{source}/laf/{source}.txt.hdr',
'resource_hdr_txt': '{data_dir}/{source}/laf/{source}.hdr',
'monad_index': '{data_dir}/{source}/laf/{source}.lst',
'decl_dst_dir': '{data_dir}/{source}/decl',
}
_metaconfig = {
'my_name': NAME,
'version': VERSION,
'ISOcatprefix': 'http://www.isocat.org/datcat/DC-',
'DANSpidprefix': 'http://persistent-identifier/?identifier=',
}
_meta_def = {
'my_name': '{my_name}',
'version': '{version}',
'source': '{source}',
'ISOcatprefix': '{ISOcatprefix}',
'DANSpidprefix': '{DANSpidprefix}',
'danspid_act': '{DANSpidprefix}{danspid_urn}',
'publicationdate': '{publicationdate}',
'danspid_urn': '{danspid_urn}',
'annot_method': 'conversion script {my_name} {version}',
'annot_resp': '{annot_resp}',
'primary': '{primary}',
'trailer': '{trailer}',
'verse_newline': '{verse_newline}',
'annot_space_def': '{annot_space_def}',
'prim_creator': '{prim_creator}',
'res_creator': '{res_creator}',
'prim_title': '{prim_title}',
'res_title': '{res_title}',
'prim_source_title': '{prim_source_title}',
'prim_source_author': '{prim_source_author}',
'prim_source_publisher':'{prim_source_publisher}',
'prim_source_date': '{prim_source_date}',
'prim_source_year': '{prim_source_year}',
'prim_source_place': '{prim_source_place}',
'prim_languages': '{prim_languages}',
'res_funder': '{res_funder}',
'res_respons_link': '{res_respons_link}',
'res_respons_name': '{res_respons_name}',
'res_distributor': '{res_distributor}',
'res_institute': '{res_institute}',
'res_email': '{res_email}',
'res_project_desc': '{res_project_desc}',
'res_sampling_desc': '{res_sampling_desc}',
'res_transduction': '{res_transduction}',
'res_correction': '{res_correction}',
'res_segmentation': '{res_segmentation}',
}
_laf_templates = {
'feature_decl': ('feature_decl.xml', False),
'feature': ('feature.xml', True),
'feature_local': ('feature_local.xml', True),
'feature_val': ('feature_val.xml', True),
'feature_val1': ('feature_sym.xml', True),
'feature_basic': ('feature_basic.xml', True),
'annotation_decl': ('annotation_decl.xml', True),
'annotation_item': ('annotation_item.xml', True),
'annotation_hdr': ('annotation_header.xml', False),
'annotation_label': ('annotation_label.xml', True),
'annotation_ftr': ('annotation_footer.xml', False),
'annotation_elem': ('annotation_element.xml', False),
'feature_elem': ('feature_element.xml', False),
'node_elem': ('node_element.xml', False),
'edge_elem': ('edge_element.xml', False),
'edgenode_elem': ('edgenode_element.xml', False),
'region_hdr': ('region_header.xml', False),
'region_elem': ('region_element.xml', False),
'resource_hdr': ('resource_header.xml', False),
'primary_hdr': ('primary_header.xml', False),
'dependency': ('dependency.xml', True),
}
_laf = {
'resource_header': 'requires',
'annotation_header': 'dependsOn',
}
_xml = {
'xmllint_cmd': 'xmllint --noout --nonet --schema {{schema}} {{xmlfile}}',
'xmllint_cat_env_var': 'XML_CATALOG_FILES',
'xmllint_cat_env_val': '{xml_dir}/xmllint_cat.xml',
'xlink_src': '{xml_dir}/xlink.xsd',
'xlink_dst': '{decl_dst_dir}/xlink.xsd',
'xml_src': '{xml_dir}/xml.xsd',
'xml_dst': '{decl_dst_dir}/xml.xsd',
'xml_isofs_src': '{xml_dir}/xml-isofs.xsd',
'xml_isofs_dst': '{decl_dst_dir}/xml-isofs.xsd',
'graf_annot_src': '{xml_dir}/graf-standoff.xsd',
'graf_annot_dst': '{decl_dst_dir}/graf-standoff.xsd',
'graf_resource_src': '{xml_dir}/graf-resource.xsd',
'graf_resource_dst': '{decl_dst_dir}/graf-resource.xsd',
'graf_document_src': '{xml_dir}/graf-document.xsd',
'graf_document_dst': '{decl_dst_dir}/graf-document.xsd',
'tei_fs_src': '{xml_dir}/isofs_dcr.xsd',
'tei_fs_dst': '{decl_dst_dir}/isofs_dcr.xsd',
'dcr_src': '{xml_dir}/dcr.xsd',
'dcr_dst': '{decl_dst_dir}/dcr.xsd',
}
_parts = {
'monad': {
'raw_text': '{raw_emdros_dir}/monad.txt',
'object_type': 'word',
'make_index': '',
'do_primary': '',
'separate_node_file':'',
},
'section': {
'raw_text': '{raw_emdros_dir}/section.txt',
'use_index': '',
'find_embedding': '',
'hierarchy': 'book chapter verse half_verse',
},
'lingo': {
'raw_text': '{raw_emdros_dir}/lingo.txt',
'separate_node_file':'',
'no_monad_nodes': '',
},
}
_annotation_kind = {
'monad': 'minimal objects&text&',
'section': 'section objects&text&',
'lingo': 'linguistic objects&text&',
'reference': 'linguistic relationships&fsDecl&decl/ft.xml',
'ft': 'linguistic features&fsDecl&decl/ft.xml',
'sft': 'sectional features&fsDecl&decl/sft.xml',
'db': 'database features&fsDecl&decl/db.xml',
}
_annotation_regions = {
'name': 'region',
'word': 'w',
'punct': 'p',
'section': 's',
}
_annotation_skip_object = {
'lingo': 'word',
}
annotation_skip = set(('self',))
_annotation_label = {
'section_label': 'sft',
'lingo_label': 'ft',
'monad_label': 'ft',
'db_label': 'db',
}
_type_mapping = {
'string': 'string',
'ascii': 'string',
'integer': 'numeric&value="0" max="100000000"',
'enum': 'symbol',
'boolean': 'binary',
'reference': 'string',
}
_type_boolean = {
't': 'false',
'f': 'true',
}
laf_switches = set(('comment_local_deps',))
_file_types = collections.OrderedDict((
('f.hdr', '.hdr&xml'),
('f.primary.hdr', '.text.hdr&xml'),
('f.primary', '.txt&text'),
('f_monad.region', '_regions.xml&xml'),
('f_monad', '_monads.xml&xml&db&f_monad.region'),
('f_lingo', '_lingo.xml&xml&db&f_monad'),
('f_section', '_sections.xml&xml&db sft'),
('f_monad.*', '_monads.{{subpart}}.xml&xml&ft&f_monad'),
('f_lingo.*', '_lingo.{{subpart}}.xml&xml&ft&f_lingo'),
))
def flag(self, name): return getattr(self.args, name)
    def __init__(self):
        """Load configuration, parse command-line options, and expand all
        class-level template tables into concrete settings for the chosen
        source.

        Configuration files are looked up in two places: a local main
        config file in the working directory, then a global one under the
        user's home directory.
        """
        print('This is {} {}\n{}'.format(NAME, VERSION, APIREF))
        # '#' introduces inline comments in the config files
        strings = configparser.ConfigParser(inline_comment_prefixes=('#'))
        script_dir = os.path.dirname(os.path.abspath(__file__))
        home_dir = os.path.expanduser('~')
        global_config_dir = "{}/{}".format(home_dir, DEFAULT_DATA_DIR)
        global_config_path = "{}/{}".format(global_config_dir, MAIN_CFG)
        local_config_path = MAIN_CFG
        default_data_dir = global_config_dir
        default_laf_dir = global_config_dir
        config_data_dir = None
        config_laf_dir = None
        config_output_dir = None
        the_config_path = None
        # NOTE(review): the *last* existing file wins here, so the global
        # config overrides the local one -- confirm that precedence is
        # intended (usually local overrides global).
        for config_path in (local_config_path, global_config_path):
            if os.path.exists(config_path): the_config_path = config_path
        if the_config_path != None:
            with open(the_config_path, "r", encoding="utf-8") as f: strings.read_file(f)
            # optional [locations] section overrides the directory defaults
            if 'locations' in strings:
                if 'data_dir' in strings['locations']: config_data_dir = strings['locations']['data_dir']
                if 'laf_dir' in strings['locations']: config_laf_dir = strings['locations']['laf_dir']
                if 'output_dir' in strings['locations']: config_output_dir = strings['locations']['output_dir']
        the_data_dir = config_data_dir or default_data_dir
        the_laf_dir = config_laf_dir or the_data_dir
        the_output_dir = config_output_dir
        # Expand a leading '.' to the current working directory and a
        # leading '~' to the home directory.
        # NOTE(review): cw_dir is not defined in this method -- presumably
        # a module-level global holding os.getcwd(); verify it exists.
        the_data_dir = \
            the_data_dir.replace('.', cw_dir, 1) if the_data_dir.startswith('.') else the_data_dir.replace('~', home_dir, 1) if the_data_dir.startswith('~') else the_data_dir
        the_laf_dir = \
            the_laf_dir.replace('.', cw_dir, 1) if the_laf_dir.startswith('.') else the_laf_dir.replace('~', home_dir, 1) if the_laf_dir.startswith('~') else the_laf_dir
        # NOTE(review): the_output_dir is None when no output_dir was
        # configured; .startswith() below would then raise AttributeError.
        # Confirm output_dir is mandatory in the config file.
        the_output_dir = \
            the_output_dir.replace('.', cw_dir, 1) if the_output_dir.startswith('.') else the_output_dir.replace('~', home_dir, 1) if the_output_dir.startswith('~') else the_output_dir
        # every subdirectory of the data dir is a selectable source
        sources = [os.path.basename(x) for x in glob.glob("{}/*".format(the_data_dir)) if os.path.isdir(x)]
        self._myconfig['data_dir'] = the_data_dir
        self._myconfig['home_dir'] = home_dir
        self._myconfig['script_dir'] = script_dir
        argsparser = argparse.ArgumentParser(description = 'Conversion of Emdros to LAF')
        argsparser.add_argument(
            '--source',
            nargs = 1,
            type = str,
            choices = sources,
            metavar = 'Source',
            help = 'Source selection for conversion',
        )
        argsparser.add_argument(
            '--parts',
            nargs = '*',
            type = str,
            choices = ALL_PARTS + ['all', 'none'],
            metavar = 'Kind',
            help = 'task in conversion process',
        )
        argsparser.add_argument(
            "--raw",
            action = "store_true",
            help = "retrieve raw data from Emdros",
        )
        argsparser.add_argument(
            "--validate",
            action = "store_true",
            help = "validate genrated xml files against their schemas",
        )
        argsparser.add_argument(
            "--fdecls-only",
            dest = 'fdecls_only',
            default = False,
            action = "store_true",
            help = "only generate feature declaration file, nothing else",
        )
        argsparser.add_argument(
            "--limit",
            dest = 'limit',
            type = int,
            metavar = 'Limit',
            help = "limit to the first N monads",
        )
        self.args = argsparser.parse_args()
        # Build the ordered set of parts to run: 'none' (or --fdecls-only)
        # clears everything selected so far, 'all' selects every part.
        # NOTE(review): self.args.parts is None when --parts is omitted
        # (argparse default), which would make this loop raise TypeError.
        self.given_parts = collections.OrderedDict()
        for arg in self.args.parts:
            if arg == 'none' or self.args.fdecls_only:
                for a in ALL_PARTS:
                    if a in self.given_parts: del self.given_parts[a]
            elif arg == 'all':
                for a in ALL_PARTS: self.given_parts[a] = True
            else: self.given_parts[arg] = True
        source = self.args.source[0]
        # Expand every template table with the chosen source and the
        # accumulated configuration values, then read the per-source
        # meta-info file and expand the meta templates as well.
        self.env = dict((e, v.format(source=source, **self._myconfig)) for (e,v) in self._env_def.items())
        with open(self.env['meta_info'], "r", encoding="utf-8") as f: strings.read_file(f)
        self._metaconfig.update(strings['meta'] if 'meta' in strings else {})
        self.meta = dict((e, v.format(source=source, **self._metaconfig).replace('\\n','\n')) for (e,v) in self._meta_def.items())
        self._myconfig.update(self.env)
        self.laf_templates = dict((e, (v[0].format(**self._myconfig), v[1])) for (e,v) in self._laf_templates.items())
        self.laf = dict((e, v.format(**self._myconfig)) for (e,v) in self._laf.items())
        self.xml = dict((e, v.format(**self._myconfig)) for (e,v) in self._xml.items())
        self.parts = {}
        for p in self._parts:
            self.parts[p] = {}
            self.parts[p] = dict((e, v.format(**self._myconfig)) for (e,v) in self._parts[p].items())
        self.annotation_kind = dict((e, v.format(**self._myconfig)) for (e,v) in self._annotation_kind.items())
        self.annotation_regions = dict((e, v.format(**self._myconfig)) for (e,v) in self._annotation_regions.items())
        self.annotation_skip_object = dict((e, v.format(**self._myconfig)) for (e,v) in self._annotation_skip_object.items())
        self.annotation_label = dict((e, v.format(**self._myconfig)) for (e,v) in self._annotation_label.items())
        self.type_mapping = dict((e, v.format(**self._myconfig)) for (e,v) in self._type_mapping.items())
        self.type_boolean = dict((e, v.format(**self._myconfig)) for (e,v) in self._type_boolean.items())
        self.file_types = collections.OrderedDict((e, v.format(**self._myconfig)) for (e,v) in self._file_types.items())
|
|
import pytest, py
from _pytest import python as funcargs
class TestMetafunc:
    """Unit tests for the Metafunc parametrization API in _pytest.python:
    addcall(), parametrize() (incl. indirect parameters) and id generation
    via idmaker().
    """
    def Metafunc(self, func):
        # Build a minimal Metafunc: the unit tests of this class check if
        # things work correctly on the funcarg level, so we don't need a
        # full blown initialization.
        class FixtureInfo:
            name2fixturedefs = None
            def __init__(self, names):
                self.names_closure = names
        names = funcargs.getfuncargnames(func)
        fixtureinfo = FixtureInfo(names)
        return funcargs.Metafunc(func, fixtureinfo, None)

    def test_no_funcargs(self, testdir):
        def function(): pass
        metafunc = self.Metafunc(function)
        assert not metafunc.fixturenames
        repr(metafunc._calls)

    def test_function_basic(self):
        # arg2 has a default, so only arg1 counts as a fixture name
        def func(arg1, arg2="qwe"): pass
        metafunc = self.Metafunc(func)
        assert len(metafunc.fixturenames) == 1
        assert 'arg1' in metafunc.fixturenames
        assert metafunc.function is func
        assert metafunc.cls is None

    def test_addcall_no_args(self):
        def func(arg1): pass
        metafunc = self.Metafunc(func)
        metafunc.addcall()
        assert len(metafunc._calls) == 1
        call = metafunc._calls[0]
        assert call.id == "0"
        assert not hasattr(call, 'param')

    def test_addcall_id(self):
        # ids must be non-None and unique (after str() conversion)
        def func(arg1): pass
        metafunc = self.Metafunc(func)
        pytest.raises(ValueError, "metafunc.addcall(id=None)")
        metafunc.addcall(id=1)
        pytest.raises(ValueError, "metafunc.addcall(id=1)")
        pytest.raises(ValueError, "metafunc.addcall(id='1')")
        metafunc.addcall(id=2)
        assert len(metafunc._calls) == 2
        assert metafunc._calls[0].id == "1"
        assert metafunc._calls[1].id == "2"

    def test_addcall_param(self):
        def func(arg1): pass
        metafunc = self.Metafunc(func)
        class obj: pass
        metafunc.addcall(param=obj)
        metafunc.addcall(param=obj)
        metafunc.addcall(param=1)
        assert len(metafunc._calls) == 3
        assert metafunc._calls[0].getparam("arg1") == obj
        assert metafunc._calls[1].getparam("arg1") == obj
        assert metafunc._calls[2].getparam("arg1") == 1

    def test_addcall_funcargs(self):
        def func(x): pass
        metafunc = self.Metafunc(func)
        class obj: pass
        metafunc.addcall(funcargs={"x": 2})
        metafunc.addcall(funcargs={"x": 3})
        # unknown funcarg names are rejected with a pytest failure
        pytest.raises(pytest.fail.Exception, "metafunc.addcall({'xyz': 0})")
        assert len(metafunc._calls) == 2
        assert metafunc._calls[0].funcargs == {'x': 2}
        assert metafunc._calls[1].funcargs == {'x': 3}
        assert not hasattr(metafunc._calls[1], 'param')

    def test_parametrize_error(self):
        # re-parametrizing the same argname is an error, repeatedly so
        def func(x, y): pass
        metafunc = self.Metafunc(func)
        metafunc.parametrize("x", [1,2])
        pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6]))
        pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6]))
        metafunc.parametrize("y", [1,2])
        pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5,6]))
        pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5,6]))

    def test_parametrize_and_id(self):
        def func(x, y): pass
        metafunc = self.Metafunc(func)
        metafunc.parametrize("x", [1,2], ids=['basic', 'advanced'])
        metafunc.parametrize("y", ["abc", "def"])
        ids = [x.id for x in metafunc._calls]
        assert ids == ["basic-abc", "basic-def", "advanced-abc", "advanced-def"]

    def test_parametrize_with_wrong_number_of_ids(self, testdir):
        def func(x, y): pass
        metafunc = self.Metafunc(func)
        pytest.raises(ValueError, lambda:
            metafunc.parametrize("x", [1,2], ids=['basic']))
        pytest.raises(ValueError, lambda:
            metafunc.parametrize(("x","y"), [("abc", "def"),
                                             ("ghi", "jkl")], ids=["one"]))

    def test_parametrize_with_userobjects(self):
        # objects without a natural string form get argname+index ids
        def func(x, y): pass
        metafunc = self.Metafunc(func)
        class A:
            pass
        metafunc.parametrize("x", [A(), A()])
        metafunc.parametrize("y", list("ab"))
        assert metafunc._calls[0].id == "x0-a"
        assert metafunc._calls[1].id == "x0-b"
        assert metafunc._calls[2].id == "x1-a"
        assert metafunc._calls[3].id == "x1-b"

    @pytest.mark.issue250
    def test_idmaker_autoname(self):
        from _pytest.python import idmaker
        result = idmaker(("a", "b"), [("string", 1.0),
                                      ("st-ring", 2.0)])
        assert result == ["string-1.0", "st-ring-2.0"]
        result = idmaker(("a", "b"), [(object(), 1.0),
                                      (object(), object())])
        assert result == ["a0-1.0", "a1-b1"]
        # unicode mixing, issue250
        result = idmaker((py.builtin._totext("a"), "b"), [({}, '\xc3\xb4')])
        assert result == ['a0-\xc3\xb4']

    def test_idmaker_native_strings(self):
        from _pytest.python import idmaker
        result = idmaker(("a", "b"), [(1.0, -1.1),
                                      (2, -202),
                                      ("three", "three hundred"),
                                      (True, False),
                                      (None, None),
                                      (list("six"), [66, 66]),
                                      (set([7]), set("seven")),
                                      (tuple("eight"), (8, -8, 8))
                                      ])
        assert result == ["1.0--1.1",
                          "2--202",
                          "three-three hundred",
                          "True-False",
                          "None-None",
                          "a5-b5",
                          "a6-b6",
                          "a7-b7"]

    @pytest.mark.issue351
    def test_idmaker_idfn(self):
        # the idfn result is used when it returns a value (not None)
        from _pytest.python import idmaker
        def ids(val):
            if isinstance(val, Exception):
                return repr(val)
        result = idmaker(("a", "b"), [(10.0, IndexError()),
                                      (20, KeyError()),
                                      ("three", [1, 2, 3]),
                                      ], idfn=ids)
        assert result == ["10.0-IndexError()",
                          "20-KeyError()",
                          "three-b2",
                          ]

    @pytest.mark.issue351
    def test_idmaker_idfn_unique_names(self):
        # duplicate idfn results get an index prefix to stay unique
        from _pytest.python import idmaker
        def ids(val):
            return 'a'
        result = idmaker(("a", "b"), [(10.0, IndexError()),
                                      (20, KeyError()),
                                      ("three", [1, 2, 3]),
                                      ], idfn=ids)
        assert result == ["0a-a",
                          "1a-a",
                          "2a-a",
                          ]

    @pytest.mark.issue351
    def test_idmaker_idfn_exception(self):
        # an idfn that raises is ignored; the default ids are used
        from _pytest.python import idmaker
        def ids(val):
            raise Exception("bad code")
        result = idmaker(("a", "b"), [(10.0, IndexError()),
                                      (20, KeyError()),
                                      ("three", [1, 2, 3]),
                                      ], idfn=ids)
        assert result == ["10.0-b0",
                          "20-b1",
                          "three-b2",
                          ]

    def test_addcall_and_parametrize(self):
        def func(x, y): pass
        metafunc = self.Metafunc(func)
        metafunc.addcall({'x': 1})
        metafunc.parametrize('y', [2,3])
        assert len(metafunc._calls) == 2
        assert metafunc._calls[0].funcargs == {'x': 1, 'y': 2}
        assert metafunc._calls[1].funcargs == {'x': 1, 'y': 3}
        assert metafunc._calls[0].id == "0-2"
        assert metafunc._calls[1].id == "0-3"

    def test_parametrize_indirect(self):
        # indirect parametrization fills .params, not .funcargs
        def func(x, y): pass
        metafunc = self.Metafunc(func)
        metafunc.parametrize('x', [1], indirect=True)
        metafunc.parametrize('y', [2,3], indirect=True)
        metafunc.parametrize('unnamed', [1], indirect=True)
        assert len(metafunc._calls) == 2
        assert metafunc._calls[0].funcargs == {}
        assert metafunc._calls[1].funcargs == {}
        assert metafunc._calls[0].params == dict(x=1,y=2, unnamed=1)
        assert metafunc._calls[1].params == dict(x=1,y=3, unnamed=1)

    def test_addcalls_and_parametrize_indirect(self):
        def func(x, y): pass
        metafunc = self.Metafunc(func)
        metafunc.addcall(param="123")
        metafunc.parametrize('x', [1], indirect=True)
        metafunc.parametrize('y', [2,3], indirect=True)
        assert len(metafunc._calls) == 2
        assert metafunc._calls[0].funcargs == {}
        assert metafunc._calls[1].funcargs == {}
        assert metafunc._calls[0].params == dict(x=1,y=2)
        assert metafunc._calls[1].params == dict(x=1,y=3)

    def test_parametrize_functional(self, testdir):
        testdir.makepyfile("""
            def pytest_generate_tests(metafunc):
                metafunc.parametrize('x', [1,2], indirect=True)
                metafunc.parametrize('y', [2])
            def pytest_funcarg__x(request):
                return request.param * 10
            #def pytest_funcarg__y(request):
            #    return request.param
            def test_simple(x,y):
                assert x in (10,20)
                assert y == 2
        """)
        result = testdir.runpytest("-v")
        result.stdout.fnmatch_lines([
            "*test_simple*1-2*",
            "*test_simple*2-2*",
            "*2 passed*",
        ])

    def test_parametrize_onearg(self):
        metafunc = self.Metafunc(lambda x: None)
        metafunc.parametrize("x", [1,2])
        assert len(metafunc._calls) == 2
        assert metafunc._calls[0].funcargs == dict(x=1)
        assert metafunc._calls[0].id == "1"
        assert metafunc._calls[1].funcargs == dict(x=2)
        assert metafunc._calls[1].id == "2"

    def test_parametrize_onearg_indirect(self):
        metafunc = self.Metafunc(lambda x: None)
        metafunc.parametrize("x", [1,2], indirect=True)
        assert metafunc._calls[0].params == dict(x=1)
        assert metafunc._calls[0].id == "1"
        assert metafunc._calls[1].params == dict(x=2)
        assert metafunc._calls[1].id == "2"

    def test_parametrize_twoargs(self):
        metafunc = self.Metafunc(lambda x,y: None)
        metafunc.parametrize(("x", "y"), [(1,2), (3,4)])
        assert len(metafunc._calls) == 2
        assert metafunc._calls[0].funcargs == dict(x=1, y=2)
        assert metafunc._calls[0].id == "1-2"
        assert metafunc._calls[1].funcargs == dict(x=3, y=4)
        assert metafunc._calls[1].id == "3-4"

    def test_parametrize_multiple_times(self, testdir):
        # module-level and class-level parametrize marks combine (2*1 + 2*2 = 6)
        testdir.makepyfile("""
            import pytest
            pytestmark = pytest.mark.parametrize("x", [1,2])
            def test_func(x):
                assert 0, x
            class TestClass:
                pytestmark = pytest.mark.parametrize("y", [3,4])
                def test_meth(self, x, y):
                    assert 0, x
        """)
        result = testdir.runpytest()
        assert result.ret == 1
        result.assert_outcomes(failed=6)

    def test_parametrize_CSV(self, testdir):
        # argnames given as a comma-separated string (trailing comma ok)
        testdir.makepyfile("""
            import pytest
            @pytest.mark.parametrize("x, y,", [(1,2), (2,3)])
            def test_func(x, y):
                assert x+1 == y
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2)

    def test_parametrize_class_scenarios(self, testdir):
        testdir.makepyfile("""
            # same as doc/en/example/parametrize scenario example
            def pytest_generate_tests(metafunc):
                idlist = []
                argvalues = []
                for scenario in metafunc.cls.scenarios:
                    idlist.append(scenario[0])
                    items = scenario[1].items()
                    argnames = [x[0] for x in items]
                    argvalues.append(([x[1] for x in items]))
                metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class")
            class Test(object):
                scenarios = [['1', {'arg': {1: 2}, "arg2": "value2"}],
                             ['2', {'arg':'value2', "arg2": "value2"}]]
                def test_1(self, arg, arg2):
                    pass
                def test_2(self, arg2, arg):
                    pass
                def test_3(self, arg, arg2):
                    pass
        """)
        result = testdir.runpytest("-v")
        assert result.ret == 0
        result.stdout.fnmatch_lines("""
            *test_1*1*
            *test_2*1*
            *test_3*1*
            *test_1*2*
            *test_2*2*
            *test_3*2*
            *6 passed*
        """)
class TestMetafuncFunctional:
    """Functional (end-to-end) tests for pytest_generate_tests /
    parametrization: generated test files are run via testdir and the
    collected/pass/fail outcomes are checked.
    """
    def test_attributes(self, testdir):
        p = testdir.makepyfile("""
            # assumes that generate/provide runs in the same process
            import py, pytest
            def pytest_generate_tests(metafunc):
                metafunc.addcall(param=metafunc)
            def pytest_funcarg__metafunc(request):
                assert request._pyfuncitem._genid == "0"
                return request.param
            def test_function(metafunc, pytestconfig):
                assert metafunc.config == pytestconfig
                assert metafunc.module.__name__ == __name__
                assert metafunc.function == test_function
                assert metafunc.cls is None
            class TestClass:
                def test_method(self, metafunc, pytestconfig):
                    assert metafunc.config == pytestconfig
                    assert metafunc.module.__name__ == __name__
                    if py.std.sys.version_info > (3, 0):
                        unbound = TestClass.test_method
                    else:
                        unbound = TestClass.test_method.im_func
                    # XXX actually have an unbound test function here?
                    assert metafunc.function == unbound
                    assert metafunc.cls == TestClass
        """)
        result = testdir.runpytest(p, "-v")
        result.assert_outcomes(passed=2)

    def test_addcall_with_two_funcargs_generators(self, testdir):
        # both conftest and module hooks contribute calls
        testdir.makeconftest("""
            def pytest_generate_tests(metafunc):
                assert "arg1" in metafunc.fixturenames
                metafunc.addcall(funcargs=dict(arg1=1, arg2=2))
        """)
        p = testdir.makepyfile("""
            def pytest_generate_tests(metafunc):
                metafunc.addcall(funcargs=dict(arg1=1, arg2=1))
            class TestClass:
                def test_myfunc(self, arg1, arg2):
                    assert arg1 == arg2
        """)
        result = testdir.runpytest("-v", p)
        result.stdout.fnmatch_lines([
            "*test_myfunc*0*PASS*",
            "*test_myfunc*1*FAIL*",
            "*1 failed, 1 passed*"
        ])

    def test_two_functions(self, testdir):
        p = testdir.makepyfile("""
            def pytest_generate_tests(metafunc):
                metafunc.addcall(param=10)
                metafunc.addcall(param=20)
            def pytest_funcarg__arg1(request):
                return request.param
            def test_func1(arg1):
                assert arg1 == 10
            def test_func2(arg1):
                assert arg1 in (10, 20)
        """)
        result = testdir.runpytest("-v", p)
        result.stdout.fnmatch_lines([
            "*test_func1*0*PASS*",
            "*test_func1*1*FAIL*",
            "*test_func2*PASS*",
            "*1 failed, 3 passed*"
        ])

    def test_noself_in_method(self, testdir):
        # 'xyz' is the self-like first arg of a method, not a fixture
        p = testdir.makepyfile("""
            def pytest_generate_tests(metafunc):
                assert 'xyz' not in metafunc.fixturenames
            class TestHello:
                def test_hello(xyz):
                    pass
        """)
        result = testdir.runpytest(p)
        result.assert_outcomes(passed=1)

    def test_generate_plugin_and_module(self, testdir):
        testdir.makeconftest("""
            def pytest_generate_tests(metafunc):
                assert "arg1" in metafunc.fixturenames
                metafunc.addcall(id="world", param=(2,100))
        """)
        p = testdir.makepyfile("""
            def pytest_generate_tests(metafunc):
                metafunc.addcall(param=(1,1), id="hello")
            def pytest_funcarg__arg1(request):
                return request.param[0]
            def pytest_funcarg__arg2(request):
                return request.param[1]
            class TestClass:
                def test_myfunc(self, arg1, arg2):
                    assert arg1 == arg2
        """)
        result = testdir.runpytest("-v", p)
        result.stdout.fnmatch_lines([
            "*test_myfunc*hello*PASS*",
            "*test_myfunc*world*FAIL*",
            "*1 failed, 1 passed*"
        ])

    def test_generate_tests_in_class(self, testdir):
        # pytest_generate_tests may also be defined as a test-class method
        p = testdir.makepyfile("""
            class TestClass:
                def pytest_generate_tests(self, metafunc):
                    metafunc.addcall(funcargs={'hello': 'world'}, id="hello")
                def test_myfunc(self, hello):
                    assert hello == "world"
        """)
        result = testdir.runpytest("-v", p)
        result.stdout.fnmatch_lines([
            "*test_myfunc*hello*PASS*",
            "*1 passed*"
        ])

    def test_two_functions_not_same_instance(self, testdir):
        # each generated call gets a fresh test-class instance
        p = testdir.makepyfile("""
            def pytest_generate_tests(metafunc):
                metafunc.addcall({'arg1': 10})
                metafunc.addcall({'arg1': 20})
            class TestClass:
                def test_func(self, arg1):
                    assert not hasattr(self, 'x')
                    self.x = 1
        """)
        result = testdir.runpytest("-v", p)
        result.stdout.fnmatch_lines([
            "*test_func*0*PASS*",
            "*test_func*1*PASS*",
            "*2 pass*",
        ])

    def test_issue28_setup_method_in_generate_tests(self, testdir):
        p = testdir.makepyfile("""
            def pytest_generate_tests(metafunc):
                metafunc.addcall({'arg1': 1})
            class TestClass:
                def test_method(self, arg1):
                    assert arg1 == self.val
                def setup_method(self, func):
                    self.val = 1
        """)
        result = testdir.runpytest(p)
        result.assert_outcomes(passed=1)

    def test_parametrize_functional2(self, testdir):
        # two parametrize calls produce the cartesian product
        testdir.makepyfile("""
            def pytest_generate_tests(metafunc):
                metafunc.parametrize("arg1", [1,2])
                metafunc.parametrize("arg2", [4,5])
            def test_hello(arg1, arg2):
                assert 0, (arg1, arg2)
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines([
            "*(1, 4)*",
            "*(1, 5)*",
            "*(2, 4)*",
            "*(2, 5)*",
            "*4 failed*",
        ])

    def test_parametrize_and_inner_getfuncargvalue(self, testdir):
        p = testdir.makepyfile("""
            def pytest_generate_tests(metafunc):
                metafunc.parametrize("arg1", [1], indirect=True)
                metafunc.parametrize("arg2", [10], indirect=True)
            def pytest_funcarg__arg1(request):
                x = request.getfuncargvalue("arg2")
                return x + request.param
            def pytest_funcarg__arg2(request):
                return request.param
            def test_func1(arg1, arg2):
                assert arg1 == 11
        """)
        result = testdir.runpytest("-v", p)
        result.stdout.fnmatch_lines([
            "*test_func1*1*PASS*",
            "*1 passed*"
        ])

    def test_parametrize_on_setup_arg(self, testdir):
        # parametrizing an arg that is only used by another fixture
        p = testdir.makepyfile("""
            def pytest_generate_tests(metafunc):
                assert "arg1" in metafunc.fixturenames
                metafunc.parametrize("arg1", [1], indirect=True)
            def pytest_funcarg__arg1(request):
                return request.param
            def pytest_funcarg__arg2(request, arg1):
                return 10 * arg1
            def test_func(arg2):
                assert arg2 == 10
        """)
        result = testdir.runpytest("-v", p)
        result.stdout.fnmatch_lines([
            "*test_func*1*PASS*",
            "*1 passed*"
        ])

    def test_parametrize_with_ids(self, testdir):
        testdir.makepyfile("""
            import pytest
            def pytest_generate_tests(metafunc):
                metafunc.parametrize(("a", "b"), [(1,1), (1,2)],
                                     ids=["basic", "advanced"])
            def test_function(a, b):
                assert a == b
        """)
        result = testdir.runpytest("-v")
        assert result.ret == 1
        result.stdout.fnmatch_lines_random([
            "*test_function*basic*PASSED",
            "*test_function*advanced*FAILED",
        ])

    def test_parametrize_without_ids(self, testdir):
        testdir.makepyfile("""
            import pytest
            def pytest_generate_tests(metafunc):
                metafunc.parametrize(("a", "b"),
                                     [(1,object()), (1.3,object())])
            def test_function(a, b):
                assert 1
        """)
        result = testdir.runpytest("-v")
        result.stdout.fnmatch_lines("""
            *test_function*1-b0*
            *test_function*1.3-b1*
        """)

    @pytest.mark.parametrize(("scope", "length"),
                             [("module", 2), ("function", 4)])
    def test_parametrize_scope_overrides(self, testdir, scope, length):
        # fixture invocation count depends on the parametrize scope
        testdir.makepyfile("""
            import pytest
            l = []
            def pytest_generate_tests(metafunc):
                if "arg" in metafunc.funcargnames:
                    metafunc.parametrize("arg", [1,2], indirect=True,
                                         scope=%r)
            def pytest_funcarg__arg(request):
                l.append(request.param)
                return request.param
            def test_hello(arg):
                assert arg in (1,2)
            def test_world(arg):
                assert arg in (1,2)
            def test_checklength():
                assert len(l) == %d
        """ % (scope, length))
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=5)

    def test_parametrize_issue323(self, testdir):
        # many params at module scope must not blow up collection
        testdir.makepyfile("""
            import pytest
            @pytest.fixture(scope='module', params=range(966))
            def foo(request):
                return request.param
            def test_it(foo):
                pass
            def test_it2(foo):
                pass
        """)
        reprec = testdir.inline_run("--collect-only")
        assert not reprec.getcalls("pytest_internalerror")

    def test_usefixtures_seen_in_generate_tests(self, testdir):
        testdir.makepyfile("""
            import pytest
            def pytest_generate_tests(metafunc):
                assert "abc" in metafunc.fixturenames
                metafunc.parametrize("abc", [1])
            @pytest.mark.usefixtures("abc")
            def test_function():
                pass
        """)
        reprec = testdir.runpytest()
        reprec.assert_outcomes(passed=1)

    def test_generate_tests_only_done_in_subdir(self, testdir):
        # each conftest's hook only sees tests from its own directory
        sub1 = testdir.mkpydir("sub1")
        sub2 = testdir.mkpydir("sub2")
        sub1.join("conftest.py").write(py.code.Source("""
            def pytest_generate_tests(metafunc):
                assert metafunc.function.__name__ == "test_1"
        """))
        sub2.join("conftest.py").write(py.code.Source("""
            def pytest_generate_tests(metafunc):
                assert metafunc.function.__name__ == "test_2"
        """))
        sub1.join("test_in_sub1.py").write("def test_1(): pass")
        sub2.join("test_in_sub2.py").write("def test_2(): pass")
        result = testdir.runpytest("-v", "-s", sub1, sub2, sub1)
        result.assert_outcomes(passed=3)

    def test_generate_same_function_names_issue403(self, testdir):
        testdir.makepyfile("""
            import pytest
            def make_tests():
                @pytest.mark.parametrize("x", range(2))
                def test_foo(x):
                    pass
                return test_foo
            test_x = make_tests()
            test_y = make_tests()
        """)
        reprec = testdir.runpytest()
        reprec.assert_outcomes(passed=4)

    @pytest.mark.issue463
    def test_parameterize_misspelling(self, testdir):
        # the common 'parameterize' misspelling yields a helpful error
        testdir.makepyfile("""
            import pytest
            @pytest.mark.parameterize("x", range(2))
            def test_foo(x):
                pass
        """)
        reprec = testdir.inline_run('--collectonly')
        failures = reprec.getfailures()
        assert len(failures) == 1
        expectederror = "MarkerError: test_foo has 'parameterize', spelling should be 'parametrize'"
        assert expectederror in failures[0].longrepr.reprcrash.message
class TestMarkersWithParametrization:
    """Tests for applying marks (foo/bar/xfail) to individual parameter
    sets inside @pytest.mark.parametrize (issue 308).
    """
    pytestmark = pytest.mark.issue308
    def test_simple_mark(self, testdir):
        # a mark wrapped around one parameter set applies to that item only
        s = """
            import pytest
            @pytest.mark.foo
            @pytest.mark.parametrize(("n", "expected"), [
                (1, 2),
                pytest.mark.bar((1, 3)),
                (2, 3),
            ])
            def test_increment(n, expected):
                assert n + 1 == expected
        """
        items = testdir.getitems(s)
        assert len(items) == 3
        for item in items:
            assert 'foo' in item.keywords
        assert 'bar' not in items[0].keywords
        assert 'bar' in items[1].keywords
        assert 'bar' not in items[2].keywords

    def test_select_based_on_mark(self, testdir):
        # -m selection works on per-parameter-set marks
        s = """
            import pytest
            @pytest.mark.parametrize(("n", "expected"), [
                (1, 2),
                pytest.mark.foo((2, 3)),
                (3, 4),
            ])
            def test_increment(n, expected):
                assert n + 1 == expected
        """
        testdir.makepyfile(s)
        rec = testdir.inline_run("-m", 'foo')
        passed, skipped, fail = rec.listoutcomes()
        assert len(passed) == 1
        assert len(skipped) == 0
        assert len(fail) == 0

    @pytest.mark.xfail(reason="is this important to support??")
    def test_nested_marks(self, testdir):
        s = """
            import pytest
            mastermark = pytest.mark.foo(pytest.mark.bar)
            @pytest.mark.parametrize(("n", "expected"), [
                (1, 2),
                mastermark((1, 3)),
                (2, 3),
            ])
            def test_increment(n, expected):
                assert n + 1 == expected
        """
        items = testdir.getitems(s)
        assert len(items) == 3
        for mark in ['foo', 'bar']:
            assert mark not in items[0].keywords
            assert mark in items[1].keywords
            assert mark not in items[2].keywords

    def test_simple_xfail(self, testdir):
        s = """
            import pytest
            @pytest.mark.parametrize(("n", "expected"), [
                (1, 2),
                pytest.mark.xfail((1, 3)),
                (2, 3),
            ])
            def test_increment(n, expected):
                assert n + 1 == expected
        """
        testdir.makepyfile(s)
        reprec = testdir.inline_run()
        # xfail is skip??
        reprec.assertoutcome(passed=2, skipped=1)

    def test_simple_xfail_single_argname(self, testdir):
        s = """
            import pytest
            @pytest.mark.parametrize("n", [
                2,
                pytest.mark.xfail(3),
                4,
            ])
            def test_isEven(n):
                assert n % 2 == 0
        """
        testdir.makepyfile(s)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2, skipped=1)

    def test_xfail_with_arg(self, testdir):
        # xfail taking a condition string, applied to one parameter set
        s = """
            import pytest
            @pytest.mark.parametrize(("n", "expected"), [
                (1, 2),
                pytest.mark.xfail("True")((1, 3)),
                (2, 3),
            ])
            def test_increment(n, expected):
                assert n + 1 == expected
        """
        testdir.makepyfile(s)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2, skipped=1)

    def test_xfail_with_kwarg(self, testdir):
        s = """
            import pytest
            @pytest.mark.parametrize(("n", "expected"), [
                (1, 2),
                pytest.mark.xfail(reason="some bug")((1, 3)),
                (2, 3),
            ])
            def test_increment(n, expected):
                assert n + 1 == expected
        """
        testdir.makepyfile(s)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2, skipped=1)

    def test_xfail_with_arg_and_kwarg(self, testdir):
        s = """
            import pytest
            @pytest.mark.parametrize(("n", "expected"), [
                (1, 2),
                pytest.mark.xfail("True", reason="some bug")((1, 3)),
                (2, 3),
            ])
            def test_increment(n, expected):
                assert n + 1 == expected
        """
        testdir.makepyfile(s)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2, skipped=1)

    def test_xfail_passing_is_xpass(self, testdir):
        s = """
            import pytest
            @pytest.mark.parametrize(("n", "expected"), [
                (1, 2),
                pytest.mark.xfail("sys.version > 0", reason="some bug")((2, 3)),
                (3, 4),
            ])
            def test_increment(n, expected):
                assert n + 1 == expected
        """
        testdir.makepyfile(s)
        reprec = testdir.inline_run()
        # xpass is fail, obviously :)
        reprec.assertoutcome(passed=2, failed=1)

    def test_parametrize_called_in_generate_tests(self, testdir):
        # per-parameter-set marks also work when parametrize is called
        # from the pytest_generate_tests hook
        s = """
            import pytest
            def pytest_generate_tests(metafunc):
                passingTestData = [(1, 2),
                                   (2, 3)]
                failingTestData = [(1, 3),
                                   (2, 2)]
                testData = passingTestData + [pytest.mark.xfail(d)
                                  for d in failingTestData]
                metafunc.parametrize(("n", "expected"), testData)
            def test_increment(n, expected):
                assert n + 1 == expected
        """
        testdir.makepyfile(s)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2, skipped=2)

    @pytest.mark.issue290
    def test_parametrize_ID_generation_string_int_works(self, testdir):
        # int 0 and string '0' must produce distinct, collectable ids
        testdir.makepyfile("""
            import pytest
            @pytest.fixture
            def myfixture():
                return 'example'
            @pytest.mark.parametrize(
                'limit', (0, '0'))
            def test_limit(limit, myfixture):
                return
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2)
|
|
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from typing import Optional
from qrl.core import config
from qrl.core.misc import logger, db
from qrl.generated import qrl_pb2, qrlstateinfo_pb2
class State:
def __init__(self, my_db=None):
self._db = my_db
if not my_db:
self._db = db.DB() # generate db object here
self._tmp_state = None # Temporary State file which needs to be fetched during migration to new db
self._state_version = 1 # Change State Version, each time any change made to leveldb structure
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._db is not None:
if self._db.db is not None:
del self._db.db
del self._db
self._db = None
@property
def state_version(self):
return self._state_version
@property
def batch(self):
return self._db.get_batch()
@property
def total_coin_supply(self):
try:
return int.from_bytes(self._db.get_raw(b'total_coin_supply'), byteorder='big', signed=False)
except KeyError:
return 0
def get_state_version(self) -> int:
try:
version = self._db.get_raw(b'state_version')
return int(version.decode())
except KeyError:
return 0
except Exception:
raise Exception("Exception while retrieving version")
def put_state_version(self):
try:
self._db.put_raw(b'state_version', str(self._state_version).encode())
except Exception:
raise Exception("Exception while Setting version")
def is_older_state_version(self):
current_state_version = self.get_state_version()
if current_state_version < self._state_version:
return True
def is_state_compatible(self) -> bool:
current_state_version = self.get_state_version()
if current_state_version > self._state_version:
logger.warning("You have a state with Version %s", current_state_version)
logger.warning("This node only supports State Version %s", self._state_version)
return False
elif self.is_older_state_version():
logger.warning("Old State Version Found %s", current_state_version)
return True
def get_mainchain_height(self) -> int:
try:
return int.from_bytes(self._db.get_raw(b'blockheight'), byteorder='big', signed=False)
except KeyError:
pass
except Exception as e:
logger.error('get_blockheight Exception %s', e)
return -1
def update_mainchain_height(self, height, batch):
self._db.put_raw(b'blockheight', height.to_bytes(8, byteorder='big', signed=False), batch)
def get_re_org_limit(self) -> int:
try:
return int.from_bytes(self._db.get_raw(b'reorg_limit'), byteorder='big', signed=False)
except KeyError:
return 0
except Exception as e:
logger.error('get_re_org_limit Exception %s', e)
return -1
def update_re_org_limit(self, height, batch):
reorg_limit = height - config.dev.reorg_limit
if reorg_limit <= 0:
return
current_reorg_limit = self.get_re_org_limit()
if reorg_limit <= current_reorg_limit:
return
self._db.put_raw(b'reorg_limit', reorg_limit.to_bytes(8, byteorder='big', signed=False), batch)
def get_address_is_used(self, address: bytes) -> bool:
# FIXME: Probably obsolete
try:
return self._db.get_raw(address)
except KeyError:
return False
except Exception as e:
# FIXME: Review
logger.error('Exception in address_used')
logger.exception(e)
raise
def write_batch(self, batch, sync=True):
self._db.write_batch(batch, sync)
#########################################
#########################################
#########################################
#########################################
#########################################
def _update_total_coin_supply(self, balance, batch):
self._db.put_raw(b'total_coin_supply',
(self.total_coin_supply + balance).to_bytes(8, byteorder='big', signed=False),
batch)
def _delete(self, key, batch):
self._db.delete(key, batch)
def put_dev_config_state(self, dev_config, batch):
self._db.put_raw(dev_config.current_state_key, dev_config.SerializeToString(), batch)
def get_dev_config_state(self, dev_config_state_key: bytes):
try:
data = self._db.get_raw(dev_config_state_key)
pbdata = qrl_pb2.DevConfig()
pbdata.ParseFromString(bytes(data))
return pbdata
except KeyError:
logger.debug('[get_dev_config_state] Dev Config not found')
except Exception as e:
logger.error('[get_dev_config_state] %s', e)
return None
def get_dev_config_current_state_key(self):
try:
return self._db.get_raw(b'dev_config_current_state_key')
except KeyError:
logger.debug('[get_dev_config_current_state_key] Dev Config not found')
except Exception as e:
logger.error('[get_dev_config_current_state_key] %s', e)
return None
def put_dev_config_current_state_key(self, dev_config_state_key: bytes, batch):
self._db.put_raw(b'dev_config_current_state_key', dev_config_state_key, batch)
def put_fork_state(self, fork_state: qrlstateinfo_pb2.ForkState, batch=None):
self._db.put_raw(b'fork_state', fork_state.SerializeToString(), batch)
def get_fork_state(self) -> Optional[qrlstateinfo_pb2.ForkState]:
    """Return the stored ForkState protobuf, or None when none was saved.

    Unexpected read/parse errors are logged and re-raised so callers can
    distinguish corruption from simple absence.
    """
    try:
        fork_state = qrlstateinfo_pb2.ForkState()
        fork_state.ParseFromString(bytes(self._db.get_raw(b'fork_state')))
        return fork_state
    except KeyError:
        return None
    except Exception as e:
        logger.error('Exception in get_fork_state')
        logger.exception(e)
        raise
def delete_fork_state(self, batch=None):
    """Drop any persisted fork-recovery state."""
    db = self._db
    db.delete(b'fork_state', batch)
@staticmethod
def generate_token_key(address, token_txhash) -> bytes:
    """Build the DB key b'token_<address>_<txhash>' for a token balance."""
    return b'_'.join((b'token', address, token_txhash))
@staticmethod
def generate_slave_key(address, slave_pk) -> bytes:
    """Build the DB key b'slave_<address>_<pk>' for slave-key metadata."""
    return b'_'.join((b'slave', address, slave_pk))
def get_slave_pk_access_type(self, address: bytes, slave_pk: bytes) -> qrl_pb2.SlaveMetadata:
    """Return SlaveMetadata for (address, slave_pk), or None if absent.

    Read/parse failures other than a missing key are logged and swallowed.
    """
    key = self.generate_slave_key(address, slave_pk)
    try:
        metadata = qrl_pb2.SlaveMetadata()
        metadata.ParseFromString(self._db.get_raw(key))
        return metadata
    except KeyError:
        pass
    except Exception as e:
        logger.error('[get_slave_pk_access_type] %s', e)
    return None
def get_token(self, address: bytes, token_txhash: bytes) -> qrl_pb2.TokenBalance:
    """Return the TokenBalance stored for (address, token_txhash), or None.

    Read/parse failures other than a missing key are logged and swallowed.
    """
    key = self.generate_token_key(address, token_txhash)
    try:
        balance = qrl_pb2.TokenBalance()
        balance.ParseFromString(self._db.get_raw(key))
        return balance
    except KeyError:
        pass
    except Exception as e:
        logger.error('[get_token] %s', e)
    return None
|
|
import json
import math
import pdb
import random
import numpy as np
import sys
import time
sys.path.extend(['.', '..'])
from itertools import chain, repeat, izip
from collections import defaultdict
from operator import mul, and_, or_
from scipy.optimize import fsolve
from matplotlib import pyplot as plt
from ..util import *
from ..bottomup.bounding_box import *
from ..bottomup.cluster import *
from frontier import *
from merger import Merger
_logger = get_logger()
class BaseRangeMerger(Merger):
    """
    Merges clusters by computing the frontier of all the clusters and
    iteratively expands the frontier until convergence.
    Accepts all of the clusters from the partitioner at once.

    NOTE: this is Python 2 code (print statements, xrange, py2 map/filter).
    """

    def __init__(self, *args, **kwargs):
        Merger.__init__(self, *args, **kwargs)
        self.learner_hash = kwargs.get('learner_hash', '')
        # c_range: the [low, high] interval of the c parameter over which
        # cluster influence curves are compared.
        self.c_range = kwargs.get('c_range', [0.01, 0.7])
        self.get_frontier = Frontier(self.c_range, 0.)
        self.CACHENAME = './dbwipes.rangemerger.cache'
        # iteration counter, used only for debug plot titles
        self.i = 0

        #
        # per execution state
        #
        # dim -> list of value subsets that were not on frontier
        # e.g., subregion -> [ (SR1, SR2), (SR3), ... ]
        self.rejected_disc_vals = defaultdict(list)
        # (dim, direction) -> range it has expanded along
        self.rejected_cont_vals = defaultdict(set)

    def __hash__(self):
        # Identity is the learner plus the c interval; used for caching.
        return hash((self.learner_hash, tuple(self.c_range)))

    def setup_stats(self, clusters):
        """
        computes error bounds and the minimum volume of a 0-volume cluster
        adds data structures to cluster object
        """
        Merger.setup_stats(self, clusters)

        # Attach an influence function and its range endpoints to each cluster.
        for c in clusters:
            c.inf_func = self.learner.create_inf_func(c)
            c.c_range = list(self.c_range)
            c.inf_range = [c.inf_func(c.c_range[0]), c.inf_func(c.c_range[1])]

    def print_clusters(self, clusters):
        # Debug helper: one cluster per line on stdout.
        print "\n".join(map(str, clusters))

    def get_variety_frontier(self, clusters):
        """Repeatedly extract frontiers, damping the influence of clusters
        already chosen so later frontiers favor different ("varied") clusters.

        Returns (list of frontier sets, clusters never placed on a frontier).
        Restores each cluster's original inf_state[1] before returning.
        """
        seen = set()
        ret = []
        # Save the original bc vectors so they can be restored afterwards.
        cluster_bcs = [list(c.inf_state[1]) for c in clusters]
        while True:
            for c in clusters:
                c.c_range = list(self.c_range)
            frontier,_ = self.get_frontier(clusters)
            frontier.difference_update(seen)
            print "variety"
            self.print_clusters(frontier)
            if not frontier: break
            ret.append(frontier)
            seen.update(frontier)
            # Penalize remaining clusters that resemble the chosen frontier.
            for c in clusters:
                for f in frontier:
                    for idx in xrange(len(c.inf_state[1])):
                        c.inf_state[1][idx] = max(0, c.inf_state[1][idx] - 3*f.inf_state[1][idx])
        # Restore the mutated inf_state values.
        for idx, c in enumerate(clusters):
            c.inf_state = list(c.inf_state)
            c.inf_state[1] = cluster_bcs[idx]
        return ret, set(clusters).difference(seen)

    @instrument
    def __call__(self, clusters, **kwargs):
        """Main entry point: expand all frontiers until no new clusters are
        produced or a 60-second budget elapses; returns the merged clusters."""
        if not clusters:
            return list(clusters)

        _logger.debug("merging %d clusters", len(clusters))
        _logger.debug("DEBUG = %s", self.DEBUG)

        self.set_params(**kwargs)
        self.setup_stats(clusters)
        # Reset per-execution rejection caches.
        self.rejected_disc_vals = defaultdict(list)
        self.rejected_cont_vals = defaultdict(set)

        self.learner.update_status("expanding frontier: indexing partitions")
        self.adj_graph = self.make_adjacency(clusters, self.partitions_complete)

        if self.DEBUG:
            self.renderer = InfRenderer('/tmp/infs.pdf', c_range=self.c_range)
            self.renderer.plot_inf_curves(clusters)
            self.renderer.set_title("inf 0")

        self.learner.update_status("expanding frontier, iter 0")
        frontiers, removed_clusters = self.get_variety_frontier(clusters)
        _logger.debug("%d clusters in frontier", sum(map(len, frontiers)))

        clusters.sort(key=lambda c: c.inf_func(0.1), reverse=True)
        self.print_clusters(chain(*frontiers))
        print
        self.print_clusters(clusters[:5])

        if self.DEBUG:
            self.boxrenderer = JitteredClusterRenderer('/tmp/boxes.pdf')
            self.boxrenderer.plot_clusters(removed_clusters, color="grey")
            for frontier in frontiers:
                self.boxrenderer.plot_clusters(frontier, color="red")
            self.renderer.new_page()
            self.renderer.plot_inf_curves(clusters)

        print self.get_frontier.stats.items()
        print self.get_frontier.heap.stats.items()

        start = time.time()
        iteridx = 1
        seen = set()
        clusters_set = set()
        # Expand each variety frontier independently until it stops growing
        # or the overall 60s wall-clock budget is exhausted.
        for frontier in frontiers:
            for c in frontier:
                c.c_range = list(self.c_range)
            versionid = 0
            while len(frontier) > 0:
                self.learner.update_status("expanding frontier, iter %d" % iteridx)
                iteridx += 1
                if self.DEBUG:
                    self.renderer.new_page()
                    self.renderer.plot_inf_curves(frontier, color='grey', alpha=.2)
                    self.renderer.set_title("frontier expansion %d" % iteridx)

                print "frontier"
                self.print_clusters(frontier)
                new_clusters, removed_clusters = self.expand_frontier(frontier, seen, None)

                if self.DEBUG:
                    self.renderer.plot_inf_curves(removed_clusters, color="grey")
                    self.renderer.plot_inf_curves(new_clusters)

                print "\nnew clusters"
                self.print_clusters(new_clusters)

                # Stop when expansion produced nothing new, or budget exceeded.
                if (not new_clusters.difference(frontier)) or (time.time() - start) > 60:
                    clusters_set.update(removed_clusters)
                    clusters_set.update(new_clusters)
                    break

                # Swap the expanded clusters into the adjacency index.
                self.adj_graph.remove(removed_clusters, versionid)
                self.adj_graph.insert(new_clusters, versionid)
                self.adj_graph.sync()
                versionid += 1
                frontier = new_clusters

        print "returning %d clusters total!" % len(clusters_set)
        if self.DEBUG:
            self.renderer.new_page()
            self.renderer.plot_inf_curves(clusters_set)
            self.renderer.set_title("final frontier")
            self.renderer.close()
            self.boxrenderer.close()

        self.print_clusters(clusters_set)
        self.learner.merge_stats(self.get_frontier.stats, 'frontier_')
        self.learner.merge_stats(self.get_frontier.heap.stats, 'inter_heap_')
        return list(clusters_set)

    @instrument
    def expand_frontier(self, frontier, seen, version=None):
        """
        Return (newclusters, rmclusters)

        NOTE(review): the *version* parameter is accepted but self.expand is
        always called with version=None -- confirm whether that is intended.
        """
        newclusters = set(frontier)
        for cluster in frontier:
            merges = self.expand(cluster, seen, version=None)
            #for c in rms: c.c_range = list(self.c_range)
            #newclusters.update(rms)
            newclusters.update(merges)

        for cluster in newclusters:
            cluster.c_range = list(self.c_range)
        return self.get_frontier(newclusters)

    @instrument
    def dim_merge(self, cluster, dim, dec=None, inc=None, skip=None):
        """Merge *cluster* along continuous dimension *dim*, unless the
        proposed bound was already rejected in a previous expansion."""
        if dec is not None:
            if round(dec, 1) in self.rejected_cont_vals[(dim, 'dec')]:
                return None
        if inc is not None:
            if round(inc, 1) in self.rejected_cont_vals[(dim, 'inc')]:
                return None

        merged = Merger.dim_merge(self, cluster, dim, dec, inc, skip)
        if merged:
            merged.c_range = list(self.c_range)
            merged.inf_func = self.learner.create_inf_func(merged)
        return merged

    @instrument
    def disc_merge(self, cluster, dim, vals, skip=None):
        # reject if union is a superset of anything in
        # rejected_disc_vals
        vals = set(vals)
        vals.update(cluster.discretes.get(dim, ()))
        for subset in self.rejected_disc_vals[dim]:
            if vals.issuperset(subset):
                return None

        merged = Merger.disc_merge(self, cluster, dim, vals, skip)
        if merged:
            merged.c_range = list(self.c_range)
            merged.inf_func = self.learner.create_inf_func(merged)
        return merged

    @instrument
    def expand(self, c, seen, version=None):
        """
        Returns a frontier of clusters expanded from c that
        are possible optimals in _some_ c range

        XXX: optimization could be a minimum c range a cluster must be
        a candidate over
        """
        _logger.debug("expand\t%s", str(c.rule)[:100])
        start = time.time()
        # Rejection caches are reset per-expansion.
        self.rejected_disc_vals = defaultdict(list)
        self.rejected_cont_vals = defaultdict(set)
        cur_bests = set([c])
        ret = set()
        rms = set()
        all_merges = set()
        self.i = 0
        while cur_bests:
            # 30s per-cluster budget (disabled while debugging).
            if not self.DEBUG and (time.time() - start) > 30:
                ret.update(cur_bests)
                break

            cur = cur_bests.pop()
            if cur.bound_hash in seen:
                _logger.debug("seen \t%s", str(cur.rule)[:100])
                continue
            seen.add(cur.bound_hash)
            _logger.debug("expand\tsub\t%.3f-%.3f\t%s", cur.c_range[0], cur.c_range[1], str(cur.rule)[:100])

            cur_seen = set()
            dim_to_bests = self.greedy_expansion(cur, cur_seen, version=version)
            seen.update(cur_seen)

            merges = list()
            map(merges.extend, dim_to_bests.values())
            all_merges.update(map(hash, merges))
            # Keep only candidates with a non-degenerate c interval.
            merges = set(filter(lambda c: r_vol(c.c_range), merges))

            if cur in merges:
                _logger.debug("cur added to bests")
                ret.add(cur)
                merges.remove(cur)

            # NOTE(review): `is 0` relies on CPython small-int caching;
            # `== 0` is the intended comparison.
            if len(merges) is 0:
                _logger.debug("# better expanded = 0")
                ret.add(cur)
                continue

            combined = set(cur_bests)
            combined.update(merges)
            frontier, losers = self.get_frontier(combined)

            if self.DEBUG:
                self.boxrenderer.new_page()
                self.boxrenderer.plot_clusters([l for l in losers if l not in merges], color="grey", alpha=0.2)
                self.boxrenderer.plot_clusters([m for m in merges if m in losers], color='blue', alpha=0.2)
                self.boxrenderer.plot_clusters([m for m in merges if m in frontier], color='red', alpha=0.3)
                self.boxrenderer.plot_clusters([cur], color="black", alpha=0.7)
                self.boxrenderer.set_title("expand iter %d\n%s" % (self.i, str(c.rule)))
            self.i += 1

            if cur_bests.issuperset(frontier):
                _logger.debug("frontier subset of curbests")
                ret.add(cur)
                continue

            for merged in frontier.difference(cur_bests):
                rms.update(merged.parents)

            self.adj_graph.insert(cur_bests)
            seen.update([c.bound_hash for c in rms])
            cur_bests = frontier
            cur_bests.difference_update([cur])
            _logger.debug("\t%d in frontier", len(cur_bests))

        return ret
        # NOTE(review): everything below is unreachable because of the
        # `return ret` above -- the (ret, rms) contract it implements is
        # never exercised; callers receive only the set.
        ret, more_rms = self.get_frontier(ret)
        rms.update(more_rms)
        return ret, rms
class RangeMerger(BaseRangeMerger):
    """Frontier merger that expands every candidate of a dimension at once
    and keeps whichever land on the frontier.  (Python 2 code.)"""

    @instrument
    def greedy_expansion(self, cur, seen, version=None):
        """Expand *cur* along every (dimension, direction) pair; returns a
        dict mapping (dim, direction) -> set of best expanded clusters."""
        expansions = self.expand_candidates(cur, seen, version=version)

        dim_to_bests = defaultdict(set)
        for dim, direction, g in expansions:
            dim_bests = self.expand_dim(cur, dim, direction, g, seen)
            #if cur in dim_bests: dim_bests.remove(cur)
            dim_to_bests[(dim, direction)] = dim_bests

        # cross product between inc and dec of each dimension
        return dim_to_bests

    @instrument
    def expand_dim(self, cur, dim, direction, g, seen):
        """
        Args
          dim: if direction == 'disc', dim is the attr name
               else dim is the index into cluster.cols

        Returns the frontier of {cur} + all candidates from generator *g*.
        """
        # NOTE(review): `if True or ...` forces the batch branch for every
        # direction; the incremental else-branch below is dead code.
        if True or direction == 'disc':
            cands = []
            bests = set([cur])
            prev_card = None
            cands = list(g)
            bests.update(cands)
            bests, _ = self.get_frontier(bests)
            # Mark losers as seen so they are not re-expanded later.
            for cand in cands:
                if cand not in bests:
                    seen.add(cand.bound_hash)
        else:
            # Dead branch (see NOTE above): incremental expansion that stops
            # at the first candidate knocked off the frontier.
            cands = []
            bests = set([cur])
            for cand in g:
                cands.append(cand)
                bests, _ = self.get_frontier(bests.union(set([cand])))
                if cand not in bests:
                    seen.add(cand.bound_hash)
                    break
            bests = [cur] + cands
        bests = list(bests)

        # update rejection state
        for c in cands:
            if c in bests: continue
            if direction == 'disc':
                self.rejected_disc_vals[dim].append(set(c.discretes[dim]))
            if direction == 'inc':
                v = c.bbox[1][dim]
                self.rejected_cont_vals[(dim, direction)].add(v)
            if direction == 'dec':
                v = c.bbox[0][dim]
                self.rejected_cont_vals[(dim, direction)].add(v)

        if self.DEBUG:
            # Build a human-readable summary of the bests/candidates.
            if direction == 'disc':
                name = dim[:10]
                s = ','.join([str(len(c.discretes[dim])) for c in bests])
                ss = ','.join([str(c.discretes[dim]) for c in cands])
                if 'subregion' in str(cur):
                    for c in cands:
                        isbest = False
                        if c in bests:
                            isbest = True
                        _logger.debug('\tbest? %s\tcand:\t%s', isbest, c)
            else:
                name = cur.cols[dim][:10]
                if bests:
                    if direction == 'inc':
                        s = ','.join(["%.4f" % c.bbox[1][dim] for c in bests])
                        s = '%.4f - %s' % (bests[0].bbox[0][dim], s)
                    else:
                        s = ','.join(["%.4f" % c.bbox[0][dim] for c in bests])
                        s = '%s - %.4f' % (s, bests[0].bbox[1][dim])
                else:
                    s = '---'
                ss = ''
            _logger.debug("\t%s\t%s\t%d bests\t%d candidates", name, direction, len(bests), len(cands))
            _logger.debug("\tbests\t\t\t\t%s", s)
            _logger.debug("\tcands\t\t\t\t%s", ss)

        return bests
class RangeMerger2(BaseRangeMerger):
    """
    Variation of the above merger that merges clusters one axis at a time.

    The merging algorithm works as follows for a given cluster C:
    * find C's neighbors
    * merge their rules with C's
    * for each attribute, find all of the values the merged clusters expand to
      attribute i -> [v1, v2,.... ]
    * expand each attribute _and compute its influence_
      in increasing order from smallest value to largest
    * don't try larger values if a smaller value doesn't result in a better cluster
    """

    def pick_expansion_vals(self, cluster, dim, direction, vals):
        """Subsample the candidate expansion values for one dimension.

        Discrete dimensions keep all values; continuous dimensions are
        sampled down to at most 6 values via numpy.
        """
        if len(vals) == 0: return vals
        if direction == 'disc':
            return vals

        return np.random.choice(vals, min(len(vals), 6), replace=False)
        # NOTE(review): everything below is unreachable because of the
        # return above -- an older min-step filtering strategy.
        vals = random.sample(vals, min(4, len(vals)))
        vals.sort(reverse=(direction == 'dec'))
        if direction == 'inc':
            baseval = cluster.bbox[1][dim]
        else:
            baseval = cluster.bbox[0][dim]
        rng = self.learner.cont_dists[cluster.cols[dim]]
        min_step = r_vol([rng.min, rng.max]) * 0.005
        vals = filter(lambda v: abs(v-baseval) > min_step, vals)
        if len(vals) <= 4:
            return vals
        vals = random.sample(vals, 4)
        return vals

    @instrument
    def greedy_expansion(self, cluster, seen, version=None):
        """Expand *cluster* one (dimension, direction) at a time, keeping the
        frontier of the accumulated expansions after every step.

        Returns {'dim': final frontier set} to match the dict contract of
        BaseRangeMerger.expand's dim_to_bests.
        """
        curset = set([cluster])
        for dim, direction, vals in self.dims_to_expand(cluster, seen, version=version):
            if self.DEBUG:
                self.boxrenderer.new_page()
                self.boxrenderer.plot_clusters(curset, color='blue', alpha=0.3)
            _logger.debug("%s\t%s\t%s", dim, direction, str(vals))
            tmp = set()
            realvals = self.pick_expansion_vals(cluster, dim, direction, vals)
            if direction != 'disc':
                _logger.debug("# to expand\t%s\t%s\t%d -> %d\t%s -> %s",
                              cluster.cols[dim][:10], direction, len(vals),
                              len(realvals), str(vals), str(realvals))

            # Merge every cluster in the current frontier with each value.
            if direction == 'inc':
                for inc in realvals:
                    tmp.update([self.dim_merge(c, dim, None, inc, seen) for c in curset])
            elif direction == 'dec':
                for dec in realvals:
                    tmp.update([self.dim_merge(c, dim, dec, None, seen) for c in curset])
            else:
                for disc in realvals:
                    tmp.update([self.disc_merge(c, dim, disc) for c in curset])

            # dim_merge/disc_merge return None on rejection; drop those.
            tmp = filter(bool, tmp)
            _logger.debug("# actual expansions\t%d using %d of %d vals\t%d in curset", len(tmp), len(realvals), len(vals), len(curset))
            if not tmp:
                continue

            for c in curset:
                c.c_range = list(self.c_range)
            tmp.extend(curset)
            curset,_ = self.get_frontier(tmp)
            seen.update([c.bound_hash for c in _])

            # update rejection state
            for v in vals:
                if direction == 'disc':
                    self.rejected_disc_vals[dim].append(set(v))#c.discretes[dim]))
                if direction == 'inc':
                    #v = c.bbox[1][dim]
                    self.rejected_cont_vals[(dim, direction)].add(round(v, 1))
                if direction == 'dec':
                    #v = c.bbox[0][dim]
                    self.rejected_cont_vals[(dim, direction)].add(round(v, 1))

            if self.DEBUG:
                self.boxrenderer.plot_clusters(curset, color='red', alpha=0.3)
                self.boxrenderer.plot_clusters(_, color='grey', alpha=0.2)
                self.boxrenderer.set_title("greedy expansion\n%s" % cluster)

        ret = { 'dim': curset }
        return ret
|
|
#!/usr/bin/python2.5
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An example application that uses the transitfeed module.
You must provide a Google Maps API key.
"""
import BaseHTTPServer, sys, urlparse
import bisect
from gtfsscheduleviewer.marey_graph import MareyGraph
import gtfsscheduleviewer
import mimetypes
import os.path
import re
import signal
import simplejson
import socket
import time
import transitfeed
from transitfeed import util
import urllib
# By default Windows kills Python with Ctrl+Break. Instead make Ctrl+Break
# raise a KeyboardInterrupt.
if hasattr(signal, 'SIGBREAK'):
  signal.signal(signal.SIGBREAK, signal.default_int_handler)

# Serve .vbs files as plain text (mimetypes has no default mapping for them).
mimetypes.add_type('text/plain', '.vbs')
class ResultEncoder(simplejson.JSONEncoder):
  """JSON encoder that serializes any iterable object as a JSON list."""

  def default(self, obj):
    """Convert iterables to lists; defer everything else to the base class."""
    try:
      iterator = iter(obj)
    except TypeError:
      return simplejson.JSONEncoder.default(self, obj)
    return list(iterator)
# Code taken from
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/425210/index_txt
# An alternate approach is shown at
# http://mail.python.org/pipermail/python-list/2003-July/212751.html
# but it requires multiple threads. A sqlite object can only be used from one
# thread.
class StoppableHTTPServer(BaseHTTPServer.HTTPServer):
  """HTTPServer that can be stopped from another thread via stop().

  The listening socket gets a 1-second timeout so get_request() wakes up
  periodically to re-check the _run flag instead of blocking forever.
  """

  def server_bind(self):
    BaseHTTPServer.HTTPServer.server_bind(self)
    # Short accept timeout so the serve loop can notice stop() promptly.
    self.socket.settimeout(1)
    self._run = True

  def get_request(self):
    # Poll accept() until a connection arrives or stop() clears _run.
    # NOTE(review): after stop() this falls through and returns None,
    # which the BaseHTTPServer machinery may not expect -- confirm.
    while self._run:
      try:
        sock, addr = self.socket.accept()
        # Accepted sockets should block normally for request handling.
        sock.settimeout(None)
        return (sock, addr)
      except socket.timeout:
        pass

  def stop(self):
    # Request the serve loop to exit after the current iteration.
    self._run = False

  def serve(self):
    # Handle requests one at a time until stop() is called.
    while self._run:
      self.handle_request()
def StopToTuple(stop):
  """Return tuple as expected by javascript function addStopMarkerFromList"""
  lat = float(stop.stop_lat)
  lon = float(stop.stop_lon)
  return (stop.stop_id, stop.stop_name, lat, lon, stop.location_type)
class ScheduleRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """HTTP handler serving the schedule-viewer UI, static files and a small
  JSON API backed by the loaded transitfeed.Schedule.  (Python 2 code.)

  URL space: '/' -> home page, '/json/<name>' -> handle_json_GET_<name>,
  '/file/<name>' -> static file, '/<name>' -> handle_GET_<name>.
  """

  def do_GET(self):
    """Dispatch a GET request according to the URL space described above."""
    scheme, host, path, x, params, fragment = urlparse.urlparse(self.path)
    parsed_params = {}
    # Query string is parsed by hand: 'a=b&c' -> {'a': u'b', 'c': ''}.
    for k in params.split('&'):
      k = urllib.unquote(k)
      if '=' in k:
        k, v = k.split('=', 1)
        parsed_params[k] = unicode(v, 'utf8')
      else:
        parsed_params[k] = ''

    if path == '/':
      return self.handle_GET_home()

    m = re.match(r'/json/([a-z]{1,64})', path)
    if m:
      handler_name = 'handle_json_GET_%s' % m.group(1)
      handler = getattr(self, handler_name, None)
      if callable(handler):
        return self.handle_json_wrapper_GET(handler, parsed_params)

    # Restrict allowable file names to prevent relative path attacks etc
    m = re.match(r'/file/([a-z0-9_-]{1,64}\.?[a-z0-9_-]{1,64})$', path)
    if m and m.group(1):
      try:
        f, mime_type = self.OpenFile(m.group(1))
        return self.handle_static_file_GET(f, mime_type)
      except IOError, e:
        print "Error: unable to open %s" % m.group(1)
        # Ignore and treat as 404

    m = re.match(r'/([a-z]{1,64})', path)
    if m:
      handler_name = 'handle_GET_%s' % m.group(1)
      handler = getattr(self, handler_name, None)
      if callable(handler):
        return handler(parsed_params)

    return self.handle_GET_default(parsed_params, path)

  def OpenFile(self, filename):
    """Try to open filename in the static files directory of this server.
    Return a tuple (file object, string mime_type) or raise an exception."""
    (mime_type, encoding) = mimetypes.guess_type(filename)
    assert mime_type
    # A crude guess of when we should use binary mode. Without it non-unix
    # platforms may corrupt binary files.
    if mime_type.startswith('text/'):
      mode = 'r'
    else:
      mode = 'rb'
    return open(os.path.join(self.server.file_dir, filename), mode), mime_type

  def handle_GET_default(self, parsed_params, path):
    # Fallback for unrecognized paths.
    self.send_error(404)

  def handle_static_file_GET(self, fh, mime_type):
    """Send the whole contents of *fh* with the given MIME type."""
    content = fh.read()
    self.send_response(200)
    self.send_header('Content-Type', mime_type)
    self.send_header('Content-Length', str(len(content)))
    self.end_headers()
    self.wfile.write(content)

  def AllowEditMode(self):
    # Subclasses may override to enable editing in the UI.
    return False

  def handle_GET_home(self):
    """Render index.html with a minimal [placeholder] template substitution."""
    schedule = self.server.schedule
    (min_lat, min_lon, max_lat, max_lon) = schedule.GetStopBoundingBox()
    forbid_editing = ('true', 'false')[self.AllowEditMode()]

    agency = ', '.join(a.agency_name for a in schedule.GetAgencyList()).encode('utf-8')

    key = self.server.key
    host = self.server.host

    # A very simple template system. For a fixed set of values replace [xxx]
    # with the value of local variable xxx
    f, _ = self.OpenFile('index.html')
    content = f.read()
    for v in ('agency', 'min_lat', 'min_lon', 'max_lat', 'max_lon', 'key',
              'host', 'forbid_editing'):
      content = content.replace('[%s]' % v, str(locals()[v]))

    self.send_response(200)
    self.send_header('Content-Type', 'text/html')
    self.send_header('Content-Length', str(len(content)))
    self.end_headers()
    self.wfile.write(content)

  def handle_json_GET_routepatterns(self, params):
    """Given a route_id generate a list of patterns of the route. For each
    pattern include some basic information and a few sample trips."""
    schedule = self.server.schedule
    route = schedule.GetRoute(params.get('route', None))
    if not route:
      self.send_error(404)
      return
    time = int(params.get('time', 0))
    date = params.get('date', "")
    sample_size = 3  # For each pattern return the start time for this many trips

    pattern_id_trip_dict = route.GetPatternIdTripDict()
    patterns = []

    for pattern_id, trips in pattern_id_trip_dict.items():
      time_stops = trips[0].GetTimeStops()
      if not time_stops:
        continue
      has_non_zero_trip_type = False;

      # Iterating over a copy so we can remove from trips inside the loop
      trips_with_service = []
      for trip in trips:
        service_id = trip.service_id
        service_period = schedule.GetServicePeriod(service_id)

        if date and not service_period.IsActiveOn(date):
          continue
        trips_with_service.append(trip)

        if trip['trip_type'] and trip['trip_type'] != '0':
          has_non_zero_trip_type = True

      # We're only interested in the trips that do run on the specified date
      trips = trips_with_service

      name = u'%s to %s, %d stops' % (time_stops[0][2].stop_name, time_stops[-1][2].stop_name, len(time_stops))
      transitfeed.SortListOfTripByTime(trips)

      num_trips = len(trips)
      if num_trips <= sample_size:
        start_sample_index = 0
        num_after_sample = 0
      else:
        # Will return sample_size trips that start after the 'time' param.

        # Linear search because I couldn't find a built-in way to do a binary
        # search with a custom key.
        start_sample_index = len(trips)
        for i, trip in enumerate(trips):
          if trip.GetStartTime() >= time:
            start_sample_index = i
            break

        num_after_sample = num_trips - (start_sample_index + sample_size)
        if num_after_sample < 0:
          # Less than sample_size trips start after 'time' so return all the
          # last sample_size trips.
          num_after_sample = 0
          start_sample_index = num_trips - sample_size

      sample = []
      for t in trips[start_sample_index:start_sample_index + sample_size]:
        sample.append( (t.GetStartTime(), t.trip_id) )

      patterns.append((name, pattern_id, start_sample_index, sample,
                       num_after_sample, (0,1)[has_non_zero_trip_type]))

    patterns.sort()
    return patterns

  def handle_json_wrapper_GET(self, handler, parsed_params):
    """Call handler and output the return value in JSON."""
    schedule = self.server.schedule
    result = handler(parsed_params)
    content = ResultEncoder().encode(result)
    self.send_response(200)
    self.send_header('Content-Type', 'text/plain')
    self.send_header('Content-Length', str(len(content)))
    self.end_headers()
    self.wfile.write(content)

  def handle_json_GET_routes(self, params):
    """Return a list of all routes."""
    schedule = self.server.schedule
    result = []
    for r in schedule.GetRouteList():
      result.append( (r.route_id, r.route_short_name, r.route_long_name) )
    # Sort by (short_name, long_name) for a stable, human-friendly order.
    result.sort(key = lambda x: x[1:3])
    return result

  def handle_json_GET_routerow(self, params):
    """Return [field names, field values] for one route."""
    schedule = self.server.schedule
    route = schedule.GetRoute(params.get('route', None))
    return [transitfeed.Route._FIELD_NAMES, route.GetFieldValuesTuple()]

  def handle_json_GET_triprows(self, params):
    """Return a list of rows from the feed file that are related to this
    trip."""
    schedule = self.server.schedule
    try:
      trip = schedule.GetTrip(params.get('trip', None))
    except KeyError:
      # if a non-existent trip is searched for, then return nothing
      return
    route = schedule.GetRoute(trip.route_id)
    trip_row = dict(trip.iteritems())
    route_row = dict(route.iteritems())
    return [['trips.txt', trip_row], ['routes.txt', route_row]]

  def handle_json_GET_tripstoptimes(self, params):
    """Return [stops, arrival_times, departure_times] for one trip."""
    schedule = self.server.schedule
    try:
      trip = schedule.GetTrip(params.get('trip'))
    except KeyError:
      # if a non-existent trip is searched for, then return nothing
      return
    time_stops = trip.GetTimeStops()
    stops = []
    arrival_times = []
    departure_times = []
    for arr,dep,stop in time_stops:
      stops.append(StopToTuple(stop))
      arrival_times.append(arr)
      departure_times.append(dep)
    return [stops, arrival_times, departure_times]

  def handle_json_GET_tripshape(self, params):
    """Return the (lat, lon) polyline for a trip: its shape when it has one,
    otherwise the sequence of its stop locations."""
    schedule = self.server.schedule
    try:
      trip = schedule.GetTrip(params.get('trip'))
    except KeyError:
      # if a non-existent trip is searched for, then return nothing
      return
    points = []
    if trip.shape_id:
      shape = schedule.GetShape(trip.shape_id)
      for (lat, lon, dist) in shape.points:
        points.append((lat, lon))
    else:
      time_stops = trip.GetTimeStops()
      for arr,dep,stop in time_stops:
        points.append((stop.stop_lat, stop.stop_lon))
    return points

  def handle_json_GET_neareststops(self, params):
    """Return a list of the nearest 'limit' stops to 'lat', 'lon'"""
    schedule = self.server.schedule
    lat = float(params.get('lat'))
    lon = float(params.get('lon'))
    limit = int(params.get('limit'))
    stops = schedule.GetNearestStops(lat=lat, lon=lon, n=limit)
    return [StopToTuple(s) for s in stops]

  def handle_json_GET_boundboxstops(self, params):
    """Return a list of up to 'limit' stops within bounding box with 'n','e'
    and 's','w' in the NE and SW corners. Does not handle boxes crossing
    longitude line 180."""
    schedule = self.server.schedule
    n = float(params.get('n'))
    e = float(params.get('e'))
    s = float(params.get('s'))
    w = float(params.get('w'))
    limit = int(params.get('limit'))
    stops = schedule.GetStopsInBoundingBox(north=n, east=e, south=s, west=w, n=limit)
    return [StopToTuple(s) for s in stops]

  def handle_json_GET_stopsearch(self, params):
    """Case-insensitive substring search over stop ids and names."""
    schedule = self.server.schedule
    query = params.get('q', None).lower()
    matches = []
    for s in schedule.GetStopList():
      if s.stop_id.lower().find(query) != -1 or s.stop_name.lower().find(query) != -1:
        matches.append(StopToTuple(s))
    return matches

  def handle_json_GET_stoptrips(self, params):
    """Given a stop_id and time in seconds since midnight return the next
    trips to visit the stop."""
    schedule = self.server.schedule
    stop = schedule.GetStop(params.get('stop', None))
    time = int(params.get('time', 0))
    date = params.get('date', "")
    time_trips = stop.GetStopTimeTrips(schedule)
    time_trips.sort()  # OPT: use bisect.insort to make this O(N*ln(N)) -> O(N)
    # Keep the first 5 after param 'time'.
    # Need make a tuple to find correct bisect point
    time_trips = time_trips[bisect.bisect_left(time_trips, (time, 0)):]
    time_trips = time_trips[:5]
    # TODO: combine times for a route to show next 2 departure times
    result = []
    for time, (trip, index), tp in time_trips:
      service_id = trip.service_id
      service_period = schedule.GetServicePeriod(service_id)
      if date and not service_period.IsActiveOn(date):
        continue
      headsign = None
      # Find the most recent headsign from the StopTime objects
      for stoptime in trip.GetStopTimes()[index::-1]:
        if stoptime.stop_headsign:
          headsign = stoptime.stop_headsign
          break
      # If stop_headsign isn't found, look for a trip_headsign
      if not headsign:
        headsign = trip.trip_headsign
      route = schedule.GetRoute(trip.route_id)
      trip_name = ''
      if route.route_short_name:
        trip_name += route.route_short_name
      if route.route_long_name:
        if len(trip_name):
          trip_name += " - "
        trip_name += route.route_long_name
      if headsign:
        trip_name += " (Direction: %s)" % headsign
      result.append((time, (trip.trip_id, trip_name, trip.service_id), tp))
    return result

  def handle_GET_ttablegraph(self,params):
    """Draw a Marey graph in SVG for a pattern (collection of trips in a route
    that visit the same sequence of stops)."""
    schedule = self.server.schedule
    marey = MareyGraph()
    trip = schedule.GetTrip(params.get('trip', None))
    route = schedule.GetRoute(trip.route_id)
    height = int(params.get('height', 300))

    if not route:
      print 'no such route'
      self.send_error(404)
      return

    pattern_id_trip_dict = route.GetPatternIdTripDict()
    pattern_id = trip.pattern_id
    if pattern_id not in pattern_id_trip_dict:
      print 'no pattern %s found in %s' % (pattern_id, pattern_id_trip_dict.keys())
      self.send_error(404)
      return
    triplist = pattern_id_trip_dict[pattern_id]

    pattern_start_time = min((t.GetStartTime() for t in triplist))
    pattern_end_time = max((t.GetEndTime() for t in triplist))

    marey.SetSpan(pattern_start_time,pattern_end_time)
    marey.Draw(triplist[0].GetPattern(), triplist, height)
    # NOTE(review): Draw is invoked twice -- once with the pattern/trips and
    # once with no arguments to obtain the SVG; confirm against the
    # MareyGraph API whether the first call populates internal state.
    content = marey.Draw()

    self.send_response(200)
    self.send_header('Content-Type', 'image/svg+xml')
    self.send_header('Content-Length', str(len(content)))
    self.end_headers()
    self.wfile.write(content)
def FindPy2ExeBase():
  """If this is running in py2exe return the install directory else return
  None"""
  # py2exe puts gtfsscheduleviewer in library.zip. For py2exe setup.py is
  # configured to put the data next to library.zip.
  windows_ending = gtfsscheduleviewer.__file__.find('\\library.zip\\')
  if windows_ending != -1:
    # NOTE(review): the offset is computed from gtfsscheduleviewer.__file__
    # but sliced out of transitfeed.__file__ -- this only works if both
    # modules live in the same library.zip; confirm that assumption.
    return transitfeed.__file__[:windows_ending]
  else:
    return None
def FindDefaultFileDir():
  """Return the path of the directory containing the static files. By default
  the directory is called 'files'. The location depends on where setup.py put
  it."""
  base = FindPy2ExeBase()
  if base:
    # py2exe layout: data is installed next to library.zip.
    return os.path.join(base, 'schedule_viewer_files')
  else:
    # For all other distributions 'files' is in the gtfsscheduleviewer
    # directory.
    base = os.path.dirname(gtfsscheduleviewer.__file__)  # Strip __init__.py
    return os.path.join(base, 'files')
def GetDefaultKeyFilePath():
  """In py2exe return absolute path of file in the base directory and in all
  other distributions return relative path 'key.txt'"""
  base = FindPy2ExeBase()
  if base:
    return os.path.join(base, 'key.txt')
  return 'key.txt'
def main(RequestHandlerClass = ScheduleRequestHandler):
  """Parse command-line options, load the GTFS feed and run the web server.

  (Python 2 code: print statements, raw_input.)
  """
  usage = \
'''%prog [options] [<input GTFS.zip>]
Runs a webserver that lets you explore a <input GTFS.zip> in your browser.
If <input GTFS.zip> is omited the filename is read from the console. Dragging
a file into the console may enter the filename.
For more information see
http://code.google.com/p/googletransitdatafeed/wiki/ScheduleViewer
'''
  parser = util.OptionParserLongError(
      usage=usage, version='%prog '+transitfeed.__version__)
  parser.add_option('--feed_filename', '--feed', dest='feed_filename',
                    help='file name of feed to load')
  parser.add_option('--key', dest='key',
                    help='Google Maps API key or the name '
                    'of a text file that contains an API key')
  parser.add_option('--host', dest='host', help='Host name of Google Maps')
  parser.add_option('--port', dest='port', type='int',
                    help='port on which to listen')
  parser.add_option('--file_dir', dest='file_dir',
                    help='directory containing static files')
  parser.add_option('-n', '--noprompt', action='store_false',
                    dest='manual_entry',
                    help='disable interactive prompts')
  parser.set_defaults(port=8765,
                      host='maps.google.com',
                      file_dir=FindDefaultFileDir(),
                      manual_entry=True)
  (options, args) = parser.parse_args()

  # Sanity-check the static file directory before doing anything expensive.
  if not os.path.isfile(os.path.join(options.file_dir, 'index.html')):
    print "Can't find index.html with --file_dir=%s" % options.file_dir
    exit(1)

  if not options.feed_filename and len(args) == 1:
    options.feed_filename = args[0]

  # Fall back to an interactive prompt unless --noprompt was given.
  if not options.feed_filename and options.manual_entry:
    options.feed_filename = raw_input('Enter Feed Location: ').strip('"')

  # Key resolution order: --key value, key file named by --key,
  # default key file, finally the built-in key below.
  default_key_file = GetDefaultKeyFilePath()
  if not options.key and os.path.isfile(default_key_file):
    options.key = open(default_key_file).read().strip()

  if options.key and os.path.isfile(options.key):
    options.key = open(options.key).read().strip()

  # This key is registered to gtfs.schedule.viewer@gmail.com
  if not options.key:
    options.key = 'AIzaSyAZTTRO6RC6LQyKCD3JODhxbClsZl95P9U'

  schedule = transitfeed.Schedule(problem_reporter=transitfeed.ProblemReporter())
  print 'Loading data from feed "%s"...' % options.feed_filename
  print '(this may take a few minutes for larger cities)'
  schedule.Load(options.feed_filename)

  server = StoppableHTTPServer(server_address=('', options.port),
                               RequestHandlerClass=RequestHandlerClass)
  server.key = options.key
  server.schedule = schedule
  server.file_dir = options.file_dir
  server.host = options.host
  server.feed_path = options.feed_filename

  print ("To view, point your browser at http://localhost:%d/" %
         (server.server_port))
  # NOTE(review): serve_forever() bypasses StoppableHTTPServer.serve(), so
  # stop() has no effect here -- confirm whether serve() was intended.
  server.serve_forever()
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
"""An interface to the Earth Engine batch processing system.
Use the static methods on the Export class to create export tasks, call start()
on them to launch them, then poll status() to find out when they are finished.
The function styling uses camelCase to match the JavaScript names.
"""
# pylint: disable=g-bad-name
# pylint: disable=g-bad-import-order
import json
import six
from . import data
from . import ee_exception
from . import geometry
class Task(object):
  """Represents one batch job on the EE batch processing backend."""

  def __init__(self, taskId, config=None):
    """Builds a Task wrapper around an existing task ID.

    Not intended for direct construction by users; obtain instances via
    Task.list(), the Export factory methods, or by unpickling a previously
    pickled Task. For a quick status check without a full Task object,
    ee.data.getTaskStatus() may be more appropriate.

    Args:
      taskId: The task ID, originally obtained through ee.data.newTaskId().
      config: Optional task configuration dictionary; required only when
          start() will be invoked. Fields shared by all task types:
          'type' (a Task.Type value), 'state' (a Task.State value),
          'description' (free-form task name) and 'sourceURL' (optional URL
          of the generating script). Task-type-specific fields may also be
          present.
    """
    self.id = taskId
    # Copy only when a non-empty dict was supplied; falsy values (None, {})
    # are stored unchanged, matching the original `config and config.copy()`.
    self.config = config.copy() if config else config

  class Type(object):
    EXPORT_IMAGE = 'EXPORT_IMAGE'
    EXPORT_MAP = 'EXPORT_TILES'
    EXPORT_TABLE = 'EXPORT_FEATURES'
    EXPORT_VIDEO = 'EXPORT_VIDEO'

  class State(object):
    UNSUBMITTED = 'UNSUBMITTED'
    READY = 'READY'
    RUNNING = 'RUNNING'
    COMPLETED = 'COMPLETED'
    FAILED = 'FAILED'
    CANCEL_REQUESTED = 'CANCEL_REQUESTED'
    CANCELLED = 'CANCELLED'

  # Destinations that export tasks may write to.
  class ExportDestination(object):
    DRIVE = 'DRIVE'
    GCS = 'GOOGLE_CLOUD_STORAGE'
    ASSET = 'ASSET'

  def start(self):
    """Submits the task for processing. No-op for started tasks."""
    if not self.config:
      raise ee_exception.EEException(
          'Task config must be specified for tasks to be started.')
    data.startProcessing(self.id, self.config)

  def status(self):
    """Queries the server for this task's current status.

    Returns:
      A dict describing the task as it appears on the EE server. Always
      contains 'state' (one of Task.State); typically also
      'creation_timestamp_ms' and 'update_timestamp_ms', plus, depending on
      state, 'output_url' (COMPLETED) or 'error_message' (FAILED), and
      possibly other fields.
    """
    info = data.getTaskStatus(self.id)[0]
    # Tasks the server has never seen are reported as UNKNOWN; surface
    # those to callers as UNSUBMITTED.
    if info['state'] == 'UNKNOWN':
      info['state'] = Task.State.UNSUBMITTED
    return info

  def active(self):
    """Returns True while the task has not reached a terminal state."""
    non_terminal = (Task.State.READY,
                    Task.State.RUNNING,
                    Task.State.CANCEL_REQUESTED)
    return self.status()['state'] in non_terminal

  def cancel(self):
    """Asks the server to cancel this task."""
    data.cancelTask(self.id)

  @staticmethod
  def list():
    """Fetches the current user's tasks from the EE server.

    Includes all currently running tasks as well as recently canceled or
    failed ones.

    Returns:
      A list of Task objects.
    """
    return [
        Task(status['id'], {
            'type': status.get('task_type'),
            'description': status.get('description'),
            'state': status.get('state'),
        }) for status in data.getTaskList()
    ]

  def __repr__(self):
    """Returns a human-readable representation of the task."""
    if not self.config:
      return '<Task "%s">' % self.id
    return '<Task %(type)s: %(description)s (%(state)s)>' % self.config
class Export(object):
  """A namespace of static factory methods that create export Tasks.

  Used purely as a container for the nested image/map/table/video factory
  classes below; it is never instantiated.
  """
  def __init__(self):
    """Forbids class instantiation."""
    # Export is a namespace only; constructing it is always an error.
    raise AssertionError('This class cannot be instantiated.')
  class image(object):
    """A static class with methods to start image export tasks."""
    def __init__(self):
      """Forbids class instantiation."""
      # This nested class only namespaces the image-export factories below.
      raise AssertionError('This class cannot be instantiated.')
def __new__(cls, image, description='myExportImageTask', config=None):
"""Creates a task to export an EE Image to Google Drive or Cloud Storage.
Args:
image: The image to be exported.
description: Human-readable name of the task.
config: A dictionary that will be copied and used as parameters
for the task:
- region: The lon,lat coordinates for a LinearRing or Polygon
specifying the region to export. Can be specified as a nested
lists of numbers or a serialized string. Defaults to the image's
region.
- scale: The resolution in meters per pixel.
Defaults to the native resolution of the image assset unless
a crs_transform is specified.
- maxPixels: The maximum allowed number of pixels in the exported
image. The task will fail if the exported region covers
more pixels in the specified projection. Defaults to 100,000,000.
- crs: The coordinate reference system of the exported image's
projection. Defaults to the image's default projection.
- crs_transform: A comma-separated string of 6 numbers describing
the affine transform of the coordinate reference system of the
exported image's projection, in the order: xScale, yShearing,
xShearing, yScale, xTranslation and yTranslation. Defaults to
the image's native CRS transform.
- dimensions: The dimensions of the exported image. Takes either a
single positive integer as the maximum dimension or
"WIDTHxHEIGHT" where WIDTH and HEIGHT are each positive integers.
- skipEmptyTiles: If true, skip writing empty (i.e. fully-masked)
image tiles. Defaults to false.
If exporting to Google Drive (default):
- driveFolder: The name of a unique folder in your Drive account to
export into. Defaults to the root of the drive.
- driveFileNamePrefix: The Google Drive filename for the export.
Defaults to the name of the task.
If exporting to Google Cloud Storage:
- outputBucket: The name of a Cloud Storage bucket for the export.
- outputPrefix: Cloud Storage object name prefix for the export.
Returns:
An unstarted Task that exports the image.
"""
config = (config or {}).copy()
if 'driveFileNamePrefix' not in config and 'outputBucket' not in config:
config['driveFileNamePrefix'] = description
if 'region' in config:
# Convert the region to a serialized form, if necessary.
config['region'] = _GetSerializedRegion(config['region'])
return _CreateTask(
Task.Type.EXPORT_IMAGE, image, description, config)
    # Disable argument usage check; arguments are accessed using locals().
    # pylint: disable=unused-argument
    @staticmethod
    def toAsset(image, description='myExportImageTask', assetId=None,
                pyramidingPolicy=None, dimensions=None, region=None,
                scale=None, crs=None, crsTransform=None, maxPixels=None,
                **kwargs):
      """Creates a task to export an EE Image to an EE Asset.

      Args:
        image: The image to be exported.
        description: Human-readable name of the task.
        assetId: The destination asset ID.
        pyramidingPolicy: The pyramiding policy to apply to each band in the
            image, a dictionary keyed by band name. Values must be
            one of: "mean", "sample", "min", "max", or "mode".
            Defaults to "mean". A special key, ".default", may be used to
            change the default for all bands.
        dimensions: The dimensions of the exported image. Takes either a
            single positive integer as the maximum dimension or
            "WIDTHxHEIGHT" where WIDTH and HEIGHT are each positive integers.
        region: The lon,lat coordinates for a LinearRing or Polygon
            specifying the region to export. Can be specified as a nested
            lists of numbers or a serialized string. Defaults to the image's
            region.
        scale: The resolution in meters per pixel. Defaults to the
            native resolution of the image assset unless a crsTransform
            is specified.
        crs: The coordinate reference system of the exported image's
            projection. Defaults to the image's default projection.
        crsTransform: A comma-separated string of 6 numbers describing
            the affine transform of the coordinate reference system of the
            exported image's projection, in the order: xScale, yShearing,
            xShearing, yScale, xTranslation and yTranslation. Defaults to
            the image's native CRS transform.
        maxPixels: The maximum allowed number of pixels in the exported
            image. The task will fail if the exported region covers more
            pixels in the specified projection. Defaults to 100,000,000.
        **kwargs: Holds other keyword arguments that may have been deprecated
            such as 'crs_transform'.

      Returns:
        An unstarted Task that exports the image to an Earth Engine asset.
      """
      # The parameter names themselves become config keys (via locals()),
      # so no local variables may be introduced before the next call.
      # _CopyDictFilterNone must be called first because it copies locals to
      # support deprecated arguments.
      config = _CopyDictFilterNone(locals())
      _ConvertToServerParams(config, 'image', Task.ExportDestination.ASSET)
      if 'region' in config:
        # Convert the region to a serialized form, if necessary.
        config['region'] = _GetSerializedRegion(config['region'])
      return _CreateTask(
          Task.Type.EXPORT_IMAGE, image, description, config)
    # Disable argument usage check; arguments are accessed using locals().
    # pylint: disable=unused-argument
    @staticmethod
    def toCloudStorage(image, description='myExportImageTask',
                       bucket=None, fileNamePrefix=None,
                       dimensions=None, region=None, scale=None,
                       crs=None, crsTransform=None, maxPixels=None,
                       shardSize=None, fileDimensions=None,
                       skipEmptyTiles=None, fileFormat=None, formatOptions=None,
                       **kwargs):
      """Creates a task to export an EE Image to Google Cloud Storage.

      Args:
        image: The image to be exported.
        description: Human-readable name of the task.
        bucket: The name of a Cloud Storage bucket for the export.
        fileNamePrefix: Cloud Storage object name prefix for the export.
            Defaults to the name of the task.
        dimensions: The dimensions of the exported image. Takes either a
            single positive integer as the maximum dimension or
            "WIDTHxHEIGHT" where WIDTH and HEIGHT are each positive integers.
        region: The lon,lat coordinates for a LinearRing or Polygon
            specifying the region to export. Can be specified as a nested
            lists of numbers or a serialized string. Defaults to the image's
            region.
        scale: The resolution in meters per pixel. Defaults to the
            native resolution of the image assset unless a crsTransform
            is specified.
        crs: The coordinate reference system of the exported image's
            projection. Defaults to the image's default projection.
        crsTransform: A comma-separated string of 6 numbers describing
            the affine transform of the coordinate reference system of the
            exported image's projection, in the order: xScale, yShearing,
            xShearing, yScale, xTranslation and yTranslation. Defaults to
            the image's native CRS transform.
        maxPixels: The maximum allowed number of pixels in the exported
            image. The task will fail if the exported region covers more
            pixels in the specified projection. Defaults to 100,000,000.
        shardSize: Size in pixels of the shards in which this image will be
            computed. Defaults to 256.
        fileDimensions: The dimensions in pixels of each image file, if the
            image is too large to fit in a single file. May specify a
            single number to indicate a square shape, or a tuple of two
            dimensions to indicate (width,height). Note that the image will
            still be clipped to the overall image dimensions. Must be a
            multiple of shardSize.
        skipEmptyTiles: If true, skip writing empty (i.e. fully-masked)
            image tiles. Defaults to false.
        fileFormat: The string file format to which the image is exported.
            Currently only 'GeoTIFF' and 'TFRecord' are supported, defaults
            to 'GeoTIFF'.
        formatOptions: A dictionary of string keys to format specific options.
        **kwargs: Holds other keyword arguments that may have been deprecated
            such as 'crs_transform'.

      Returns:
        An unstarted Task that exports the image to Google Cloud Storage.
      """
      # The parameter names themselves become config keys (via locals()),
      # so no local variables may be introduced before the next call.
      # _CopyDictFilterNone must be called first because it copies locals to
      # support deprecated arguments.
      config = _CopyDictFilterNone(locals())
      _ConvertToServerParams(config, 'image', Task.ExportDestination.GCS)
      # Flatten 'formatOptions' into prefixed top-level server parameters.
      ConvertFormatSpecificParams(config)
      if 'region' in config:
        # Convert the region to a serialized form, if necessary.
        config['region'] = _GetSerializedRegion(config['region'])
      return _CreateTask(
          Task.Type.EXPORT_IMAGE, image, description, config)
    @staticmethod
    def toDrive(image, description='myExportImageTask', folder=None,
                fileNamePrefix=None, dimensions=None, region=None,
                scale=None, crs=None, crsTransform=None,
                maxPixels=None, shardSize=None, fileDimensions=None,
                skipEmptyTiles=None, fileFormat=None, formatOptions=None,
                **kwargs):
      """Creates a task to export an EE Image to Drive.

      Args:
        image: The image to be exported.
        description: Human-readable name of the task.
        folder: The name of a unique folder in your Drive account to
            export into. Defaults to the root of the drive.
        fileNamePrefix: The Google Drive filename for the export.
            Defaults to the name of the task.
        dimensions: The dimensions of the exported image. Takes either a
            single positive integer as the maximum dimension or
            "WIDTHxHEIGHT" where WIDTH and HEIGHT are each positive integers.
        region: The lon,lat coordinates for a LinearRing or Polygon
            specifying the region to export. Can be specified as a nested
            lists of numbers or a serialized string. Defaults to the image's
            region.
        scale: The resolution in meters per pixel. Defaults to the
            native resolution of the image assset unless a crsTransform
            is specified.
        crs: The coordinate reference system of the exported image's
            projection. Defaults to the image's default projection.
        crsTransform: A comma-separated string of 6 numbers describing
            the affine transform of the coordinate reference system of the
            exported image's projection, in the order: xScale, yShearing,
            xShearing, yScale, xTranslation and yTranslation. Defaults to
            the image's native CRS transform.
        maxPixels: The maximum allowed number of pixels in the exported
            image. The task will fail if the exported region covers more
            pixels in the specified projection. Defaults to 100,000,000.
        shardSize: Size in pixels of the shards in which this image will be
            computed. Defaults to 256.
        fileDimensions: The dimensions in pixels of each image file, if the
            image is too large to fit in a single file. May specify a
            single number to indicate a square shape, or a tuple of two
            dimensions to indicate (width,height). Note that the image will
            still be clipped to the overall image dimensions. Must be a
            multiple of shardSize.
        skipEmptyTiles: If true, skip writing empty (i.e. fully-masked)
            image tiles. Defaults to false.
        fileFormat: The string file format to which the image is exported.
            Currently only 'GeoTIFF' and 'TFRecord' are supported, defaults
            to 'GeoTIFF'.
        formatOptions: A dictionary of string keys to format specific options.
        **kwargs: Holds other keyword arguments that may have been deprecated
            such as 'crs_transform', 'driveFolder', and 'driveFileNamePrefix'.

      Returns:
        An unstarted Task that exports the image to Drive.
      """
      # The parameter names themselves become config keys (via locals()),
      # so no local variables may be introduced before the next call.
      # _CopyDictFilterNone must be called first because it copies locals to
      # support deprecated arguments.
      config = _CopyDictFilterNone(locals())
      # fileNamePrefix should be defaulted before converting to server params.
      if 'fileNamePrefix' not in config:
        config['fileNamePrefix'] = description
      _ConvertToServerParams(config, 'image', Task.ExportDestination.DRIVE)
      # Flatten 'formatOptions' into prefixed top-level server parameters.
      ConvertFormatSpecificParams(config)
      if 'region' in config:
        # Convert the region to a serialized form, if necessary.
        config['region'] = _GetSerializedRegion(config['region'])
      return _CreateTask(
          Task.Type.EXPORT_IMAGE, image, description, config)
    # pylint: enable=unused-argument
  class map(object):
    """A static class with a method to start map-tile export tasks."""
    def __init__(self):
      """Forbids class instantiation."""
      # This nested class only namespaces the map-export factory below.
      raise AssertionError('This class cannot be instantiated.')
    # Disable argument usage check; arguments are accessed using locals().
    # pylint: disable=unused-argument
    @staticmethod
    def toCloudStorage(image, description='myExportMapTask', bucket=None,
                       fileFormat=None, path=None, writePublicTiles=None,
                       maxZoom=None, scale=None, minZoom=None,
                       region=None, skipEmptyTiles=None, **kwargs):
      """Creates a task to export an Image as a pyramid of map tiles.

      Exports a rectangular pyramid of map tiles for use with web map
      viewers. The map tiles will be accompanied by a reference
      index.html file that displays them using the Google Maps API,
      and an earth.html file for opening the map on Google Earth.

      Args:
        image: The image to export as tiles.
        description: Human-readable name of the task.
        bucket: The destination bucket to write to.
        fileFormat: The map tiles' file format, one of 'auto', 'png',
            or 'jpeg'. Defaults to 'auto', which means that opaque tiles
            will be encoded as 'jpg' and tiles with transparency will be
            encoded as 'png'.
        path: The string used as the output's path. A trailing '/'
            is optional. Defaults to the task's description.
        writePublicTiles: Whether to write public tiles instead of using the
            bucket's default object ACL. Defaults to True and requires the
            invoker to be an OWNER of bucket.
        maxZoom: The maximum zoom level of the map tiles to export.
        scale: The max image resolution in meters per pixel, as an
            alternative to 'maxZoom'. The scale will be converted to the
            most appropriate maximum zoom level at the equator.
        minZoom: The optional minimum zoom level of the map tiles to export.
        region: The lon,lat coordinates for a LinearRing or Polygon
            specifying the region to export. Can be specified as a nested
            lists of numbers or a serialized string. Map tiles will be
            produced in the rectangular region containing this geometry.
            Defaults to the image's region.
        skipEmptyTiles: If true, skip writing empty (i.e. fully-transparent)
            map tiles. Defaults to false.
        **kwargs: Holds other keyword arguments that may have been deprecated
            such as 'crs_transform'.

      Returns:
        An unstarted Task that exports the image to Google Cloud Storage.
      """
      # The parameter names themselves become config keys (via locals()),
      # so no local variables may be introduced before the next call.
      # _CopyDictFilterNone must be called first because it copies locals to
      # support deprecated arguments.
      config = _CopyDictFilterNone(locals())
      # The path is defaulted before converting to server params so that it
      # is properly converted into the server parameter 'outputPrefix'.
      if 'path' not in config:
        config['path'] = description
      _ConvertToServerParams(config, 'image', Task.ExportDestination.GCS)
      if 'fileFormat' not in config:
        config['fileFormat'] = 'auto'
      if 'writePublicTiles' not in config:
        config['writePublicTiles'] = True
      if 'region' in config:
        # Convert the region to a serialized form, if necessary.
        config['region'] = _GetSerializedRegion(config['region'])
      return _CreateTask(
          Task.Type.EXPORT_MAP, image, description, config)
    # pylint: enable=unused-argument
  class table(object):
    """A static class with methods to start table export tasks."""
    def __init__(self):
      """Forbids class instantiation."""
      # This nested class only namespaces the table-export factories below.
      raise AssertionError('This class cannot be instantiated.')
def __new__(cls, collection, description='myExportTableTask', config=None):
"""Export an EE FeatureCollection as a table.
The exported table will reside in Google Drive or Cloud Storage.
Args:
collection: The feature collection to be exported.
description: Human-readable name of the task.
config: A dictionary that will be copied and used as parameters
for the task:
- fileFormat: The output format: "CSV" (default), "GeoJSON", "KML",
or "KMZ".
If exporting to Google Drive (default):
- driveFolder: The name of a unique folder in your Drive
account to export into. Defaults to the root of the drive.
- driveFileNamePrefix: The Google Drive filename for the export.
Defaults to the name of the task.
If exporting to Google Cloud Storage:
- outputBucket: The name of a Cloud Storage bucket for the export.
- outputPrefix: Cloud Storage object name prefix for the export.
Returns:
An unstarted Task that exports the table.
"""
config = (config or {}).copy()
if 'driveFileNamePrefix' not in config and 'outputBucket' not in config:
config['driveFileNamePrefix'] = description
if 'fileFormat' not in config:
config['fileFormat'] = 'CSV'
return _CreateTask(
Task.Type.EXPORT_TABLE, collection, description, config)
    # Disable argument usage check; arguments are accessed using locals().
    # pylint: disable=unused-argument
    @staticmethod
    def toCloudStorage(collection, description='myExportTableTask',
                       bucket=None, fileNamePrefix=None,
                       fileFormat=None, selectors=None, **kwargs):
      """Creates a task to export a FeatureCollection to Google Cloud Storage.

      Args:
        collection: The feature collection to be exported.
        description: Human-readable name of the task.
        bucket: The name of a Cloud Storage bucket for the export.
        fileNamePrefix: Cloud Storage object name prefix for the export.
            Defaults to the name of the task.
        fileFormat: The output format: "CSV" (default), "GeoJSON", "KML",
            or "KMZ".
        selectors: The list of properties to include in the output, as a
            list of strings or a comma-separated string. By default, all
            properties are included.
        **kwargs: Holds other keyword arguments that may have been deprecated
            such as 'outputBucket'.

      Returns:
        An unstarted Task that exports the table.
      """
      # The parameter names themselves become config keys (via locals()),
      # so no local variables may be introduced before the next call.
      # _CopyDictFilterNone must be called first because it copies locals to
      # support deprecated arguments.
      config = _CopyDictFilterNone(locals())
      if 'fileFormat' not in config:
        config['fileFormat'] = 'CSV'
      _ConvertToServerParams(
          config, 'collection', Task.ExportDestination.GCS)
      return _CreateTask(
          Task.Type.EXPORT_TABLE, collection, description, config)
    @staticmethod
    def toDrive(collection, description='myExportTableTask',
                folder=None, fileNamePrefix=None, fileFormat=None,
                selectors=None, **kwargs):
      """Creates a task to export a FeatureCollection to Drive.

      Args:
        collection: The feature collection to be exported.
        description: Human-readable name of the task.
        folder: The name of a unique folder in your Drive account to
            export into. Defaults to the root of the drive.
        fileNamePrefix: The Google Drive filename for the export.
            Defaults to the name of the task.
        fileFormat: The output format: "CSV" (default), "GeoJSON", "KML",
            or "KMZ".
        selectors: The list of properties to include in the output, as a
            list of strings or a comma-separated string. By default, all
            properties are included.
        **kwargs: Holds other keyword arguments that may have been deprecated
            such as 'driveFolder' and 'driveFileNamePrefix'.

      Returns:
        An unstarted Task that exports the table.
      """
      # The parameter names themselves become config keys (via locals()),
      # so no local variables may be introduced before the next call.
      # _CopyDictFilterNone must be called first because it copies locals to
      # support deprecated arguments.
      config = _CopyDictFilterNone(locals())
      # fileNamePrefix should be defaulted before converting to server params.
      if 'fileNamePrefix' not in config:
        config['fileNamePrefix'] = description
      if 'fileFormat' not in config:
        config['fileFormat'] = 'CSV'
      _ConvertToServerParams(
          config, 'collection', Task.ExportDestination.DRIVE)
      return _CreateTask(
          Task.Type.EXPORT_TABLE, collection, description, config)
  class video(object):
    """A static class with methods to start video export tasks."""
    def __init__(self):
      """Forbids class instantiation."""
      # This nested class only namespaces the video-export factories below.
      raise AssertionError('This class cannot be instantiated.')
def __new__(cls, collection, description='myExportVideoTask', config=None):
"""Exports an EE ImageCollection as a video.
The exported video will reside in Google Drive or Cloud Storage.
Args:
collection: The image collection to be exported. The collection must
only contain RGB images.
description: Human-readable name of the task.
config: A dictionary of configuration parameters for the task:
- region: The lon,lat coordinates for a LinearRing or Polygon
specifying the region to export. Can be specified as a nested
lists of numbers or a serialized string. Defaults to the first
image's region.
- scale: The resolution in meters per pixel.
- crs: The coordinate reference system of the exported video's
projection. Defaults to SR-ORG:6627.
- crs_transform: A comma-separated string of 6 numbers describing
the affine transform of the coordinate reference system of the
exported video's projection, in the order: xScale, yShearing,
xShearing, yScale, xTranslation and yTranslation. Defaults to
the image collection's native CRS transform.
- dimensions: The dimensions of the exported video. Takes either a
single positive integer as the maximum dimension or "WIDTHxHEIGHT"
where WIDTH and HEIGHT are each positive integers.
- framesPerSecond: A number between .1 and 100 describing the
framerate of the exported video.
- maxPixels: The maximum number of pixels per frame.
Defaults to 1e8 pixels per frame. By setting this explicitly,
you may raise or lower the limit.
If exporting to Google Drive (default):
- driveFolder: The name of a unique folder in your Drive account to
export into. Defaults to the root of the drive.
- driveFileNamePrefix: The Google Drive filename for the export.
Defaults to the name of the task.
If exporting to Google Cloud Storage:
- outputBucket: The name of a Cloud Storage bucket for the export.
- outputPrefix: Cloud Storage object name prefix for the export.
Returns:
An unstarted Task that exports the video.
"""
config = (config or {}).copy()
if 'crs' not in config:
config['crs'] = 'SR-ORG:6627'
if 'driveFileNamePrefix' not in config and 'outputBucket' not in config:
config['driveFileNamePrefix'] = description
if 'region' in config:
# Convert the region to a serialized form, if necessary.
config['region'] = _GetSerializedRegion(config['region'])
return _CreateTask(
Task.Type.EXPORT_VIDEO, collection, description, config)
    # Disable argument usage check; arguments are accessed using locals().
    # pylint: disable=unused-argument
    @staticmethod
    def toCloudStorage(collection, description='myExportVideoTask',
                       bucket=None, fileNamePrefix=None, framesPerSecond=None,
                       dimensions=None, region=None, scale=None, crs=None,
                       crsTransform=None, maxPixels=None,
                       maxFrames=None, **kwargs):
      """Creates a task to export an ImageCollection video to Cloud Storage.

      Args:
        collection: The image collection to be exported. The collection must
            only contain RGB images.
        description: Human-readable name of the task.
        bucket: The name of a Cloud Storage bucket for the export.
        fileNamePrefix: Cloud Storage object name prefix for the export.
            Defaults to the task's description.
        framesPerSecond: A number between .1 and 100 describing the
            framerate of the exported video.
        dimensions: The dimensions of the exported video. Takes either a
            single positive integer as the maximum dimension or
            "WIDTHxHEIGHT" where WIDTH and HEIGHT are each positive integers.
        region: The lon,lat coordinates for a LinearRing or Polygon
            specifying the region to export. Can be specified as a nested
            lists of numbers or a serialized string. Defaults to the first
            image's region.
        scale: The resolution in meters per pixel.
        crs: The coordinate reference system of the exported video's
            projection. Defaults to SR-ORG:6627.
        crsTransform: A comma-separated string of 6 numbers describing
            the affine transform of the coordinate reference system of the
            exported video's projection, in the order: xScale, yShearing,
            xShearing, yScale, xTranslation and yTranslation. Defaults to
            the image collection's native CRS transform.
        maxPixels: The maximum number of pixels per frame.
            Defaults to 1e8 pixels per frame. By setting this explicitly,
            you may raise or lower the limit.
        maxFrames: The maximum number of frames to export.
            Defaults to 1000 frames. By setting this explicitly, you may
            raise or lower the limit.
        **kwargs: Holds other keyword arguments that may have been deprecated
            such as 'crs_transform'.

      Returns:
        An unstarted Task that exports the image collection
        to Google Cloud Storage.
      """
      # The parameter names themselves become config keys (via locals()),
      # so no local variables may be introduced before the next call.
      # _CopyDictFilterNone must be called first because it copies locals to
      # support deprecated arguments.
      config = _CopyDictFilterNone(locals())
      if 'crs' not in config:
        config['crs'] = 'SR-ORG:6627'
      if 'fileNamePrefix' not in config:
        config['fileNamePrefix'] = description
      _ConvertToServerParams(config, 'collection', Task.ExportDestination.GCS)
      if 'region' in config:
        # Convert the region to a serialized form, if necessary.
        config['region'] = _GetSerializedRegion(config['region'])
      return _CreateTask(
          Task.Type.EXPORT_VIDEO, collection, description, config)
    @staticmethod
    def toDrive(collection, description='myExportVideoTask',
                folder=None, fileNamePrefix=None, framesPerSecond=None,
                dimensions=None, region=None, scale=None, crs=None,
                crsTransform=None, maxPixels=None, maxFrames=None, **kwargs):
      """Creates a task to export an ImageCollection as a video to Drive.

      Args:
        collection: The image collection to be exported. The collection must
            only contain RGB images.
        description: Human-readable name of the task.
        folder: The name of a unique folder in your Drive account to
            export into. Defaults to the root of the drive.
        fileNamePrefix: The Google Drive filename for the export.
            Defaults to the name of the task.
        framesPerSecond: A number between .1 and 100 describing the
            framerate of the exported video.
        dimensions: The dimensions of the exported video. Takes either a
            single positive integer as the maximum dimension or
            "WIDTHxHEIGHT" where WIDTH and HEIGHT are each positive integers.
        region: The lon,lat coordinates for a LinearRing or Polygon
            specifying the region to export. Can be specified as a nested
            lists of numbers or a serialized string. Defaults to the first
            image's region.
        scale: The resolution in meters per pixel.
        crs: The coordinate reference system of the exported video's
            projection. Defaults to SR-ORG:6627.
        crsTransform: A comma-separated string of 6 numbers describing
            the affine transform of the coordinate reference system of the
            exported video's projection, in the order: xScale, yShearing,
            xShearing, yScale, xTranslation and yTranslation. Defaults to
            the image collection's native CRS transform.
        maxPixels: The maximum number of pixels per frame.
            Defaults to 1e8 pixels per frame. By setting this explicitly,
            you may raise or lower the limit.
        maxFrames: The maximum number of frames to export.
            Defaults to 1000 frames. By setting this explicitly, you may
            raise or lower the limit.
        **kwargs: Holds other keyword arguments that may have been deprecated
            such as 'crs_transform'.

      Returns:
        An unstarted Task that exports the image collection to Drive.
      """
      # The parameter names themselves become config keys (via locals()),
      # so no local variables may be introduced before the next call.
      # _CopyDictFilterNone must be called first because it copies locals to
      # support deprecated arguments.
      config = _CopyDictFilterNone(locals())
      if 'crs' not in config:
        config['crs'] = 'SR-ORG:6627'
      if 'fileNamePrefix' not in config:
        config['fileNamePrefix'] = description
      _ConvertToServerParams(config, 'collection', Task.ExportDestination.DRIVE)
      if 'region' in config:
        # Convert the region to a serialized form, if necessary.
        config['region'] = _GetSerializedRegion(config['region'])
      return _CreateTask(
          Task.Type.EXPORT_VIDEO, collection, description, config)
def _CheckConfigDisallowedPrefixes(config, prefix):
for key in config:
if key.startswith(prefix):
raise ee_exception.EEException(
'Export config parameter prefix \'{}\' disallowed, found \'{}\''.
format(prefix, key))
# Mapping from file formats to prefixes attached to format specific config.
FORMAT_PREFIX_MAP = {'GEOTIFF': 'tiff', 'TFRECORD': 'tfrecord'}
# Configuration field specifying file format for image exports.
IMAGE_FORMAT_FIELD = 'fileFormat'
# Image format-specific options dictionary config field.
IMAGE_FORMAT_OPTIONS_FIELD = 'formatOptions'
# Format-specific options permitted in formatOptions config parameter.
# Entries are already prefixed per FORMAT_PREFIX_MAP: e.g. a
# 'cloudOptimized' option for a GeoTIFF export is listed here as
# 'tiffCloudOptimized' (see ConvertFormatSpecificParams).
ALLOWED_FORMAT_OPTIONS = {
    'tiffCloudOptimized', 'tiffFileDimensions', 'tfrecordPatchDimensions',
    'tfrecordKernelSize', 'tfrecordCompressed', 'tfrecordMaxFileSize',
    'tfrecordDefaultValue', 'tfrecordTensorDepths', 'tfrecordSequenceData',
    'tfrecordCollapseBands', 'tfrecordMaskedThreshold'
}
def _ConvertConfigParams(config):
"""Converts numeric sequences into comma-separated string representations."""
updatedConfig = {}
for k, v in config.items():
if v and isinstance(v, (list, tuple)):
# Leave nested lists/tuples alone. We're only interested in converting
# lists of strings or numbers.
if not isinstance(v[0], (list, tuple)):
updatedConfig[k] = ','.join(str(e) for e in v)
return updatedConfig
# TODO(user): This method and its uses are very hack-y, and once we're using One
# Platform API we should stop sending arbitrary parameters from "options".
def ConvertFormatSpecificParams(configDict):
  """Flattens the format-options dict in configDict into prefixed parameters.

  For example:
    {'fileFormat': 'GeoTIFF', 'formatOptions': {'cloudOptimized': true}}
  becomes:
    {'fileFormat': 'GeoTIFF', 'tiffCloudOptimized': true}

  The dict is mutated in place. Option names are validated against
  ALLOWED_FORMAT_OPTIONS and checked so they cannot collide with top-level
  parameters once converted to their server-friendly, prefixed form.

  Args:
    configDict: A task config dict.

  Raises:
    EEException: If the file format is unsupported, a format option collides
        with a top-level parameter, or an option name is not recognized.
  """
  fmt = configDict.get(IMAGE_FORMAT_FIELD, 'GeoTIFF').upper()
  if fmt not in FORMAT_PREFIX_MAP:
    raise ee_exception.EEException(
        'Invalid file format. Currently only \'GeoTIFF\' and \'TFRecord\' is '
        'supported.')
  if IMAGE_FORMAT_OPTIONS_FIELD not in configDict:
    return
  options = configDict.pop(IMAGE_FORMAT_OPTIONS_FIELD)
  if set(options) & set(configDict):
    raise ee_exception.EEException(
        'Parameter specified at least twice: once in config, '
        'and once in format options.')
  prefix = FORMAT_PREFIX_MAP[fmt]
  # Top-level keys must not already look like prefixed format options.
  _CheckConfigDisallowedPrefixes(configDict, prefix)
  prefixed = {}
  for name, value in options.items():
    server_name = prefix + name[:1].upper() + name[1:]
    if server_name not in ALLOWED_FORMAT_OPTIONS:
      raise ee_exception.EEException(
          '\'{}\' is not a valid option for \'{}\'.'.format(name, fmt))
    prefixed[server_name] = value
  # Flat list/tuple option values are serialized to comma-joined strings.
  prefixed.update(_ConvertConfigParams(prefixed))
  configDict.update(prefixed)
def _CreateTask(task_type, ee_object, description, config):
  """Creates an export task.

  Args:
    task_type: The type of the task to create. One of Task.Type.
    ee_object: The object to export.
    description: Human-readable name of the task.
    config: Custom config fields for the task.

  Returns:
    An unstarted export Task.
  """
  task_config = {
      'type': task_type,
      'json': ee_object.serialize(),
      'description': description,
      'state': Task.State.UNSUBMITTED,
  }
  if config:
    task_config.update(config)
  task_id = data.newTaskId()[0]
  return Task(task_id, task_config)
def _GetSerializedRegion(region):
  """Converts a region parameter to serialized form, if it isn't already.

  Args:
    region: A GeoJSON-style region, either as coordinates or already
      serialized as a JSON string.

  Returns:
    The region serialized as a JSON string.

  Raises:
    EEException: If the region is neither valid JSON nor coordinates
      acceptable to Geometry.LineString or Geometry.Polygon.
  """
  region_error = ee_exception.EEException(
      'Invalid format for region property. '
      'See Export.image() documentation for more details.')
  if isinstance(region, six.string_types):
    try:
      region = json.loads(region)
    except ValueError:
      # json.loads signals malformed JSON with ValueError (JSONDecodeError).
      raise region_error
  # Validate that the coordinates describe a line string or a polygon.
  # Catch Exception instead of using a bare except so KeyboardInterrupt and
  # SystemExit are no longer swallowed.
  try:
    geometry.Geometry.LineString(region)
  except Exception:
    try:
      geometry.Geometry.Polygon(region)
    except Exception:
      raise region_error
  return json.dumps(region)
def _CopyDictFilterNone(originalDict):
"""Copies a dictionary and filters out None values."""
return dict((k, v) for k, v in originalDict.items() if v is not None)
def _ConvertToServerParams(configDict, eeElementKey, destination):
  """Converts an export configuration to server friendly parameters.

  Note that configDict is changed in place and not returned.

  Args:
    configDict: The configuration dictionary to be converted.
    eeElementKey: The key used to access the EE element.
    destination: The destination to export to.
  """
  # The EE element itself is passed to the server separately.
  del configDict[eeElementKey]
  # Flatten deprecated **kwargs into the top level of the config.
  if 'kwargs' in configDict:
    configDict.update(configDict['kwargs'])
    del configDict['kwargs']
  if 'crsTransform' in configDict:
    configDict['crs_transform'] = configDict.pop('crsTransform')
  # Convert list/tuple values into the comma separated strings the server
  # expects.
  configDict.update(_ConvertConfigParams(configDict))
  if destination is Task.ExportDestination.GCS:
    # Rename GCS-specific keys to their server-side names, without
    # clobbering values the caller may have set directly.
    if 'bucket' in configDict:
      configDict['outputBucket'] = configDict.pop('bucket')
    if 'fileNamePrefix' in configDict:
      if 'outputPrefix' not in configDict:
        configDict['outputPrefix'] = configDict.pop('fileNamePrefix')
      else:
        del configDict['fileNamePrefix']
    # Only used with Export.map
    if 'path' in configDict:
      configDict['outputPrefix'] = configDict.pop('path')
  elif destination is Task.ExportDestination.DRIVE:
    # Same renaming scheme for the Drive destination.
    if 'folder' in configDict:
      configDict['driveFolder'] = configDict.pop('folder')
    if 'fileNamePrefix' in configDict:
      if 'driveFileNamePrefix' not in configDict:
        configDict['driveFileNamePrefix'] = configDict.pop('fileNamePrefix')
      else:
        del configDict['fileNamePrefix']
  elif destination is not Task.ExportDestination.ASSET:
    raise ee_exception.EEException('Unknown export destination.')
|
|
"""
Support for ZWave HVAC devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/hvac.zwave/
"""
# Because we do not compile openzwave on CI
# pylint: disable=import-error
import logging
from homeassistant.components.hvac import DOMAIN
from homeassistant.components.hvac import HvacDevice
from homeassistant.components.zwave import (
ATTR_NODE_ID, ATTR_VALUE_ID, ZWaveDeviceEntity)
from homeassistant.components import zwave
from homeassistant.const import (TEMP_FAHRENHEIT, TEMP_CELSIUS)
_LOGGER = logging.getLogger(__name__)
CONF_NAME = 'name'
DEFAULT_NAME = 'ZWave Hvac'
# Z-Wave manufacturer / product ids for the Remotec ZXT-120 IR extender,
# which needs the WORKAROUND_ZXT_120 special handling below.
REMOTEC = 0x5254
REMOTEC_ZXT_120 = 0x8377
REMOTEC_ZXT_120_THERMOSTAT = (REMOTEC, REMOTEC_ZXT_120, 0)
# Z-Wave command class identifiers used by this platform.
COMMAND_CLASS_SENSOR_MULTILEVEL = 0x31  # 49
COMMAND_CLASS_THERMOSTAT_MODE = 0x40  # 64
COMMAND_CLASS_THERMOSTAT_SETPOINT = 0x43  # 67
COMMAND_CLASS_THERMOSTAT_FAN_MODE = 0x44  # 68
COMMAND_CLASS_CONFIGURATION = 0x70  # 112
WORKAROUND_ZXT_120 = 'zxt_120'
# (manufacturer_id, product_id, value index) -> workaround identifier.
DEVICE_MAPPINGS = {
    REMOTEC_ZXT_120_THERMOSTAT: WORKAROUND_ZXT_120
}
# ZXT-120 setpoint value index for each operation mode.
ZXT_120_SET_TEMP = {
    'Heat': 1,
    'Cool': 2,
    'Dry Air': 8,
    'Auto Changeover': 10
}
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the ZWave Hvac devices."""
    network = zwave.NETWORK
    # Nothing to do without a discovered value or a running Z-Wave network.
    if discovery_info is None or network is None:
        _LOGGER.debug("No discovery_info=%s or no NETWORK=%s",
                      discovery_info, zwave.NETWORK)
        return
    node = network.nodes[discovery_info[ATTR_NODE_ID]]
    zwave_value = node.values[discovery_info[ATTR_VALUE_ID]]
    zwave_value.set_change_verified(False)
    add_devices([ZWaveHvac(zwave_value)])
    _LOGGER.debug("discovery_info=%s and zwave.NETWORK=%s",
                  discovery_info, zwave.NETWORK)
# pylint: disable=too-many-arguments, abstract-method
class ZWaveHvac(ZWaveDeviceEntity, HvacDevice):
    """Represents a HeatControl hvac."""
    # pylint: disable=too-many-public-methods, too-many-instance-attributes
    def __init__(self, value):
        """Initialize the zwave hvac."""
        # Imported lazily because openzwave is not available on CI
        # (see the module header).
        from openzwave.network import ZWaveNetwork
        from pydispatch import dispatcher
        ZWaveDeviceEntity.__init__(self, value, DOMAIN)
        self._node = value.node
        self._target_temperature = None
        self._current_temperature = None
        self._current_operation = None
        self._operation_list = None
        # NOTE(review): despite its name, this attribute holds the fan mode
        # (see update_properties and current_fan_mode) -- confirm before
        # relying on it for operation state.
        self._current_operation_state = None
        # NOTE(review): never assigned after init; fan mode is tracked in
        # _current_operation_state instead.
        self._current_fan_mode = None
        self._fan_list = None
        self._current_swing_mode = None
        self._swing_list = None
        self._unit = None
        # Set to 1 when the node is a Remotec ZXT-120 (see DEVICE_MAPPINGS).
        self._zxt_120 = None
        self.update_properties()
        # register listener
        dispatcher.connect(
            self.value_changed, ZWaveNetwork.SIGNAL_VALUE_CHANGED)
        # Make sure that we have values for the key before converting to int
        if (value.node.manufacturer_id.strip() and
                value.node.product_id.strip()):
            specific_sensor_key = (int(value.node.manufacturer_id, 16),
                                   int(value.node.product_id, 16),
                                   value.index)
            if specific_sensor_key in DEVICE_MAPPINGS:
                if DEVICE_MAPPINGS[specific_sensor_key] == WORKAROUND_ZXT_120:
                    _LOGGER.debug("Remotec ZXT-120 Zwave Thermostat as HVAC")
                    self._zxt_120 = 1
    def value_changed(self, value):
        """Called when a value has changed on the network."""
        # Refresh on a change to our own value or to any value on the same
        # node (setpoint, mode, fan and swing live on sibling values).
        if self._value.value_id == value.value_id or \
           self._value.node == value.node:
            self.update_properties()
            self.update_ha_state()
            _LOGGER.debug("Value changed on network %s", value)
    def update_properties(self):
        """Callback on data change for the registered node/value pair."""
        # Set point
        # NOTE(review): every non-zero setpoint value overwrites the previous
        # one, so the last value in iteration order wins -- confirm this
        # selects the setpoint for the active mode.
        for value in self._node.get_values(
                class_id=COMMAND_CLASS_THERMOSTAT_SETPOINT).values():
            if int(value.data) != 0:
                self._target_temperature = int(value.data)
        # Operation Mode
        for value in self._node.get_values(
                class_id=COMMAND_CLASS_THERMOSTAT_MODE).values():
            self._current_operation = value.data
            self._operation_list = list(value.data_items)
            _LOGGER.debug("self._operation_list=%s", self._operation_list)
        # Current Temp
        # NOTE(review): any multilevel sensor value on the node (not
        # necessarily temperature) lands here -- verify on multi-sensor
        # nodes.
        for value in self._node.get_values(
                class_id=COMMAND_CLASS_SENSOR_MULTILEVEL).values():
            self._current_temperature = int(value.data)
            self._unit = value.units
        # Fan Mode
        for value in self._node.get_values(
                class_id=COMMAND_CLASS_THERMOSTAT_FAN_MODE).values():
            self._current_operation_state = value.data
            self._fan_list = list(value.data_items)
            _LOGGER.debug("self._fan_list=%s", self._fan_list)
            _LOGGER.debug("self._current_operation_state=%s",
                          self._current_operation_state)
        # Swing mode
        # Only the ZXT-120 exposes swing control, via config parameter 33.
        if self._zxt_120 == 1:
            for value in self._node.get_values(
                    class_id=COMMAND_CLASS_CONFIGURATION).values():
                if value.command_class == 112 and value.index == 33:
                    self._current_swing_mode = value.data
                    self._swing_list = list(value.data_items)
                    _LOGGER.debug("self._swing_list=%s", self._swing_list)
    @property
    def should_poll(self):
        """No polling on ZWave."""
        return False
    @property
    def current_fan_mode(self):
        """Return the fan speed set."""
        return self._current_operation_state
    @property
    def fan_list(self):
        """List of available fan modes."""
        return self._fan_list
    @property
    def current_swing_mode(self):
        """Return the swing mode set."""
        return self._current_swing_mode
    @property
    def swing_list(self):
        """List of available swing modes."""
        return self._swing_list
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        unit = self._unit
        if unit == 'C':
            return TEMP_CELSIUS
        elif unit == 'F':
            return TEMP_FAHRENHEIT
        else:
            # NOTE(review): logs with .exception outside an except block;
            # .error seems intended -- confirm. Returns None for unknown
            # units.
            _LOGGER.exception("unit_of_measurement=%s is not valid",
                              unit)
    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature
    @property
    def current_operation(self):
        """Return the current operation mode."""
        return self._current_operation
    @property
    def operation_list(self):
        """List of available operation modes."""
        return self._operation_list
    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temperature
    def set_temperature(self, temperature):
        """Set new target temperature."""
        for value in self._node.get_values(
                class_id=COMMAND_CLASS_THERMOSTAT_SETPOINT).values():
            # 67 == COMMAND_CLASS_THERMOSTAT_SETPOINT
            if value.command_class != 67:
                continue
            if self._zxt_120:
                # ZXT-120 does not support get setpoint
                self._target_temperature = temperature
                # Only write the setpoint value matching the active mode.
                if ZXT_120_SET_TEMP.get(self._current_operation) \
                   != value.index:
                    continue
                # ZXT-120 responds only to whole int
                value.data = int(round(temperature, 0))
            else:
                value.data = int(temperature)
            break
    def set_fan_mode(self, fan):
        """Set new target fan mode."""
        for value in self._node.get_values(
                class_id=COMMAND_CLASS_THERMOSTAT_FAN_MODE).values():
            # 68 == COMMAND_CLASS_THERMOSTAT_FAN_MODE; index 0 is the mode.
            if value.command_class == 68 and value.index == 0:
                value.data = bytes(fan, 'utf-8')
                break
    def set_operation_mode(self, operation_mode):
        """Set new target operation mode."""
        for value in self._node.get_values(
                class_id=COMMAND_CLASS_THERMOSTAT_MODE).values():
            # 64 == COMMAND_CLASS_THERMOSTAT_MODE; index 0 is the mode.
            if value.command_class == 64 and value.index == 0:
                value.data = bytes(operation_mode, 'utf-8')
                break
    def set_swing_mode(self, swing_mode):
        """Set new target swing mode."""
        if self._zxt_120 == 1:
            for value in self._node.get_values(
                    class_id=COMMAND_CLASS_CONFIGURATION).values():
                # Config parameter 33 holds the ZXT-120 swing setting.
                if value.command_class == 112 and value.index == 33:
                    value.data = bytes(swing_mode, 'utf-8')
                    break
|
|
"""Module with functions operating on IndexedBase, Indexed and Idx objects
- Check shape conformance
- Determine indices in resulting expression
etc.
Methods in this module could be implemented by calling methods on Expr
objects instead. When things stabilize this could be a useful
refactoring.
"""
from __future__ import print_function, division
from sympy.core.function import Function
from sympy.functions import exp, Piecewise
from sympy.tensor.indexed import Idx, Indexed
from sympy.core.compatibility import reduce
class IndexConformanceException(Exception):
    """Raised when terms in an Add do not share the same outer indices."""
    pass
def _remove_repeated(inds):
"""Removes repeated objects from sequences
Returns a set of the unique objects and a tuple of all that have been
removed.
>>> from sympy.tensor.index_methods import _remove_repeated
>>> l1 = [1, 2, 3, 2]
>>> _remove_repeated(l1)
(set([1, 3]), (2,))
"""
sum_index = {}
for i in inds:
if i in sum_index:
sum_index[i] += 1
else:
sum_index[i] = 0
inds = [x for x in inds if not sum_index[x]]
return set(inds), tuple([ i for i in sum_index if sum_index[i] ])
def _get_indices_Mul(expr, return_dummies=False):
    """Determine the outer indices of a Mul object.

    >>> from sympy.tensor.index_methods import _get_indices_Mul
    >>> from sympy.tensor.indexed import IndexedBase, Idx
    >>> i, j, k = map(Idx, ['i', 'j', 'k'])
    >>> x = IndexedBase('x')
    >>> y = IndexedBase('y')
    >>> _get_indices_Mul(x[i, k]*y[j, k])
    (set([i, j]), {})
    >>> _get_indices_Mul(x[i, k]*y[j, k], return_dummies=True)
    (set([i, j]), {}, (k,))

    """
    # Pool the outer indices of every factor; an index repeated across
    # factors becomes a dummy (summation) index.
    pooled = []
    sym_dicts = []
    for factor in expr.args:
        factor_inds, factor_syms = get_indices(factor)
        pooled.extend(factor_inds)
        sym_dicts.append(factor_syms)
    inds, dummies = _remove_repeated(pooled)
    # Merge the symmetry information of all factors multiplicatively.
    symmetry = {}
    for syms in sym_dicts:
        for pair in syms:
            if pair in symmetry:
                symmetry[pair] *= syms[pair]
            else:
                symmetry[pair] = syms[pair]
    if return_dummies:
        return inds, symmetry, dummies
    return inds, symmetry
def _get_indices_Pow(expr):
    """Determine outer indices of a power or an exponential.

    A power is considered a universal function, so that the indices of a Pow is
    just the collection of indices present in the expression.  This may be
    viewed as a bit inconsistent in the special case:

        x[i]**2 = x[i]*x[i]                                       (1)

    The above expression could have been interpreted as the contraction of x[i]
    with itself, but we choose instead to interpret it as a function

        lambda y: y**2

    applied to each element of x (a universal function in numpy terms).  In
    order to allow an interpretation of (1) as a contraction, we need
    contravariant and covariant Idx subclasses.  (FIXME: this is not yet
    implemented)

    Expressions in the base or exponent are subject to contraction as usual,
    but an index that is present in the exponent, will not be considered
    contractable with its own base.  Note however, that indices in the same
    exponent can be contracted with each other.

    >>> from sympy.tensor.index_methods import _get_indices_Pow
    >>> from sympy import Pow, exp, IndexedBase, Idx
    >>> A = IndexedBase('A')
    >>> x = IndexedBase('x')
    >>> i, j, k = map(Idx, ['i', 'j', 'k'])
    >>> _get_indices_Pow(exp(A[i, j]*x[j]))
    (set([i]), {})
    >>> _get_indices_Pow(Pow(x[i], x[i]))
    (set([i]), {})
    >>> _get_indices_Pow(Pow(A[i, j]*x[j], x[i]))
    (set([i]), {})

    """
    base, exponent = expr.as_base_exp()
    base_inds, _ = get_indices(base)
    exp_inds, _ = get_indices(exponent)
    # Simply collect indices from both parts; base/exponent contraction is
    # deliberately not performed (see the docstring).
    inds = base_inds | exp_inds
    # FIXME: symmetries from power needs to check special cases, else nothing
    symmetries = {}
    return inds, symmetries
def _get_indices_Add(expr):
    """Determine outer indices of an Add object.

    In a sum, each term must have the same set of outer indices.  A valid
    expression could be

        x(i)*y(j) - x(j)*y(i)

    But we do not allow expressions like:

        x(i)*y(j) - z(j)*z(j)

    FIXME: Add support for Numpy broadcasting

    >>> from sympy.tensor.index_methods import _get_indices_Add
    >>> from sympy.tensor.indexed import IndexedBase, Idx
    >>> i, j, k = map(Idx, ['i', 'j', 'k'])
    >>> x = IndexedBase('x')
    >>> y = IndexedBase('y')
    >>> _get_indices_Add(x[i] + x[k]*y[i, k])
    (set([i]), {})

    """
    gathered = [get_indices(term) for term in expr.args]
    inds, syms = zip(*gathered)
    # allow broadcast of scalars
    non_scalars = [ind for ind in inds if ind != set()]
    if not non_scalars:
        return set(), {}
    first = non_scalars[0]
    if any(ind != first for ind in non_scalars[1:]):
        raise IndexConformanceException("Indices are not consistent: %s" % expr)
    # NOTE: the reduce expression below is kept verbatim; it is falsy only
    # when all symmetry dicts are identical and empty.
    if not reduce(lambda x, y: x != y or y, syms):
        symmetries = syms[0]
    else:
        # FIXME: search for symmetries
        symmetries = {}
    return first, symmetries
def get_indices(expr):
    """Determine the outer indices of expression ``expr``

    By *outer* we mean indices that are not summation indices.  Returns a set
    and a dict.  The set contains outer indices and the dict contains
    information about index symmetries.

    Examples
    ========

    >>> from sympy.tensor.index_methods import get_indices
    >>> from sympy import symbols
    >>> from sympy.tensor import IndexedBase, Idx
    >>> x, y, A = map(IndexedBase, ['x', 'y', 'A'])
    >>> i, j, a, z = symbols('i j a z', integer=True)

    The indices of the total expression is determined, Repeated indices imply a
    summation, for instance the trace of a matrix A:

    >>> get_indices(A[i, i])
    (set(), {})

    In the case of many terms, the terms are required to have identical
    outer indices.  Else an IndexConformanceException is raised.

    >>> get_indices(x[i] + A[i, j]*y[j])
    (set([i]), {})

    :Exceptions:

    An IndexConformanceException means that the terms are not compatible, e.g.

    >>> get_indices(x[i] + y[j])                #doctest: +SKIP
            (...)
    IndexConformanceException: Indices are not consistent: x(i) + y(j)

    .. warning::
       The concept of *outer* indices applies recursively, starting on the deepest
       level.  This implies that dummies inside parenthesis are assumed to be
       summed first, so that the following expression is handled gracefully:

       >>> get_indices((x[i] + A[i, j]*y[j])*x[j])
       (set([i, j]), {})

       This is correct and may appear convenient, but you need to be careful
       with this as SymPy will happily .expand() the product, if requested.  The
       resulting expression would mix the outer ``j`` with the dummies inside
       the parenthesis, which makes it a different expression.  To be on the
       safe side, it is best to avoid such ambiguities by using unique indices
       for all contractions that should be held separate.

    """
    # We call ourself recursively to determine indices of sub expressions.

    # break recursion
    if isinstance(expr, Indexed):
        c = expr.indices
        # Indices repeated on a single Indexed object are summed over, so
        # only the unique ones are outer indices.
        inds, dummies = _remove_repeated(c)
        return inds, {}
    elif expr is None:
        return set(), {}
    elif expr.is_Atom:
        return set(), {}
    elif isinstance(expr, Idx):
        return set([expr]), {}
    # recurse via specialized functions
    else:
        if expr.is_Mul:
            return _get_indices_Mul(expr)
        elif expr.is_Add:
            return _get_indices_Add(expr)
        elif expr.is_Pow or isinstance(expr, exp):
            return _get_indices_Pow(expr)
        elif isinstance(expr, Piecewise):
            # FIXME:  No support for Piecewise yet
            return set(), {}
        elif isinstance(expr, Function):
            # Support ufunc like behaviour by returning indices from arguments.
            # Functions do not interpret repeated indices across arguments
            # as summation
            ind0 = set()
            for arg in expr.args:
                ind, sym = get_indices(arg)
                ind0 |= ind
            # NOTE(review): only the last argument's symmetry dict is
            # returned, and ``sym`` is unbound if expr.args is empty --
            # confirm whether this is intended.
            return ind0, sym
        # this test is expensive, so it should be at the end
        elif not expr.has(Indexed):
            return set(), {}
        raise NotImplementedError(
            "FIXME: No specialized handling of type %s" % type(expr))
def get_contraction_structure(expr):
    """Determine dummy indices of ``expr`` and describe its structure

    By *dummy* we mean indices that are summation indices.

    The structure of the expression is determined and described as follows:

    1) A conforming summation of Indexed objects is described with a dict where
       the keys are summation indices and the corresponding values are sets
       containing all terms for which the summation applies.  All Add objects
       in the SymPy expression tree are described like this.

    2) For all nodes in the SymPy expression tree that are *not* of type Add, the
       following applies:

       If a node discovers contractions in one of its arguments, the node
       itself will be stored as a key in the dict.  For that key, the
       corresponding value is a list of dicts, each of which is the result of a
       recursive call to get_contraction_structure().  The list contains only
       dicts for the non-trivial deeper contractions, omitting dicts with None
       as the one and only key.

    .. Note:: The presence of expressions among the dictionary keys indicates
       multiple levels of index contractions.  A nested dict displays nested
       contractions and may itself contain dicts from a deeper level.  In
       practical calculations the summation in the deepest nested level must be
       calculated first so that the outer expression can access the resulting
       indexed object.

    Examples
    ========

    >>> from sympy.tensor.index_methods import get_contraction_structure
    >>> from sympy import symbols, default_sort_key
    >>> from sympy.tensor import IndexedBase, Idx
    >>> x, y, A = map(IndexedBase, ['x', 'y', 'A'])
    >>> i, j, k, l = map(Idx, ['i', 'j', 'k', 'l'])
    >>> get_contraction_structure(x[i]*y[i] + A[j, j])
    {(i,): set([x[i]*y[i]]), (j,): set([A[j, j]])}
    >>> get_contraction_structure(x[i]*y[j])
    {None: set([x[i]*y[j]])}

    A multiplication of contracted factors results in nested dicts representing
    the internal contractions.

    >>> d = get_contraction_structure(x[i, i]*y[j, j])
    >>> sorted(d.keys(), key=default_sort_key)
    [None, x[i, i]*y[j, j]]

    In this case, the product has no contractions:

    >>> d[None]
    set([x[i, i]*y[j, j]])

    Factors are contracted "first":

    >>> sorted(d[x[i, i]*y[j, j]], key=default_sort_key)
    [{(i,): set([x[i, i]])}, {(j,): set([y[j, j]])}]

    A parenthesized Add object is also returned as a nested dictionary.  The
    term containing the parenthesis is a Mul with a contraction among the
    arguments, so it will be found as a key in the result.  It stores the
    dictionary resulting from a recursive call on the Add expression.

    >>> d = get_contraction_structure(x[i]*(y[i] + A[i, j]*x[j]))
    >>> sorted(d.keys(), key=default_sort_key)
    [(x[j]*A[i, j] + y[i])*x[i], (i,)]
    >>> d[(i,)]
    set([(x[j]*A[i, j] + y[i])*x[i]])
    >>> d[x[i]*(A[i, j]*x[j] + y[i])]
    [{None: set([y[i]]), (j,): set([x[j]*A[i, j]])}]

    Powers with contractions in either base or exponent will also be found as
    keys in the dictionary, mapping to a list of results from recursive calls:

    >>> d = get_contraction_structure(A[j, j]**A[i, i])
    >>> d[None]
    set([A[j, j]**A[i, i]])
    >>> nested_contractions = d[A[j, j]**A[i, i]]
    >>> nested_contractions[0]
    {(j,): set([A[j, j]])}
    >>> nested_contractions[1]
    {(i,): set([A[i, i]])}

    The description of the contraction structure may appear complicated when
    represented with a string in the above examples, but it is easy to iterate
    over:

    >>> from sympy import Expr
    >>> for key in d:
    ...     if isinstance(key, Expr):
    ...         continue
    ...     for term in d[key]:
    ...         if term in d:
    ...             # treat deepest contraction first
    ...             pass
    ...         # treat outermost contractions here

    """
    # We call ourself recursively to inspect sub expressions.

    if isinstance(expr, Indexed):
        # Repeated indices on one Indexed object are its dummies.
        junk, key = _remove_repeated(expr.indices)
        return {key or None: set([expr])}
    elif expr.is_Atom:
        return {None: set([expr])}
    elif expr.is_Mul:
        # Indices repeated across factors are this product's dummies.
        junk, junk, key = _get_indices_Mul(expr, return_dummies=True)
        result = {key or None: set([expr])}
        # recurse on every factor
        nested = []
        for fac in expr.args:
            facd = get_contraction_structure(fac)
            if not (None in facd and len(facd) == 1):
                nested.append(facd)
        if nested:
            result[expr] = nested
        return result
    elif expr.is_Pow or isinstance(expr, exp):
        # recurse in base and exp separately.  If either has internal
        # contractions we must include ourselves as a key in the returned dict
        b, e = expr.as_base_exp()
        dbase = get_contraction_structure(b)
        dexp = get_contraction_structure(e)
        dicts = []
        for d in dbase, dexp:
            if not (None in d and len(d) == 1):
                dicts.append(d)
        result = {None: set([expr])}
        if dicts:
            result[expr] = dicts
        return result
    elif expr.is_Add:
        # Note: we just collect all terms with identical summation indices, We
        # do nothing to identify equivalent terms here, as this would require
        # substitutions or pattern matching in expressions of unknown
        # complexity.
        result = {}
        for term in expr.args:
            # recurse on every term
            d = get_contraction_structure(term)
            for key in d:
                if key in result:
                    result[key] |= d[key]
                else:
                    result[key] = d[key]
        return result
    elif isinstance(expr, Piecewise):
        # FIXME:  No support for Piecewise yet
        # NOTE(review): unlike every other branch, the value here is the bare
        # expression rather than set([expr]) -- confirm whether callers
        # depend on that.
        return {None: expr}
    elif isinstance(expr, Function):
        # Collect non-trivial contraction structures in each argument
        # We do not report repeated indices in separate arguments as a
        # contraction
        deeplist = []
        for arg in expr.args:
            deep = get_contraction_structure(arg)
            if not (None in deep and len(deep) == 1):
                deeplist.append(deep)
        d = {None: set([expr])}
        if deeplist:
            d[expr] = deeplist
        return d
    # this test is expensive, so it should be at the end
    elif not expr.has(Indexed):
        return {None: set([expr])}
    raise NotImplementedError(
        "FIXME: No specialized handling of type %s" % type(expr))
|
|
#!/usr/bin/python
import csv
import datetime
import os.path
import nltk
from nltk.collocations import *
import re
import tempfile
import shutil
# NOTE(review): "global" at module level is a no-op; these two statements
# have no effect -- the names below are ordinary module globals anyway.
global tagbagFirstFile
global tagbagSecondFile
print "* * * * * * *"
print "Thank you for choosing the semantic harmonization tool"
print "Let us begin with asking you some questions"
print "First, let's get some information about your first data source"
# Default demo paths; the interactive prompts in processfirstfile /
# processsecondfile read the paths actually used.
pathtofile1 = "demo.csv"
pathtofile3 = "demo2.csv"
pathtofile2 = "demo_copy.csv"
# Tag accumulators for the two data sources (seeded with an empty string).
tagbagFirstFile = [""]
tagbagSecondFile = [""]
def processfirstfile():
pathtofile1 = raw_input('Enter the path for your data file ?: ')
filesource_1 = raw_input('Where did this file come from?: ')
removearticles(filesource_1)
toeknizeTags(1, "Source", filesource_1)
fileparent_1 = raw_input('Which system or process was responsbile for creating this data?: ')
removearticles(fileparent_1)
toeknizeTags(1, "Parent", fileparent_1)
entities_1 = raw_input('what kind of entities does this data set describe ?: ')
removearticles(entities_1)
toeknizeTags(1, "Entity", entities_1)
events_1 = raw_input('what kind of events does this data set describe ?: ')
removearticles(events_1)
toeknizeTags(1, "Entity", events_1)
application_1 = raw_input('Which application feeds into this data source ?: ')
removearticles(application_1)
toeknizeTags(1, "CreatingApplication", application_1)
consumers_1 = raw_input('Which application consumes this data source ?: ')
removearticles(consumers_1)
toeknizeTags(1, "Reader", consumers_1)
contents_1 = raw_input('Describe the contents of this data source ?: ')
removearticles(contents_1)
toeknizeTags(1, "Content", contents_1)
#pathtofile1 = "demo.csv"
outFile = open('/Users/Kumar/nltk_data/corpora/genesis/firstfileresponse.txt', 'w')
outFile.write(fileparent_1 + " " + entities_1 + " " + events_1 + " " + application_1 + " " + consumers_1 + " " + contents_1)
outFile.close()
responseTags = generateTags('/Users/Kumar/nltk_data/corpora/genesis/firstfileresponse.txt')
print "\n"
print "Tags Generated From User Answers (NLTK Output)"
print responseTags
print "\n"
for tags in responseTags:
tagbagFirstFile.append(tags)
shutil.copy(pathtofile1, '/Users/Kumar/nltk_data/corpora/genesis/temp.csv')
responseTags = generateTags('/Users/Kumar/nltk_data/corpora/genesis/temp.csv')
print "\n"
print "Tags Generated From Treating the Data File as a Document (NLTK Output)"
print responseTags
print "\n"
for tags in responseTags:
tagbagFirstFile.append(tags)
count = 0
isDate = False
isNum = False
headers = ""
firstfile = open(pathtofile1, 'rb')
try:
namefile = os.path.basename(pathtofile1)
tagbagFirstFile.append(namefile)
except:
print "unable to read filename"
print "Preparing to run light weight inference"
print "\n"
spamreader = csv.reader(firstfile, delimiter=' ', quotechar='|')
for row in spamreader:
if count == 0:
strs = ', '.join(row)
headers = strs.split(" ")
for head in headers:
tagbagFirstFile.append(head)
count = 1
else:
strs = ', '.join(row)
headers = strs.split(" ")
for head in headers:
try:
if isinstance(int(head), (int, long, float, complex)):
isNum = True
#print "numcheckpassed"
except:
#print "Numeric Check Failed"
isNum = False
try:
if isinstance(datetime.datetime.strptime(head, '%m/%d/%Y').date(), datetime.date):
isDate = True
#print "datecheckpassed"
except:
#print "Date Check Failed"
isDate = False
if isNum == False and isDate == False:
tagbagFirstFile.append(head)
if isNum == True:
tagbagFirstFile.append("HasMeasures")
print "Found Measures in the data"
if isNum == False:
tagbagFirstFile.append("HasDates")
print "Found Dates in the data"
isDate = False
isNum = False
#print ', '.join(row)
print "***************Tags*********************"
print tagbagFirstFile
print "***************Tags*********************"
def processsecondfile():
count = 0
headers = ""
isDate = False
isNum = False
pathtofile2 = raw_input('Enter the path for your data file ?: ')
filesource_2 = raw_input('Where did this file come from?: ')
removearticles(filesource_2)
toeknizeTags(2, "Source", filesource_2)
fileparent_2 = raw_input('Which system or process was responsbile for creating this data?: ')
removearticles(fileparent_2)
toeknizeTags(2, "Parent", fileparent_2)
entities_2 = raw_input('what kind of entities does this data set describe ?: ')
removearticles(entities_2)
toeknizeTags(2, "Entity", entities_2)
events_2 = raw_input('what kind of events does this data set describe ?: ')
removearticles(events_2)
toeknizeTags(2, "Entity", events_2)
application_2 = raw_input('Which application feeds into this data source ?: ')
removearticles(application_2)
toeknizeTags(2, "CreatingApplication", application_2)
consumers_2 = raw_input('Which application consumes this data source ?: ')
removearticles(consumers_2)
toeknizeTags(2, "Reader", consumers_2)
contents_2 = raw_input('Describe the contents of this data source ?: ')
removearticles(contents_2)
toeknizeTags(2, "Content", contents_2)
outFile = open('/Users/Kumar/nltk_data/corpora/genesis/secondfileresponse.txt', 'w')
outFile.write(fileparent_2 + " " + entities_2 + " " + events_2 + " " + application_2 + " " + consumers_2 + " " + contents_2)
outFile.close()
responseTags = generateTags('/Users/Kumar/nltk_data/corpora/genesis/secondfileresponse.txt')
for tags in responseTags:
tagbagSecondFile.append(tags)
print "\n"
print "Tags Generated From User Answers (NLTK Output)"
print "\n"
print responseTags
shutil.copy(pathtofile2, '/Users/Kumar/nltk_data/corpora/genesis/temp.csv')
responseTags = generateTags('/Users/Kumar/nltk_data/corpora/genesis/temp.csv')
print "\n"
print "Tags Generated From Treating the Data File as a Document (NLTK Output)"
print "\n"
print responseTags
for tags in responseTags:
tagbagSecondFile.append(tags)
firstfile = open(pathtofile2, 'rb')
try:
namefile = os.path.basename(pathtofile2)
tagbagSecondFile.append(namefile)
except:
print "unable to read filename"
spamreader = csv.reader(firstfile, delimiter=' ', quotechar='|')
print "Preparing to run light weight inference"
print "\n"
for row in spamreader:
if count == 0:
strs = ', '.join(row)
headers = strs.split(" ")
for head in headers:
tagbagSecondFile.append(head)
count = 1
else:
strs = ', '.join(row)
headers = strs.split(" ")
for head in headers:
try:
if isinstance(int(head), (int, long, float, complex)):
isNum = True
#print "numcheckpassed"
except:
#print "Numeric Check Failed"
isNum = False
try:
if isinstance(datetime.datetime.strptime(head, '%m/%d/%Y').date(), datetime.date):
isDate = True
#print "datecheckpassed"
except:
#print "Date Check Failed"
isDate = False
if isNum == False and isDate == False:
tagbagSecondFile.append(head)
if isNum == True:
tagbagSecondFile.append("HasMeasures")
print "Found Measures in the data"
if isNum == False:
tagbagSecondFile.append("HasDates")
print "Found Dates in the data"
isDate = False
isNum = False
#print ', '.join(row)
print "***************Tags*********************"
print tagbagSecondFile
print "***************Tags*********************"
def harmonizeOnTags():
intersectionList = set(tagbagSecondFile).intersection(tagbagFirstFile)
print "*********************************************"
if len(intersectionList) > 0.75*len(tagbagFirstFile) and len(intersectionList) > 0.75*len(tagbagSecondFile):
print "HARMONIZATION SCORE: These data sets are very similar"
elif len(intersectionList) > 0.50 *len(tagbagFirstFile) and len(intersectionList) > 0.50*len(tagbagSecondFile):
print "HARMONIZATION SCORE: These data sets are somewhat similar"
elif len(intersectionList) > 0.25*len(tagbagFirstFile) and len(intersectionList) > 0.25*len(tagbagSecondFile):
print "HARMONIZATION SCORE: These data sets are slightly similar"
else:
print "HARMONIZATION SCORE: Not many similarities found in these data sets"
print "*********************************************"
print "Found the following matching tags"
for tag in intersectionList:
print tag
print "*********************************************"
def generateTags(text):
    """Return the five bigram collocations with the highest PMI in *text*.

    *text* is a corpus file name understood by nltk.corpus.genesis.
    Bigrams seen fewer than 3 times are dropped before ranking.
    """
    scorer = nltk.collocations.BigramAssocMeasures()
    finder = BigramCollocationFinder.from_words(
        nltk.corpus.genesis.words(text))
    # ignore rare bigrams (frequency < 3)
    finder.apply_freq_filter(3)
    # the 5 n-grams with the highest PMI
    return finder.nbest(scorer.pmi, 5)
def removearticles(text):
    """Remove common stop words ('a', 'an', 'and', 'the', ...) from *text*.

    A run of whitespace, a stop word, and the whitespace after it are
    collapsed to just that trailing whitespace, e.g. "go to the store"
    becomes "go the store" (matches are non-overlapping, left to right).

    Fixes two bugs in the original: the result was never returned, and
    the replacement '\\2' was written in a non-raw string, so it inserted
    the literal character chr(2) instead of capture group 2.
    """
    return re.sub(r'\s+(a|an|and|the|to|from|if)(\s+)', r'\2', text)
def toeknizeTags(file, question, text):
    """Split *text* on single spaces and record '<question>_<piece>' tags.

    *file* selects the destination: 1 appends to tagbagFirstFile,
    anything else appends to tagbagSecondFile.
    """
    bag = tagbagFirstFile if file == 1 else tagbagSecondFile
    for piece in text.split(' '):
        bag.append(question + '_' + piece)
def verifyTags(source):
acceptAll = raw_input('Type Yes to Accept All Tags As Is: ')
if acceptAll.lower() != "yes":
if source == 1:
for tags in tagbagFirstFile:
tagverdict = raw_input('Type Yes to Accept or type to edit: ' + tags + ": ")
if tagverdict.lower() == "yes":
print "Verdict Recorded"
else:
tagbagFirstFile.append(tagverdict)
tagbagFirstFile.remove(tags)
else:
for tags in tagbagSecondFile:
tagverdict2 = raw_input('Type Yes to Accept or type to edit: ' + tags + ": ")
if tagverdict2.lower() == "yes":
print "Tag Verdict Recorded"
else:
print "Updated Tag Recorded"
tagbagSecondFile.append(tagverdict2)
tagbagSecondFile.remove(tags)
# Driver: tag both input files, let the user vet each tag bag interactively,
# then score and print the overlap between the two bags.
processfirstfile()
verifyTags(1)
processsecondfile()
verifyTags(2)
harmonizeOnTags()
|
|
"""
This tutorial introduces the multilayer perceptron using Theano.
A multilayer perceptron is a logistic regressor where
instead of feeding the input to the logistic regression you insert a
intermediate layer, called the hidden layer, that has a nonlinear
activation function (usually tanh or sigmoid) . One can use many such
hidden layers making the architecture deep. The tutorial will also tackle
the problem of MNIST digit classification.
.. math::
f(x) = G( b^{(2)} + W^{(2)}( s( b^{(1)} + W^{(1)} x))),
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 5
"""
from __future__ import print_function
__docformat__ = 'restructedtext en'
import os
import sys
import timeit
import gzip
import pickle
import numpy
import cPickle
import theano
import theano.tensor as T
from scipy import misc
from logistic_sgd import LogisticRegression
def load_data(dataset):
    """Load the gzipped MNIST pickle, truncate it, and wrap it in Theano
    shared variables.

    :type dataset: string
    :param dataset: path to the pickled, gzipped MNIST file; a bare
        filename that does not exist locally is looked up under ../data/

    :returns: [(train_x, train_y), (valid_x, valid_y), (test_x, test_y)]
        where each x is a floatX shared matrix and each y an int32 vector.
    """
    data_dir, data_file = os.path.split(dataset)
    if data_dir == "" and not os.path.isfile(dataset):
        # bare filename not present here: fall back to <this dir>/../data/
        new_path = os.path.join(
            os.path.split(__file__)[0],
            "..",
            "data",
            dataset
        )
        if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
            dataset = new_path
    print('... loading data')
    # Load the dataset
    with gzip.open(dataset, 'rb') as f:
        try:
            # Python 3: the pickle was produced by Python 2, so latin1 is
            # needed to decode the embedded numpy arrays
            train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
        except TypeError:
            # Python 2's pickle.load has no 'encoding' keyword; the call
            # above fails before reading, so retrying on f is safe.
            # (Narrowed from a bare except that hid real load errors.)
            train_set, valid_set, test_set = pickle.load(f)

    def shared_dataset(data_xy, borrow=True):
        """Copy an (x, y) numpy pair into Theano shared variables."""
        data_x, data_y = data_xy
        shared_x = theano.shared(numpy.asarray(data_x,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        shared_y = theano.shared(numpy.asarray(data_y,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        # labels are used as indices downstream, so cast to int32
        return shared_x, T.cast(shared_y, 'int32')

    # keep only a small prefix of each split to speed up experimentation
    max_shape = 3000
    v_shape = 3000
    test_set = (test_set[0][:max_shape], test_set[1][:max_shape])
    valid_set = (valid_set[0][:v_shape], valid_set[1][:v_shape])
    train_set = (train_set[0][:v_shape], train_set[1][:v_shape])
    test_set_x, test_set_y = shared_dataset(test_set)
    valid_set_x, valid_set_y = shared_dataset(valid_set)
    train_set_x, train_set_y = shared_dataset(train_set)
    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
            (test_set_x, test_set_y)]
    return rval
# start-snippet-1
class HiddenLayer(object):
    """A fully-connected MLP hidden layer: output = activation(input.W + b).

    Weights default to the tanh-appropriate uniform initialization of
    [Xavier10], scaled by 4 when the activation is the sigmoid.
    """

    def __init__(self, rng, input, n_in, n_out, W=None, b=None,
                 activation=T.tanh):
        """
        :type rng: numpy.random.RandomState
        :param rng: random number generator used to initialize W

        :type input: theano.tensor.dmatrix
        :param input: symbolic tensor of shape (n_examples, n_in)

        :type n_in: int
        :param n_in: dimensionality of the input

        :type n_out: int
        :param n_out: number of hidden units

        :type activation: theano.Op or function
        :param activation: nonlinearity applied to the linear output;
            None yields a purely linear layer
        """
        self.input = input
        if W is None:
            # uniform init in +/- sqrt(6 / (fan_in + fan_out)); converted
            # with asarray to floatX so the graph is runnable on GPU
            bound = numpy.sqrt(6. / (n_in + n_out))
            W_values = numpy.asarray(
                rng.uniform(low=-bound, high=bound, size=(n_in, n_out)),
                dtype=theano.config.floatX
            )
            # [Xavier10]: sigmoid units work better with 4x larger
            # initial weights than tanh units
            if activation == theano.tensor.nnet.sigmoid:
                W_values *= 4
            W = theano.shared(value=W_values, name='W', borrow=True)
        if b is None:
            b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
            b = theano.shared(value=b_values, name='b', borrow=True)
        self.W = W
        self.b = b
        pre_activation = T.dot(input, self.W) + self.b
        self.output = (
            pre_activation if activation is None
            else activation(pre_activation)
        )
        # parameters of the model, learned by gradient descent
        self.params = [self.W, self.b]
# start-snippet-2
class MLP(object):
    """One-hidden-layer multilayer perceptron.

    A feedforward network with a tanh hidden layer (``HiddenLayer``)
    feeding a softmax output layer (``LogisticRegression``). Exposes L1
    and squared-L2 regularizers over both weight matrices, and delegates
    the loss and error computations to the output layer.
    """

    def __init__(self, rng, input, n_in, n_hidden, n_out):
        """Build the two-layer symbolic graph.

        :type rng: numpy.random.RandomState
        :param rng: random number generator used to initialize weights

        :type input: theano.tensor.TensorType
        :param input: symbolic variable describing one minibatch of input

        :type n_in: int
        :param n_in: number of input units

        :type n_hidden: int
        :param n_hidden: number of hidden units

        :type n_out: int
        :param n_out: number of output units (label-space dimension)
        """
        # hidden layer: tanh nonlinearity (swap the activation here for
        # sigmoid or any other nonlinear function if desired)
        self.hiddenLayer = HiddenLayer(
            rng=rng,
            input=input,
            n_in=n_in,
            n_out=n_hidden,
            activation=T.tanh
        )
        # softmax output layer consumes the hidden activations
        self.logRegressionLayer = LogisticRegression(
            input=self.hiddenLayer.output,
            n_in=n_hidden,
            n_out=n_out
        )
        # L1 regularizer: sum of absolute weights of both layers
        self.L1 = (
            abs(self.hiddenLayer.W).sum()
            + abs(self.logRegressionLayer.W).sum()
        )
        # squared L2 regularizer: sum of squared weights of both layers
        self.L2_sqr = (
            (self.hiddenLayer.W ** 2).sum()
            + (self.logRegressionLayer.W ** 2).sum()
        )
        # the MLP's loss and error counts are those of its output layer
        self.negative_log_likelihood = (
            self.logRegressionLayer.negative_log_likelihood
        )
        self.errors = self.logRegressionLayer.errors
        # all trainable parameters, in layer order
        self.params = self.hiddenLayer.params + self.logRegressionLayer.params
        # keep track of model input
        self.input = input
def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=500,
             dataset='../data/mnist.pkl.gz', batch_size=20, n_hidden=50):
    """
    Demonstrate stochastic gradient descent optimization for a multilayer
    perceptron.

    This is demonstrated on MNIST. Uses early stopping driven by the
    validation error, and saves snapshots of model parameters to
    ../data/b/<n>.npy whenever the validation loss improves.

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
    gradient)
    :type L1_reg: float
    :param L1_reg: L1-norm's weight when added to the cost (see
    regularization)
    :type L2_reg: float
    :param L2_reg: L2-norm's weight when added to the cost (see
    regularization)
    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer
    :type dataset: string
    :param dataset: the path of the MNIST dataset file from
    http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
    """
    datasets = load_data(dataset)
    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]
    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print('... building the model')

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as 1D vector of
                        # [int] labels

    rng = numpy.random.RandomState(1234)

    # construct the MLP class
    classifier = MLP(
        rng=rng,
        input=x,
        n_in=28 * 28,
        n_hidden=n_hidden,
        n_out=10
    )

    # the cost we minimize during training is the negative log likelihood of
    # the model plus the regularization terms (L1 and L2); cost is expressed
    # here symbolically
    cost = (
        classifier.negative_log_likelihood(y)
        + L1_reg * classifier.L1
        + L2_reg * classifier.L2_sqr
    )

    # compiling a Theano function that computes the mistakes that are made
    # by the model on a minibatch
    test_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )
    validate_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )

    # compute the gradient of cost with respect to theta (sorted in params)
    # the resulting gradients will be stored in a list gparams
    gparams = [T.grad(cost, param) for param in classifier.params]

    # specify how to update the parameters of the model as a list of
    # (variable, update expression) pairs -- plain SGD steps
    updates = [
        (param, param - learning_rate * gparam)
        for param, gparam in zip(classifier.params, gparams)
    ]

    # compiling a Theano function `train_model` that returns the cost, but
    # in the same time updates the parameter of the model based on the rules
    # defined in `updates`
    train_model = theano.function(
        inputs=[index],
        outputs=cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    ###############
    # TRAIN MODEL #
    ###############
    print('... training')

    # early-stopping parameters
    patience = 10000  # look as this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
                           # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience // 2)
    # go through this many
    # minibatche before checking the network
    # on the validation set; in this case we
    # check every epoch

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = timeit.default_timer()

    epoch = 0
    done_looping = False
    num = 0
    # snapshot the initial hidden-layer weights (params[0]) before training
    numpy.save('../data/b/%i.npy'%(num),classifier.params[0].eval())
    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in range(n_train_batches):
            minibatch_avg_cost = train_model(minibatch_index)
            # iteration number
            iter = (epoch - 1) * n_train_batches + minibatch_index
            if (iter + 1) % validation_frequency == 0:
                # compute zero-one loss on validation set
                validation_losses = [validate_model(i) for i
                                     in range(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)
                print(
                    'epoch %i, minibatch %i/%i, validation error %f %%' %
                    (
                        epoch,
                        minibatch_index + 1,
                        n_train_batches,
                        this_validation_loss * 100.
                    )
                )
                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:
                    res = classifier.params[0].eval()
                    num = num + 1
                    # NOTE(review): the pre-training snapshot above saves
                    # params[0], but these per-improvement snapshots save
                    # params[1] (and `res` is computed but unused) -- looks
                    # inconsistent; confirm which parameter was meant.
                    numpy.save('../data/b/%i.npy'%(num),classifier.params[1].eval())
                    #improve patience if loss improvement is good enough
                    if (
                        this_validation_loss < best_validation_loss *
                        improvement_threshold
                    ):
                        patience = max(patience, iter * patience_increase)
                    best_validation_loss = this_validation_loss
                    best_iter = iter
                    # test it on the test set
                    test_losses = [test_model(i) for i
                                   in range(n_test_batches)]
                    test_score = numpy.mean(test_losses)
                    print((' epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))
                    # with open('../data/mlp_model.pkl', 'wb') as f:
                    #     cPickle.dump((classifier.params, classifier.logRegressionLayer.y_pred,
                    #                   classifier.input), f)
            if patience <= iter:
                done_looping = True
                break

    end_time = timeit.default_timer()
    print(('Optimization complete. Best validation score of %f %% '
           'obtained at iteration %i, with test performance %f %%') %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print(('The code for file ' +
           os.path.split(__file__)[1] +
           ' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)
def predict(dataset, n_hidden, n_in, n_out):
    """Load a trained MLP from disk and write Kaggle predictions to CSV.

    :param dataset: path to the gzipped MNIST pickle (loaded to mirror the
        training setup; its test split is materialized but predictions run
        on ../data/kaggle_test.pkl.gz)
    :param n_hidden: number of hidden units the saved model was trained with
    :param n_in: input dimensionality of the saved model (e.g. 28 * 28)
    :param n_out: number of output classes of the saved model (e.g. 10)

    Improvements over the original: n_in/n_out are now actually used
    instead of hard-coded 28*28/10 (the documented call passes the same
    values, so behavior is unchanged there); the model pickle is opened in
    binary mode and closed deterministically; the unused `import csv` and
    unused symbolic `y` variable were removed.
    """
    datasets = load_data(dataset)
    test_set_x, test_set_y = datasets[2]
    test_set_x = test_set_x.get_value()
    test_set_y = test_set_y.eval()
    x = T.matrix('x')  # the data is presented as rasterized images
    rng = numpy.random.RandomState(1234)
    # rebuild the architecture; the weights are overwritten from the
    # pickle immediately below
    classifier = MLP(
        rng=rng,
        input=x,
        n_in=n_in,
        n_hidden=n_hidden,
        n_out=n_out
    )
    # pickle files must be read in binary mode
    with open('../data/mlp_model.pkl', 'rb') as model_file:
        (classifier.params,
         classifier.logRegressionLayer.y_pred,
         classifier.input) = cPickle.load(model_file)
    predict_model = theano.function(
        inputs=[classifier.input],
        outputs=classifier.logRegressionLayer.y_pred)
    with gzip.open('../data/kaggle_test.pkl.gz', 'rb') as f:
        test_data = pickle.load(f)
    # pixels are 0-255; the model was trained on 0-1 inputs
    predicted_values = predict_model(test_data / 255)
    # rows of (ImageId, Label), ids starting at 1
    result = numpy.vstack(
        (numpy.arange(predicted_values.shape[0]) + 1, predicted_values))
    res = result.T
    numpy.savetxt("../data/result_mlp.csv", res, fmt=('%d', '%d'),
                  delimiter=',', header='ImageId,Label')
if __name__ == '__main__':
    # Train by default; uncomment predict() to generate a Kaggle
    # submission from a previously pickled model instead.
    test_mlp()
    # predict('../data/k_mnist.pkl.gz',500,28*28,10)
|
|
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Resource Scheduling Offhours
============================
Custodian provides for time based filters, that allow for taking periodic
action on a resource, with resource schedule customization based on tag values.
A common use is offhours scheduling for asgs and instances.
Features
========
- Flexible offhours scheduling with opt-in, opt-out selection, and timezone
support.
- Resume during offhours support.
- Can be combined with other filters to get a particular set (
resources with tag, vpc, etc).
- Can be combined with arbitrary actions
Policy Configuration
====================
We provide an `onhour` and `offhour` time filter, each should be used in a
different policy, they support the same configuration options:
- **weekends**: default true, whether to leave resources off for the weekend
- **weekend-only**: default false, whether to turn the resource off only on
the weekend
- **default_tz**: which timezone to utilize when evaluating time **(REQUIRED)**
- **tag**: which resource tag name to use for per-resource configuration
(schedule and timezone overrides and opt-in/opt-out); default is
``maid_offhours``.
- **opt-out**: Determines the behavior for resources which do not have a tag
matching the one specified for **tag**. Values can be either ``false`` (the
default) where the policy operates on an opt-in basis and resources must have
the tag in order to be acted on by the policy, or ``true`` where the policy
operates on an opt-out basis, and resources without the tag are acted on by
the policy.
- **onhour**: the default time to start/run resources, specified as 0-23
- **offhour**: the default time to stop/suspend resources, specified as 0-23
This example policy overrides most of the defaults for an offhour policy:
.. code-block:: yaml
policies:
- name: offhours-stop
resource: ec2
filters:
- type: offhour
weekends: false
default_tz: pt
tag: downtime
opt-out: true
onhour: 8
offhour: 20
Tag Based Configuration
=======================
Resources can use a special tag to override the default configuration on a
per-resource basis. Note that the name of the tag is configurable via the
``tag`` option in the policy; the examples below use the default tag name,
``maid_offhours``.
The value of the tag must be one of the following:
- **(empty)** or **on** - An empty tag value or a value of "on" implies night
and weekend offhours using the default time zone configured in the policy
(tz=est if unspecified) and the default onhour and offhour values configured
in the policy.
- **off** - If offhours is configured to run in opt-out mode, this tag can be
specified to disable offhours on a given instance. If offhours is configured
to run in opt-in mode, this tag will have no effect (the resource will still
be opted out).
- a semicolon-separated string composed of one or more of the following
components, which override the defaults specified in the policy:
* ``tz=<timezone>`` to evaluate with a resource-specific timezone, where
``<timezone>`` is either one of the supported timezone aliases defined in
:py:attr:`c7n.filters.offhours.Time.TZ_ALIASES` (such as ``pt``) or the name
of a geographic timezone identifier in
[IANA's tzinfo database](https://www.iana.org/time-zones), such as
``America/Los_Angeles``. *(Note all timezone aliases are
referenced to a locality to ensure taking into account local daylight
savings time, if applicable.)*
* ``off=(time spec)`` and/or ``on=(time spec)`` matching time specifications
supported by :py:class:`c7n.filters.offhours.ScheduleParser` as described
in the next section.
ScheduleParser Time Specifications
----------------------------------
Each time specification follows the format ``(days,hours)``. Multiple time
specifications can be combined in square-bracketed lists, i.e.
``[(days,hours),(days,hours),(days,hours)]``.
**Examples**::
# up mon-fri from 7am-7pm; eastern time
off=(M-F,19);on=(M-F,7)
# up mon-fri from 6am-9pm; up sun from 10am-6pm; pacific time
off=[(M-F,21),(U,18)];on=[(M-F,6),(U,10)];tz=pt
**Possible values**:
+------------+----------------------+
| field | values |
+============+======================+
| days | M, T, W, H, F, S, U |
+------------+----------------------+
| hours | 0, 1, 2, ..., 22, 23 |
+------------+----------------------+
Days can be specified in a range (ex. M-F).
Policy examples
===============
Turn ec2 instances on and off
.. code-block:: yaml
policies:
- name: offhours-stop
resource: ec2
filters:
- type: offhour
actions:
- stop
- name: offhours-start
resource: ec2
filters:
- type: onhour
actions:
- start
Here's doing the same with auto scale groups
.. code-block:: yaml
policies:
- name: asg-offhours-stop
resource: asg
filters:
- offhour
actions:
- suspend
- name: asg-onhours-start
resource: asg
filters:
- onhour
actions:
- resume
Additional policy examples and resource-type-specific information can be seen in
the :ref:`EC2 Offhours <ec2offhours>` and :ref:`ASG Offhours <asgoffhours>`
use cases.
Resume During Offhours
======================
These policies are evaluated hourly; during each run (once an hour),
cloud-custodian will act on **only** the resources tagged for that **exact**
hour. In other words, if a resource has an offhours policy of
stopping/suspending at 23:00 Eastern daily and starting/resuming at 06:00
Eastern daily, and you run cloud-custodian once an hour via Lambda, that
resource will only be stopped once a day sometime between 23:00 and 23:59, and
will only be started once a day sometime between 06:00 and 06:59. If the current
hour does not *exactly* match the hour specified in the policy, nothing will be
done at all.
As a result of this, if custodian stops an instance or suspends an ASG and you
need to start/resume it, you can safely do so manually and custodian won't touch
it again until the next day.
ElasticBeanstalk, EFS and Other Services with Tag Value Restrictions
====================================================================
A number of AWS services have restrictions on the characters that can be used
in tag values, such as `ElasticBeanstalk <http://docs.aws.amazon.com/elasticbean
stalk/latest/dg/using-features.tagging.html>`_ and `EFS <http://docs.aws.amazon.
com/efs/latest/ug/API_Tag.html>`_. In particular, these services do not allow
parenthesis, square brackets, commas, or semicolons, or empty tag values. This
proves to be problematic with the tag-based schedule configuration described
above. The best current workaround is to define a separate policy with a unique
``tag`` name for each unique schedule that you want to use, and then tag
resources with that tag name and a value of ``on``. Note that this can only be
used in opt-in mode, not opt-out.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# note we have to module import for our testing mocks
import datetime
import logging
from os.path import join
from dateutil import zoneinfo
from c7n.filters import Filter, FilterValidationError
from c7n.utils import type_schema, dumps
log = logging.getLogger('custodian.offhours')
def brackets_removed(u):
    """Return *u* with every '[' and ']' character deleted."""
    return u.translate({ord(ch): None for ch in '[]'})
def parens_removed(u):
    """Return *u* with every '(' and ')' character deleted."""
    return u.translate({ord(ch): None for ch in '()'})
class Time(Filter):
    """Base class for the onhour/offhour time-of-day filters.

    A resource matches when the current hour, evaluated in the schedule's
    timezone, equals an on/off hour of the schedule. The schedule comes
    either from the resource's tag (see ScheduleParser) or from the
    policy-level defaults built by the subclass's get_default_schedule().
    """

    schema = {
        'type': 'object',
        'properties': {
            'tag': {'type': 'string'},
            'default_tz': {'type': 'string'},
            'weekends': {'type': 'boolean'},
            'weekends-only': {'type': 'boolean'},
            'opt-out': {'type': 'boolean'},
        }
    }

    # 'on' or 'off'; set by the OnHour/OffHour subclasses
    time_type = None

    # Defaults and constants
    DEFAULT_TAG = "maid_offhours"
    DEFAULT_TZ = 'et'

    # short aliases -> IANA tz identifiers; all are locality based so
    # daylight savings is taken into account
    TZ_ALIASES = {
        'pdt': 'America/Los_Angeles',
        'pt': 'America/Los_Angeles',
        'pst': 'America/Los_Angeles',
        'ast': 'America/Phoenix',
        'at': 'America/Phoenix',
        'est': 'America/New_York',
        'edt': 'America/New_York',
        'et': 'America/New_York',
        'cst': 'America/Chicago',
        'cdt': 'America/Chicago',
        'ct': 'America/Chicago',
        'mst': 'America/Denver',
        'mdt': 'America/Denver',
        'mt': 'America/Denver',
        'gmt': 'Etc/GMT',
        'gt': 'Etc/GMT',
        'bst': 'Europe/London',
        'ist': 'Europe/Dublin',
        'cet': 'Europe/Berlin',
        # Technically IST (Indian Standard Time), but that's the same as Ireland
        'it': 'Asia/Kolkata',
        'jst': 'Asia/Tokyo',
        'kst': 'Asia/Seoul',
        'sgt': 'Asia/Singapore',
        'aet': 'Australia/Sydney',
        'brt': 'America/Sao_Paulo'
    }

    def __init__(self, data, manager=None):
        super(Time, self).__init__(data, manager)
        self.default_tz = self.data.get('default_tz', self.DEFAULT_TZ)
        self.weekends = self.data.get('weekends', True)
        self.weekends_only = self.data.get('weekends-only', False)
        self.opt_out = self.data.get('opt-out', False)
        self.tag_key = self.data.get('tag', self.DEFAULT_TAG).lower()
        self.default_schedule = self.get_default_schedule()
        self.parser = ScheduleParser(self.default_schedule)
        self.id_key = None
        # per-run bookkeeping, dumped to the log dir in process()
        self.opted_out = []
        self.parse_errors = []
        self.enabled_count = 0

    def validate(self):
        """Validate the policy-level timezone and hour.

        DEFAULT_HR is defined by the OnHour/OffHour subclasses.
        """
        if self.get_tz(self.default_tz) is None:
            raise FilterValidationError(
                "Invalid timezone specified %s" % self.default_tz)
        hour = self.data.get("%shour" % self.time_type, self.DEFAULT_HR)
        if hour not in self.parser.VALID_HOURS:
            raise FilterValidationError("Invalid hour specified %s" % hour)
        return self

    def process(self, resources, event=None):
        """Filter resources, then persist parse errors / opt-outs as JSON."""
        resources = super(Time, self).process(resources)
        if self.parse_errors and self.manager and self.manager.log_dir:
            self.log.warning("parse errors %d", len(self.parse_errors))
            with open(join(
                    self.manager.log_dir, 'parse_errors.json'), 'w') as fh:
                dumps(self.parse_errors, fh=fh)
            self.parse_errors = []
        if self.opted_out and self.manager and self.manager.log_dir:
            self.log.debug("disabled count %d", len(self.opted_out))
            with open(join(
                    self.manager.log_dir, 'opted_out.json'), 'w') as fh:
                dumps(self.opted_out, fh=fh)
            self.opted_out = []
        return resources

    def __call__(self, i):
        """Return True when resource *i* should be acted on this hour."""
        value = self.get_tag_value(i)
        # Sigh delayed init, due to circle dep, process/init would be better
        # but unit testing is calling this direct.
        if self.id_key is None:
            self.id_key = (
                self.manager is None and 'InstanceId' or self.manager.get_model().id)
        # The resource tag is not present, if we're not running in an opt-out
        # mode, we're done.
        if value is False:
            if not self.opt_out:
                return False
            value = ""  # take the defaults
        # Resource opt out, track and record
        if 'off' == value:
            self.opted_out.append(i)
            return False
        else:
            self.enabled_count += 1
        try:
            return self.process_resource_schedule(i, value, self.time_type)
        # narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate; anything else is logged and treated as no-match
        except Exception:
            log.exception(
                "%s failed to process resource:%s value:%s",
                self.__class__.__name__, i[self.id_key], value)
            return False

    def process_resource_schedule(self, i, value, time_type):
        """Does the resource tag schedule and policy match the current time."""
        rid = i[self.id_key]
        # this is to normalize trailing semicolons which when done allows
        # dateutil.parser.parse to process: value='off=(m-f,1);' properly.
        # before this normalization, some cases would silently fail.
        value = ';'.join(filter(None, value.split(';')))
        if self.parser.has_resource_schedule(value, time_type):
            schedule = self.parser.parse(value)
        elif self.parser.keys_are_valid(value):
            # respect timezone from tag
            raw_data = self.parser.raw_data(value)
            if 'tz' in raw_data:
                schedule = dict(self.default_schedule)
                schedule['tz'] = raw_data['tz']
            else:
                schedule = self.default_schedule
        else:
            schedule = None
        if schedule is None:
            log.warning(
                "Invalid schedule on resource:%s value:%s", rid, value)
            self.parse_errors.append((rid, value))
            return False
        tz = self.get_tz(schedule['tz'])
        if not tz:
            log.warning(
                "Could not resolve tz on resource:%s value:%s", rid, value)
            self.parse_errors.append((rid, value))
            return False
        # truncate to the hour: these policies are evaluated hourly and
        # only match the exact hour in the schedule
        now = datetime.datetime.now(tz).replace(
            minute=0, second=0, microsecond=0)
        return self.match(now, schedule)

    def match(self, now, schedule):
        """True when *now* hits one of the schedule's (days, hour) entries."""
        time = schedule.get(self.time_type, ())
        for item in time:
            days, hour = item.get("days"), item.get('hour')
            if now.weekday() in days and now.hour == hour:
                return True
        return False

    def get_tag_value(self, i):
        """Get the resource's tag value specifying its schedule.

        Returns False when the configured tag is absent.
        """
        # Look for the tag, Normalize tag key and tag value
        found = False
        for t in i.get('Tags', ()):
            if t['Key'].lower() == self.tag_key:
                found = t['Value']
                break
        if found is False:
            return False
        # enforce utf8, or do translate tables via unicode ord mapping
        value = found.lower().encode('utf8').decode('utf8')
        # Some folks seem to be interpreting the docs quote marks as
        # literal for values.
        value = value.strip("'").strip('"')
        return value

    @classmethod
    def get_tz(cls, tz):
        """Resolve an alias or IANA name to a tzinfo (None if unknown)."""
        return zoneinfo.gettz(cls.TZ_ALIASES.get(tz, tz))

    def get_default_schedule(self):
        raise NotImplementedError("use subclass")
class OffHour(Time):
    """Time filter matching the hour at which resources should be stopped."""

    schema = type_schema(
        'offhour', rinherit=Time.schema, required=['offhour', 'default_tz'],
        offhour={'type': 'integer', 'minimum': 0, 'maximum': 23})
    time_type = "off"

    DEFAULT_HR = 19

    def get_default_schedule(self):
        """Build the policy-level fallback 'off' schedule."""
        hour = self.data.get("%shour" % self.time_type, self.DEFAULT_HR)
        if self.weekends_only:
            # stop only on Friday evening
            days = [4]
        elif self.weekends:
            days = tuple(range(5))
        else:
            days = tuple(range(7))
        return {'tz': self.default_tz,
                self.time_type: [{'hour': hour, 'days': days}]}
class OnHour(Time):
    """Time filter matching the hour at which resources should be started."""

    schema = type_schema(
        'onhour', rinherit=Time.schema, required=['onhour', 'default_tz'],
        onhour={'type': 'integer', 'minimum': 0, 'maximum': 23})
    time_type = "on"

    DEFAULT_HR = 7

    def get_default_schedule(self):
        """Build the policy-level fallback 'on' schedule."""
        hour = self.data.get("%shour" % self.time_type, self.DEFAULT_HR)
        if self.weekends_only:
            # turn on monday
            days = [0]
        elif self.weekends:
            days = tuple(range(5))
        else:
            days = tuple(range(7))
        return {'tz': self.default_tz,
                self.time_type: [{'hour': hour, 'days': days}]}
class ScheduleParser(object):
    """Parses tag values carrying custom on/off hours schedules.

    A schedule tag is a ``;``-separated list of ``key=value`` pieces with
    keys ``on``, ``off`` and optionally ``tz``. Each on/off value is one
    ``(days,hour)`` pair or a square-bracketed list of them::

        # up mon-fri from 7am-7pm; eastern time
        off=(M-F,19);on=(M-F,7)
        # up mon-fri from 6am-9pm; up sun from 10am-6pm; pacific time
        off=[(M-F,21),(U,18)];on=[(M-F,6),(U,10)];tz=pt

    Days are one of M, T, W, H, F, S, U, or a range such as M-F (ranges
    may wrap around the weekend, e.g. F-M). Hours are 0-23. When no ``tz``
    is given, the default schedule's timezone is used.

    ``parse`` returns a dict of the shape::

        {off: [{days: [...], hour: 21}, ...],
         on:  [{days: [...], hour: 6}, ...],
         tz:  "pt"}

    or ``None`` when the tag value is invalid.
    """

    # weekday letters -> datetime.weekday() numbers (Monday == 0)
    DAY_MAP = {'m': 0, 't': 1, 'w': 2, 'h': 3, 'f': 4, 's': 5, 'u': 6}
    VALID_HOURS = tuple(range(24))

    def __init__(self, default_schedule):
        self.default_schedule = default_schedule
        # memoizes parse() results per tag value
        self.cache = {}

    @staticmethod
    def raw_data(tag_value):
        """Split a tag value into a key -> raw-string dict, no validation.

        Pieces are separated by spaces and/or semicolons; anything that is
        not exactly ``key=value`` is silently dropped.
        """
        data = {}
        chunks = []
        for part in tag_value.split(' '):
            chunks.extend(part.split(';'))
        for chunk in chunks:
            kv = chunk.split('=')
            if len(kv) == 2:
                data[kv[0]] = kv[1]
        return data

    def keys_are_valid(self, tag_value):
        """True when every key present in the tag is one of on/off/tz."""
        allowed = ('on', 'off', 'tz')
        return all(key in allowed
                   for key in ScheduleParser.raw_data(tag_value))

    def parse(self, tag_value):
        """Parse a full tag value into a schedule dict (None if invalid)."""
        if tag_value in self.cache:
            return self.cache[tag_value]
        if not self.keys_are_valid(tag_value):
            return None
        schedule = {}
        for piece in tag_value.split(';'):
            kv = piece.split('=')
            # every component must be exactly key=value
            if len(kv) != 2:
                return None
            key, value = kv
            if key != 'tz':
                value = self.parse_resource_schedule(value)
                if value is None:
                    return None
            schedule[key] = value
        # fall back to the policy timezone when none (or blank) supplied
        if not schedule.get('tz'):
            schedule['tz'] = self.default_schedule['tz']
        self.cache[tag_value] = schedule
        return schedule

    @staticmethod
    def has_resource_schedule(tag_value, time_type):
        """True when the tag itself carries an entry for *time_type*."""
        return time_type in ScheduleParser.raw_data(tag_value)

    def parse_resource_schedule(self, lexeme):
        """Parse '[(days,hour),...]' into [{'days': [...], 'hour': h}, ...]."""
        parsed = []
        for expr in brackets_removed(lexeme).split(',('):
            tokens = parens_removed(expr).split(',')
            # each entry must be exactly (<days>, <hour>) with a numeric hour
            if len(tokens) != 2:
                return None
            if not tokens[1].isdigit():
                return None
            hour = int(tokens[1])
            if hour not in self.VALID_HOURS:
                return None
            days = self.expand_day_range(tokens[0])
            if not days:
                return None
            parsed.append({'days': days, 'hour': hour})
        return parsed

    def expand_day_range(self, days):
        """Expand 'm' or a range like 'm-f' into weekday ints (None if bad)."""
        if days in self.DAY_MAP:
            return [self.DAY_MAP[days]]
        bounds = [d for d in map(self.DAY_MAP.get, days.split('-'))
                  if d is not None]
        if len(bounds) != 2:
            return None
        start, end = bounds
        if start > end:
            # wrap around the week, e.g. friday-monday -> 4,5,6,0
            return list(range(start, 7)) + list(range(end + 1))
        return list(range(min(bounds), max(bounds) + 1))
|
|
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
# search functions for entity-type objects
# Notes are searched as plain text, not HTML; this should be fixed later.
from __future__ import division
import datetime
import re
import time
import unittest
from vistrails.core.query import extract_text
################################################################################
class SearchParseError(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class SearchStmt(object):
def __init__(self, content):
self.text = content
self.content = re.compile('.*'+content+'.*', re.MULTILINE | re.IGNORECASE)
def match(self, entity):
return True
def matchModule(self, v, m):
return True
def run(self, v, n):
pass
def __call__(self):
"""Make SearchStmt behave just like a QueryObject."""
return self
class TimeSearchStmt(SearchStmt):
oneSecond = 1.0
oneMinute = oneSecond * 60.0
oneHour = oneMinute * 60.0
oneDay = oneHour * 24.0
oneWeek = oneDay * 7.0
oneMonth = oneDay * 31.0 # wrong, I know
oneYear = oneDay * 365.0 # wrong, I know
amounts = {'seconds': oneSecond,
'minutes': oneMinute,
'hours': oneHour,
'days': oneDay,
'weeks': oneWeek,
'months': oneMonth,
'years': oneYear}
months = {'january': 1,
'february': 2,
'march': 3,
'april': 4,
'may': 5,
'june': 6,
'july': 7,
'august': 8,
'september': 9,
'october': 10,
'november': 11,
'december': 12}
dateEntry = r'([^\,\/\: ]+)'
timeEntry = r'(\d?\d?)'
dateSep = r' *[\,\/\- ] *'
timeSep = r' *: *'
sep = r' *'
start = r'^ *'
finish = r' *$'
twoEntryDate = (dateEntry+
dateSep+
dateEntry)
threeEntryDate = (dateEntry+
dateSep+
dateEntry+
dateSep+
dateEntry)
twoEntryTime = (timeEntry+
timeSep+
timeEntry)
threeEntryTime = (timeEntry+
timeSep+
timeEntry+
timeSep+
timeEntry)
dateRE = [re.compile((start+
twoEntryDate+
finish)), # Mar 12 Mar, 12
re.compile((start+
threeEntryDate+
finish)), # Mar, 12, 2006 2006 Mar 12 etc
re.compile((start+
twoEntryTime+
finish)),
re.compile((start+
threeEntryTime+
finish)),
re.compile((start+
twoEntryDate+
sep+
twoEntryTime+
finish)),
re.compile((start+
twoEntryDate+
sep+
threeEntryTime+
finish)),
re.compile((start+
threeEntryDate+
sep+
twoEntryTime+
finish)),
re.compile((start+
threeEntryDate+
sep+
threeEntryTime+
finish)),
re.compile((start+
twoEntryTime+
sep+
twoEntryDate+
finish)),
re.compile((start+
twoEntryTime+
sep+
threeEntryDate+
finish)),
re.compile((start+
threeEntryTime+
sep+
twoEntryDate+
finish)),
re.compile((start+
threeEntryTime+
sep+
threeEntryDate+
finish))]
def __init__(self, date):
self.date = self.parseDate(date)
def parseDate(self, dateStr):
def parseAgo(s):
[amount, unit] = s.split(' ')
try:
amount = float(amount)
except ValueError:
raise SearchParseError("Expected a number, got %s" % amount)
if amount <= 0:
raise SearchParseError("Expected a positive number, got %s" % amount)
unitRe = re.compile('^'+unit)
keys = [k
for k in TimeSearchStmt.amounts.keys()
if unitRe.match(k)]
if len(keys) == 0:
raise SearchParseError("Time unit unknown: %s" % unit)
elif len(keys) > 1:
raise SearchParseError("Time unit ambiguous: %s matches %s" % (unit, keys))
return round(time.time()) - TimeSearchStmt.amounts[keys[0]] * amount
def guessDate(unknownEntries, year=None):
def guessStrMonth(s):
monthRe = re.compile('^'+s)
keys = [k
for k in TimeSearchStmt.months.keys()
if monthRe.match(k)]
if len(keys) == 0:
raise SearchParseError("Unknown month: %s" % s)
elif len(keys) > 1:
raise SearchParseError("Ambiguous month: %s matches %s" % (s, keys))
return TimeSearchStmt.months[keys[0]]
if not year:
m = None
# First heuristic: if month comes first, then year comes last
try:
e0 = int(unknownEntries[0])
except ValueError:
m = guessStrMonth(unknownEntries[0])
try:
d = int(unknownEntries[1])
except ValueError:
raise SearchParseError("Expected day, got %s" % unknownEntries[1])
try:
y = int(unknownEntries[2])
except ValueError:
raise SearchParseError("Expected year, got %s" % unknownEntries[2])
return (y, m, d)
# Second heuristic: if month comes last, then year comes first
try:
e2 = int(unknownEntries[2])
except ValueError:
m = guessStrMonth(unknownEntries[2])
try:
d = int(unknownEntries[1])
except ValueError:
raise SearchParseError("Expected day, got %s" % unknownEntries[1])
try:
y = int(unknownEntries[0])
except ValueError:
raise SearchParseError("Expected year, got %s" % unknownEntries[0])
return (y, m, d)
# If month is the middle one, decide day and year by size
# (year is largest, hopefully year was entered using 4 digits)
try:
e1 = int(unknownEntries[1])
except ValueError:
m = guessStrMonth(unknownEntries[1])
try:
d = int(unknownEntries[2])
except ValueError:
raise SearchParseError("Expected day or year, got %s" % unknownEntries[2])
try:
y = int(unknownEntries[0])
except ValueError:
raise SearchParseError("Expected year or year, got %s" % unknownEntries[0])
return (max(y,d), m, min(y, d))
lst = [(e0,0),(e1,1),(e2,2)]
lst.sort()
return guessDate([str(lst[0][0]),
str(lst[1][0])],
year=e2)
# We know year, decide month using similar heuristics - try string month first,
# then decide which is possible
try:
e0 = int(unknownEntries[0])
except ValueError:
m = guessStrMonth(unknownEntries[0])
try:
d = int(unknownEntries[1])
except ValueError:
raise SearchParseError("Expected day, got %s" % unknownEntries[1])
return (year, m, d)
try:
e1 = int(unknownEntries[1])
except ValueError:
m = guessStrMonth(unknownEntries[1])
try:
d = int(unknownEntries[0])
except ValueError:
raise SearchParseError("Expected day, got %s" % unknownEntries[0])
return (year, m, d)
if e0 > 12:
return (year, e1, e0)
else:
return (year, e0, e1)
dateStr = dateStr.lower().lstrip().rstrip()
if dateStr.endswith(" ago"):
return parseAgo(dateStr[:-4])
if dateStr == "yesterday":
lst = list(time.localtime(round(time.time()) - TimeSearchStmt.oneDay))
# Reset hour, minute, second
lst[3] = 0
lst[4] = 0
lst[5] = 0
return time.mktime(lst)
if dateStr == "today":
lst = list(time.localtime())
# Reset hour, minute, second
lst[3] = 0
lst[4] = 0
lst[5] = 0
return time.mktime(lst)
if dateStr.startswith("this "):
rest = dateStr[5:]
lst = list(time.localtime(round(time.time())))
if rest == "minute":
lst[5] = 0
elif rest == "hour":
lst[5] = 0
lst[4] = 0
elif rest == "day":
lst[5] = 0
lst[4] = 0
lst[3] = 0
elif rest == "week": # weeks start on monday
lst[5] = 0
lst[4] = 0
lst[3] = 0
# This hack saves me the hassle of computing negative days, months, etc
lst = list(time.localtime(time.mktime(lst) - TimeSearchStmt.oneDay * lst[6]))
elif rest == "month":
lst[5] = 0
lst[4] = 0
lst[3] = 0
lst[2] = 1
elif rest == "year":
lst[5] = 0
lst[4] = 0
lst[3] = 0
lst[2] = 1
lst[1] = 1
return time.mktime(lst)
result = [x.match(dateStr) for x in TimeSearchStmt.dateRE]
this = list(time.localtime())
def setTwoDate(g):
d = guessDate(g, year=this[0])
this[0] = d[0]
this[1] = d[1]
this[2] = d[2]
def setThreeDate(g):
d = guessDate(g)
this[0] = d[0]
this[1] = d[1]
this[2] = d[2]
def setTwoTime(g):
this[3] = int(g[0])
this[4] = int(g[1])
this[5] = 0
def setThreeTime(g):
this[3] = int(g[0])
this[4] = int(g[1])
this[5] = int(g[2])
if result[0]:
setTwoDate(result[0].groups())
setTwoTime([0,0])
elif result[1]:
setThreeDate(result[1].groups())
setTwoTime([0,0])
elif result[2]:
setTwoTime(result[2].groups())
elif result[3]:
setThreeTime(result[3].groups())
elif result[4]:
g = result[4].groups()
setTwoDate([g[0], g[1]])
setTwoTime([g[2], g[3]])
elif result[5]:
g = result[5].groups()
setTwoDate([g[0], g[1]])
setThreeTime([g[2], g[3], g[4]])
elif result[6]:
g = result[6].groups()
setThreeDate([g[0], g[1], g[2]])
setTwoTime([g[3], g[4]])
elif result[7]:
g = result[7].groups()
setThreeDate([g[0], g[1], g[2]])
setThreeTime([g[3], g[4], g[5]])
elif result[8]:
g = result[8].groups()
setTwoTime([g[0], g[1]])
setTwoDate([g[2], g[3]])
elif result[9]:
g = result[9].groups()
setTwoTime([g[0], g[1]])
setThreeDate([g[2], g[3], g[4]])
elif result[10]:
g = result[10].groups()
setThreeTime([g[0], g[1], g[2]])
setTwoDate([g[3], g[4]])
elif result[11]:
g = result[11].groups()
setThreeTime([g[0], g[1], g[2]])
setThreeDate([g[3], g[4],g[5]])
else:
raise SearchParseError("Expected a date, got '%s'" % dateStr)
return time.mktime(this)
class BeforeSearchStmt(TimeSearchStmt):
def match(self, entity):
if not entity.mod_time:
return False
t = time.mktime(entity.mod_time)
return t <= self.date
class AfterSearchStmt(TimeSearchStmt):
def match(self, entity):
if not entity.mod_time:
return False
t = time.mktime(entity.mod_time)
return t >= self.date
class UserSearchStmt(SearchStmt):
def match(self, entity):
if not entity.user:
return False
return self.content.match(entity.user)
class NotesSearchStmt(SearchStmt):
def match(self, entity):
if entity.description:
plainNotes = extract_text(entity.description)
return self.content.search(plainNotes)
return False
class NameSearchStmt(SearchStmt):
def match(self, entity):
return self.content.match(entity.name)
class AndSearchStmt(SearchStmt):
def __init__(self, lst):
self.matchList = lst
def match(self, entity):
for s in self.matchList:
if not s.match(entity):
return False
return True
class OrSearchStmt(SearchStmt):
def __init__(self, lst):
self.matchList = lst
def match(self, entity):
for s in self.matchList:
if s.match(entity):
return True
return False
class NotSearchStmt(SearchStmt):
def __init__(self, stmt):
self.stmt = stmt
def match(self, entity):
return not self.stmt.match(entity)
class TrueSearch(SearchStmt):
def __init__(self):
pass
def match(self, entity):
return True
################################################################################
class SearchCompiler(object):
SEPARATOR = -1
def __init__(self, searchStr):
self.searchStmt = self.compile(searchStr)
def compile(self, searchStr):
lst = []
t1 = searchStr.split(' ')
while t1:
tok = t1[0]
cmd = tok.split(':')
if not SearchCompiler.dispatch.has_key(cmd[0]):
fun = SearchCompiler.parseAny
else:
fun = SearchCompiler.dispatch[cmd[0]]
if len(cmd) > 1:
[search, rest] = fun(self, cmd[1:] + t1[1:])
else:
[search, rest] = fun(self, t1)
lst.append(search)
t1 = rest
return AndSearchStmt(lst)
def parseUser(self, tokStream):
if len(tokStream) == 0:
raise SearchParseError('Expected token, got end of search')
return (UserSearchStmt(tokStream[0]), tokStream[1:])
def parseAny(self, tokStream):
if len(tokStream) == 0:
raise SearchParseError('Expected token, got end of search')
tok = tokStream[0]
return (OrSearchStmt([UserSearchStmt(tok),
NotesSearchStmt(tok),
NameSearchStmt(tok)]), tokStream[1:])
def parseNotes(self, tokStream):
if len(tokStream) == 0:
raise SearchParseError('Expected token, got end of search')
lst = []
while len(tokStream):
tok = tokStream[0]
if ':' in tok:
return (AndSearchStmt(lst), tokStream)
lst.append(NotesSearchStmt(tok))
tokStream = tokStream[1:]
return (AndSearchStmt(lst), [])
def parseName(self, tokStream):
if len(tokStream) == 0:
raise SearchParseError('Expected token, got end of search')
lst = []
while len(tokStream):
tok = tokStream[0]
if ':' in tok:
return (AndSearchStmt(lst), tokStream)
lst.append(NameSearchStmt(tok))
tokStream = tokStream[1:]
return (AndSearchStmt(lst), [])
def parseBefore(self, tokStream):
old_tokstream = tokStream
try:
if len(tokStream) == 0:
raise SearchParseError('Expected token, got end of search')
lst = []
while len(tokStream):
tok = tokStream[0]
# ugly, special case times
if (':' in tok and
not TimeSearchStmt.dateRE[2].match(tok) and
not TimeSearchStmt.dateRE[3].match(tok)):
return (BeforeSearchStmt(" ".join(lst)), tokStream)
lst.append(tok)
tokStream = tokStream[1:]
return (BeforeSearchStmt(" ".join(lst)), [])
except SearchParseError, e:
if 'Expected a date' in e.args[0]:
try:
return self.parseAny(old_tokstream)
except SearchParseError, e2:
print "Another exception...", e2.args[0]
raise e
else:
raise
def parseAfter(self, tokStream):
try:
if len(tokStream) == 0:
raise SearchParseError('Expected token, got end of search')
lst = []
while len(tokStream):
tok = tokStream[0]
# ugly, special case times
if (':' in tok and
not TimeSearchStmt.dateRE[2].match(tok) and
not TimeSearchStmt.dateRE[3].match(tok)):
return (AfterSearchStmt(" ".join(lst)), tokStream)
lst.append(tok)
tokStream = tokStream[1:]
return (AfterSearchStmt(" ".join(lst)), [])
except SearchParseError, e:
if 'Expected a date' in e.args[0]:
try:
return self.parseAny(['after'] + tokStream)
except SearchParseError, e2:
print "Another exception...", e2.args[0]
raise e
else:
raise
dispatch = {'user': parseUser,
'notes': parseNotes,
'before': parseBefore,
'after': parseAfter,
'name': parseName,
'any': parseAny}
################################################################################
class TestSearch(unittest.TestCase):
def test1(self):
self.assertEquals((TimeSearchStmt('1 day ago').date -
TimeSearchStmt('2 days ago').date), TimeSearchStmt.oneDay)
def test2(self):
self.assertEquals((TimeSearchStmt('12 mar 2006').date -
TimeSearchStmt('11 mar 2006').date), TimeSearchStmt.oneDay)
def test3(self):
# This will fail if year flips during execution. Oh well :)
yr = datetime.datetime.today().year
self.assertEquals((TimeSearchStmt('12 mar').date -
TimeSearchStmt('12 mar %d' % yr).date), 0.0)
def test4(self):
# This will fail if year flips during execution. Oh well :)
yr = datetime.datetime.today().year
self.assertEquals((TimeSearchStmt('mar 12').date -
TimeSearchStmt('12 mar %d' % yr).date), 0.0)
def test5(self):
yr = datetime.datetime.today().year
self.assertEquals((TimeSearchStmt('03 15').date -
TimeSearchStmt('15 mar %d' % yr).date), 0.0)
def test6(self):
self.assertEquals((TimeSearchStmt('03/15/2006').date -
TimeSearchStmt('15 mar 2006').date), 0.0)
def test7(self):
self.assertEquals((TimeSearchStmt('1 day ago').date -
TimeSearchStmt('24 hours ago').date), 0.0)
def test8(self):
self.assertEquals((TimeSearchStmt('1 hour ago').date -
TimeSearchStmt('60 minutes ago').date), 0.0)
def test9(self):
self.assertEquals((TimeSearchStmt('1 minute ago').date -
TimeSearchStmt('60 seconds ago').date), 0.0)
def test10(self):
self.assertEquals((TimeSearchStmt('1 week ago').date -
TimeSearchStmt('7 days ago').date), 0.0)
def test11(self):
self.assertEquals((TimeSearchStmt('1 month ago').date -
TimeSearchStmt('31 days ago').date), 0.0)
def test12(self):
self.assertEquals(TimeSearchStmt('12 mar 2007 21:00:00').date,
TimeSearchStmt('21:00:00 12 mar 2007').date)
def test13(self):
# This will fail if year flips during execution. Oh well :)
yr = datetime.datetime.today().year
self.assertEquals(TimeSearchStmt('12 mar %d 21:00' % yr).date,
TimeSearchStmt('21:00:00 12 mar').date)
def test14(self):
self.assertEquals(TimeSearchStmt('13 apr 2006 21:00').date,
TimeSearchStmt('04/13/2006 21:00:00').date)
def test15(self):
import vistrails.core.vistrail
from vistrails.core.db.locator import XMLFileLocator
import vistrails.core.system
v = XMLFileLocator(vistrails.core.system.vistrails_root_directory() +
'/tests/resources/dummy.xml').load()
# FIXME: Add notes to this.
# self.assertTrue(NotesSearchStmt('mapper').match(v.actionMap[36]))
# self.assertFalse(NotesSearchStmt('-qt-block-indent').match(v.actionMap[36]))
# test16 and 17 now pass.
# def test16(self):
# self.assertRaises(SearchParseError, lambda *args: SearchCompiler('before:'))
# def test17(self):
# self.assertRaises(SearchParseError, lambda *args: SearchCompiler('after:yesterday before:lalala'))
def test18(self):
self.assertEquals(TimeSearchStmt(' 13 apr 2006 ').date,
TimeSearchStmt(' 13 apr 2006 ').date)
def test19(self):
self.assertEquals(SearchCompiler('before:13 apr 2006 12:34:56').searchStmt.matchList[0].date,
BeforeSearchStmt('13 apr 2006 12:34:56').date)
def test20(self):
self.assertEquals(SearchCompiler('after:yesterday').searchStmt.matchList[0].date,
SearchCompiler('before:yesterday').searchStmt.matchList[0].date)
def test21(self):
self.assertEquals(SearchCompiler('after:today').searchStmt.matchList[0].date,
SearchCompiler('before:today').searchStmt.matchList[0].date)
def test22(self):
self.assertEquals(SearchCompiler('before:today').searchStmt.matchList[0].date,
SearchCompiler('before:this day').searchStmt.matchList[0].date)
def test23(self):
t = time.localtime()
import vistrails.core.utils
inv = vistrails.core.utils.invert(TimeSearchStmt.months)
m = inv[t[1]]
self.assertEquals(SearchCompiler('after:%s %s %s' % (t[0], m, t[2])).searchStmt.matchList[0].date,
SearchCompiler('after:today').searchStmt.matchList[0].date)
def test24(self):
# Test compiling these searches
SearchCompiler('before')
SearchCompiler('after')
if __name__ == '__main__':
unittest.main()
|
|
'''tzinfo timezone information for Europe/Ljubljana.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Ljubljana(DstTzInfo):
'''Europe/Ljubljana timezone definition. See datetime.tzinfo for details'''
zone = 'Europe/Ljubljana'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1941,4,18,22,0,0),
d(1942,11,2,1,0,0),
d(1943,3,29,1,0,0),
d(1943,10,4,1,0,0),
d(1944,4,3,1,0,0),
d(1944,10,2,1,0,0),
d(1945,5,8,1,0,0),
d(1945,9,16,1,0,0),
d(1982,11,26,23,0,0),
d(1983,3,27,1,0,0),
d(1983,9,25,1,0,0),
d(1984,3,25,1,0,0),
d(1984,9,30,1,0,0),
d(1985,3,31,1,0,0),
d(1985,9,29,1,0,0),
d(1986,3,30,1,0,0),
d(1986,9,28,1,0,0),
d(1987,3,29,1,0,0),
d(1987,9,27,1,0,0),
d(1988,3,27,1,0,0),
d(1988,9,25,1,0,0),
d(1989,3,26,1,0,0),
d(1989,9,24,1,0,0),
d(1990,3,25,1,0,0),
d(1990,9,30,1,0,0),
d(1991,3,31,1,0,0),
d(1991,9,29,1,0,0),
d(1992,3,29,1,0,0),
d(1992,9,27,1,0,0),
d(1993,3,28,1,0,0),
d(1993,9,26,1,0,0),
d(1994,3,27,1,0,0),
d(1994,9,25,1,0,0),
d(1995,3,26,1,0,0),
d(1995,9,24,1,0,0),
d(1996,3,31,1,0,0),
d(1996,10,27,1,0,0),
d(1997,3,30,1,0,0),
d(1997,10,26,1,0,0),
d(1998,3,29,1,0,0),
d(1998,10,25,1,0,0),
d(1999,3,28,1,0,0),
d(1999,10,31,1,0,0),
d(2000,3,26,1,0,0),
d(2000,10,29,1,0,0),
d(2001,3,25,1,0,0),
d(2001,10,28,1,0,0),
d(2002,3,31,1,0,0),
d(2002,10,27,1,0,0),
d(2003,3,30,1,0,0),
d(2003,10,26,1,0,0),
d(2004,3,28,1,0,0),
d(2004,10,31,1,0,0),
d(2005,3,27,1,0,0),
d(2005,10,30,1,0,0),
d(2006,3,26,1,0,0),
d(2006,10,29,1,0,0),
d(2007,3,25,1,0,0),
d(2007,10,28,1,0,0),
d(2008,3,30,1,0,0),
d(2008,10,26,1,0,0),
d(2009,3,29,1,0,0),
d(2009,10,25,1,0,0),
d(2010,3,28,1,0,0),
d(2010,10,31,1,0,0),
d(2011,3,27,1,0,0),
d(2011,10,30,1,0,0),
d(2012,3,25,1,0,0),
d(2012,10,28,1,0,0),
d(2013,3,31,1,0,0),
d(2013,10,27,1,0,0),
d(2014,3,30,1,0,0),
d(2014,10,26,1,0,0),
d(2015,3,29,1,0,0),
d(2015,10,25,1,0,0),
d(2016,3,27,1,0,0),
d(2016,10,30,1,0,0),
d(2017,3,26,1,0,0),
d(2017,10,29,1,0,0),
d(2018,3,25,1,0,0),
d(2018,10,28,1,0,0),
d(2019,3,31,1,0,0),
d(2019,10,27,1,0,0),
d(2020,3,29,1,0,0),
d(2020,10,25,1,0,0),
d(2021,3,28,1,0,0),
d(2021,10,31,1,0,0),
d(2022,3,27,1,0,0),
d(2022,10,30,1,0,0),
d(2023,3,26,1,0,0),
d(2023,10,29,1,0,0),
d(2024,3,31,1,0,0),
d(2024,10,27,1,0,0),
d(2025,3,30,1,0,0),
d(2025,10,26,1,0,0),
d(2026,3,29,1,0,0),
d(2026,10,25,1,0,0),
d(2027,3,28,1,0,0),
d(2027,10,31,1,0,0),
d(2028,3,26,1,0,0),
d(2028,10,29,1,0,0),
d(2029,3,25,1,0,0),
d(2029,10,28,1,0,0),
d(2030,3,31,1,0,0),
d(2030,10,27,1,0,0),
d(2031,3,30,1,0,0),
d(2031,10,26,1,0,0),
d(2032,3,28,1,0,0),
d(2032,10,31,1,0,0),
d(2033,3,27,1,0,0),
d(2033,10,30,1,0,0),
d(2034,3,26,1,0,0),
d(2034,10,29,1,0,0),
d(2035,3,25,1,0,0),
d(2035,10,28,1,0,0),
d(2036,3,30,1,0,0),
d(2036,10,26,1,0,0),
d(2037,3,29,1,0,0),
d(2037,10,25,1,0,0),
]
_transition_info = [
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
]
Ljubljana = Ljubljana()
|
|
""" The CLI entry point for exporting projects from the mbed tools to any of the
supported IDEs or project structures.
"""
import sys
from os.path import join, abspath, dirname, exists, basename
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from shutil import move, rmtree
from argparse import ArgumentParser
from os.path import normpath, realpath
from tools.paths import EXPORT_DIR, MBED_HAL, MBED_LIBRARIES, MBED_TARGETS_PATH
from tools.settings import BUILD_DIR
from tools.export import EXPORTERS, mcu_ide_matrix, mcu_ide_list, export_project, get_exporter_toolchain
from tools.tests import TESTS, TEST_MAP
from tools.tests import test_known, test_name_known, Test
from tools.targets import TARGET_NAMES
from tools.utils import argparse_filestring_type, argparse_profile_filestring_type, argparse_many, args_error
from tools.utils import argparse_force_lowercase_type
from tools.utils import argparse_force_uppercase_type
from tools.utils import print_large_string
from tools.options import extract_profile, list_profiles, extract_mcus
def setup_project(ide, target, program=None, source_dir=None, build=None, export_path=None):
"""Generate a name, if not provided, and find dependencies
Positional arguments:
ide - IDE or project structure that will soon be exported to
target - MCU that the project will build for
Keyword arguments:
program - the index of a test program
source_dir - the directory, or directories that contain all of the sources
build - a directory that will contain the result of the export
"""
# Some libraries have extra macros (called by exporter symbols) to we need
# to pass them to maintain compilation macros integrity between compiled
# library and header files we might use with it
if source_dir:
# --source is used to generate IDE files to toolchain directly
# in the source tree and doesn't generate zip file
project_dir = export_path or source_dir[0]
if program:
project_name = TESTS[program]
else:
project_name = basename(normpath(realpath(source_dir[0])))
src_paths = source_dir
lib_paths = None
else:
test = Test(program)
if not build:
# Substitute the mbed library builds with their sources
if MBED_LIBRARIES in test.dependencies:
test.dependencies.remove(MBED_LIBRARIES)
test.dependencies.append(MBED_HAL)
test.dependencies.append(MBED_TARGETS_PATH)
src_paths = [test.source_dir]
lib_paths = test.dependencies
project_name = "_".join([test.id, ide, target])
project_dir = join(EXPORT_DIR, project_name)
return project_dir, project_name, src_paths, lib_paths
def export(target, ide, build=None, src=None, macros=None, project_id=None,
zip_proj=False, build_profile=None, export_path=None, silent=False,
app_config=None):
"""Do an export of a project.
Positional arguments:
target - MCU that the project will compile for
ide - the IDE or project structure to export to
Keyword arguments:
build - to use the compiled mbed libraries or not
src - directory or directories that contain the source to export
macros - extra macros to add to the project
project_id - the name of the project
clean - start from a clean state before exporting
zip_proj - create a zip file or not
Returns an object of type Exporter (tools/exports/exporters.py)
"""
project_dir, name, src, lib = setup_project(ide, target, program=project_id,
source_dir=src, build=build, export_path=export_path)
zip_name = name+".zip" if zip_proj else None
return export_project(src, project_dir, target, ide, name=name,
macros=macros, libraries_paths=lib, zip_proj=zip_name,
build_profile=build_profile, silent=silent,
app_config=app_config)
def main():
"""Entry point"""
# Parse Options
parser = ArgumentParser()
targetnames = TARGET_NAMES
targetnames.sort()
toolchainlist = EXPORTERS.keys()
toolchainlist.sort()
parser.add_argument("-m", "--mcu",
metavar="MCU",
type=str.upper,
help="generate project for the given MCU ({})".format(
', '.join(targetnames)))
parser.add_argument("-i",
dest="ide",
type=argparse_force_lowercase_type(
toolchainlist, "toolchain"),
help="The target IDE: %s"% str(toolchainlist))
parser.add_argument("-c", "--clean",
action="store_true",
default=False,
help="clean the export directory")
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
"-p",
type=test_known,
dest="program",
help="The index of the desired test program: [0-%s]"% (len(TESTS)-1))
group.add_argument("-n",
type=test_name_known,
dest="program",
help="The name of the desired test program")
parser.add_argument("-b",
dest="build",
default=False,
action="store_true",
help="use the mbed library build, instead of the sources")
group.add_argument("-L", "--list-tests",
action="store_true",
dest="list_tests",
default=False,
help="list available programs in order and exit")
group.add_argument("-S", "--list-matrix",
dest="supported_ides",
default=False,
const="matrix",
choices=["matrix", "ides"],
nargs="?",
help="displays supported matrix of MCUs and IDEs")
parser.add_argument("-E",
action="store_true",
dest="supported_ides_html",
default=False,
help="writes tools/export/README.md")
parser.add_argument("--source",
action="append",
type=argparse_filestring_type,
dest="source_dir",
default=[],
help="The source (input) directory")
parser.add_argument("-D",
action="append",
dest="macros",
help="Add a macro definition")
parser.add_argument("--profile", dest="profile", action="append",
type=argparse_profile_filestring_type,
help="Build profile to use. Can be either path to json" \
"file or one of the default one ({})".format(", ".join(list_profiles())),
default=[])
parser.add_argument("--update-packs",
dest="update_packs",
action="store_true",
default=False)
parser.add_argument("--app-config",
dest="app_config",
default=None)
options = parser.parse_args()
# Print available tests in order and exit
if options.list_tests is True:
print '\n'.join([str(test) for test in sorted(TEST_MAP.values())])
sys.exit()
# Only prints matrix of supported IDEs
if options.supported_ides:
if options.supported_ides == "matrix":
print_large_string(mcu_ide_matrix())
elif options.supported_ides == "ides":
print mcu_ide_list()
exit(0)
# Only prints matrix of supported IDEs
if options.supported_ides_html:
html = mcu_ide_matrix(verbose_html=True)
try:
with open("./export/README.md", "w") as readme:
readme.write("Exporter IDE/Platform Support\n")
readme.write("-----------------------------------\n")
readme.write("\n")
readme.write(html)
except IOError as exc:
print "I/O error({0}): {1}".format(exc.errno, exc.strerror)
except:
print "Unexpected error:", sys.exc_info()[0]
raise
exit(0)
if options.update_packs:
from tools.arm_pack_manager import Cache
cache = Cache(True, True)
cache.cache_descriptors()
# Target
if not options.mcu:
args_error(parser, "argument -m/--mcu is required")
# Toolchain
if not options.ide:
args_error(parser, "argument -i is required")
# Clean Export Directory
if options.clean:
if exists(EXPORT_DIR):
rmtree(EXPORT_DIR)
zip_proj = not bool(options.source_dir)
if (options.program is None) and (not options.source_dir):
args_error(parser, "one of -p, -n, or --source is required")
exporter, toolchain_name = get_exporter_toolchain(options.ide)
mcu = extract_mcus(parser, options)[0]
if not exporter.is_target_supported(mcu):
args_error(parser, "%s not supported by %s"%(mcu,options.ide))
profile = extract_profile(parser, options, toolchain_name, fallback="debug")
if options.clean:
rmtree(BUILD_DIR)
export(mcu, options.ide, build=options.build,
src=options.source_dir, macros=options.macros,
project_id=options.program, zip_proj=zip_proj,
build_profile=profile, app_config=options.app_config)
# Script entry point: run the exporter CLI when invoked directly.
if __name__ == "__main__":
    main()
|
|
#----------------------------------------------------------------------
# Copyright (c) 2010-2013 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
'''
URN creation and verification utilities.
'''
import re
from ext.sfa.util.xrn import Xrn # for URN_PREFIX
class URN(object):
    """
    A class that creates and extracts values from URNs
    URN Convention:
    urn:publicid:IDN+<authority>+<type>+<name>
    Authority, type, and name are public ids transcribed into URN format
    By convention a CH's name should be "ch" and an AM's should be "am"
    The authority of the CH should be the prefix for all of your AM and user authorities
    For instance: CH authority = "gcf//gpo//bbn", AM authority = "gcf//gpo//bbn//am1", user authority = "gcf//gpo//bbn"
    EXAMPLES:
        ch_urn = URN("gcf//gpo//bbn", "authority", "sa").urn_string() for a clearinghouse URN
        am1_urn = URN("gcf//gpo//bbn//site1", "authority", "am").urn_string() for an AM at this authority
            Looks like urn:publicid:IDN+gcf:gpo:bbn:site1+authority+am
        am2_urn = URN("gcf//gpo//bbn//site2", "authority", "am").urn_string() for a second AM at this authority
            Looks like urn:publicid:IDN+gcf:gpo:bbn:site2+authority+am
        user_urn = URN("gcf//gpo//bbn", "user", "jane").urn_string() for a user made by the clearinghouse
            Looks like urn:publicid:IDN+gcf:gpo:bbn+user+jane
        slice_urn = URN("gcf//gpo//bbn", "slice", "my-great-experiment").urn_string()
            Looks like urn:publicid:IDN+gcf:gpo:bbn+slice+my-great-experiment
        resource_at_am1_urn = URN("gcf//gpo//bbn//site1", "node", "LinuxBox23").urn_string() for Linux Machine 23 managed by AM1 (at site 1)
            Looks like urn:publicid:IDN+gcf:gpo:bbn:site1+node+LinuxBox23
    """
    def __init__(self, authority=None, type=None, name=None, urn=None):
        # Two construction modes: parse an existing URN string, or
        # assemble one from authority/type/name parts.
        if not urn is None:
            # Parse mode: validate, then split on '+' into
            # prefix / authority / type / name.
            if not is_valid_urn(urn):
                raise ValueError("Invalid URN %s" % urn)
            spl = urn.split('+')
            # Attributes hold the un-escaped (publicid) forms.  The name
            # may itself contain '+' once escaped, hence the re-join of
            # everything after the third separator.
            self.authority = urn_to_string_format(spl[1])
            self.type = urn_to_string_format(spl[2])
            self.name = urn_to_string_format('+'.join(spl[3:]))
            self.urn = urn
        else:
            if not authority or not type or not name:
                raise ValueError("Must provide either all of authority, type, and name, or a urn must be provided")
            for i in [authority, type, name]:
                if i.strip() == '':
                    raise ValueError("Parameter to create_urn was empty string")
            # Keep the caller-supplied, un-escaped values on the instance;
            # only the locals are escaped below when assembling the URN.
            self.authority = authority
            self.type = type
            self.name = name
            # FIXME: check these are valid more?
            if not is_valid_urn_string(authority):
                authority = string_to_urn_format(authority)
            if not is_valid_urn_string(type):
                type = string_to_urn_format(type)
            if not is_valid_urn_string(name):
                name = string_to_urn_format(name)
            self.urn = '%s+%s+%s+%s' % (Xrn.URN_PREFIX, authority, type, name)
        # Final sanity check regardless of construction mode.
        if not is_valid_urn(self.urn):
            raise ValueError("Failed to create valid URN from args %s, %s, %s" % (self.authority, self.type, self.name))
    def __str__(self):
        '''String form is the full URN.'''
        return self.urn_string()
    def urn_string(self):
        '''Return the complete URN string (urn:publicid:IDN+...).'''
        return self.urn
    def getAuthority(self):
        '''Get the authority in un-escaped publicid format'''
        return self.authority
    def getType(self):
        '''Get the URN type in un-escaped publicid format'''
        return self.type
    def getName(self):
        '''Get the name in un-escaped publicid format'''
        return self.name
# Translate publicids to URN format.
# The order of these rules matters
# because we want to catch things like double colons before we
# translate single colons. This is only a subset of the rules.
# See the GENI Wiki: GAPI_Identifiers
# See http://www.faqs.org/rfcs/rfc3151.html
# Each entry maps (plain publicid text, URN-escaped replacement).
# string_to_urn_format applies these forward in order;
# urn_to_string_format applies them backward in reverse order.
publicid_xforms = [('%', '%25'),
                   (';', '%3B'),
                   ('+', '%2B'),
                   (' ', '+' ), # note you must first collapse WS
                   ('#', '%23'),
                   ('?', '%3F'),
                   ("'", '%27'),
                   ('::', ';' ),
                   (':', '%3A'),
                   ('//', ':' ),
                   ('/', '%2F')]
# FIXME: See sfa/util/xrn/Xrn.URN_PREFIX which is ...:IDN
# Every URN produced or accepted by this module starts with this prefix.
publicid_urn_prefix = 'urn:publicid:'
def nameFromURN(instr):
    '''Get the name from the given URN, or empty if not a valid URN'''
    # None / empty input short-circuits without attempting a parse.
    if not instr:
        return ""
    try:
        urn = URN(urn=instr)
        return urn.getName()
    # Python-3-compatible form (the old "except Exception, e" syntax was
    # removed in Python 3; "as" works from 2.6 on). The bound exception
    # was unused, so it is no longer captured.
    except Exception:
        # URN() raises ValueError on malformed input; treat any parse
        # failure as "no name".
        return ""
# validate urn
# Note that this is not sufficient but it is necessary
def is_valid_urn_string(instr):
    '''Could this string be part of a URN'''
    if instr is None or not isinstance(instr, str):
        return False
    # Reject whitespace, '?', '/' and '#'.  Raw string so the regex
    # escapes survive unchanged on Python 3 (the old non-raw "\s" form
    # triggers invalid-escape warnings).  The '|' inside the character
    # class is a literal pipe (not alternation); it is kept so that
    # strings containing '|' are rejected exactly as before.
    return re.search(r"[\s|?/#]", instr) is None
# Note that this is not sufficient but it is necessary
def is_valid_urn(inurn):
    ''' Check that this string is a valid URN'''
    # FIXME: This should pull out the type and do the type specific
    # checks that are currently below
    # A valid URN must be URN-safe text AND carry the publicid prefix.
    if not is_valid_urn_string(inurn):
        return False
    return inurn.startswith(publicid_urn_prefix)
def is_valid_urn_bytype(inurn, urntype, logger=None):
    '''Check that inurn is a valid URN whose type matches urntype,
    applying the type-specific name rules for slice, sliver and user
    URNs.  If a logger is given, the reason for rejection is logged.'''
    if not is_valid_urn(inurn):
        return False
    urnObj = URN(urn=inurn)
    if not urntype:
        urntype = ""
    urntype = urntype.lower()
    if not urnObj.getType().lower() == urntype:
        if logger:
            logger.warn("URN %s not of right type: %s, not %s", inurn, urnObj.getType().lower(), urntype)
        return False
    name = urnObj.getName()
    # Regexes below use raw strings so '\w' survives unchanged on
    # Python 3 (non-raw "\w" triggers invalid-escape warnings).
    if urntype == 'slice':
        # Slice names are <=19 characters, only alphanumeric plus hyphen (no hyphen in first character): '^[a-zA-Z0-9][-a-zA-Z0-9]{0,18}$'
        if len(name) > 19:
            if logger:
                logger.warn("URN %s too long. Slice names are max 19 characters", inurn)
            return False
        if not re.match(r"^[a-zA-Z0-9][-a-zA-Z0-9]{0,18}$", name):
            if logger:
                logger.warn("Slice names may only be alphanumeric plus hyphen (no leading hyphen): %s", name)
            return False
    elif urntype == 'sliver':
        # May use only alphanumeric characters plus hyphen
        if not re.match(r"^[-a-zA-Z0-9]+$", name):
            if logger:
                logger.warn("Sliver names may only be alphanumeric plus hyphen: %s", name)
            return False
    elif urntype == 'user':
        # Usernames should begin with a letter and be alphanumeric or underscores; no hyphen or '.': ('^[a-zA-Z][\w]{0,7}$').
        # Usernames are limited to 8 characters.
        if len(name) > 8:
            if logger:
                logger.warn("URN %s too long. User names are max 8 characters", inurn)
            return False
        if not re.match(r"^[a-zA-Z][\w]{0,7}$", name):
            if logger:
                logger.warn("User names may only be alphanumeric plus underscore, beginning with a letter: %s", name)
            return False
    return True
def urn_to_publicid(urn):
    '''Convert a URN like urn:publicid:... to a publicid'''
    # Remove prefix
    if urn is None or not is_valid_urn(urn):
        # Erroneous urn for conversion.  Use %s formatting so that a
        # None argument raises the intended ValueError instead of a
        # TypeError from concatenating str + None.
        raise ValueError('Invalid urn: %s' % urn)
    publicid = urn[len(publicid_urn_prefix):]
    # return the un-escaped string
    return urn_to_string_format(publicid)
def publicid_to_urn(id):
    '''Convert a publicid to a urn like urn:publicid:.....'''
    # Escape the publicid into URN form, then attach the standard prefix.
    escaped = string_to_urn_format(id)
    return '%s%s' % (publicid_urn_prefix, escaped)
def string_to_urn_format(instr):
    '''Make a string URN compatible, collapsing whitespace and escaping chars'''
    if instr is None or instr.strip() == '':
        raise ValueError("Empty string cant be in a URN")
    # Collapse runs of internal whitespace down to single spaces.
    result = ' '.join(instr.strip().split())
    # Apply the publicid escape transforms in declaration order;
    # ordering matters (e.g. '::' must be handled before ':').
    for plain, escaped in publicid_xforms:
        result = result.replace(plain, escaped)
    return result
def urn_to_string_format(urnstr):
    '''Turn a part of a URN into publicid format, undoing transforms'''
    # None and blank inputs pass through untouched.
    if urnstr is None or not urnstr.strip():
        return urnstr
    result = urnstr
    # Validate it is reasonable URN string?
    # Undo the escape transforms: reverse declaration order, mapping each
    # escaped form back to its plain character.
    for plain, escaped in reversed(publicid_xforms):
        result = result.replace(escaped, plain)
    return result
|
|
#Embedded file name: ACEStream\Core\APIImplementation\DownloadRuntimeConfig.pyo
import sys
import binascii
from ACEStream.Core.simpledefs import *
from ACEStream.Core.DownloadConfig import DownloadConfigInterface
from ACEStream.Core.exceptions import OperationNotPossibleAtRuntimeException
from ACEStream.Core.Utilities.logger import log, log_exc
# Module-wide switch: when True, the setters below trace their calls via log().
DEBUG = False
class DownloadRuntimeConfig(DownloadConfigInterface):
    """Thread-safe runtime view of a download's configuration.

    Every accessor serializes on self.dllock around the inherited
    DownloadConfigInterface implementation.  Setters whose new value must
    also reach the running download additionally schedule a matching call
    on the live download object(s) via the rawserver task queue (network
    thread).  Options that cannot be changed once the download is running
    raise OperationNotPossibleAtRuntimeException.

    This class only restructures the original copy-pasted lock/try/finally
    bodies into the _locked/_sd_task/_dd_task helpers; every public method,
    signature, default and message is unchanged.
    """

    def _locked(self, method, *args, **kwargs):
        # Invoke an unbound DownloadConfigInterface method on self while
        # holding dllock; returns whatever the method returns.
        self.dllock.acquire()
        try:
            return method(self, *args, **kwargs)
        finally:
            self.dllock.release()

    def _sd_task(self, methodname, *args):
        # If a live SingleDownload (self.sd) exists, schedule
        # methodname(*args) on it via the rawserver task queue.  The lambda
        # re-checks self.sd at execution time because the download may have
        # stopped before the task runs.  Caller must hold dllock.
        if self.sd is not None:
            task = lambda: self.sd is not None and getattr(self.sd, methodname)(*args)
            self.session.lm.rawserver.add_task(task, 0)

    def _dd_task(self, methodname, *args):
        # Same as _sd_task, but for the direct-download object (self.dd).
        if self.dd is not None:
            task = lambda: self.dd is not None and getattr(self.dd, methodname)(*args)
            self.session.lm.rawserver.add_task(task, 0)

    def set_max_speed(self, direct, speed, auto_limit=False):
        if DEBUG:
            log('DownloadRuntimeConfig::set_max_speed:', binascii.hexlify(self.get_hash()), direct, speed, auto_limit)
        self.dllock.acquire()
        try:
            self._sd_task('set_max_speed', direct, speed, auto_limit)
            DownloadConfigInterface.set_max_speed(self, direct, speed, auto_limit)
        finally:
            self.dllock.release()

    def get_max_speed(self, direct):
        return self._locked(DownloadConfigInterface.get_max_speed, direct)

    def set_player_buffer_time(self, value):
        if DEBUG:
            log('DownloadRuntimeConfig::set_player_buffer_time', binascii.hexlify(self.get_hash()), value)
        self.dllock.acquire()
        try:
            self._sd_task('set_player_buffer_time', value)
            self._dd_task('set_player_buffer_time', value)
            DownloadConfigInterface.set_player_buffer_time(self, value)
        finally:
            self.dllock.release()

    def get_player_buffer_time(self):
        return self._locked(DownloadConfigInterface.get_player_buffer_time)

    def set_live_buffer_time(self, value):
        if DEBUG:
            log('DownloadRuntimeConfig::set_live_buffer_time', binascii.hexlify(self.get_hash()), value)
        self.dllock.acquire()
        try:
            self._sd_task('set_live_buffer_time', value)
            self._dd_task('set_live_buffer_time', value)
            DownloadConfigInterface.set_live_buffer_time(self, value)
        finally:
            self.dllock.release()

    def get_live_buffer_time(self):
        return self._locked(DownloadConfigInterface.get_live_buffer_time)

    def set_wait_sufficient_speed(self, value):
        if DEBUG:
            log('DownloadRuntimeConfig::set_wait_sufficient_speed:', binascii.hexlify(self.get_hash()), value)
        self.dllock.acquire()
        try:
            self._sd_task('set_wait_sufficient_speed', value)
            self._dd_task('set_wait_sufficient_speed', value)
            DownloadConfigInterface.set_wait_sufficient_speed(self, value)
        finally:
            self.dllock.release()

    def get_wait_sufficient_speed(self):
        return self._locked(DownloadConfigInterface.get_wait_sufficient_speed)

    def get_auto_download_limit(self):
        return self._locked(DownloadConfigInterface.get_auto_download_limit)

    def get_http_support(self):
        return self._locked(DownloadConfigInterface.get_http_support)

    def is_hidden(self):
        return self._locked(DownloadConfigInterface.is_hidden)

    def set_hidden(self, hidden):
        self._locked(DownloadConfigInterface.set_hidden, hidden)

    def get_extra(self, key, default=None):
        return self._locked(DownloadConfigInterface.get_extra, key, default)

    def set_extra(self, key, value):
        self._locked(DownloadConfigInterface.set_extra, key, value)

    def set_direct_download_url(self, value):
        self._locked(DownloadConfigInterface.set_direct_download_url, value)

    def set_download_finished_callback(self, value):
        self._locked(DownloadConfigInterface.set_download_finished_callback, value)

    def set_download_failed_callback(self, value):
        self._locked(DownloadConfigInterface.set_download_failed_callback, value)

    def set_predownload(self, value):
        self._locked(DownloadConfigInterface.set_predownload, value)

    def get_files_priority(self):
        return self._locked(DownloadConfigInterface.get_files_priority)

    def set_files_priority(self, priority_list):
        self._locked(DownloadConfigInterface.set_files_priority, priority_list)

    def set_dest_dir(self, path):
        # Destination directory is fixed once the download exists.
        raise OperationNotPossibleAtRuntimeException()

    def set_video_event_callback(self, usercallback, dlmode=DLMODE_VOD):
        self._locked(DownloadConfigInterface.set_video_event_callback, usercallback, dlmode=dlmode)

    def set_video_events(self, events):
        self._locked(DownloadConfigInterface.set_video_events, events)

    def set_mode(self, mode):
        self._locked(DownloadConfigInterface.set_mode, mode)

    def get_mode(self):
        return self._locked(DownloadConfigInterface.get_mode)

    def get_video_event_callback(self):
        return self._locked(DownloadConfigInterface.get_video_event_callback)

    def get_video_events(self):
        return self._locked(DownloadConfigInterface.get_video_events)

    def set_selected_files(self, files):
        self.dllock.acquire()
        try:
            DownloadConfigInterface.set_selected_files(self, files)
            # Recompute piece ranges so only the selected files are fetched.
            self.set_filepieceranges(self.tdef.get_metainfo())
        finally:
            self.dllock.release()

    def set_extra_files(self, extra_files):
        self._locked(DownloadConfigInterface.set_extra_files, extra_files)

    def get_selected_files(self):
        return self._locked(DownloadConfigInterface.get_selected_files)

    def set_max_conns_to_initiate(self, nconns):
        self.dllock.acquire()
        try:
            self._sd_task('set_max_conns_to_initiate', nconns, None)
            DownloadConfigInterface.set_max_conns_to_initiate(self, nconns)
        finally:
            self.dllock.release()

    def get_max_conns_to_initiate(self):
        return self._locked(DownloadConfigInterface.get_max_conns_to_initiate)

    def set_max_conns(self, nconns):
        self.dllock.acquire()
        try:
            self._sd_task('set_max_conns', nconns, None)
            DownloadConfigInterface.set_max_conns(self, nconns)
        finally:
            self.dllock.release()

    def get_max_conns(self):
        return self._locked(DownloadConfigInterface.get_max_conns)

    def set_max_uploads(self, value):
        self.dllock.acquire()
        try:
            self._sd_task('set_max_uploads', value)
            DownloadConfigInterface.set_max_uploads(self, value)
        finally:
            self.dllock.release()

    def get_max_uploads(self):
        return self._locked(DownloadConfigInterface.get_max_uploads)

    # The setters below configure engine parameters that must be chosen
    # before the download starts; at runtime they always raise.  Their
    # getters remain available (lock-protected reads).

    def set_keepalive_interval(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_keepalive_interval(self):
        return self._locked(DownloadConfigInterface.get_keepalive_interval)

    def set_download_slice_size(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_download_slice_size(self):
        return self._locked(DownloadConfigInterface.get_download_slice_size)

    def set_upload_unit_size(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_upload_unit_size(self):
        return self._locked(DownloadConfigInterface.get_upload_unit_size)

    def set_request_backlog(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_request_backlog(self):
        return self._locked(DownloadConfigInterface.get_request_backlog)

    def set_max_message_length(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_max_message_length(self):
        return self._locked(DownloadConfigInterface.get_max_message_length)

    def set_max_slice_length(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_max_slice_length(self):
        return self._locked(DownloadConfigInterface.get_max_slice_length)

    def set_max_rate_period(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_max_rate_period(self):
        return self._locked(DownloadConfigInterface.get_max_rate_period)

    def set_upload_rate_fudge(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_upload_rate_fudge(self):
        return self._locked(DownloadConfigInterface.get_upload_rate_fudge)

    def set_tcp_ack_fudge(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_tcp_ack_fudge(self):
        return self._locked(DownloadConfigInterface.get_tcp_ack_fudge)

    def set_rerequest_interval(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_rerequest_interval(self):
        return self._locked(DownloadConfigInterface.get_rerequest_interval)

    def set_min_peers(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_min_peers(self):
        return self._locked(DownloadConfigInterface.get_min_peers)

    def set_http_timeout(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_http_timeout(self):
        return self._locked(DownloadConfigInterface.get_http_timeout)

    def set_check_hashes(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_check_hashes(self):
        return self._locked(DownloadConfigInterface.get_check_hashes)

    def set_alloc_type(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_alloc_type(self):
        return self._locked(DownloadConfigInterface.get_alloc_type)

    def set_alloc_rate(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_alloc_rate(self):
        return self._locked(DownloadConfigInterface.get_alloc_rate)

    def set_buffer_reads(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_buffer_reads(self):
        return self._locked(DownloadConfigInterface.get_buffer_reads)

    def set_write_buffer_size(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_write_buffer_size(self):
        return self._locked(DownloadConfigInterface.get_write_buffer_size)

    def set_breakup_seed_bitfield(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_breakup_seed_bitfield(self):
        return self._locked(DownloadConfigInterface.get_breakup_seed_bitfield)

    def set_snub_time(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_snub_time(self):
        return self._locked(DownloadConfigInterface.get_snub_time)

    def set_rarest_first_cutoff(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_rarest_first_cutoff(self):
        return self._locked(DownloadConfigInterface.get_rarest_first_cutoff)

    def set_rarest_first_priority_cutoff(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_rarest_first_priority_cutoff(self):
        return self._locked(DownloadConfigInterface.get_rarest_first_priority_cutoff)

    def set_min_uploads(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_min_uploads(self):
        return self._locked(DownloadConfigInterface.get_min_uploads)

    def set_max_files_open(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_max_files_open(self):
        return self._locked(DownloadConfigInterface.get_max_files_open)

    def set_round_robin_period(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_round_robin_period(self):
        return self._locked(DownloadConfigInterface.get_round_robin_period)

    def set_super_seeder(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_super_seeder(self):
        return self._locked(DownloadConfigInterface.get_super_seeder)

    def set_security(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_security(self):
        return self._locked(DownloadConfigInterface.get_security)

    def set_auto_kick(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_auto_kick(self):
        return self._locked(DownloadConfigInterface.get_auto_kick)

    def set_double_check_writes(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_double_check_writes(self):
        return self._locked(DownloadConfigInterface.get_double_check_writes)

    def set_triple_check_writes(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_triple_check_writes(self):
        return self._locked(DownloadConfigInterface.get_triple_check_writes)

    def set_lock_files(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_lock_files(self):
        return self._locked(DownloadConfigInterface.get_lock_files)

    def set_lock_while_reading(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_lock_while_reading(self):
        return self._locked(DownloadConfigInterface.get_lock_while_reading)

    def set_auto_flush(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_auto_flush(self):
        return self._locked(DownloadConfigInterface.get_auto_flush)

    def set_exclude_ips(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_exclude_ips(self):
        return self._locked(DownloadConfigInterface.get_exclude_ips)

    def set_ut_pex_max_addrs_from_peer(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_ut_pex_max_addrs_from_peer(self):
        return self._locked(DownloadConfigInterface.get_ut_pex_max_addrs_from_peer)

    def set_poa(self, poa):
        self._locked(DownloadConfigInterface.set_poa, poa)

    def get_poa(self, poa):
        # NOTE(review): 'poa' is accepted but unused; the signature is
        # kept unchanged for caller compatibility.
        return self._locked(DownloadConfigInterface.get_poa)

    def set_same_nat_try_internal(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_same_nat_try_internal(self):
        return self._locked(DownloadConfigInterface.get_same_nat_try_internal)

    def set_unchoke_bias_for_internal(self, value):
        raise OperationNotPossibleAtRuntimeException()

    def get_unchoke_bias_for_internal(self):
        return self._locked(DownloadConfigInterface.get_unchoke_bias_for_internal)

    def set_proxy_mode(self, value):
        self._locked(DownloadConfigInterface.set_proxy_mode, value)

    def get_proxy_mode(self):
        return self._locked(DownloadConfigInterface.get_proxy_mode)

    def set_no_helpers(self, value):
        # The original returned the parent call's result; preserved.
        return self._locked(DownloadConfigInterface.set_no_helpers, value)

    def get_no_helpers(self):
        return self._locked(DownloadConfigInterface.get_no_helpers)
|
|
#!/usr/bin/env python
# coding=utf-8
# Author: YAO Matrix (yaoweifeng0301@126.com)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import sys
import time
import datetime
import logging
import numpy as np
import tensorflow as tf
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
from data_io import load_minst_data
from metrics import precision, error_rate
# Training hyper-parameters.
VALIDATION_SIZE = 5000 # Size of the validation set.
SEED = 66478 # Set to None for random seed.
BATCH_SIZE = 64 # Minibatch size for training.
NUM_EPOCHS = 26 # Passes over the training data.
EVAL_BATCH_SIZE = 64 # Batch size used during evaluation.
EVAL_FREQUENCY = 100 # Number of steps between evaluations.

# Module-level logging setup: DEBUG-and-up records go both to a dated log
# file under <repo>/logs/ and to the console, with a timestamped format.
module_dir = os.path.dirname(os.path.abspath(__file__))
module_name = os.path.basename(__file__).split('.')[0]
log_path = os.path.join(module_dir, os.path.pardir, os.path.pardir, 'logs', module_name + '_' + datetime.date.today().strftime('%Y%m%d') + '.log')
logger = logging.getLogger(module_name)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(log_path)  # file handler
ch = logging.StreamHandler()  # console handler
fh.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s][%(name)s][%(levelname)s]: %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
# TensorFlow command-line flags object.
FLAGS = tf.app.flags.FLAGS
def main(argv = None):
# load mnist into numpy arrays.
train_data, train_labels = load_minst_data(t = 'train')
test_data, test_labels = load_minst_data(t = 'test')
height = train_data.shape[1]
width = train_data.shape[2]
channel = (train_data.shape[3] if train_data.ndim > 3 else 1)
label_max = np.amax(train_labels)
label_min = np.amin(train_labels)
num_labels = label_max - label_min + 1
# Generate a validation set.
validation_data = train_data[:VALIDATION_SIZE, ...]
validation_labels = train_labels[:VALIDATION_SIZE]
train_data = train_data[VALIDATION_SIZE:, ...]
train_labels = train_labels[VALIDATION_SIZE:]
num_epochs = NUM_EPOCHS
train_size = train_labels.shape[0]
# This is where training samples and labels are fed to the graph.
# These placeholder nodes will be fed a batch of training data at each
# training step using the {feed_dict} argument to the Run() call below.
train_data_node = tf.placeholder(
tf.float32,
shape = (BATCH_SIZE, height, width, channel))
train_labels_node = tf.placeholder(tf.int64, shape = (BATCH_SIZE,))
eval_data = tf.placeholder(
tf.float32,
shape = (EVAL_BATCH_SIZE, height, width, channel))
# The variables below hold all the trainable weights. They are passed an
# initial value which will be assigned when we call:
# {tf.initialize_all_variables().run()}
conv1_weights = tf.Variable(
tf.truncated_normal([5, 5, channel, 32], # 5x5 filter, depth 32.
stddev = 0.1,
seed = SEED),
name = "conv1_weights")
conv1_biases = tf.Variable(tf.zeros([32]), name = "conv1_biases")
conv2_weights = tf.Variable(
tf.truncated_normal([5, 5, 32, 64],
stddev = 0.1,
seed = SEED),
name = "conv2_weights")
conv2_biases = tf.Variable(tf.constant(0.1, shape = [64]), name = "conv2_biases")
fc1_weights = tf.Variable( # fully connected, depth 512.
tf.truncated_normal(
[height // 4 * width // 4 * 64, 512],
stddev = 0.1,
seed = SEED),
name = "fc1_weights")
fc1_biases = tf.Variable(tf.constant(0.1, shape = [512]), name = "fc1_biases")
fc2_weights = tf.Variable(
tf.truncated_normal([512, num_labels],
stddev = 0.1,
seed = SEED),
name = "fc2_weights")
fc2_biases = tf.Variable(tf.constant(0.1, shape = [num_labels]), name = "fc2_biases")
# We will replicate the model structure for the training subgraph, as well
# as the evaluation subgraphs, while sharing the trainable parameters.
def lenet(data, train = False):
"""LeNet definition."""
# 2D convolution, with 'SAME' padding (i.e. the output feature map has
# the same size as the input). Note that {strides} is a 4D array whose
# shape matches the data layout: [n, h, w, c].
conv1 = tf.nn.conv2d(data,
conv1_weights,
strides = [1, 1, 1, 1],
padding = 'SAME')
# Bias and rectified linear non-linearity.
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
# Max pooling. The kernel size spec {ksize} also follows the layout of
# the data. Here we have a pooling window of 2, and a stride of 2.
pool1 = tf.nn.max_pool(relu1,
ksize = [1, 2, 2, 1],
strides = [1, 2, 2, 1],
padding = 'SAME')
conv2 = tf.nn.conv2d(pool1,
conv2_weights,
strides = [1, 1, 1, 1],
padding = 'SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
pool2 = tf.nn.max_pool(relu2,
ksize = [1, 2, 2, 1],
strides = [1, 2, 2, 1],
padding = 'SAME')
# Reshape the feature map cuboid into a 2D matrix to feed it to the
# fully connected layers.
pool_shape = pool2.get_shape().as_list()
reshape = tf.reshape(pool2,
[pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
# Fully connected layer. Note that the '+' operation automatically
# broadcasts the biases.
fc1 = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
# Add a 50% dropout during training only. Dropout also scales
# activations such that no rescaling is needed at evaluation time.
if train:
fc1 = tf.nn.dropout(fc1, 0.5, seed = SEED)
return tf.matmul(fc1, fc2_weights) + fc2_biases
# Training computation: logits + cross-entropy loss.
logits = lenet(train_data_node, True)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, train_labels_node))
# L2 regularization for the fully connected parameters.
regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
# Add the regularization term to the loss.
loss += 5e-4 * regularizers
# Optimizer: set up a variable that's incremented once per batch and
# controls the learning rate decay.
batch = tf.Variable(0)
# Decay once per epoch, using an exponential schedule starting at 0.01.
learning_rate = tf.train.exponential_decay(
0.01, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
train_size, # Decay step.
0.95, # Decay rate.
staircase = True)
# Use simple momentum for the optimization.
optimizer = tf.train.MomentumOptimizer(learning_rate,
0.9).minimize(loss,
global_step = batch)
# Predictions for the current training minibatch.
train_prediction = tf.nn.softmax(logits)
# Predictions for the test and validation, which we'll compute less often.
eval_prediction = tf.nn.softmax(lenet(eval_data))
# Small utility function to evaluate a dataset by feeding batches of data to
# {eval_data} and pulling the results from {eval_predictions}.
# Saves memory and enables this to run on smaller GPUs.
def eval_in_batches(data, sess):
"""Get all predictions for a dataset by running it in small batches."""
size = data.shape[0]
if size < EVAL_BATCH_SIZE:
raise ValueError("batch size for evals larger than dataset: %d" % size)
predictions = np.ndarray(shape = (size, num_labels), dtype = np.float32)
for begin in xrange(0, size, EVAL_BATCH_SIZE):
end = begin + EVAL_BATCH_SIZE
if end <= size:
predictions[begin:end, :] = sess.run(
eval_prediction,
feed_dict={eval_data: data[begin:end, ...]})
else:
batch_predictions = sess.run(
eval_prediction,
feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]})
predictions[begin:, :] = batch_predictions[begin - size:, :]
return predictions
# Create a local session to run the training.
start_time = time.time()
model_dir = os.path.join(module_dir, os.path.pardir, os.path.pardir, 'models')
with tf.Session() as sess:
    # Run all the initializers to prepare the trainable parameters.
    # NOTE(review): tf.initialize_all_variables() is the pre-1.0 TF API
    # (replaced by global_variables_initializer in later releases).
    tf.initialize_all_variables().run()
    logger.info('Initialized!')
    # Loop through training steps.
    for step in xrange(int(num_epochs * train_size) // BATCH_SIZE):
        # Compute the offset of the current minibatch in the data.
        # Note that we could use better randomization across epochs.
        offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
        batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
        batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
        # This dictionary maps the batch data (as a numpy array) to the
        # node in the graph it should be fed to.
        feed_dict = {train_data_node: batch_data,
                     train_labels_node: batch_labels}
        # Run the graph and fetch some of the nodes.
        _, l, lr, predictions = sess.run(
            [optimizer, loss, learning_rate, train_prediction],
            feed_dict=feed_dict)
        # Periodic progress report: timing, loss, LR, train/validation error.
        if step % EVAL_FREQUENCY == 0:
            elapsed_time = time.time() - start_time
            start_time = time.time()
            logger.info('Step %d (epoch %.2f), %.1f ms' %
                (step, float(step) * BATCH_SIZE / train_size,
                1000 * elapsed_time / EVAL_FREQUENCY))
            logger.info('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
            logger.info('Minibatch training error: %.1f%%' % error_rate(predictions, batch_labels))
            logger.info('Validation error: %.1f%%' % error_rate(eval_in_batches(validation_data, sess), validation_labels))
            sys.stdout.flush()
    # Finally print the result!
    test_precision = precision(eval_in_batches(test_data, sess), test_labels)
    logger.info('Test precision: %.1f%%' % test_precision)
    # Model persistence
    # NOTE(review): the saver persists conv1/conv2/fc1 variables only;
    # fc2_weights/fc2_biases are not saved -- confirm this is intended.
    saver = tf.train.Saver([conv1_weights, conv1_biases, conv2_weights, conv2_biases, fc1_weights, fc1_biases])
    model_path = os.path.join(model_dir, "base", "lenet_base.ckpt")
    save_path = saver.save(sess, model_path)
    logger.info("Model saved in file: %s" % save_path)
if __name__ == '__main__':
    tf.app.run()
|
|
# -*- coding: utf-8 -*-
"""
paperchase.tasks
~~~~~~~~~~~~~~
paperchase tasks module
"""
import dateutil.parser
import datetime
import feedparser
import requests
import urlparse
import os
from pytz import utc
from datetime import timedelta
from celery.utils.log import get_task_logger
from xml.etree import ElementTree as ET
from lxml import etree
from bs4 import BeautifulSoup
from flask.ext.mail import Message
from jinja2 import Environment, FileSystemLoader, exceptions
from .core import mail
from .factory import create_celery_app
from .services import journals, papers
from .helpers import bozo_checker, days_since, deltatime
from .helpers.favicon import FaviconFetcher
from .settings import scraper_config
celery = create_celery_app()
logger = get_task_logger(__name__)
# This setting will be overwritten by fabric when launching the celery
# beat worker
logger.setLevel(scraper_config.get("log_level"))
# Default tag policy for sanitize_html(): whitelisted tags are kept (with
# their attributes stripped), blacklisted tags are removed with content.
whitelist_tags = ['span', 'sup', 'em']
blacklist_tags = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
def sanitize_html(html, whitelist_tags=(), blacklist_tags=()):
    """
    Sanitize html. It accepts a BeautifulSoup tree or a string as input.
    It removes the html tags and their content if they appear in the
    blacklist_tags iterable and hides all other tags that don't appear
    in whitelist_tags.

    :param html: a BeautifulSoup tree or an html string
    :param whitelist_tags: tags kept visible, with their attributes stripped
    :param blacklist_tags: tags removed together with their whole content
    :return: the sanitized markup rendered by BeautifulSoup
    """
    # Fix: the defaults were mutable lists ([]), a classic shared-default
    # pitfall.  Only membership is tested, so empty tuples behave the same.
    if isinstance(html, basestring):
        html = BeautifulSoup(html)
    for tag in html.findAll(True):
        if tag.name in blacklist_tags:
            # Drop the tag and everything inside it.
            tag.decompose()
        elif tag.name not in whitelist_tags:
            # Keep the tag's content but hide the tag itself.
            tag.hidden = True
        else:
            # Whitelisted: keep the tag but strip all attributes.
            tag.attrs = {}
    return html.renderContents()
def is_absolute(url):
    """Return True when *url* carries an explicit scheme (http://, ...)."""
    parsed = urlparse.urlparse(url)
    return parsed.scheme != ''
def make_absolute_url(relative_url, page_url):
    """
    Create an absolute url for the relative_url by extracting the domain from
    the page_url
    """
    parts = urlparse.urlparse(page_url)
    base = "{0}://{1}/".format(parts.scheme, parts.netloc)
    return urlparse.urljoin(base, relative_url)
def feed_requester(feed_url):
    """
    This function handles the requesting and parsing of the journal feed.
    The feed is requested and parsed using feedparser. If the function is
    successful it will return the parsed feed data. If the function is not
    successful it shall return None.

    :param feed_url: A string with the url of the feed to retrieve.
    :return: the feed_data in dictionary format as parsed by feedparser,
             or None on failure.
    """
    try:
        feed_data = feedparser.parse(
            feed_url, agent=scraper_config.get("User-agent"))
    except Exception as err:
        logger.error(
            "Failed to parse {0}\nTraceback:\n{1}".format(feed_url, err))
        return None
    if not feed_data:
        # Fixed log-message typo: "Retriving" -> "Retrieving".
        logger.error(
            "Retrieving feed from {0} returned nothing\n".format(feed_url))
        return None
    if feed_data.bozo:
        # feedparser flags malformed feeds via `bozo`; bozo_checker decides
        # which bozo exceptions are fatal and which are tolerable.
        logger.warning("Feed at {0}, generated bozo error: {1}.\n".format(
            feed_url, feed_data.bozo_exception))
        if not bozo_checker(feed_data.bozo_exception):
            return None
    return feed_data
def extract_elements(article, paths):
    """
    Scrape elements from the article webpage and insert them into the article
    dictionary.

    :param article: The article dict to fill; must carry "url" and "title".
    :param paths: iterable of path objects; ``path.type`` is the name of the
                  element to parse from the webpage of the article and
                  ``path.path`` the xpath corresponding to it
    """
    try:
        r = requests.get(article.get("url"))
    except Exception:
        logger.error(
            "Something went wrong while requesting webpage"
            " of article: {0}".format(article.get("title")))
        return
    tree = etree.HTML(r.content)
    for path in paths:
        elements = tree.xpath(path.path)
        # Fixes: `len(elements) is 0` relied on CPython small-int identity;
        # and `.format()` previously bound to only the second string literal,
        # leaving a literal "{0}" in the logged message.
        if not elements:
            logger.warning(
                "Article at URL {0} has"
                " no element {1}".format(article.get("url"), path.path))
            continue
        # Take the first match only, sanitized with the module tag policy.
        element_string = sanitize_html(
            ET.tostring(elements[0]), whitelist_tags, blacklist_tags)
        article[path.type] = element_string.strip(' \n')
def default_parser(entry):
    """
    Parse a raw entry from the feed, as extracted by feedparser,
    and fill it with the correct information we want to keep as journal entry.

    :param entry: The feed entry from feedparser
    :return: It returns a dictionary for the article with all the keys
             necessary to instantiate a new article object for the database
    """
    article = {
        "title": entry.get("title", "No title"),
        "url": entry.get("link", "#"),
        "created": entry.get("dc_date", datetime.datetime.utcnow()),
        "doi": entry.get("prism_doi", ""),
        "ref": entry.get("dc_source", ""),
        "abstract": entry.get("summary", ""),
        "authors": entry.get("authors", ""),
    }
    # Strip the 4-character prefix from the raw prism_doi value.
    article['doi'] = article['doi'][4:]
    # Collect author display names, drop html, then cap the length.
    names = []
    for author in article['authors']:
        if 'name' in author:
            names.append(author['name'])
    author_line = sanitize_html(', '.join(names)).strip(' \n')
    # if the authors list is too long truncate it to 900 chars
    if len(author_line) > 900:
        author_line = author_line[:900] + '..'
    article['authors'] = author_line
    return article
@celery.task
def get_journals():
    """
    Gets journals that need to be updated from the database and queues one
    get_papers task per journal.
    A journal is due when its next_check timestamp has passed; next_check
    itself is recomputed later by the update_check task.
    """
    journals_list = journals.filter(
        journals.model().next_check <= datetime.datetime.utcnow()).all()
    for journal in journals_list:
        # Fan the actual fetching out to one celery task per journal.
        get_papers.delay(journal.id)
@celery.task
def get_papers(journal_id):
    """
    Fetch the papers, RSS feed, of a journal.

    :param journal_id: the id of the journal
    """
    journal = journals.get(journal_id)
    logger.debug("Getting papers for journal: {0}".format(journal.title))
    feed_url = journal.url
    feed_data = feed_requester(feed_url)
    if feed_data is None:
        # Fetch/parse failed; feed_requester already logged the reason.
        return
    if feed_data.get("entries"):
        for entry in feed_data.entries:
            # One task per entry; add_article deduplicates by URL.
            add_article.delay(entry, journal.id)
    update_check.delay(journal.id, feed_data)
    # Refresh journal metadata (e.g. favicon) only every N days, with N
    # taken from the scraper configuration.
    if days_since(datetime.datetime.utcnow(), journal.metadata_update) >= scraper_config.get("metadata_update"):
        update_metadata.delay(journal.id, feed_data)
@celery.task
def update_check(journal_id, feed_data):
    """
    Update the time of the next feed refresh for a journal using its fetched
    RSS feed.

    :param journal_id: the id of the journal
    :param feed_data: the RSS feed data of the journal used to compute the next
                      refresh time
    """
    journal = journals.get(journal_id)
    logger.debug(
        "Updating last_checked for journal: {0}".format(journal.title))
    journals.update(journal, last_checked=datetime.datetime.utcnow())
    updateBase = feed_data.feed.get("sy_updatebase", None)
    if not updateBase:
        # Bug fix: this fallback used "-", scheduling next_check in the
        # past and causing the journal to be re-fetched on every scheduler
        # pass.  Schedule update_frequency minutes ahead, exactly like the
        # fallback further down.
        journals.update(journal, next_check=datetime.datetime.utcnow() +
                        datetime.timedelta(seconds=scraper_config.get("update_frequency") * 60))
        return
    # Normalize the publisher-provided update base to a naive UTC datetime.
    updateBase = dateutil.parser.parse(updateBase)
    updateBase = updateBase.astimezone(utc)
    updateBase = updateBase.replace(tzinfo=None)
    # if the feed does not provide an updatePeriod we assume the RSS standard
    # daily
    updatePeriod = feed_data.feed.get("sy_updateperiod", 'daily')
    updateFrequency = feed_data.feed.get("sy_updatefrequency", 1)
    time_between_updates = deltatime(updatePeriod, updateFrequency)
    if not time_between_updates:
        # Unknown period/frequency: fall back to the configured interval.
        journals.update(journal, next_check=datetime.datetime.utcnow() +
                        datetime.timedelta(seconds=scraper_config.get("update_frequency") * 60))
        return
    # Align the next check on the publisher's schedule: time remaining
    # until the next multiple of the update interval after updateBase.
    seconds_from_updateBase = datetime.datetime.utcnow() - updateBase
    seconds_to_next_update = time_between_updates.total_seconds(
    ) - seconds_from_updateBase.total_seconds() % time_between_updates.total_seconds()
    journals.update(journal, next_check=datetime.datetime.utcnow()
                    + timedelta(seconds=seconds_to_next_update))
@celery.task
def update_metadata(journal_id, feed_data):
    """
    This method updates the metadata of a journal, this function should be called if
    the feed is newly added, or if it has been longer than N days since last
    update. N days is defined in the config.

    :param journal_id: the id of the journal
    :param feed_data: The resulting dict from a feed_requester call.
    """
    journal = journals.get(journal_id)
    logger.debug("Updating metadata for journal: {0}".format(journal.title))
    # Use any stored paper's URL as the starting point for favicon discovery.
    paper = papers.first(journal_id=journal_id)
    if paper:
        paper_url = paper.url
        favicon_url = FaviconFetcher().find_favicon(paper_url)
        if favicon_url is not None:
            journals.update(journal, favicon=favicon_url)
        else:
            logger.warning(
                "Can't find favicon at URL {1} for journal {0}".format(journal.title, paper_url))
    # Stamp the refresh time even when no paper/favicon was found, so the
    # journal is not retried on every pass.
    journals.update(journal, metadata_update=datetime.datetime.utcnow())
@celery.task
def add_article(entry, journal_id):
    """
    Adds an article to the database. The function will
    check if the article already is in the DB.

    :param entry: the data of the article
    :param journal_id: the id of the journal
    """
    # entry is simply an item out of the feed so it is guaranteed to have a
    # link attribute
    url = entry.get("link")
    stored_paper = papers.first(url=url)
    if stored_paper is not None:
        # Already stored; URL acts as the dedup key.
        return
    paper = default_parser(entry)
    journal = journals.get(journal_id)
    paths = journal.paths.all()
    # Enrich the parsed entry with elements scraped from the article page.
    extract_elements(paper, paths)
    papers.create(created=paper.get("created"),
                  title=paper.get("title"),
                  abstract=paper.get("abstract"),
                  doi=paper.get("doi"),
                  ref=paper.get("ref"),
                  url=paper.get("url"),
                  authors=paper.get("authors"),
                  journal_id=journal_id
                  )
    # Bug fix: the message said "doi" but the value logged is the URL.
    logger.debug("Added new entry with url: {0}".format(paper.get("url")))
@celery.task
def send_email(subject, template, recipient, **kwargs):
    """Send an email via the Flask-Mail extension.

    :param subject: Email subject
    :param recipient: Email recipient
    :param template: The name of the email template. The method uses Jinja2 to
                     render the template
    :param **kwargs: dictionary containing the info used to render the template
    """
    msg = Message(subject, recipients=[recipient])
    # Templates are looked up next to this module, in email_templates/.
    this_dir = os.path.dirname(os.path.abspath(__file__))
    env = Environment(
        loader=FileSystemLoader("%s/email_templates/" % this_dir))
    msg.body = env.get_template("%s.txt" % template).render(**kwargs)
    try:
        # The html body is optional; the txt template is the baseline.
        msg.html = env.get_template("%s.html" % template).render(**kwargs)
    except exceptions.TemplateNotFound:
        # We assume there is at least a txt template for a text email.
        pass
    mail.send(msg)
|
|
import inspect
from copy import copy
from django.conf import settings
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.six.moves import range
from .query_utils import QueryWrapper
class RegisterLookupMixin(object):
    """Mixin giving a class (typically a field or transform) a per-class
    registry of named lookups/transforms in ``class_lookups``, resolved
    through the MRO so subclasses inherit registrations."""
    def _get_lookup(self, lookup_name):
        # Look in this class' own registry first, then walk the MRO so
        # lookups registered on an ancestor are found too.
        try:
            return self.class_lookups[lookup_name]
        except KeyError:
            # To allow for inheritance, check parent class' class_lookups.
            for parent in inspect.getmro(self.__class__):
                if 'class_lookups' not in parent.__dict__:
                    continue
                if lookup_name in parent.class_lookups:
                    return parent.class_lookups[lookup_name]
        except AttributeError:
            # This class didn't have any class_lookups
            pass
        return None
    def get_lookup(self, lookup_name):
        # Delegate to output_field when this object has no match itself;
        # only Lookup subclasses are valid results here.
        found = self._get_lookup(lookup_name)
        if found is None and hasattr(self, 'output_field'):
            return self.output_field.get_lookup(lookup_name)
        if found is not None and not issubclass(found, Lookup):
            return None
        return found
    def get_transform(self, lookup_name):
        # Same resolution as get_lookup, but only Transform subclasses count.
        found = self._get_lookup(lookup_name)
        if found is None and hasattr(self, 'output_field'):
            return self.output_field.get_transform(lookup_name)
        if found is not None and not issubclass(found, Transform):
            return None
        return found
    @classmethod
    def register_lookup(cls, lookup):
        # Ensure the registry lives on *this* class (not a parent) before
        # inserting, so registration does not leak to siblings.
        if 'class_lookups' not in cls.__dict__:
            cls.class_lookups = {}
        cls.class_lookups[lookup.lookup_name] = lookup
        return lookup
    @classmethod
    def _unregister_lookup(cls, lookup):
        """
        Removes given lookup from cls lookups. Meant to be used in
        tests only.
        """
        del cls.class_lookups[lookup.lookup_name]
class Transform(RegisterLookupMixin):
    """Wraps an expression (lhs) and converts it to another value/type in
    SQL; subclasses implement as_sql().  A bilateral transform is applied
    to both sides of a comparison."""
    bilateral = False
    def __init__(self, lhs, lookups):
        self.lhs = lhs
        # Copy so later mutation of the caller's list cannot affect us.
        self.init_lookups = lookups[:]
    def as_sql(self, compiler, connection):
        raise NotImplementedError
    @cached_property
    def output_field(self):
        # By default a transform keeps the type of its input expression.
        return self.lhs.output_field
    def relabeled_clone(self, relabels):
        # NOTE(review): __init__ takes (lhs, lookups) but only lhs is passed
        # here -- confirm subclasses that need init_lookups survive a
        # relabel, or whether a two-argument call is required.
        return self.__class__(self.lhs.relabeled_clone(relabels))
    def get_group_by_cols(self):
        return self.lhs.get_group_by_cols()
    def get_bilateral_transforms(self):
        # Accumulate bilateral transforms from the wrapped expression and
        # append ourselves when we are bilateral.
        if hasattr(self.lhs, 'get_bilateral_transforms'):
            bilateral_transforms = self.lhs.get_bilateral_transforms()
        else:
            bilateral_transforms = []
        if self.bilateral:
            bilateral_transforms.append((self.__class__, self.init_lookups))
        return bilateral_transforms
    @cached_property
    def contains_aggregate(self):
        return self.lhs.contains_aggregate
class Lookup(RegisterLookupMixin):
    """Base class for query lookups (``field__<lookup_name>=value``):
    compiles a lhs expression, an operator and a rhs value into SQL plus
    bound parameters."""
    lookup_name = None
    def __init__(self, lhs, rhs):
        self.lhs, self.rhs = lhs, rhs
        # Give the output field a chance to adapt the raw rhs value.
        self.rhs = self.get_prep_lookup()
        if hasattr(self.lhs, 'get_bilateral_transforms'):
            bilateral_transforms = self.lhs.get_bilateral_transforms()
        else:
            bilateral_transforms = []
        if bilateral_transforms:
            # We should warn the user as soon as possible if he is trying to apply
            # a bilateral transformation on a nested QuerySet: that won't work.
            # We need to import QuerySet here so as to avoid circular
            from django.db.models.query import QuerySet
            if isinstance(rhs, QuerySet):
                raise NotImplementedError("Bilateral transformations on nested querysets are not supported.")
        self.bilateral_transforms = bilateral_transforms
    def apply_bilateral_transforms(self, value):
        # Wrap value in every bilateral transform collected from the lhs.
        for transform, lookups in self.bilateral_transforms:
            value = transform(value, lookups)
        return value
    def batch_process_rhs(self, compiler, connection, rhs=None):
        # Prepare an *iterable* rhs: returns one placeholder sql string and
        # one db-prepared parameter per element.
        if rhs is None:
            rhs = self.rhs
        if self.bilateral_transforms:
            sqls, sqls_params = [], []
            for p in rhs:
                value = QueryWrapper('%s',
                    [self.lhs.output_field.get_db_prep_value(p, connection)])
                value = self.apply_bilateral_transforms(value)
                sql, sql_params = compiler.compile(value)
                sqls.append(sql)
                sqls_params.extend(sql_params)
        else:
            params = self.lhs.output_field.get_db_prep_lookup(
                self.lookup_name, rhs, connection, prepared=True)
            sqls, sqls_params = ['%s'] * len(params), params
        return sqls, sqls_params
    def get_prep_lookup(self):
        # Python-level preparation of the rhs (e.g. type coercion).
        return self.lhs.output_field.get_prep_lookup(self.lookup_name, self.rhs)
    def get_db_prep_lookup(self, value, connection):
        # Database-level preparation: one '%s' placeholder + params list.
        return (
            '%s', self.lhs.output_field.get_db_prep_lookup(
                self.lookup_name, value, connection, prepared=True))
    def process_lhs(self, compiler, connection, lhs=None):
        lhs = lhs or self.lhs
        return compiler.compile(lhs)
    def process_rhs(self, compiler, connection):
        value = self.rhs
        if self.bilateral_transforms:
            if self.rhs_is_direct_value():
                # Do not call get_db_prep_lookup here as the value will be
                # transformed before being used for lookup
                value = QueryWrapper("%s",
                    [self.lhs.output_field.get_db_prep_value(value, connection)])
            value = self.apply_bilateral_transforms(value)
        # Due to historical reasons there are a couple of different
        # ways to produce sql here. get_compiler is likely a Query
        # instance, _as_sql QuerySet and as_sql just something with
        # as_sql. Finally the value can of course be just plain
        # Python value.
        if hasattr(value, 'get_compiler'):
            value = value.get_compiler(connection=connection)
        if hasattr(value, 'as_sql'):
            sql, params = compiler.compile(value)
            return '(' + sql + ')', params
        if hasattr(value, '_as_sql'):
            sql, params = value._as_sql(connection=connection)
            return '(' + sql + ')', params
        else:
            return self.get_db_prep_lookup(value, connection)
    def rhs_is_direct_value(self):
        # "Direct value" == plain Python value, not an SQL-producing object.
        return not(
            hasattr(self.rhs, 'as_sql') or
            hasattr(self.rhs, '_as_sql') or
            hasattr(self.rhs, 'get_compiler'))
    def relabeled_clone(self, relabels):
        # Shallow-copy, then re-point lhs (and rhs when it supports it) at
        # the relabeled table aliases.
        new = copy(self)
        new.lhs = new.lhs.relabeled_clone(relabels)
        if hasattr(new.rhs, 'relabeled_clone'):
            new.rhs = new.rhs.relabeled_clone(relabels)
        return new
    def get_group_by_cols(self):
        cols = self.lhs.get_group_by_cols()
        if hasattr(self.rhs, 'get_group_by_cols'):
            cols.extend(self.rhs.get_group_by_cols())
        return cols
    def as_sql(self, compiler, connection):
        raise NotImplementedError
    @cached_property
    def contains_aggregate(self):
        return self.lhs.contains_aggregate or getattr(self.rhs, 'contains_aggregate', False)
class BuiltinLookup(Lookup):
    """Lookup whose SQL operator template is taken from
    ``connection.operators[lookup_name]`` (exact, gt, lt, ...)."""
    def process_lhs(self, compiler, connection, lhs=None):
        lhs_sql, params = super(BuiltinLookup, self).process_lhs(
            compiler, connection, lhs)
        field_internal_type = self.lhs.output_field.get_internal_type()
        db_type = self.lhs.output_field.db_type(connection=connection)
        # Let the backend wrap the column in any casts it needs: first for
        # the field type, then for this specific lookup.
        lhs_sql = connection.ops.field_cast_sql(
            db_type, field_internal_type) % lhs_sql
        lhs_sql = connection.ops.lookup_cast(self.lookup_name, field_internal_type) % lhs_sql
        return lhs_sql, params
    def as_sql(self, compiler, connection):
        # "<lhs sql> <op with rhs sql>", with lhs params before rhs params.
        lhs_sql, params = self.process_lhs(compiler, connection)
        rhs_sql, rhs_params = self.process_rhs(compiler, connection)
        params.extend(rhs_params)
        rhs_sql = self.get_rhs_op(connection, rhs_sql)
        return '%s %s' % (lhs_sql, rhs_sql), params
    def get_rhs_op(self, connection, rhs):
        # e.g. connection.operators['gt'] == '> %s'.
        return connection.operators[self.lookup_name] % rhs
# Registry mapping lookup_name -> lookup class; populated below and used as
# the default lookup set for fields.
default_lookups = {}
class Exact(BuiltinLookup):
    lookup_name = 'exact'
default_lookups['exact'] = Exact
class IExact(BuiltinLookup):
    """Case-insensitive exact match."""
    lookup_name = 'iexact'
    def process_rhs(self, qn, connection):
        rhs, params = super(IExact, self).process_rhs(qn, connection)
        if params:
            # Let the backend case-normalize the comparison value.
            params[0] = connection.ops.prep_for_iexact_query(params[0])
        return rhs, params
default_lookups['iexact'] = IExact
class GreaterThan(BuiltinLookup):
    lookup_name = 'gt'
default_lookups['gt'] = GreaterThan
class GreaterThanOrEqual(BuiltinLookup):
    lookup_name = 'gte'
default_lookups['gte'] = GreaterThanOrEqual
class LessThan(BuiltinLookup):
    lookup_name = 'lt'
default_lookups['lt'] = LessThan
class LessThanOrEqual(BuiltinLookup):
    lookup_name = 'lte'
default_lookups['lte'] = LessThanOrEqual
class In(BuiltinLookup):
    """``field__in=<iterable or queryset>`` membership lookup."""
    lookup_name = 'in'
    def process_rhs(self, compiler, connection):
        if self.rhs_is_direct_value():
            # rhs should be an iterable, we use batch_process_rhs
            # to prepare/transform those values
            rhs = list(self.rhs)
            if not rhs:
                # IN () can never match; short-circuit the whole query.
                from django.db.models.sql.datastructures import EmptyResultSet
                raise EmptyResultSet
            sqls, sqls_params = self.batch_process_rhs(compiler, connection, rhs)
            placeholder = '(' + ', '.join(sqls) + ')'
            return (placeholder, sqls_params)
        else:
            # Subquery / SQL-producing rhs: let the base class compile it.
            return super(In, self).process_rhs(compiler, connection)
    def get_rhs_op(self, connection, rhs):
        return 'IN %s' % rhs
    def as_sql(self, compiler, connection):
        max_in_list_size = connection.ops.max_in_list_size()
        if self.rhs_is_direct_value() and (max_in_list_size and
                len(self.rhs) > max_in_list_size):
            # This is a special case for Oracle which limits the number of elements
            # which can appear in an 'IN' clause.
            # Emit "(lhs IN (...) OR lhs IN (...) OR ...)" in chunks of
            # max_in_list_size placeholders each.
            lhs, lhs_params = self.process_lhs(compiler, connection)
            rhs, rhs_params = self.batch_process_rhs(compiler, connection)
            in_clause_elements = ['(']
            params = []
            for offset in range(0, len(rhs_params), max_in_list_size):
                if offset > 0:
                    in_clause_elements.append(' OR ')
                in_clause_elements.append('%s IN (' % lhs)
                params.extend(lhs_params)
                sqls = rhs[offset: offset + max_in_list_size]
                sqls_params = rhs_params[offset: offset + max_in_list_size]
                param_group = ', '.join(sqls)
                in_clause_elements.append(param_group)
                in_clause_elements.append(')')
                params.extend(sqls_params)
            in_clause_elements.append(')')
            return ''.join(in_clause_elements), params
        else:
            return super(In, self).as_sql(compiler, connection)
default_lookups['in'] = In
class PatternLookup(BuiltinLookup):
    """Base class for LIKE-style lookups (contains/startswith/endswith and
    their case-insensitive variants)."""
    def get_rhs_op(self, connection, rhs):
        # Assume we are in startswith. We need to produce SQL like:
        # col LIKE %s, ['thevalue%']
        # For python values we can (and should) do that directly in Python,
        # but if the value is for example reference to other column, then
        # we need to add the % pattern match to the lookup by something like
        # col LIKE othercol || '%%'
        # So, for Python values we don't need any special pattern, but for
        # SQL reference values or SQL transformations we need the correct
        # pattern added.
        if (hasattr(self.rhs, 'get_compiler') or hasattr(self.rhs, 'as_sql')
                or hasattr(self.rhs, '_as_sql') or self.bilateral_transforms):
            pattern = connection.pattern_ops[self.lookup_name].format(connection.pattern_esc)
            return pattern.format(rhs)
        else:
            return super(PatternLookup, self).get_rhs_op(connection, rhs)
# The process_rhs overrides below wrap a *direct* Python value in LIKE
# wildcards after escaping it; SQL-producing rhs values are patterned by
# PatternLookup.get_rhs_op instead.
class Contains(PatternLookup):
    """Substring match: value wrapped as %value%."""
    lookup_name = 'contains'
    def process_rhs(self, qn, connection):
        rhs, params = super(Contains, self).process_rhs(qn, connection)
        if params and not self.bilateral_transforms:
            params[0] = "%%%s%%" % connection.ops.prep_for_like_query(params[0])
        return rhs, params
default_lookups['contains'] = Contains
class IContains(Contains):
    # Same pattern as Contains; case-insensitivity comes from the backend's
    # 'icontains' operator/pattern.
    lookup_name = 'icontains'
default_lookups['icontains'] = IContains
class StartsWith(PatternLookup):
    """Prefix match: value wrapped as value%."""
    lookup_name = 'startswith'
    def process_rhs(self, qn, connection):
        rhs, params = super(StartsWith, self).process_rhs(qn, connection)
        if params and not self.bilateral_transforms:
            params[0] = "%s%%" % connection.ops.prep_for_like_query(params[0])
        return rhs, params
default_lookups['startswith'] = StartsWith
class IStartsWith(PatternLookup):
    # NOTE: duplicates StartsWith.process_rhs rather than subclassing it;
    # only the lookup_name (and thus the backend operator) differs.
    lookup_name = 'istartswith'
    def process_rhs(self, qn, connection):
        rhs, params = super(IStartsWith, self).process_rhs(qn, connection)
        if params and not self.bilateral_transforms:
            params[0] = "%s%%" % connection.ops.prep_for_like_query(params[0])
        return rhs, params
default_lookups['istartswith'] = IStartsWith
class EndsWith(PatternLookup):
    """Suffix match: value wrapped as %value."""
    lookup_name = 'endswith'
    def process_rhs(self, qn, connection):
        rhs, params = super(EndsWith, self).process_rhs(qn, connection)
        if params and not self.bilateral_transforms:
            params[0] = "%%%s" % connection.ops.prep_for_like_query(params[0])
        return rhs, params
default_lookups['endswith'] = EndsWith
class IEndsWith(PatternLookup):
    lookup_name = 'iendswith'
    def process_rhs(self, qn, connection):
        rhs, params = super(IEndsWith, self).process_rhs(qn, connection)
        if params and not self.bilateral_transforms:
            params[0] = "%%%s" % connection.ops.prep_for_like_query(params[0])
        return rhs, params
default_lookups['iendswith'] = IEndsWith
class Between(BuiltinLookup):
    # NOTE(review): rhs is a single placeholder string reused twice; the two
    # bound parameters (lower, upper bound) must come from the rhs params --
    # confirm against the field's get_db_prep_lookup for this lookup.
    def get_rhs_op(self, connection, rhs):
        return "BETWEEN %s AND %s" % (rhs, rhs)
class Year(Between):
    """``field__year=YYYY`` implemented as a BETWEEN over the year's span."""
    lookup_name = 'year'
default_lookups['year'] = Year
class Range(BuiltinLookup):
    lookup_name = 'range'
    def get_rhs_op(self, connection, rhs):
        # rhs is the pair of placeholders produced by batch_process_rhs.
        return "BETWEEN %s AND %s" % (rhs[0], rhs[1])
    def process_rhs(self, compiler, connection):
        if self.rhs_is_direct_value():
            # rhs should be an iterable of 2 values, we use batch_process_rhs
            # to prepare/transform those values
            return self.batch_process_rhs(compiler, connection)
        else:
            return super(Range, self).process_rhs(compiler, connection)
default_lookups['range'] = Range
class DateLookup(BuiltinLookup):
    """Base class for date-component lookups (month/day/hour/...): extracts
    the component named by ``extract_type`` from the column and compares it
    with '='."""
    def process_lhs(self, compiler, connection, lhs=None):
        from django.db.models import DateTimeField
        lhs, params = super(DateLookup, self).process_lhs(compiler, connection, lhs)
        if isinstance(self.lhs.output_field, DateTimeField):
            # Datetime columns are converted to the active timezone before
            # extraction when USE_TZ is enabled.
            tzname = timezone.get_current_timezone_name() if settings.USE_TZ else None
            sql, tz_params = connection.ops.datetime_extract_sql(self.extract_type, lhs, tzname)
            return connection.ops.lookup_cast(self.lookup_name) % sql, tz_params
        else:
            return connection.ops.date_extract_sql(self.lookup_name, lhs), []
    def get_rhs_op(self, connection, rhs):
        # The extracted component is compared for equality.
        return '= %s' % rhs
class Month(DateLookup):
    lookup_name = 'month'
    extract_type = 'month'
default_lookups['month'] = Month
class Day(DateLookup):
    lookup_name = 'day'
    extract_type = 'day'
default_lookups['day'] = Day
class WeekDay(DateLookup):
    lookup_name = 'week_day'
    extract_type = 'week_day'
default_lookups['week_day'] = WeekDay
class Hour(DateLookup):
    lookup_name = 'hour'
    extract_type = 'hour'
default_lookups['hour'] = Hour
class Minute(DateLookup):
    lookup_name = 'minute'
    extract_type = 'minute'
default_lookups['minute'] = Minute
class Second(DateLookup):
    lookup_name = 'second'
    extract_type = 'second'
default_lookups['second'] = Second
class IsNull(BuiltinLookup):
    """``field__isnull=<bool>``: IS NULL when rhs is truthy, IS NOT NULL
    otherwise."""
    lookup_name = 'isnull'
    def as_sql(self, compiler, connection):
        sql, params = compiler.compile(self.lhs)
        if self.rhs:
            return "%s IS NULL" % sql, params
        else:
            return "%s IS NOT NULL" % sql, params
default_lookups['isnull'] = IsNull
class Search(BuiltinLookup):
    """Backend-specific full-text search; the backend supplies the whole
    SQL template via fulltext_search_sql()."""
    lookup_name = 'search'
    def as_sql(self, compiler, connection):
        lhs, lhs_params = self.process_lhs(compiler, connection)
        rhs, rhs_params = self.process_rhs(compiler, connection)
        sql_template = connection.ops.fulltext_search_sql(field_name=lhs)
        return sql_template, lhs_params + rhs_params
default_lookups['search'] = Search
class Regex(BuiltinLookup):
    lookup_name = 'regex'
    def as_sql(self, compiler, connection):
        # Use the backend's native REGEXP operator when it declares one;
        # otherwise fall back to the regex_lookup() SQL template.
        if self.lookup_name in connection.operators:
            return super(Regex, self).as_sql(compiler, connection)
        else:
            lhs, lhs_params = self.process_lhs(compiler, connection)
            rhs, rhs_params = self.process_rhs(compiler, connection)
            sql_template = connection.ops.regex_lookup(self.lookup_name)
            return sql_template % (lhs, rhs), lhs_params + rhs_params
default_lookups['regex'] = Regex
class IRegex(Regex):
    lookup_name = 'iregex'
default_lookups['iregex'] = IRegex
|
|
from subprocess import Popen, PIPE
from signal import SIGINT, signal
import threading
import Queue
import argparse
from itertools import product
import re
import time
from functools import partial
def print_with_color(data, color):
print "\033[%dm%s\033[0m" % (color, data)
def split_strip_and_filter(input, delimeter = ','):
    """Split *input* on *delimeter*, strip whitespace from every piece and
    drop the pieces that end up empty."""
    stripped = (piece.strip() for piece in input.split(delimeter))
    return [piece for piece in stripped if piece]
class Tailor(threading.Thread):
daemon = True
running = True
def __init__(self, queue, lock, server, file, match = None, ignore = None):
self.lock = lock
threading.Thread.__init__(self)
self.server = server
self.file = file
self.queue = queue
self.match = match
self.ignore = ignore
self.start()
def run(self):
self._connect()
for line in self._lines():
self._process_line(line)
self.stop()
def stop(self):
if self.running:
print "Closing: %s:%s" % (self.server, self.file)
self._stop()
return self
def _lines(self):
line = self.tail_process.stdout.readline()
while self.running and line:
yield line
line = self.tail_process.stdout.readline()
def _is_local(self):
return self.server not in ('localhost', 'local', '')
def _tail_command(self):
command = ['ssh', '-t', self.server] if self._is_local() else []
return command + ['tail', '-f', self.file]
def _start_tail_process(self):
self.tail_process = Popen(
self._tail_command(),
stdout = PIPE,
stdin = PIPE,
stderr = PIPE
)
def _ignore(self, line):
return self.ignore and re.search(self.ignore, line)
def _match(self, line):
if self._ignore(line):
return False
elif self.match:
return re.search(self.match, line)
else:
return True
def _process_line(self, line):
line = line.strip()
if self._match(line):
self._put_in_queue(line)
def _put_in_queue(self, line):
self.queue.put((self.server, self.file, line))
def _connect(self):
self.lock.acquire()
try:
self._start_tail_process()
finally:
self.lock.release()
def _stop(self):
self.running = False
self._stop_tailing_process()
def _stop_tailing_process(self):
try:
self.tail_process.terminate()
finally:
self._Thread__stop()
class TailManager(object):
    """Owns the Tailor threads: spawns one per (server, file) pair, then
    drains their shared queue and prints every line color-coded by server
    or by file."""
    match = None
    ignore = None
    def __init__(self, args):
        self.queue = Queue.Queue()
        self.lock = threading.Lock()
        self._init_args(args)
        self._set_colors()
    def run(self):
        # Ctrl-C (SIGINT) triggers a clean shutdown of every tailor.
        signal(SIGINT, self._stop)
        self.running = True
        self._tail()
    def _stop(self, *args):
        self.running = False
        for t in self.trailers:
            t.stop()
    def _init_args(self, args):
        self.servers = split_strip_and_filter(args.servers)
        self.files = split_strip_and_filter(args.files)
        self._set_rules(args)
    def _set_rules(self, args):
        # Pre-compile the (case-insensitive) filter regexes once.
        if args.ignore is not None:
            self.ignore = re.compile(args.ignore, re.I)
        if args.match is not None:
            self.match = re.compile(args.match, re.I)
    def _print_open(self, server, file):
        message = "Opening {file} on {server}".format(
            file = file,
            server = server
        )
        self._print(message, server, file)
    def _init_tailor(self, server, file):
        self._print_open(server, file)
        return Tailor(
            self.queue,
            self.lock,
            server,
            file,
            self.match,
            self.ignore
        )
    def _tail(self):
        self._start_trailers()
        # Poll the queue; sleeping while idle keeps the loop interruptible.
        while self.running:
            if self.queue.empty():
                time.sleep(.5)
            else:
                self._print_line()
    def _print_line(self):
        server, file, line = self.queue.get()
        self._print(line + "\r", server, file)
    def _start_trailers(self):
        # One tailor per (server, file) combination.
        server_file_combos = product(self.servers, self.files)
        self.trailers = [self._init_tailor(s, f) for s,f in server_file_combos]
    def _print(self, message, server, file):
        # Color is keyed by server when tailing several servers, else by file.
        identifier = server if self.color_by == 'server' else file
        print_with_color(message, self.colors[identifier])
    def _set_colors(self):
        if (len(self.servers) > 1):
            self.color_by = 'server'
            alternates = self.servers
        else:
            self.color_by = 'file'
            alternates = self.files
        # Assign ANSI color codes starting at 91 (bright red), wrapping at 100.
        self.colors = { f: (91 + i) % 100 for i, f in enumerate(alternates) }
def get_args():
    """Parse the command line.

    :return: argparse.Namespace with ``ignore``, ``match``, ``files`` and
             ``servers`` attributes (files and servers are comma separated
             strings).
    """
    # Help-string typos fixed: "across across", "match(similar", "seperated".
    parser = argparse.ArgumentParser(
        description = 'Tail a file[s] locally or across multiple servers'
    )
    parser.add_argument('-i', '--ignore',
        default = None,
        help = 'a regex string to ignore (similar to: tail -f <file> | grep -v <ignore>)'
    )
    parser.add_argument('-m', '--match',
        default = None,
        help = 'a regex string to match (similar to: tail -f <file> | grep <match>)'
    )
    parser.add_argument('files',
        type = str,
        help = 'The path to the file you want to tail'
    )
    parser.add_argument('servers',
        type = str,
        default = 'local',
        nargs = '?',
        help = 'A comma separated list of servers to connect to. local for files on your computer'
    )
    return parser.parse_args()
if __name__ == "__main__":
    TailManager(get_args()).run()
|
|
# -*- coding: utf-8 -*-
# This file is part of the pymfony package.
#
# (c) Alexandre Quercia <alquerci@email.com>
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
from __future__ import absolute_import;
import os;
from time import time;
import re;
from pymfony.component.system import Object;
from pymfony.component.system.oop import final;
from pymfony.component.system.oop import interface;
from pymfony.component.system.types import Array;
from pymfony.component.system.reflection import ReflectionObject;
from pymfony.component.system.exception import LogicException;
from pymfony.component.system.exception import InvalidArgumentException;
from pymfony.component.system.exception import RuntimeException;
from pymfony.component.system.serializer import unserialize;
from pymfony.component.system.serializer import serialize;
from pymfony.component.config import ConfigCache;
from pymfony.component.config.loader import LoaderResolver;
from pymfony.component.config.loader import DelegatingLoader;
from pymfony.component.dependency import ContainerBuilder;
from pymfony.component.dependency.interface import ContainerInterface;
from pymfony.component.dependency.interface import ContainerAwareInterface;
from pymfony.component.dependency.parameterbag import ParameterBag;
from pymfony.component.dependency.loader import IniFileLoader;
from pymfony.component.dependency.loader import JsonFileLoader;
from pymfony.component.dependency.loader import YamlFileLoader;
from pymfony.component.http_kernel.bundle import BundleInterface;
from pymfony.component.http_kernel.config import FileLocator;
from pymfony.component.http_kernel.config import FileResourceLocatorInterface;
from pymfony.component.http_kernel.dependency import MergeExtensionConfigurationPass;
from pymfony.component.http_kernel.debug import ExceptionHandler;
"""
"""
@interface
class KernelInterface(FileResourceLocatorInterface):
    """Contract for the application kernel: bundle registry, container
    configuration loading, boot/shutdown lifecycle, and accessors for the
    environment, debug flag and filesystem paths.
    """

    def registerContainerConfiguration(self, loader):
        """Loads the container configuration
        @param loader: LoaderInterface A LoaderInterface instance
        """
        pass;

    def boot(self):
        """Boots the current kernel."""
        pass;

    def shutdown(self):
        """Shutdowns the kernel."""
        pass;

    def getName(self):
        """Gets the name of the kernel.
        @return: string The kernel name
        """
        pass;

    def getEnvironment(self):
        """Gets the environment.
        @return: string The current environment
        """
        pass;

    def isDebug(self):
        """Checks if debug mode is enabled.
        @return: Boolean true if debug mode is enabled, false otherwise
        """
        pass;

    def getContainer(self):
        """Gets the current container.
        @return: ContainerInterface A ContainerInterface instance
        """
        pass;

    def getStartTime(self):
        """Gets the request start time (not available if debug is disabled).
        @return: float The request start timestamp
        """
        pass;

    def registerBundles(self):
        """Returns a list of bundles to registers.
        @return: BundleInterface[] An list of bundle instances.
        """
        pass;

    def getBundles(self):
        """Gets the registered bundle instances.
        @return: BundleInterface{} An dict of registered bundle instances
        """
        pass;

    def isClassInActiveBundle(self, className):
        """Checks if a given class name belongs to an active bundle.
        @param className: string A class name
        @return: Boolean true if the class belongs to an active bundle,
            false otherwise
        """
        pass;

    def getBundle(self, name, first=True):
        """Returns a bundle and optionally its descendants by its name.
        @param name: string Bundle name
        @param first: Boolean Whether to return the first bundle only or
            together with its descendants
        @return: BundleInterface|BundleInterface[] A BundleInterface instance
            or an list of BundleInterface instances if $first is false
        @raise InvalidArgumentException: when the bundle is not enabled
        """
        pass;

    def getCharset(self):
        """Gets the charset of the application.
        @return: string The charset
        """
        pass;

    def getLogDir(self):
        """Gets the log directory.
        @return: string The log directory
        """
        pass;

    def getCacheDir(self):
        """Gets the cache directory.
        @return: string The cache directory
        """
        pass;

    def getRootDir(self):
        """Gets the application root dir.
        @return: string The application root dir
        """
        pass;
@final
class HttpKernelEvents(Object):
    """Names of the events dispatched while the HTTP kernel handles a
    request; listeners subscribe to these string identifiers.
    """

    # The REQUEST event occurs at the very beginning of request dispatching
    #
    # This event allows you to create a response for a request before any
    # other code in the framework is executed. The event listener method
    # receives a pymfony.component.httpkernel.event.GetResponseEvent instance.
    REQUEST = 'http_kernel.request';

    # The EXCEPTION event occurs when an uncaught exception appears
    #
    # This event allows you to create a response for a thrown exception or
    # to modify the thrown exception. The event listener method receives
    # a pymfony.component.httpkernel.event.GetResponseForExceptionEvent instance.
    EXCEPTION = 'http_kernel.exception';

    # The VIEW event occurs when the return value of a controller
    # is not a Response instance.
    #
    # This event allows you to create a response for the return value of the
    # controller. The event listener method receives a
    # pymfony.component.httpkernel.event.GetResponseForControllerResultEvent
    # instance.
    VIEW = 'http_kernel.view';

    # The CONTROLLER event occurs once a controller was found for
    # handling a request
    #
    # This event allows you to change the controller that will handle the
    # request. The event listener method receives a
    # pymfony.component.httpkernel.event.FilterControllerEvent instance.
    CONTROLLER = 'http_kernel.controller';

    # The RESPONSE event occurs once a response was created for
    # replying to a request
    #
    # This event allows you to modify or replace the response that will be
    # replied. The event listener method receives a
    # pymfony.component.httpkernel.event.FilterResponseEvent instance.
    RESPONSE = 'http_kernel.response';

    # The TERMINATE event occurs once a response was sent
    #
    # This event allows you to run expensive post-response jobs.
    # The event listener method receives a
    # pymfony.component.httpkernel.event.PostResponseEvent instance.
    TERMINATE = 'http_kernel.terminate';
class Kernel(KernelInterface):
    """Base application kernel (pymfony port of the Symfony2 Kernel).

    Responsibilities visible in this class: register and boot bundles,
    build the dependency-injection container (with file-cache reuse),
    and locate bundle resources on disk.
    """

    VERSION = '2.2.1';
    VERSION_ID = '20201';
    MAJOR_VERSION = '2';
    MINOR_VERSION = '2';
    RELEASE_VERSION = '1';
    EXTRA_VERSION = '';

    def __init__(self, environment, debug):
        """
        @param environment: string The environment name
        @param debug: Boolean Whether debug mode is enabled
        """
        self._environment = environment;
        self._debug = bool(debug);
        self._name = None;
        self._rootDir = None;
        self._bundles = dict();
        self._bundleMap = dict();
        self._container = None;
        self._extension = None;
        self._booted = False;

        self._rootDir = self.getRootDir();
        self._name = self.getName();
        # NOTE(review): getVersion() is not defined anywhere in this class
        # (only the VERSION constants are); presumably supplied by a subclass
        # or a part of the file outside this view — confirm.
        self._version = self.getVersion();

        if self._debug:
            # start time is only tracked in debug mode (see getStartTime())
            self._startTime = time();

        self.init();

    def init(self):
        # Install the debug exception handler as early as possible.
        if self._debug:
            ExceptionHandler.register(self._debug);

    def __clone__(self):
        # A cloned kernel starts unbooted and without a container.
        if self._debug:
            self._startTime = time();
        self._booted = False;
        self._container = None;

    def _getKernelParameters(self):
        """Returns the kernel.* parameters exposed to the container,
        merged with environment-variable overrides.
        @return: dict
        """
        bundles = dict();
        for name, bundle in self._bundles.items():
            bundles[name] = ReflectionObject(bundle).getName();

        parameters = {
            'kernel.root_dir': self._rootDir,
            'kernel.environment': self._environment,
            'kernel.debug': self._debug,
            'kernel.name': self._name,
            'kernel.cache_dir': self.getCacheDir(),
            'kernel.logs_dir': self.getLogDir(),
            'kernel.bundles': bundles,
            'kernel.charset': self.getCharset(),
            'kernel.container_class': self._getContainerClass(),
            'kernel.version': self.getVersion(),
        };
        parameters.update(self._getEnvParameters());

        return parameters;

    def _getEnvParameters(self):
        """Returns parameters taken from environment variables whose name
        starts with "<KERNELNAME>__"; "__" becomes "." and the whole key is
        lowercased (e.g. APP__FOO -> "app.foo").
        @return: dict
        """
        parameters = dict();
        for key, value in os.environ.items():
            key = str(key);
            prefix = self.getName().upper()+"__";
            # NOTE(review): str.replace() returns a new string and this
            # result is discarded, so dashes are NOT actually removed from
            # the prefix. Likely intended: prefix = prefix.replace("-", "")
            prefix.replace("-", "");
            if key.startswith(prefix):
                name = key.replace("__", ".").lower();
                parameters[name] = value;
        return parameters;

    def boot(self):
        """Boots the kernel: initializes bundles and the container, then
        boots every bundle. No-op when already booted."""
        if self._booted:
            return;

        # init bundles
        self._initializeBundles();

        # init container
        self._initializeContainer();

        for bundle in self.getBundles().values():
            assert isinstance(bundle, ContainerAwareInterface);
            bundle.setContainer(self._container);
            bundle.boot();

        self._booted = True;

    def _getContainerClass(self):
        """Gets the container class.
        @return string The container class
        """
        # e.g. "AppProdProjectContainer" / "AppDevDebugProjectContainer"
        return self._name+self._environment[0].upper() + self._environment[1:]+('Debug' if self._debug else '')+'ProjectContainer';

    def _getContainerBaseClass(self):
        """Gets the container's base class.
        All names except Container must be fully qualified.
        @return string
        """
        return 'Container';

    def _initializeContainer(self):
        """Initializes the service container.
        The cached version of the service container is used when fresh,
        otherwise the container is built, dumped to the cache and used
        directly.
        """
        className = self._getContainerClass();
        cache = ConfigCache(self.getCacheDir()+'/'+className+'.dat', self._debug);
        fresh = True;
        if not cache.isFresh() :
            container = self._buildContainer();
            self._dumpContainer(cache, container, className, self._getContainerBaseClass());
            fresh = False;

        if fresh :
            # load the serialized container from the cache file
            f = open(str(cache));
            try:
                content = f.read();
            finally:
                f.close();
            self._container = unserialize(content);
        else:
            # use the container that was just built above
            self._container = container;

        self._container.set('kernel', self);

        # a freshly built container triggers a cache warm-up when available
        if not fresh and self._container.has('cache_warmer') :
            self._container.get('cache_warmer').warmUp(self._container.getParameter('kernel.cache_dir'));

    def _initializeBundles(self):
        """Initializes the data structures related to the bundle management.
        - the bundles property maps a bundle name to the bundle instance,
        - the bundleMap property maps a bundle name to the bundle inheritance
        hierarchy (most derived bundle first).
        @raise LogicException: if two bundles share a common name
        @raise LogicException: if a bundle tries to extend a non-registered
            bundle
        @raise LogicException: if a bundle tries to extend itself
        @raise LogicException: if two bundles extend the same ancestor
        """
        # init bundles
        self._bundles = dict();
        topMostBundles = dict();
        directChildren = dict();

        for bundle in self.registerBundles():
            assert isinstance(bundle, BundleInterface);
            name = bundle.getName();
            if name in self._bundles.keys():
                raise LogicException(
                    'Trying to register two bundles with the same name "{0}"'
                    ''.format(name)
                );
            self._bundles[name] = bundle;

            parentName = bundle.getParent();
            if parentName:
                if parentName in directChildren.keys():
                    raise LogicException(
                        'Bundle "{0}" is directly extended by two bundles '
                        '"{1}" and "{2}".'
                        ''.format(parentName, name, directChildren[parentName])
                    );
                if parentName == name:
                    raise LogicException(
                        'Bundle "{0}" can not extend itself.'.format(name)
                    );
                directChildren[parentName] = name;
            else:
                topMostBundles[name] = bundle;

        # look for orphans: a declared parent that was never registered
        diff = Array.diff(
            list(directChildren.keys()),
            list(self._bundles.keys()),
        );
        if diff:
            raise LogicException(
                'Bundle "{0}" extends bundle "{1}", which is not registered.'
                ''.format(directChildren[diff[0]], diff[0])
            );

        # inheritance: walk each root bundle down its child chain so every
        # name maps to its hierarchy, most derived bundle first
        self._bundleMap = dict();
        for name, bundle in topMostBundles.items():
            bundleMap = [bundle];
            hierarchy = [name];

            while name in directChildren.keys():
                name = directChildren[name];
                bundleMap.insert(0, self._bundles[name]);
                hierarchy.append(name);

            for name in hierarchy:
                self._bundleMap[name] = list(bundleMap);
                bundleMap.pop();

    def _buildContainer(self):
        """Builds the service container from the bundle extensions and the
        application configuration, then compiles it.
        @return: ContainerBuilder
        @raise RuntimeException: when the cache/log dirs cannot be created
            or written to
        """
        resouces = {
            'cache': self.getCacheDir(),
            'logs': self.getLogDir(),
        };
        for name, path in resouces.items():
            if not os.path.isdir(path):
                try:
                    os.makedirs(path, 0o777);
                except Exception:
                    raise RuntimeException(
                        "Unable to create the {0} directory ({1})\n"
                        "".format(name, path)
                    );
            elif not os.access(path, os.W_OK):
                raise RuntimeException(
                    "Unable to write in the {0} directory ({1})\n"
                    "".format(name, path)
                );

        container = self._getContainerBuilder();
        extensions = list();
        container.addObjectResource(self);
        for bundle in self._bundles.values():
            extension = bundle.getContainerExtension();
            if extension:
                container.registerExtension(extension);
                extensions.append(extension.getAlias());
            if self._debug:
                container.addObjectResource(bundle);
        for bundle in self._bundles.values():
            bundle.build(container);
        container.addObjectResource(self);

        # ensure these extensions are implicitly loaded
        container.getCompilerPassConfig().setMergePass(
            MergeExtensionConfigurationPass(extensions)
        );

        cont = self.registerContainerConfiguration(
            self._getContainerLoader(container)
        );
        if not cont is None:
            container.merge(cont);
        container.compile();
        return container;

    def _getContainerLoader(self, container):
        """Returns a loader able to load ini/json/yaml container
        configuration files located through this kernel.
        @param container: ContainerInterface
        @return: DelegatingLoader
        """
        assert isinstance(container, ContainerInterface);

        locator = FileLocator(self);
        resolver = LoaderResolver([
            IniFileLoader(container, locator),
            JsonFileLoader(container, locator),
            YamlFileLoader(container, locator),
        ]);
        return DelegatingLoader(resolver);

    def _getContainerBuilder(self):
        # fresh builder seeded with the kernel.* parameters
        return ContainerBuilder(ParameterBag(self._getKernelParameters()));

    def _dumpContainer(self, cache, container, className, baseClass):
        """Dumps the service container to the cache.
        @param ConfigCache cache The config cache
        @param ContainerBuilder container The service container
        @param string className The name of the class to generate
        @param string baseClass The name of the container's base class
        """
        assert isinstance(container, ContainerBuilder);
        assert isinstance(cache, ConfigCache);

        # cache the container (serialized, not generated code as in PHP)
        content = serialize(container);
        cache.write(content, container.getResources());

    def shutdown(self):
        """Shuts down every bundle and drops the container. No-op when the
        kernel is not booted."""
        if not self._booted:
            return;
        self._booted = False;
        for bundle in self.getBundles().values():
            assert isinstance(bundle, BundleInterface);
            bundle.shutdown();
            bundle.setContainer(None);
        self._container = None;

    def getBundles(self):
        return self._bundles;

    def isClassInActiveBundle(self, className):
        # True when className starts with any registered bundle's namespace
        for bundle in self._bundles.values():
            assert isinstance(bundle, BundleInterface);
            if 0 == str(className).find(bundle.getNamespace()):
                return True;
        return False;

    def getBundle(self, name, first=True):
        if name not in self._bundleMap:
            raise InvalidArgumentException(
                'Bundle "{0}" does not exist or it is not enabled. Maybe you '
                'forgot to add it in the registerBundles() method of your {1} '
                'file?'.format(name, ReflectionObject(self).getFileName())
            );

        if first is True:
            return self._bundleMap[name][0];
        return self._bundleMap[name];

    def getName(self):
        # derived lazily from the root directory's basename
        if self._name is None:
            self._name = re.sub(r"[^a-zA-Z0-9_]+", "", os.path.basename(self._rootDir));
        return self._name;

    def getEnvironment(self):
        return self._environment;

    def getContainer(self):
        return self._container;

    def getStartTime(self):
        # only meaningful in debug mode; -1 otherwise
        if self._debug:
            return self._startTime;
        else:
            return -1;

    def isDebug(self):
        return self._debug;

    def locateResource(self, name, directory=None, first=True):
        """Returns the file path for a given resource.
        A Resource can be a file or a directory.
        The resource name must follow the following pattern:
            @BundleName/path/to/a/file.something
        where package is the name of the package
        and the remaining part is the relative path in the package.
        If directory is passed, and the first segment of the path is Resources,
        this method will look for a file named:
            directory/BundleName/path/without/Resources
        If BundleName is empty the application root directory is use.
            %kernel.root_dir%/path/to/a/file.something
        @param name: string A resource name to locate
        @param directory: string A directory where to look for the resource first
        @param first: Boolean Whether to return the first path
            or paths for all matching bundles
        @return: string|array The absolute path of the resource
            or an array if $first is false
        @raise InvalidArgumentException: if the file cannot be found or
            the name is not valid
        @raise RuntimeException: if the name contains invalid/unsafe characters
        """
        name = str(name);
        isResource = False;
        if not name.startswith("@"):
            raise InvalidArgumentException(
                'A resource name must start with @ ("{0}" given).'
                "".format(name)
            )
        if ".." in name:
            raise RuntimeException(
                'File name "{0}" contains invalid characters (..).'
                "".format(name)
            );
        bundleName = name[1:];
        if "/" in bundleName:
            bundleName, path = bundleName.split("/", 1);
        if path.startswith("Resources") and directory:
            isResource = True;
        # drop the leading "Resources/" prefix for the override lookup
        overridePath = path[10:];
        resourceBundle = None;
        files = [];
        if bundleName:
            bundles = self.getBundle(bundleName, False);
            for bundle in bundles:
                if isResource:
                    # overriding file in the provided directory wins over the
                    # bundle's own resource, unless both exist (error below)
                    filename = os.path.join(
                        directory,
                        bundle.getName(),
                        overridePath
                    );
                    if os.path.exists(filename):
                        if resourceBundle:
                            raise RuntimeException(
                                '"{0}" resource is hidden by a resource from '
                                'the "{1}" derived bundle. Create a "{2}" '
                                'file to override the bundle resource.'
                                ''.format(
                                    filename,
                                    resourceBundle,
                                    directory+'/'+bundles[0].getName()+'/'+overridePath
                                ));
                        if first:
                            return filename;
                        files.append(filename);
                filename = os.path.join(bundle.getPath(), path);
                if os.path.exists(filename):
                    if first and not isResource:
                        return filename;
                    files.append(filename);
                    resourceBundle = bundle.getName();
        else:
            # check in root_dir when bundle name is empty
            if isResource:
                filename = os.path.join(directory, overridePath);
            else:
                filename = os.path.join(self._rootDir, path);
            if os.path.exists(filename):
                if first and not isResource:
                    return filename;
                files.append(filename);
        if files:
            if first and isResource:
                return files[0];
            else:
                return files;
        raise InvalidArgumentException(
            'Unable to find file "{0}".'.format(name)
        );

    def getRootDir(self):
        # directory that contains the (sub)class source file
        if self._rootDir is None:
            r = ReflectionObject(self);
            self._rootDir = os.path.dirname(r.getFileName()).replace('\\', '/');
        return self._rootDir;

    def getCacheDir(self):
        return self._rootDir+'/cache/'+self._environment;

    def getLogDir(self):
        return self._rootDir+'/logs/'+self._environment;

    def getCharset(self):
        return 'UTF-8';
|
|
"""
Copyright (c) 2015-2021 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import ipaddress
import os
import subprocess
import sys
def parse_address(addr):
    """Split a pf address token into address, port and IP protocol.

    IPv6 tokens look like "addr[port]", IPv4 like "addr:port"; the port
    defaults to '0' when absent.
    """
    result = {'port': '0'}
    if addr.count(':') > 1:
        # more than one colon: IPv6, formatted as "address[port]"
        result['ipproto'] = 'ipv6'
        result['addr'] = addr.split('[')[0]
        if '[' in addr:
            result['port'] = addr.split('[')[1].split(']')[0]
    else:
        # IPv4, formatted as "address:port"
        result['ipproto'] = 'ipv4'
        result['addr'] = addr.split(':')[0]
        if ':' in addr:
            result['port'] = addr.split(':')[1]
    return result
def fetch_rule_labels():
    """Map pf rule numbers (from `pfctl -vvPsr`) to their label (rid) and,
    when available, the human readable description from /tmp/rules.debug.
    """
    labels = dict()
    descriptions = dict()
    # query descriptions from active ruleset so we can search and display
    # rule descriptions as well.
    if os.path.isfile('/tmp/rules.debug'):
        with open('/tmp/rules.debug', "rt", encoding="utf-8") as rules_file:
            for line in rules_file:
                lbl = line.split(' label ')[-1] if ' label ' in line else ""
                rule_label = lbl.split('"')[1] if lbl.count('"') >= 2 else None
                descriptions[rule_label] = ''.join(lbl.split('"')[2:]).strip().strip('# : ')
    pfctl = subprocess.run(['/sbin/pfctl', '-vvPsr'], capture_output=True, text=True)
    for line in pfctl.stdout.strip().split('\n'):
        # rule lines are prefixed with "@<number>"
        if not line.startswith('@'):
            continue
        rule_number = line.split()[0][1:]
        if ' label ' in line:
            rid = ''.join(line.split(' label ')[-1:]).strip()[1:].split('"')[0]
            entry = {'rid': rid, 'descr': None}
            if rid in descriptions:
                entry['descr'] = descriptions[rid]
            labels[rule_number] = entry
    return labels
def query_states(rule_label, filter_str):
    """Parse `pfctl -vvs state` output into a list of state record dicts,
    optionally filtered by rule label, free text, or an ip network.

    :param rule_label: lowercase rule label (rid) fragment, "" for no filter
    :param filter_str: free-text filter; when it parses as an ip network,
                       records are matched on overlapping addresses instead
    :return: list of dicts
    """
    result = list()
    try:
        filter_network = ipaddress.ip_network(filter_str.strip())
    except ValueError:
        filter_network = None
    rule_labels = fetch_rule_labels()
    sp = subprocess.run(['/sbin/pfctl', '-vvs', 'state'], capture_output=True, text=True)
    record = None
    for line in sp.stdout.strip().split('\n'):
        parts = line.split()
        # indented lines are detail lines belonging to the last seen record
        if line.startswith(" ") and len(parts) > 1 and record:
            if parts[0] == 'age':
                # comma-separated stats line: age / expires / pkts / bytes / rule
                for part in line.split(","):
                    part = part.strip()
                    if part.startswith("rule "):
                        record["rule"] = part.split()[-1]
                        if record["rule"] in rule_labels:
                            record["label"] = rule_labels[record["rule"]]["rid"]
                            record["descr"] = rule_labels[record["rule"]]["descr"]
                    elif part.startswith("age "):
                        record["age"] = part.split()[-1]
                    elif part.startswith("expires in"):
                        record["expires"] = part.split()[-1]
                    elif part.endswith("pkts"):
                        # "a:b pkts" -> [a, b]
                        record["pkts"] = [int(s) for s in part.split()[0].split(':')]
                    elif part.endswith("bytes"):
                        record["bytes"] = [int(s) for s in part.split()[0].split(':')]
            elif parts[0] == "id:":
                # XXX: in order to kill a state, we need to pass both the id and the creator, so it seeems to make
                # sense to uniquely identify the state by the combined number
                record["id"] = "%s/%s" % (parts[1], parts[3])
                # searchable text: all non-empty record values joined
                search_line = " ".join(str(item) for item in filter(None, record.values()))
                if rule_label != "" and record['label'].lower().find(rule_label) == -1:
                    # label filter did not match
                    continue
                elif filter_network is not None:
                    try:
                        match = False
                        for field in ['src_addr', 'dst_addr', 'nat_addr']:
                            # NOTE(review): record[field] may be None
                            # (nat_addr); ip_network(None) then raises and
                            # the bare except below skips the whole record —
                            # confirm this is the intended behavior.
                            addr = ipaddress.ip_network(record[field])
                            # NOTE(review): `field is not None` is always True
                            # (field is a string literal) — vestigial check.
                            if field is not None and ipaddress.ip_network(filter_network).overlaps(addr):
                                match = True
                                break
                        if not match:
                            continue
                    except:
                        continue
                elif filter_str != "" and search_line.lower().find(filter_str.lower()) == -1:
                    # apply filter when provided
                    continue
                if parts[0] == "id:":
                    # append to response
                    result.append(record)
        elif len(parts) >= 6:
            # unindented line with enough columns: start of a new state record
            record = {
                'label': '',
                'descr': '',
                'nat_addr': None,
                'nat_port': None,
                'iface': parts[0],
                'proto': parts[1],
                'ipproto': parse_address(parts[2])['ipproto']
            }
            if parts[3].find('(') > -1:
                # NAT enabled
                nat_record = parse_address(parts[3][1:-1])
                record['nat_addr'] = nat_record['addr']
                if nat_record['port'] != '0':
                    record['nat_port'] = nat_record['port']
            # "->" marks outbound states; column order flips accordingly
            if parts[-3] == '->':
                record['direction'] = 'out'
            else:
                record['direction'] = 'in'
            record['dst_addr'] = parse_address(parts[-2])['addr'] if record['direction'] == 'out' else parse_address(parts[2])['addr']
            record['dst_port'] = parse_address(parts[-2])['port'] if record['direction'] == 'out' else parse_address(parts[2])['port']
            record['src_addr'] = parse_address(parts[2])['addr'] if record['direction'] == 'out' else parse_address(parts[-2])['addr']
            record['src_port'] = parse_address(parts[2])['port'] if record['direction'] == 'out' else parse_address(parts[-2])['port']
            record['state'] = parts[-1]
    return result
def query_top(rule_label, filter_str):
    """Parse `pftop` batch output into a list of state record dicts,
    optionally filtered by rule label, free text, or an ip network.

    Fixes vs. the original: the network filter referenced a nonexistent
    'gateway' key (the record field is 'gw_addr'), so gateway matching
    never worked and the KeyError was silently swallowed; a None label
    crashed `.lower()` when a rule_label filter was set; the unused
    `header` local was removed.

    :param rule_label: lowercase rule label (rid) fragment, "" for no filter
    :param filter_str: free-text filter; when it parses as an ip network,
                       records are matched on overlapping src/dst/gateway
    :return: list of dicts
    """
    result = list()
    rule_labels = fetch_rule_labels()
    sp = subprocess.run(
        ['/usr/local/sbin/pftop', '-w', '1000', '-b', '-v', 'long', '9999999999999'],
        capture_output=True, text=True
    )
    try:
        filter_network = ipaddress.ip_network(filter_str.strip())
    except ValueError:
        filter_network = None
    for rownum, line in enumerate(sp.stdout.strip().split('\n')):
        parts = line.strip().split()
        # the first two rows are headers
        if rownum >= 2 and len(parts) > 5:
            record = {
                'proto': parts[0],
                'dir': parts[1].lower(),
                'src_addr': parse_address(parts[2])['addr'],
                'src_port': parse_address(parts[2])['port'],
                'dst_addr': parse_address(parts[3])['addr'],
                'dst_port': parse_address(parts[3])['port'],
                'gw_addr': None,
                'gw_port': None,
            }
            # a gateway column is only present for some states; detect it by
            # the column looking like an address
            if parts[4].count(':') > 2 or parts[4].count('.') > 2:
                record['gw_addr'] = parse_address(parts[4])['addr']
                record['gw_port'] = parse_address(parts[4])['port']
                idx = 5
            else:
                idx = 4
            record['state'] = parts[idx]
            record['age'] = parts[idx+1]
            record['expire'] = parts[idx+2]
            record['pkts'] = int(parts[idx+3]) if parts[idx+3].isdigit() else 0
            record['bytes'] = int(parts[idx+4]) if parts[idx+4].isdigit() else 0
            record['avg'] = int(parts[idx+5]) if parts[idx+5].isdigit() else 0
            record['rule'] = parts[idx+6]
            if record['rule'] in rule_labels:
                record['label'] = rule_labels[record['rule']]['rid']
                record['descr'] = rule_labels[record['rule']]['descr']
            else:
                record['label'] = None
                record['descr'] = None
            # convert hh:mm:ss to seconds
            for timefield in ['age', 'expire']:
                tmp = record[timefield].split(':')
                record[timefield] = int(tmp[0]) * 3600 + int(tmp[1]) * 60 + int(tmp[2])
            # searchable text: all non-empty record values joined
            search_line = " ".join(str(item) for item in filter(None, record.values()))
            # label may be None when the rule is not in the active ruleset
            if rule_label != "" and (record['label'] or '').lower().find(rule_label) == -1:
                continue
            elif filter_network is not None:
                try:
                    match = False
                    for field in ['src_addr', 'dst_addr', 'gw_addr']:
                        if record[field] is None:
                            continue
                        addr = ipaddress.ip_network(record[field])
                        if filter_network.overlaps(addr):
                            match = True
                            break
                    if not match:
                        continue
                except ValueError:
                    # unparsable address in the record; treat as no match
                    continue
            elif filter_str != "" and search_line.lower().find(filter_str.lower()) == -1:
                # apply filter when provided
                continue
            result.append(record)
    return result
|
|
import json
import logging
import sys
from datetime import datetime
from datetime import timedelta
from django.http import HttpResponse
from django.template import Context, loader
from django.shortcuts import render, redirect
from mapper.models import Event, Location
from mapper.utils import FixedOffset
def index(req):
    """Landing page: pass the next upcoming event (if any) as context."""
    # events are considered "upcoming" until one hour after their start
    cutoff = datetime.now(tz=FixedOffset(PSTOFFSET, 'PST')) - timedelta(hours=1)
    upcoming = Event.objects.order_by('when').filter(when__gt=cutoff)[:1]
    event = event_for_client(upcoming[0]) if len(upcoming) else None
    return render(req, 'index.html', {'event': event})
def all_events(req):
    """JSON array of every upcoming event (started less than an hour ago)."""
    cutoff = datetime.now(tz=FixedOffset(PSTOFFSET, 'PST')) - timedelta(hours=1)
    upcoming = Event.objects.order_by('when').filter(when__gt=cutoff)
    payload = [event_for_client(db_ev) for db_ev in upcoming]
    res = HttpResponse(content_type='application/json')
    json.dump({'events': payload}, res)
    return res
def event_for_client(event):
    """Flatten an Event (and its Location) into a JSON-serializable dict."""
    where = event.where
    return {
        'name': event.name,
        # IETF-style date, parsable by javascript Date()
        'when': event.when.strftime("%a, %d %b %Y %H:%M:%S GMT%z"),
        'description': event.description,
        'image': event.image_url,
        'where': where.building,
        'address': where.address,
        'lat': where.latitude,
        'lon': where.longitude,
    }
# Pacific Standard Time offset from UTC, in hours (used with FixedOffset).
PSTOFFSET = -8
def reset_db(request):
    """Dev helper: wipe all Events/Locations and repopulate with demo data
    anchored to the current half hour, then redirect to the index page.
    """
    Event.objects.all().delete()
    Location.objects.all().delete()
    tm = datetime.now(tz=FixedOffset(PSTOFFSET, 'PST'))
    # round down to the previous half-hour boundary
    base = tm - timedelta(minutes=tm.minute % 30, seconds=tm.second, microseconds=tm.microsecond)
    # tuples: (name, when, description, image_url, latitude, longitude, address, building)
    data = [('Whine and Cheese', base + timedelta(hours = 0, minutes = 0), 'Enjoy a fun afternoon with your RFs and let off some steam! Free burgers and fries.', '/static/imgs/1.png', 37.4244677, -122.16637889999998, '618 Escondido Rd, Stanford CA 94305', 'Stern Hall'),
        ('Overseas Info Session', base + timedelta(hours = 0, minutes = 45), 'Thinking about going abroad? Info session today to learn about what the Bing program has to offer you. Lunch provided.', '/static/imgs/2.png', 37.4223369, -122.1557191, '725 Escondido Rd, Stanford CA 94305', 'Mirrielees'),
        ('Community Pot Luck', base + timedelta(hours = 1, minutes = 0), 'The Stanford has not shared a meal together in far too long! Bring some food and get some food today!', '/static/imgs/3.png', 37.4221702, -122.16796340000002, '565 Mayfield Ave, Stanford CA 94305', 'Haas Center'),
        ('Biology Department BBQ', base + timedelta(hours = 1, minutes = 0), 'Biology Department annual picnic. Sandwiches for everyone', '/static/imgs/4.png', 37.4225305, -122.16902270000003, '417 Mayfield Ave, Stanford CA 94305', 'Levin Field'),
        ('Dominos Giveaway', base + timedelta(hours = 1, minutes = 30), 'Dominos is giving away pizza to promote the new 10 foot pizza! Come to the oval for free, tasty pizza!', '/static/imgs/5.png', 37.4283681, -122.16694740000003, '516 Serra Mall, Stanford CA 94305', 'The Oval'),
        ('Free Pancackes for Jesus', base + timedelta(hours = 1, minutes = 45),'We want to spend some time talking to you about Jesus, and to entice you we are providing free pancakes! Yum!', '/static/imgs/6.png', 37.4253224, -122.17352779999999, '317 Santa Teresa Ave, Stanford CA 94305', 'Roble Field'),
        ('Dewy Decimal Appreciation Day!', base + timedelta(hours = 2, minutes = 0),'The Librarians want to remind you how important the Dewy Decimal system is! Brownies and milk served outside Green!', '/static/imgs/7.png', 37.426824, -122.16585320000002, '600 Crothers Way, Stanford CA 94305', 'Green Library'),
        ('Open house at the GSB', base + timedelta(hours = 5, minutes = 15),'The GSB would like to show Stanford just how nice the new Business School is. Come check it out and grab some dinner with us!', '/static/imgs/8.png', 37.4295811, -122.16577430000001,'501 Memorial Way, Stanford CA 94305', 'GSB'),
        ('Chipotle at Meyer', base + timedelta(hours = 6, minutes = 0),'Due to a massive accounting error, we have found ourselves with hundreds of extra burritos. Come grab a free one tonight!', '/static/imgs/9.png', 37.4195909, -122.16989999999998,'1040 Campus Drive, Stanford CA 94305', 'Meyer Library'),
        ('Hot dogs and soda at BASEBALL GAME', base + timedelta(hours = 6, minutes = 30),'Tonight at Stanford Baseball we are giving vouchers for free soda and hotdogs to the first 200 students that show up. Must show SUID.', '/static/imgs/10.png', 37.430656, -122.1588329,'645 Nelson Road, Stanford CA 94305', 'Sunken Diamond'),
        ('Facebook in White Plaza', base + timedelta(hours = 6, minutes = 30), 'Facebook is spreading the love with free Jamba Juice!', '/static/imgs/11.png', 37.4235194, -122.17236250000002, '475 Lagunita Drive, Stanford CA 94305', 'White Plaza'),
        ('Teach for America dinner', base + timedelta(hours = 6, minutes = 30), 'Always wanted to help the world where it counts? Come to dinner and hear all about how you can make a difference with Teach for America.', '/static/imgs/5.png', 37.421149, -122.16506600000002, '612 Alvarado Row, Stanford CA 04305', 'Munger'),
        ('Chicken finger for your thoughts', base + timedelta(hours = 10, minutes = 30), 'Late Nite will give you a free chicken finger if you share with us one interesting fact you studied tonight', '/static/imgs/12.png', 37.4235206, -122.16510410000001, '609 Arguello Way, Stanford CA 94305', 'The Dish at Stern')]
    for t in data:
        E = Event()
        E.name = t[0]
        E.when = t[1]
        E.description = t[2]
        E.image_url = t[3]
        L = Location()
        L.latitude = t[4]
        L.longitude = t[5]
        L.address = t[6]
        L.building = t[7]
        # Location must be saved before being assigned as a foreign key
        L.save()
        E.where = L
        E.save()
    return redirect('/')
def list_events(request):
    """JSON index of upcoming events (pk, name, coords, datetime).

    Supports an optional 'until' GET parameter (javascript date string)
    limiting the range; unparsable values are logged and ignored.
    """
    until = None
    if 'until' in request.GET:
        raw = request.GET['until'].split('GMT')[0].strip()
        try:
            until = datetime.strptime(raw, '%a %b %d %Y %H:%M:%S')
            until = until.replace(tzinfo=FixedOffset(PSTOFFSET, 'PST'))
        except:
            sys.stderr.write("Not able to parse 'until' {0}\n".format(request.GET['until']))
    cutoff = datetime.now(tz=FixedOffset(PSTOFFSET, 'PST')) - timedelta(hours=1)
    queryset = Event.objects.order_by('when').filter(when__gt=cutoff)
    if until:
        queryset = queryset.filter(when__lte=until)
    event_list = [
        {
            'name': ev.name,
            'pk': ev.pk,
            'lat': ev.where.latitude,
            'lng': ev.where.longitude,
            'datetime': ev.when.strftime("%a, %d %b %Y %H:%M:%S GMT%z"),  # IETF syntax
        }
        for ev in queryset
    ]
    response = HttpResponse(content_type='application/json')
    json.dump({'details': event_list}, response)
    return response
def add_event(request):
    """Create an Event (plus its Location) from POST data; respond 201 + pk."""
    when = datetime.strptime(request.POST['time'], "%H:%M %Y-%m-%d")
    when = when.replace(tzinfo=FixedOffset(PSTOFFSET, 'PST'))
    where = Location()
    where.latitude = float(request.POST['lat'])
    where.longitude = float(request.POST['long'])
    where.address = request.POST['address']
    # Location must exist before the Event can reference it
    where.save()
    new_event = Event()
    new_event.name = request.POST['name']
    new_event.when = when
    new_event.where = where
    new_event.description = request.POST['description']
    new_event.save()
    return HttpResponse(new_event.pk, status=201)
def event(request, event_id):
    """JSON detail view for a single event, looked up by primary key."""
    ev = Event.objects.get(pk=event_id)
    # kept for a future 'tags' entry in the payload (see commented key below)
    tags = ev.tags.split(';')[:-1]
    tag_list = [{"tag": t} for t in tags]
    # Location is not directly serializable, so the dict is built by hand
    json_event = {
        'pk': ev.pk,
        'name': ev.name,
        'description': ev.description,
        'when': ev.when.strftime("%I:%M %p %m/%d/%Y"),  # "%a, %d %b %Y %H:%M:%S GMT%z" IETF, js parsable
        'where': {
            "latitude": ev.where.latitude,
            "longitude": ev.where.longitude,
            "address": ev.where.address,
            "building": ev.where.building,
        },
        # 'tags': tag_list,
        'image': ev.image_url,
    }
    response = HttpResponse(content_type='application/json')
    json.dump({'event': json_event}, response)
    return response
###
# -----------------------------------------------
# --- OLD CODE
#
###
def index_b(request):
    # (Legacy) Render the old landing page template.
    return render(request, 'index_b.html')
def index_with_settings(req):
    # Render the landing page variant that includes the settings UI.
    return render(req, 'index_settings.html')
def list_events_b(request):
    """(Legacy) Return upcoming events as JSON, grouped by calendar day:
    {'days': [{'date', 'd', 'details'}, ...]} in chronological order.

    Events that started up to one hour ago are included; an optional
    ``until`` GET parameter (a JavaScript Date string) caps the range.
    """
    until = None
    if 'until' in request.GET:
        # Strip the "GMT-0800 (PST)" suffix browsers append before parsing.
        d = request.GET['until'].split('GMT')[0].strip()
        try:
            until = datetime.strptime(d, '%a %b %d %Y %H:%M:%S')
            until = until.replace(tzinfo=FixedOffset(PSTOFFSET, 'PST'))
        except ValueError:
            # BUG FIX: was a bare `except:`; only strptime's ValueError is expected.
            sys.stderr.write("Not able to parse 'until' {0}\n".format(request.GET['until']))

    today = datetime.now(tz=FixedOffset(PSTOFFSET, 'PST'))
    today += timedelta(hours=-1)

    # Cleanup: .all() before .filter() is redundant on a manager.
    events = Event.objects.filter(when__gt=today)
    if until:
        events = events.filter(when__lte=until)

    # Group the events by calendar day.
    segmented_events = {}
    for event in events:
        event_details = {
            'name': event.name,
            'pk': event.pk,
            'lat': event.where.latitude,
            'lng': event.where.longitude,
        }
        segmented_events.setdefault(event.when.date(), []).append(event_details)

    # BUG FIX: the original sorted the day list by the "%m/%d/%Y" *string*,
    # which orders incorrectly across month/year boundaries (e.g. "12/31/2013"
    # sorts after "01/01/2014").  Sort by the actual date objects instead;
    # the list is only needed because of the format required clientside.
    today_date = datetime.now(tz=FixedOffset(PSTOFFSET, 'PST')).date()
    split_list = []
    for segment in sorted(segmented_events):
        dateString = "Today" if segment == today_date else segment.strftime("%m/%d/%Y")
        split_list.append({
            'date': dateString,
            'd': segment.strftime("%m/%d/%Y"),
            'details': segmented_events[segment],
        })

    response = HttpResponse(content_type='application/json')
    json.dump({ 'days': split_list }, response)
    return response
def event_details(request):
    """(Legacy) Render the event-details page.

    BUG FIX: render_to_response() was deprecated in Django 2.0 and removed in
    3.0; render() is the drop-in replacement (and also passes the request to
    the template context, enabling context processors).
    """
    return render(request, 'eventdetails.html', {})
|
|
#!/usr/bin/python3
"""
test-anatomize.py
APP: Inquisition
DESC: Unit test for Anatomize.py library
CREATION_DATE: 2017-04-28
"""
# MODULES
# | Native
import configparser
import unittest
# | Third-Party
# | Custom
from lib.anatomize.Anatomize import Anatomize
# METADATA
__author__ = 'Joshua Carlson-Purcell'
__copyright__ = 'Copyright 2017, CarlsoNet'
__license__ = 'MIT'
__version__ = '1.0.0-alpha'
__maintainer__ = 'Joshua Carlson-Purcell'
__email__ = 'jcarlson@carlso.net'
__status__ = 'Development'
class ParserTestCase(unittest.TestCase):
    """Unit tests for the Anatomize parser: stat bookkeeping (in memory and
    in the log DB), log parsing, and log-file polling.

    All tests run against the parser with ID 2 ('fake_apache_logs') loaded
    from the unit-test config.

    Idiom cleanup: the original expressed "must raise X" as
    try/assertTrue(False)/except X/assertTrue(True); every such test now uses
    the equivalent but clearer `with self.assertRaises(X)` context manager.
    """

    def setUp(self):
        # generate config and build the Anatomize harness from it
        cfg = configparser.ConfigParser()
        cfg.read('build/tests/unit_tests_GOOD.cfg')

        self.anatomizer = Anatomize(cfg)
        self.parser = self.anatomizer.parserStore[2]

    def test_fetchTemplates(self):
        # the test config must supply at least one parsing template
        self.assertGreater(len(self.parser.templateStore), 0)

    def test_updateStatInLogDB(self):
        statKey = '2_fake_apache_logs'
        statName = 'total_logs_processed'

        # set stat to a known baseline and confirm it landed in the log DB
        self.parser.updateStatInLogDB(statName=statName, statKey=statKey, newVal=0, action='set')
        currentNumLogs = int(self.parser.logDbHandle.hget('stats:parser:' + statKey, statName).decode('utf-8'))
        self.assertEqual(currentNumLogs, 0)

        # update stat by increasing it
        self.parser.updateStatInLogDB(statName=statName, incrAmt=1)

        # the stored value must now be exactly 1 more than before
        newNumLogs = int(self.parser.logDbHandle.hget('stats:parser:' + statKey, statName).decode('utf-8'))
        self.assertEqual(newNumLogs, currentNumLogs + 1)

    def test_updateStatInLogDB_strictInvalidKey(self):
        # a nonexistent stat key in strict mode must raise IndexError
        with self.assertRaises(IndexError):
            self.parser.updateStatInLogDB(statName='fake_name', statKey='doesn\'t_exist',
                                          incrAmt=1, strict=True)

    def test_updateStatInLogDB_invalidIncrAmt(self):
        # negative increments are rejected
        with self.assertRaises(ValueError):
            self.parser.updateStatInLogDB(statName='total_logs_processed',
                                          statKey='2_fake_apache_logs', incrAmt=-5)

    def test_updateStatInLogDB_invalidAction(self):
        # unknown action strings are rejected
        with self.assertRaises(ValueError):
            self.parser.updateStatInLogDB(statName='total_logs_processed',
                                          statKey='2_fake_apache_logs',
                                          action='bad_action', newVal=5)

    def test_incrStat(self):
        statName = 'total_logs_processed'
        self.parser.resetParserStats(statType=statName)

        # increase TLP and verify the in-memory counter
        self.parser.incrStat(statKey=statName, amt=1)
        self.assertEqual(self.parser.stats[statName], 1)

    def test_incrStat_invalidIncrAmt(self):
        # negative increments are rejected
        with self.assertRaises(ValueError):
            self.parser.incrStat(statKey='total_logs_processed', amt=-1)

    def test_incrStat_strictInvalidKey(self):
        # unknown stat key in strict mode must raise IndexError
        with self.assertRaises(IndexError):
            self.parser.incrStat(statKey='nonexistant_stat_key_strict', amt=1, strict=True)

    def test_avgStat(self):
        statName = 'average_log_length'
        initialVal = 1
        newVal = 5
        numValsCurrentlyInSet = 1

        # seed the stat, then fold in one new value
        self.parser.stats[statName] = initialVal
        self.parser.avgStat(statKey=statName, initialVal=initialVal, newVal=newVal,
                            numValsInSet=numValsCurrentlyInSet)

        calcAvg = self.parser.stats[statName]
        actualAvg = (initialVal + newVal) / (numValsCurrentlyInSet + 1)
        self.assertEqual(calcAvg, actualAvg)

    def test_avgStat_strictInvalidKey(self):
        statName = 'average_log_length'
        # remove the stat so the strict lookup fails
        del self.parser.stats[statName]

        with self.assertRaises(IndexError):
            self.parser.avgStat(statKey=statName, initialVal=1, newVal=5,
                                numValsInSet=2, strict=True)

    def test_avgStat_invalidNumValsInSet(self):
        # a negative set size is rejected
        with self.assertRaises(ValueError):
            self.parser.avgStat(statKey='average_log_length', initialVal=1, newVal=5,
                                numValsInSet=-1000, strict=True)

    def test_avgStat_checkDbVals(self):
        statKey = '2_fake_apache_logs'
        statName = 'average_log_length'
        initialVal = 1
        newVal = 5
        numValsCurrentlyInSet = 1

        # seed the stat and fold in a new value, persisting to the log DB
        self.parser.stats[statName] = initialVal
        self.parser.avgStat(statKey=statName, initialVal=initialVal, newVal=newVal,
                            numValsInSet=numValsCurrentlyInSet, storeInDb=True)

        inMemAvg = self.parser.stats[statName]
        actualAvg = (initialVal + newVal) / (numValsCurrentlyInSet + 1)

        # the persisted value must match both the in-memory and analytic averages
        logDbAvgStat = float(self.parser.logDbHandle.hget('stats:parser:' + statKey, statName).decode('utf-8'))
        self.assertEqual(inMemAvg, logDbAvgStat)
        self.assertEqual(actualAvg, logDbAvgStat)

    def test_resetParserStats_specificStatAndVal(self):
        statKey = '2_fake_apache_logs'
        statName = 'total_logs_processed'

        # zero the stat, then reset it to a specific value
        self.parser.stats[statName] = 0
        self.parser.resetParserStats(statType='total_logs_processed', statData=1)
        self.assertEqual(self.parser.stats[statName], 1)

        # the reset must also be reflected in the log DB
        logDbStatVal = int(self.parser.logDbHandle.hget('stats:parser:' + statKey, statName).decode('utf-8'))
        self.assertEqual(logDbStatVal, 1)

    def test_printStats(self):
        self.parser.resetParserStats()
        # default rendering is a human-readable string
        self.assertIsInstance(self.parser.printStats(), str)

    def test_printStats_raw(self):
        self.parser.resetParserStats()
        # raw=True returns the underlying stat mapping
        self.assertIsInstance(self.parser.printStats(raw=True), dict)

    def test_parseLog(self):
        self.assertTrue(self.parser.parseLog(rawLog='raw log'))

    def test_parseLog_invalidLogTTL(self):
        # a non-positive log TTL must be rejected
        self.parser.logTTL = 0
        with self.assertRaises(ValueError):
            self.parser.parseLog(rawLog='164.169.65.152')

    def test_parseLog_baselineMode(self):
        # parsing must still succeed while the parser is in baseline mode
        self.parser.baselineMode = True
        self.assertTrue(self.parser.parseLog(rawLog='raw log'))

    def test_processLog(self):
        self.parser.resetParserStats(statType='total_logs_processed')
        self.assertTrue(self.parser.processLog('raw log'))

    def test_pollLogFile(self):
        # polling a readable sample log must complete without raising
        self.parser.logFile = 'build/src/sample_logs/bluecoat.log'
        self.parser.pollLogFile(exitOnMaxLogs=False)

    def test_pollLogFile_useHazyStateTracking(self):
        # hazy state tracking requires a positive update interval
        with self.assertRaises(ValueError):
            self.parser.pollLogFile(useHazyStateTracking=True, numLogsBetweenTrackingUpdate=0)
        with self.assertRaises(ValueError):
            self.parser.pollLogFile(useHazyStateTracking=True, numLogsBetweenTrackingUpdate=-10000)

    def test_pollLogFile_nonExistantFile(self):
        # a missing log file must surface an error
        self.parser.logFile = '/var/log/non_existant_file'
        with self.assertRaises(Exception):
            self.parser.pollLogFile()

    def test_pollLogFile_nonAccessibleFile(self):
        # an unreadable log file must surface an error
        self.parser.logFile = '/var/log/inaccessible_test_log'
        with self.assertRaises(Exception):
            self.parser.pollLogFile()
# Standard CLI entry point: run every test in this module with unittest's runner.
if __name__ == '__main__':
    unittest.main()
|
|
#!/usr/bin/env python
from __future__ import division
from __future__ import with_statement
import numpy as np
#from pylab import ion
import matplotlib as mpl
#from matplotlib.path import Path
from matplotlib import pyplot as plt
from scipy.optimize import curve_fit
#import numexpr as ne
from numba import autojit
import sys
import time
import cPickle as pickle
import collections
from collections import deque
from multiprocessing import Process, Queue
from smartFormat import smartFormat
from genericUtils import wstdout
__author__ = "J.L. Lanfranchi"
__email__ = "jll1062@phys.psu.edu"
__copyright__ = "Copyright 2014 J.L. Lanfranchi"
__credits__ = ["J.L. Lanfranchi"]
__license__ = """Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including without
limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
#-- Turn live-interactive plotting on (makes updated plots appear animated)
#ion()
#-- Adjust the font used on the plots
# rcParams font settings applied module-wide to every matplotlib figure below
font = {'family' : 'serif', 'weight' : 'normal', 'size' : 8}
mpl.rc('font', **font)
#@autojit
def step(previousDirection, currentCoord, relMoveDir):
    """Take one unit step on the square lattice.

    The new heading is (previousDirection + relMoveDir) mod 4, with headings
    0=right, 1=up, 2=left, 3=down; returns the (x, y) tuple one step from
    currentCoord in that heading.
    """
    heading = (previousDirection + relMoveDir) % 4
    # Displacements indexed by heading: right, up, left, down.
    dx, dy = ((1, 0), (0, 1), (-1, 0), (0, -1))[heading]
    return (currentCoord[0] + dx, currentCoord[1] + dy)
#@autojit
def measureChain(chain):
    """Return the SQUARED Euclidean end-to-end distance of a chain
    (no square root is taken -- this is R_N^2, not R_N)."""
    x0, y0 = chain[0][0], chain[0][1]
    xn, yn = chain[-1][0], chain[-1][1]
    return (xn - x0)**2 + (yn - y0)**2
#@autojit
def simpleAttemptToCreateChain(nSteps, changesInDir=(0,-1,+1)):
    """State is taken to be direction of travel; there are 4 directions, so
    four states: right (0), up (1), left (2), and down (3). The only allowed
    transitions are
        state -> state + 1 (modulo 4)
        state -> state - 1 (modulo 4)
        state -> state
    Then, it must be checked that the coordinate isn't already in the chain;
    if it is, then None is returned; otherwise, the algo repeats until a
    chain of nSteps is reached.

    Returns a list of nSteps+1 (x, y) tuples on success, else None.
    """
    #-- Initialize chain to start at (0,0) and move to (0,1) (i.e., move up)
    chainCoords = [(0,0), (0,1)]
    coord = (0,1)
    previousDirection = 1
    length = 1
    nChangesInDir = len(changesInDir)
    # NOTE(review): previousDirection is never updated after a step, so every
    # move is taken relative to the initial "up" heading rather than the last
    # heading -- confirm against the transition rule in the docstring before
    # relying on the walk statistics.
    while True:
        # BUG FIX: this used np.int(), an alias removed in NumPy 1.24 (it
        # raised AttributeError there); the builtin int() truncates identically.
        relMoveDir = changesInDir[ int(np.random.random()*nChangesInDir) ]
        coord = step(previousDirection, coord, relMoveDir)
        if coord in chainCoords:
            return None
        chainCoords.append(coord)
        length += 1
        if length == nSteps:
            return chainCoords
def npCreateChain(nSteps, changesInDir=(0,-1,+1)):
    # NOTE(review): unfinished stub for a vectorized chain builder -- the
    # random draw below is computed and immediately discarded, and nothing is
    # returned.  See simpleAttemptToCreateChain for the working version.
    nChangesInDir = len(changesInDir)
    np.random.random(nSteps)*nChangesInDir
class CreateChainWorkerClass(Process):
    """Direction is direction of travel; there are 4 directions, so
    right=0, up=1, left=2, and down=3. By default the only allowed transitions
    are
        direction -> direction + 1 (modulo 4)
        direction -> direction - 1 (modulo 4)
        direction -> direction
    but the changesInDir allows the user to specify what changes are allowed.
    Note that each is chosen with equal probability.
    Next, it is checked that the coordinate isn't already in the chain;
    if it is in the chain, then None is returned; otherwise, the algo repeats
    until a chain of nSteps is reached.

    Worker process: run() keeps attempting chains, putting each success on
    chainsQueue until the queue holds nChains chains.
    """
    # TODO: shared "tries" object contains single int, gets incremented
    #       for each try
    def __init__(self, chainsQueue, nSteps, changesInDir=(0,-1,+1), nChains=1):
        Process.__init__(self)
        wstdout("0")
        self.chainsQueue = chainsQueue        # completed chains are put here
        self.nSteps = nSteps                  # target number of steps per chain
        self.changesInDir = changesInDir      # allowed relative direction changes
        self.nChains = nChains                # stop once this many chains are queued
        self.nChangesInDir = len(changesInDir)
        wstdout("1\n")

    def step(self, previousDirection, currentCoord, relMoveDir):
        """Return the coordinate one lattice step from currentCoord, heading
        (previousDirection + relMoveDir) mod 4."""
        newDirection = (previousDirection + relMoveDir) % 4
        # BUG FIX: these comparisons used `is`, which tests object identity,
        # not equality.  It only worked via CPython's small-int cache (and is
        # a SyntaxWarning on modern Python); use == for correctness.
        if newDirection == 0:
            return (currentCoord[0]+1, currentCoord[1])
        elif newDirection == 1:
            return (currentCoord[0], currentCoord[1]+1)
        elif newDirection == 2:
            return (currentCoord[0]-1, currentCoord[1])
        else:
            return (currentCoord[0], currentCoord[1]-1)

    def run(self):
        """Attempt chains forever; enqueue each success, return once the
        queue holds nChains chains."""
        #-- Initialize chain to start at (0,0) and move to (0,1) (i.e., move up)
        while True:
            chainCoords = collections.deque([(0,0)], maxlen=self.nSteps+1)
            coord = (0,1)
            chainCoords.append(coord)
            previousDirection = 1
            thisChainLen = 1
            # NOTE(review): previousDirection is never updated after a step,
            # so each turn is relative to "up", not the last heading.
            while thisChainLen < self.nSteps:
                if self.chainsQueue.qsize() >= self.nChains:
                    return
                relMoveDir = self.changesInDir[
                    np.random.randint(0,self.nChangesInDir)]
                coord = self.step(previousDirection, coord, relMoveDir)
                if coord in chainCoords:
                    break          # self-intersection: abandon this attempt
                chainCoords.append(coord)
                thisChainLen += 1
                if thisChainLen == self.nSteps:
                    self.chainsQueue.put(chainCoords)
                    break
def createChainWorker(chainsQueue, nSteps, changesInDir=(0,-1,+1), nChains=1):
    """Direction is direction of travel; there are 4 directions, so
    right=0, up=1, left=2, and down=3. By default the only allowed transitions
    are
        direction -> direction + 1 (modulo 4)
        direction -> direction - 1 (modulo 4)
        direction -> direction
    but the changesInDir allows the user to specify what changes are allowed.
    Note that each is chosen with equal probability.
    Next, it is checked that the coordinate isn't already in the chain;
    if it is in the chain, then None is returned; otherwise, the algo repeats
    until a chain of nSteps is reached.

    Runs forever, putting each successfully built chain on chainsQueue;
    intended as a multiprocessing.Process target that the parent terminates.
    (nChains is accepted for signature compatibility; this function itself
    never stops early.)
    """
    # TODO: shared "tries" object contains single int, gets incremented
    #       for each try
    # Cleanup: removed the no-op self-assignments (chainsQueue = chainsQueue,
    # nSteps = nSteps, nChains = nChains) left over from the class version.
    nChangesInDir = len(changesInDir)
    #-- Initialize chain to start at (0,0) and move to (0,1) (i.e., move up)
    while True:
        chainCoords = collections.deque([(0,0), (0,1)])
        coord = (0,1)
        previousDirection = 1
        thisChainLen = 1
        # NOTE(review): previousDirection is never updated after a step, so
        # each turn is relative to "up", not the last heading.
        while thisChainLen < nSteps:
            relMoveDir = changesInDir[
                np.random.randint(0,nChangesInDir)]
            coord = step(previousDirection, coord, relMoveDir)
            if coord in chainCoords:
                break          # self-intersection: abandon this attempt
            chainCoords.append(coord)
            thisChainLen += 1
            if thisChainLen == nSteps:
                chainsQueue.put(chainCoords)
                break
def reptateChainWorker(chainsToReptate, lenSq, nChainsToCompute):
    """Worker: take one chain off chainsToReptate, reptate it, record the
    squared length of each moved child, and requeue children while fewer than
    nChainsToCompute lengths have been collected.

    NOTE(review): this function appears to be unused/vestigial -- it assumes
    chain objects expose a reptate() method returning (childChain, operation)
    pairs, which the deque-based chains elsewhere in this file do not.
    """
    chain = chainsToReptate.get()
    result = chain.reptate()
    # BUG FIX: the loop iterated over the undefined name `childChains`
    # (NameError); `result` is the value that holds the pairs.
    for (childChain, operation) in result:
        if operation == 'move':
            lenSq.put(measureChain(childChain))
        # BUG FIX: multiprocessing.Queue has no .len(); qsize() is the
        # intended call.
        if lenSq.qsize() < nChainsToCompute:
            chainsToReptate.put(childChain)
def simpleCreateChain(nSteps=5, changesInDir=(-1,0,1)):
    """Retry simpleAttemptToCreateChain until a self-avoiding chain of
    nSteps succeeds, and return it as a deque with maxlen=nSteps+1."""
    while True:
        chain = simpleAttemptToCreateChain(nSteps, changesInDir=changesInDir)
        # Idiom fix: compare to None with `is not`, not `!=`.
        if chain is not None:
            break
    return collections.deque(chain, maxlen=nSteps+1)
def createChainParallel(nSteps=60, nChains=1, changesInDir=(-1,0,1), nProcs=4):
    """Spawn nProcs createChainWorker processes, collect a chain from the
    shared queue, terminate the workers, and return the collected chains.

    NOTE(review): despite the nChains parameter, only ONE chainsQueue.get()
    is executed (the while-loops around it are commented out), so at most one
    chain is ever returned -- confirm before using nChains > 1.
    """
    chainsQueue = Queue()
    args = (chainsQueue, nSteps, changesInDir, nChains)
    ##pool = Pool(processes=nProcs)
    #kwargs = {'nSteps': nSteps,
    #          'changesInDir': changesInDir,
    #          'nChains': nChains,
    #          'chainsQueue': chainsQueue
    #         }
    #for procN in range(nProcs):
    #    #pool.apply_async(CreateChainWorkerClass, kwds=kwargs)
    #    CreateChainWorkerClass, kwds=kwargs)
    #while chainsQueue.qsize() < nChains:
    #    time.sleep(0.2)
    #chains = []
    #while not chainsQueue.empty():
    #    chains.append(chainsQueue.get())
    procs = []
    for n in range(nProcs):
        procs.append(Process(target=createChainWorker,args=args))
    # start every worker; get() below blocks until one of them succeeds
    [proc.start() for proc in procs]
    #while chainsQueue.qsize() < nChains:
    #    time.sleep(0.1)
    chains = []
    #while not chainsQueue.empty():
    #while len(chains) < nChains:
        #time.sleep(0.5)
    chains.append(chainsQueue.get())
    # kill the still-looping workers once a chain has been retrieved
    [proc.terminate() for proc in procs]
    return chains
#class chain:
# def __init__(self, nSteps, initialChain=None):
# self.moveForward = True
# if initialChain == None:
# self.nSteps = nSteps
# self.
#
# self.coordinates = collections.deque(coords, maxlen=nSteps)
class reptationChain90:
    """
    90-degree-only reptation chain of length nSteps
    """
    # NOTE(review): unfinished stub -- initialChain is accepted but ignored,
    # and reptate() is not implemented.  See the `snake` class below for the
    # working reptation implementation.
    def __init__(self, nSteps, initialChain):
        # number of steps in the chain; initialChain is currently unused
        self.nSteps = nSteps
    def reptate(self):
        # not yet implemented
        pass
def createChainReptation(nSteps):
    """State is taken to be direction of travel; there are 4 directions, so
    four states: right (0), up (1), left (2), and down (3). The only allowed
    transitions are
        state -> state + 1 (modulo 4)
        state -> state - 1 (modulo 4)
        state -> state
    Then, it must be checked that the coordinate isn't already in the chain;
    if it is, then None is returned; otherwise, the algo repeats until a
    chain of nSteps is reached.

    Returns a list of nSteps+1 (x, y) tuples on success, else None.
    """
    #-- Initialize chain to start at (0,0) and move to (0,1) (i.e., move up)
    # (Cleanup: the original initialized chainCoords twice on consecutive lines.)
    chainCoords = [(0,0)]
    coord = (0,1)
    chainCoords.append(coord)
    state = 1
    length = 1
    while True:
        randVal = np.random.randint(low=-1, high=2)
        state = (state + randVal) % 4
        # BUG FIX: the original compared `state is 0` etc.  After the
        # arithmetic above, state is a NumPy integer, for which identity
        # comparison with a Python int literal is ALWAYS False -- so no
        # branch ever fired, coord never moved, and the first collision
        # check always returned None.  Equality (==) is the correct test.
        if state == 0:
            coord = (coord[0]+1, coord[1])
        elif state == 1:
            coord = (coord[0], coord[1]+1)
        elif state == 2:
            coord = (coord[0]-1, coord[1])
        elif state == 3:
            coord = (coord[0], coord[1]-1)
        if coord in chainCoords:
            return None
        chainCoords.append(coord)
        length += 1
        if length == nSteps:
            return chainCoords
def coordsFromAbsDir(absdir):
    """Convert an array of absolute step directions (0=right, 1=up, 2=left,
    3=down) into cumulative (x, y) lattice coordinates.  The walk starts at
    (0, 0) with an implicit first move up, yielding len(absdir)+1 points.

    BUG FIX: zeros/ones/argwhere/cumsum were referenced unqualified, but the
    only numpy import in effect is `import numpy as np` (the pylab star
    import is commented out at the top of the file), so every call raised
    NameError; all helpers are now qualified with np.  The unused local
    `offset` was removed.
    """
    nsteps = len(absdir)
    xincr = np.zeros(nsteps+1, dtype=int)
    yincr = np.ones(nsteps+1, dtype=int)
    # NOTE(review): yincr defaults to 1 everywhere, so steps with direction
    # 0 or 2 also advance y by 1 (diagonal moves).  Possibly np.zeros with
    # only yincr[0] = 1 was intended -- confirm against the plotting output.
    xincr[np.argwhere(absdir==0)+1] = 1
    xincr[np.argwhere(absdir==2)+1] = -1
    yincr[np.argwhere(absdir==1)+1] = 1
    yincr[np.argwhere(absdir==3)+1] = -1
    x = np.cumsum(xincr)
    y = np.cumsum(yincr)
    return x, y
def plotSnakeAbsDir(absdir):
    """Plot a snake specified by its absolute step directions.

    BUG FIX: the original called the undefined name `coordsFromDir`
    (NameError; the converter defined above is coordsFromAbsDir) and passed
    its (x, y) result tuple as a single argument to the two-parameter
    plotSnakeXY; the tuple is now unpacked.
    """
    x, y = coordsFromAbsDir(absdir)
    plotSnakeXY(x, y)
def plotSnakeXY(x, y):
    """Plot a snake path from parallel x and y coordinate sequences,
    marking the start point with a large black dot and hiding the frame.

    BUG FIX: subplots/plot/axis were called unqualified, but the pylab star
    import that would provide them is commented out at the top of the file,
    so each call raised NameError; they are now routed through plt / the
    Axes object.  dict.itervalues() (Python-2-only) was replaced by
    .values(), which works on both 2 and 3.
    """
    fig, ax = plt.subplots()
    ax.plot(x, y, 'r-o', linewidth=3, markersize=6)
    ax.plot(x[0], y[0], 'ko', markersize=10)   # start point marker
    #ax.set_xlim(min(x)-2, max(x)+2)
    #ax.set_ylim(min(y)-2, max(y)+2)
    ax.axis('image')
    # hide the plot frame
    for spine in ax.spines.values():
        spine.set_visible(False)
    ax.set_xlim(min(x)-2, max(x)+2)
    ax.set_ylim(min(y)-2, max(y)+2)
def plotSnakeCoord(coords):
    """Plot a snake given as a sequence of (x, y) coordinate pairs."""
    xs = [pair[0] for pair in coords]
    ys = [pair[1] for pair in coords]
    plotSnakeXY(xs, ys)
def newSnake1(nSteps=10):
    """Generate a random (not necessarily self-avoiding) walk of nSteps
    relative turns and return its (x, y) coordinate arrays.

    BUG FIX: `random`, `mod` and `cumsum` were referenced unqualified (the
    pylab star import is commented out, so these raised NameError) and the
    undefined `coordsFromDir` was called instead of coordsFromAbsDir.  The
    computed coordinates were also discarded; they are now returned (the
    original implicitly returned None, so callers that ignored the result
    are unaffected).
    """
    #reldir = (random.random(nSteps)*2).astype(int)-1
    reldir = np.random.randint(-1, 2, nSteps)    # relative turns in {-1,0,1}
    absdir = np.mod(1 + np.cumsum(reldir), 4)    # absolute headings, starting up
    x, y = coordsFromAbsDir(absdir)
    return x, y
def newSnake2(nSteps=10):
    # NOTE(review): unimplemented placeholder for an alternative generator.
    pass
class snake:
    """Self-avoiding random walk evolved by reptation.

    The walk lives on the 2D square lattice.  Each reptate() call proposes a
    new head coordinate one step (in a direction drawn from validDirs,
    relative to the current heading) from the current head; on collision
    with the body, the head and tail swap roles instead of moving.  R2
    accumulates the squared end-to-end distance after every move.
    """
    def __init__(self, nsteps, validDirs=(-1,1)):
        #-- Use a deque as a circular buffer to store the coords
        self.coords = deque(maxlen=nsteps+1)
        # initial configuration: a straight vertical chain (0,0)..(0,nsteps)
        [ self.coords.append((0,y)) for y in range(nsteps+1) ]
        # history of squared end-to-end distances; the straight chain has
        # R^2 = nsteps**2
        self.R2 = [nsteps**2]
        #-- This is either -1 (points at most-recently-added element)
        #   or 0 (points at oldest element)
        self.forward = True
        # c1/c2 index the head and the element just behind it; c_end is the tail
        self.c1 = -1
        self.c2 = -2
        self.c_end = 0
        # allowed relative turns per move (default: ±90 degrees only)
        self.validDirs = validDirs
        self.nValidDirs = len(validDirs)
    def plot(self):
        # Draw the snake head-to-tail.
        if self.forward:
            plotSnakeCoord(self.coords)
        else:
            # NOTE(review): rc aliases self.coords, so reverse() mutates the
            # snake's own deque in place -- confirm a copy was not intended.
            rc = self.coords
            rc.reverse()
            plotSnakeCoord(rc)
    def stats(self):
        # Return (and cache on self.meanR2) the mean recorded R^2.
        self.meanR2 = np.mean(self.R2)
        return self.meanR2
    def reptate(self):
        # Recover the current heading from the last two coordinates
        # (0=right, 1=up, 2=left, 3=down).
        dx = self.coords[self.c1][0]-self.coords[self.c2][0]
        if dx == 1:
            previousDir = 0
        elif dx == -1:
            previousDir = 2
        elif self.coords[self.c1][1]-self.coords[self.c2][1] == 1:
            previousDir = 1
        else:
            previousDir = 3
        # Propose a turn drawn uniformly from validDirs.
        proposedDir = (previousDir + \
                       self.validDirs[np.random.randint(0,self.nValidDirs)]) % 4
        if proposedDir == 0:
            proposedCoord = (self.coords[self.c1][0]+1,self.coords[self.c1][1])
        elif proposedDir == 1:
            proposedCoord = (self.coords[self.c1][0],self.coords[self.c1][1]+1)
        elif proposedDir == 2:
            proposedCoord = (self.coords[self.c1][0]-1,self.coords[self.c1][1])
        else:
            proposedCoord = (self.coords[self.c1][0],self.coords[self.c1][1]-1)
        #-- Exchange head and tail of snake
        if proposedCoord in self.coords:
            # collision: flip direction of travel and re-point the indices
            self.forward = not self.forward
            if self.forward:
                self.c1 = -1
                self.c2 = -2
                self.c_end = 0
            else:
                self.c1 = 0
                self.c2 = 1
                self.c_end = -1
            # configuration unchanged, so R^2 repeats its last value
            self.R2.append(self.R2[-1])
        #-- Or prepand / append new coord
        else:
            # the deque's maxlen drops the tail automatically on append
            if self.forward:
                self.coords.append(proposedCoord)
            else:
                self.coords.appendleft(proposedCoord)
            #print self.coords[self.c1], self.coords[self.c2]
            # record the new squared end-to-end distance
            self.R2.append((self.coords[self.c1][0]
                            -self.coords[self.c_end][0])**2+
                           (self.coords[self.c1][1]
                            -self.coords[self.c_end][1])**2)
#def measureChain(chain):
# """Measures the Euclidean distance from the startpoint to endpoint of
# a chain"""
# return (chain[-1][0] - chain[0][0])**2 + (chain[-1][1] - chain[0][1])**2
# Shared keyword arguments for the smartFormat() calls in the LaTeX helpers below.
formatDic = {'sigFigs': 4, 'demarc': "", 'threeSpacing': False, 'rightSep':""}
def powerLaw(x, power, multFact, offset):
    """Power-law model  multFact * x**power + offset  (vectorized over x)."""
    scaled = multFact * (x**power)
    return scaled + offset
def powerLawLatex(power, multFact=1, offset=0, pcov=None):
    """Render the fitted power law  multFact * N**power + offset  as a LaTeX
    string; pcov is accepted for signature compatibility but unused."""
    signedOffset = smartFormat(offset, alwaysShowSign=True, **formatDic)
    # guarantee an explicit leading sign on the offset term
    if signedOffset[0] not in ("+", "-"):
        signedOffset = "+" + signedOffset
    pieces = [
        r"$",
        smartFormat(multFact, **formatDic),
        r" \cdot N^{",
        smartFormat(power, **formatDic),
        r"} ",
        signedOffset,
        r"$",
    ]
    return "".join(pieces)
def exponential(x, expExponent, multFact=1):
    """Exponential model  multFact * exp(expExponent * x)  (vectorized)."""
    scaledX = np.array(x) * expExponent
    return multFact * np.exp(scaledX)
def exponentialLatex(expExponent, multFact=1, pcov=None):
    """Render  multFact * e**(expExponent*N)  as a LaTeX string; pcov is
    accepted for signature compatibility but unused."""
    coeff = smartFormat(multFact, **formatDic)
    exponent = smartFormat(expExponent, **formatDic)
    return r"$" + coeff + r"\cdot e^{" + exponent + r"\cdot N}$"
def expPower(x, expExponent, powerLawExp, multFact):
    """Combined model  multFact * exp(expExponent*x) * x**powerLawExp
    (vectorized over x)."""
    xs = np.array(x)
    growth = np.exp(xs * expExponent)
    return multFact * growth * xs**powerLawExp
def expPowerLatex(expExponent, powerLawExp, multFact, pcov=None):
    """Render  multFact * e**(expExponent*N) * N**powerLawExp  as a LaTeX
    string; pcov is accepted for signature compatibility but unused."""
    parts = (
        r"$", smartFormat(multFact, **formatDic),
        r"\cdot e^{", smartFormat(expExponent, **formatDic),
        r"\cdot N}\cdot N^{", smartFormat(powerLawExp, **formatDic),
        r"}$",
    )
    return "".join(parts)
class SimulationData:
    """Plain attribute bag: every keyword argument passed to the constructor
    becomes an instance attribute (pickleable checkpoint payload)."""
    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)
class Simulation:
    """Driver for the self-avoiding random-walk experiment: runs the
    simulation, curve-fits the results, and plots them.

    All experiment state lives in a pickleable SimulationData bag (self.sd)
    so long runs can be checkpointed with saveState()/loadState().

    NOTE: this class (like the rest of the module) targets Python 2 -- see
    the print statements below and the cPickle import at the top of the file.
    """
    def __init__(self):
        self.sd = SimulationData()
        # flags guarding the postproc()/plotResults() ordering
        self.sd.simulationCompleted = False
        self.sd.postprocCompleted = False
        # default checkpoint file used by saveState()/loadState()
        self.stateFilename = "p7x28_state.pk"

    def saveState(self, filename=None):
        """Pickle self.sd to filename (default: self.stateFilename)."""
        if filename == None:
            filename = self.stateFilename
        with open(filename, 'wb') as stateFile:
            pickle.dump(self.sd, stateFile, -1)

    def loadState(self, filename=None):
        """Restore self.sd from a pickle written by saveState()."""
        if filename == None:
            filename = self.stateFilename
        with open(filename, 'rb') as stateFile:
            self.sd = pickle.load(stateFile)

    def runSimulation(self, targetSuccesses=10, stepsRange=(4,50),
                      plotting=False):
        """For each chain length N in stepsRange, keep attempting
        self-avoiding chains until targetSuccesses succeed; record the
        success ratio, averaged chain-length statistic, and wall-clock time
        per success.  Checkpoints to disk at most every 5 minutes.
        """
        #-- Reset state variables for a new simulation run
        self.sd.simulationCompleted = False
        self.sd.postprocCompleted = False
        timeLastSaved = time.time()
        self.sd.targetSuccesses = targetSuccesses
        self.sd.stepsInChains = range(stepsRange[0],stepsRange[1])
        self.sd.allChainFinalCoords = []
        self.sd.allMeanChainFinalCoords = []
        self.sd.meanChainFinalCoords = []
        self.sd.chainSquareLengthAvg = []
        self.sd.successRatio = []
        self.sd.timingAvg = []
        if plotting:
            self.fig1 = plt.figure(1)
            self.fig1.clf()
            # NOTE(review): `fig1` and `ax1` below are missing the `self.`
            # prefix, so this branch raises NameError when plotting=True.
            self.ax1 = fig1.add_subplot(111)
            line, = ax1.plot([], [], 'ko-', lw=2)
            self.ax1.set_xlim(-20,20)
            self.ax1.set_ylim(-20,20)
            ax1.axis('image')
            plt.draw()
        for stepsThisChain in self.sd.stepsInChains:
            startTime = time.time()
            successfulChains = []
            chainSquareLengths = []
            chainFinalCoords = []
            meanChainFinalCoord = []
            nSuccesses = 0
            trialN = 0
            while nSuccesses < self.sd.targetSuccesses:
                trialN += 1
                chain = simpleAttemptToCreateChain(stepsThisChain,(-1,0,1))
                if chain == None:
                    # attempt self-intersected; count the trial and retry
                    continue
                successfulChains.append(chain)
                chain = np.array(chain)
                # NOTE(review): measureChain() already returns the SQUARED
                # end-to-end distance, so **2 here records R^4, not R^2 --
                # confirm before interpreting chainSquareLengthAvg.
                chainSquareLengths.append(measureChain(chain)**2)
                chainFinalCoords.append(chain[-1,:])
                nSuccesses += 1
                if plotting:
                    line.set_data(chain[:,0],chain[:,1])
                    self.ax1.set_xlim(-20,20)
                    self.ax1.set_ylim(-20,20)
                    plt.draw()
                    time.sleep(0.005)
            chainFinalCoords = np.array(chainFinalCoords)
            self.sd.allChainFinalCoords.append(chainFinalCoords)
            # NOTE(review): meanChainFinalCoord is still the empty list here;
            # the computed mean goes into sd.meanChainFinalCoord (singular)
            # on the following line and is overwritten every pass.
            self.sd.allMeanChainFinalCoords.append(meanChainFinalCoord)
            self.sd.meanChainFinalCoord = np.mean(chainFinalCoords, 0)
            self.sd.chainSquareLengthAvg.append(np.mean(chainSquareLengths))
            self.sd.successRatio.append(nSuccesses / trialN)
            self.sd.timingAvg.append( (time.time()-startTime)/nSuccesses )
            sys.stdout.write("\nstepsThisChain = " + str(stepsThisChain) + "\n")
            sys.stdout.write("  nSuccesses/nTrials = " + str(nSuccesses) + "/"
                             + str(trialN) + " = "
                             + str(self.sd.successRatio[-1]) + "\n")
            sys.stdout.write("  time/success = " +
                             str(self.sd.timingAvg[-1]) + "\n")
            sys.stdout.flush()
            # checkpoint at most every 5 minutes
            if (time.time() - timeLastSaved) > 60*5:
                self.saveState()
                timeLastSaved = time.time()
        self.sd.allMeanChainFinalCoords = \
                np.array(self.sd.allMeanChainFinalCoords)
        #-- TODO: mean of final-position vector (r_N vector)
        #np.sqrt(allMeanChainFinalCoords[:,0]**2+
        #        allMeanChainFinalCoords[:,1]**2)
        self.sd.simulationCompleted = True
        self.saveState()

    def postproc(self):
        """Perform curve fitting to the data"""
        #-- Update state
        self.sd.postprocCompleted = False

        #-- Check that simulation data is present
        if not self.sd.simulationCompleted:
            raise Exception("No simulation run; cannot perform curve fit!")

        #-- Same x data is used for *all* the below curve fits
        x = self.sd.stepsInChains

        #============================================================
        # Fit success fraction with const * exponential * power law
        #============================================================
        y = self.sd.successRatio
        #-- Weight variance by data size to make small data points equally
        #   important to fit to as large data points
        sigma = list(np.array(y))
        p0 = (-0.117, 0.1, 2)
        popt1, pcov1 = curve_fit(f=expPower, xdata=x, ydata=y, sigma=sigma,
                                 p0=p0)
        self.sd.fit1 = expPower(x, *popt1)
        self.sd.fit1eqn = expPowerLatex(*popt1)
        print popt1, pcov1, "\n"

        #============================================================
        # TODO: Fit the final position data
        #============================================================
        #y = (self.sd.chainLengthAvg)
        #sigma = list(np.array(y))
        #popt2, pcov2 = curve_fit(powerLaw, x, y, sigma=sigma)
        #self.sd.fit2 = powerLaw(x, *popt2)
        #self.sd.fit2eqn = powerLawLatex(*popt2)
        #print popt2, pcov2, "\n"

        #============================================================
        # Fit R_N^2 with const * power-law + const
        #============================================================
        y = self.sd.chainSquareLengthAvg
        #-- Weight variance by data size to make small data points equally
        #   important to fit to as large data points
        sigma = list(np.array(y))
        popt3, pcov3 = curve_fit(f=powerLaw, xdata=x, ydata=y, sigma=sigma)
        self.sd.fit3 = powerLaw(x, *popt3)
        self.sd.fit3eqn = powerLawLatex(*popt3)
        print popt3, pcov3, "\n"

        #============================================================
        # Exponential fit to wall-clock time (not as good a fit as
        # exp*power, so this is commented out)
        #============================================================
        #y = (self.sd.timingAvg)
        ##p0 = (0.0985, 0.1, 1.65e-5)
        #p0 = (0.0985, 1)
        #sigma = list(np.array(y))
        #popt4, pcov4 = curve_fit(f=exponential, xdata=x, ydata=y, sigma=sigma,
        #                         p0=p0, )
        #self.sd.fit4 = exponential(x, *popt4)
        #self.sd.fit4eqn = exponentialLatex(*popt4)
        #print popt4, pcov4, "\n"

        #============================================================
        # Exponential * power-law fit to wall-clock time
        #============================================================
        y = self.sd.timingAvg
        #-- Initial guess
        p0 = (0.129, 0, 2.981e-3)
        #-- Weight variance by data size to make small data points equally
        #   important to fit to as large data points
        sigma = list(np.array(y))
        popt4, pcov4 = curve_fit(f=expPower, xdata=x, ydata=y, sigma=sigma,
                                 p0=p0, )
        self.sd.fit4 = expPower(x, *popt4)
        self.sd.fit4eqn = expPowerLatex(*popt4)
        print popt4, pcov4, "\n"

        #-- Update state
        self.sd.postprocCompleted = True

    def plotResults(self, savePlot=True):
        """Plot the data and the fit curves"""
        if not self.sd.simulationCompleted:
            raise Exception("No simulation has been run; cannot plot results!")

        # run the curve fits first if they have not been done yet
        if not self.sd.postprocCompleted:
            self.postproc()

        self.fig2 = plt.figure(2, figsize=(7,12), dpi=80)
        self.fig2.clf()

        # panel 1: success fraction vs N (log y) with exp*power fit
        self.ax21 = self.fig2.add_subplot(311)
        self.ax21.plot(self.sd.stepsInChains, self.sd.successRatio,
                       'bo', label="data", markersize=4)
        self.ax21.plot(self.sd.stepsInChains, self.sd.fit1,
                       'r-', label=self.sd.fit1eqn, linewidth=2, alpha=0.75)
        self.ax21.set_title(
            "Non-intersecting 2D random-walk chains;" +
            " stop condition: " + str(self.sd.targetSuccesses) +
            " successfully-built chains")
        self.ax21.set_ylabel(r"Success fraction $f(N)$")
        self.ax21.set_yscale('log')
        self.ax21.grid(which='major', b=True)
        self.ax21.legend(loc="best", fancybox=True, shadow=True)

        #-- TODO: average of final position plot
        #self.ax22 = fig2.add_subplot(412)
        #self.ax22.plot(self.sd.stepsInChains, self.sd.chainLengthAvg,
        #               'bo', label="data", markersize=4)
        #self.ax22.plot(self.sd.stepsInChains, self.sd.fit2,
        #               'r-', label=self.sd.fit2eqn, linewidth=2, alpha=0.75)
        #self.ax22.set_ylabel(r"$\langle R_N \rangle$")
        ##self.ax22.set_yscale('log')
        #ax22.grid(which='major', b=True)
        #ax22.legend(loc="best", fancybox=True, shadow=True)

        # panel 2: <R_N^2> vs N with power-law fit
        self.ax23 = self.fig2.add_subplot(312)
        self.ax23.plot(self.sd.stepsInChains, self.sd.chainSquareLengthAvg,
                       'bo', label="data", markersize=4)
        self.ax23.plot(self.sd.stepsInChains, self.sd.fit3,
                       'r-', label=self.sd.fit3eqn, linewidth=2, alpha=0.75)
        self.ax23.set_ylabel(r"$\langle R_N^2\rangle$")
        self.ax23.grid(which='major', b=True)
        self.ax23.legend(loc="upper left", fancybox=True, shadow=True)

        # panel 3: wall-clock time per success vs N (log y) with exp*power fit
        self.ax24 = self.fig2.add_subplot(313)
        self.ax24.plot(self.sd.stepsInChains, self.sd.timingAvg,
                       'bo', label="data", markersize=4)
        self.ax24.plot(self.sd.stepsInChains, self.sd.fit4,
                       'r-', label=self.sd.fit4eqn, linewidth=2, alpha=0.75)
        self.ax24.set_xlabel(r"Nmber of steps in walk, $N$")
        self.ax24.set_ylabel("Wall-clock time per successful chain (s)")
        self.ax24.set_yscale('log')
        self.ax24.grid(which='major', b=True)
        self.ax24.legend(loc="upper left", fancybox=True, shadow=True)

        self.fig2.tight_layout()

        if savePlot:
            self.fig2.savefig("2014-01-14_problem7x28_plots.pdf")
            self.fig2.savefig("2014-01-14_problem7x28_plots.png", dpi=120)

        plt.show()
if __name__ == "__main__":
    startTime = time.time()
    #-- Instantiate the Simulation object
    #sim = Simulation()
    ##-- Try to load the sim data from any previous run; if no data saved
    ##   to disk in the default location, run a new simulation
    ##try:
    ##    sim.loadState()
    ##except Exception as e:
    ##    print "Error({0}: {1}".format(e.errno, e.strerror)
    ##    #sim.runSimulation(targetSuccesses=10, stepsRange=(4,101))
    #sim.runSimulation(targetSuccesses=10, stepsRange=(5,30))
    ##-- *Always* perform post-processing and plotting (allows easy modification
    ##   of the postprocessing (curve fitting) and plotting routines
    ##   without needing to re-run the simulation, which can take hours)
    #sim.postproc()
    #sim.plotResults()
    ##print simpleCreateChain(nSteps=20)
    #-- Build chains: one 60-step chain in a single process
    chains = createChainParallel(nSteps=60, nProcs=1, nChains=1)
    #-- Report total wall-clock run time in seconds (Python-2 print statement)
    print time.time()-startTime
    #-- Write each chain's length to stdout, space-separated
    [wstdout(str(len(chain)) + " ") for chain in chains]
    wstdout("\n")
|
|
#!/usr/bin/env python
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Samsung Electronics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Trace a subunit stream in reasonable detail and high accuracy."""
import argparse
import datetime
import functools
import os
import re
import sys
import subunit
import testtools
# NOTE(mtreinish) on python3 anydbm was renamed dbm and the python2 dbm module
# was renamed to dbm.ndbm, this block takes that into account
try:
import anydbm as dbm
except ImportError:
import dbm
# Number of seconds in a day, used when flattening timedeltas by hand.
DAY_SECONDS = 60 * 60 * 24

# Module-level accumulators populated by show_outcome() as the subunit
# stream is processed: failed tests, and all tests keyed by worker number.
FAILS = []
RESULTS = {}
def total_seconds(timedelta):
    """Return the given timedelta expressed in seconds.

    NOTE(mtreinish): timedelta.total_seconds() is built in for python >= 2.7;
    this helper keeps the script working on older versions.
    """
    usec = (timedelta.days * DAY_SECONDS + timedelta.seconds) * 10 ** 6
    usec += timedelta.microseconds
    return usec / 10 ** 6
def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
    """Clean up the test name for display.

    By default we strip out the tags in the test because they don't help us
    in identifying the test that is run to it's result.

    Make it possible to strip out the testscenarios information (not to
    be confused with tempest scenarios) however that's often needed to
    identify generated negative tests.
    """
    def _drop_span(text, opener, closer):
        # Remove the first opener..closer span, but only when the opener is
        # not the very first character and the closer follows it.
        start = text.find(opener)
        end = text.find(closer)
        if start > 0 and end > start:
            return text[:start] + text[end + 1:]
        return text

    if strip_tags:
        name = _drop_span(name, '[', ']')
    if strip_scenarios:
        name = _drop_span(name, '(', ')')
    return name
def get_duration(timestamps):
    """Format the span between two timestamps as a 'S.UUUUUUs' string.

    Returns the empty string when either end of the span is missing.
    """
    start, end = timestamps
    if start and end:
        delta = end - start
        return '%d.%06ds' % (
            delta.days * DAY_SECONDS + delta.seconds, delta.microseconds)
    return ''
def find_worker(test):
    """Get the worker number.

    If there are no workers because we aren't in a concurrent environment,
    assume the worker number is 0.
    """
    prefix = 'worker-'
    workers = [tag for tag in test['tags'] if tag.startswith(prefix)]
    if workers:
        return int(workers[0][len(prefix):])
    return 0
# Print out stdout/stderr if it exists, always
def print_attachments(stream, test, all_channels=False):
    """Print out subunit attachments.

    Print out subunit attachments that contain content. This
    runs in 2 modes, one for successes where we print out just stdout
    and stderr, and an override that dumps all the attachments.
    """
    wanted = ('stdout', 'stderr')
    for raw_name, detail in test['details'].items():
        # NOTE(sdague): the subunit names are a little crazy, and actually
        # are in the form pythonlogging:'' (with the colon and quotes)
        name = raw_name.split(':')[0]
        if detail.content_type.type == 'test':
            detail.content_type.type = 'text'
        if (all_channels or name in wanted) and detail.as_text():
            title = "Captured %s:" % name
            underline = '~' * len(title)
            stream.write("\n%s\n%s\n" % (title, underline))
            # indent attachment lines 4 spaces to make them visually offset
            for line in detail.as_text().split('\n'):
                stream.write("    %s\n" % line)
def find_test_run_time_diff(test_id, run_time):
    """Return the percent change of run_time against the recorded average.

    Looks up the average runtime for test_id in testrepository's
    '.testrepository/times.dbm' database under the current working directory.

    :param test_id: test identifier to look up
    :param run_time: duration string produced by get_duration, e.g. '1.5s'
    :returns: percent difference as a float, or False when no usable
        average runtime is available.
    """
    times_db_path = os.path.join(os.getcwd(), '.testrepository', 'times.dbm')
    if not os.path.isfile(times_db_path):
        return False
    try:
        test_times = dbm.open(times_db_path)
    except Exception:
        return False
    try:
        # Missing key -> float(False) == 0.0, which fails the > 0 check below.
        avg_runtime = float(test_times.get(str(test_id), False))
    except (TypeError, ValueError):
        # BUG FIX: a corrupt/unparseable stored value used to raise out of
        # this helper; treat it as "no average available" instead.
        return False
    finally:
        # BUG FIX: the dbm handle was previously never closed (leak).
        test_times.close()
    if avg_runtime and avg_runtime > 0:
        run_time = float(run_time.rstrip('s'))
        return ((run_time - avg_runtime) / avg_runtime) * 100
    return False
def show_outcome(stream, test, print_failures=False, failonly=False,
                 threshold='0'):
    """Write a one-line outcome report for a single test to stream.

    Records every test into the global RESULTS map (keyed by worker) and
    failed tests into FAILS as a side effect.
    """
    global RESULTS
    status = test['status']
    # TODO(sdague): ask lifeless why on this?
    if status == 'exists':
        return

    worker = find_worker(test)
    name = cleanup_test_name(test['id'])
    duration = get_duration(test['timestamps'])

    RESULTS.setdefault(worker, []).append(test)

    # don't count the end of the return code as a fail
    if name == 'process-returncode':
        return

    if status == 'fail':
        FAILS.append(test)
        stream.write('{%s} %s [%s] ... FAILED\n' % (worker, name, duration))
        if not print_failures:
            print_attachments(stream, test, all_channels=True)
    elif not failonly:
        if status == 'success':
            out_string = '{%s} %s [%s' % (worker, name, duration)
            perc_diff = find_test_run_time_diff(test['id'], duration)
            if perc_diff and abs(perc_diff) >= abs(float(threshold)):
                sign = '+' if perc_diff > 0 else ''
                out_string += ' %s%.2f%%' % (sign, perc_diff)
            stream.write(out_string + '] ... ok\n')
            print_attachments(stream, test)
        elif status == 'skip':
            stream.write('{%s} %s ... SKIPPED: %s\n' % (
                worker, name, test['details']['reason'].as_text()))
        else:
            stream.write('{%s} %s [%s] ... %s\n' % (
                worker, name, duration, test['status']))
            if not print_failures:
                print_attachments(stream, test, all_channels=True)
    stream.flush()
def print_fails(stream):
    """Print summary failure report.

    Currently unused, however there remains debate on inline vs. at end
    reporting, so leave the utility function for later use.
    """
    if not FAILS:
        return
    stream.write("\n==============================\n")
    stream.write("Failed %s tests - output below:" % len(FAILS))
    stream.write("\n==============================\n")
    for failed in FAILS:
        test_id = failed['id']
        stream.write("\n%s\n" % test_id)
        stream.write("%s\n" % ('-' * len(test_id)))
        print_attachments(stream, failed, all_channels=True)
    stream.write('\n')
def count_tests(key, value):
    """Count recorded tests whose ``key`` field matches the regex ``value``."""
    matcher = re.compile(value)
    return sum(
        1
        for tests in RESULTS.values()
        for item in tests
        if key in item and matcher.search(item[key])
    )
def run_time():
    """Sum the reported duration of every recorded test, in seconds."""
    return sum(
        float(get_duration(test['timestamps']).strip('s'))
        for tests in RESULTS.values()
        for test in tests
    )
def worker_stats(worker):
    """Return (test_count, wall_clock_delta) for one worker's tests."""
    tests = RESULTS[worker]
    first_start = tests[0]['timestamps'][0]
    last_end = tests[-1]['timestamps'][1]
    return len(tests), last_end - first_start
def print_summary(stream, elapsed_time):
    """Write run totals and per-worker balance to stream."""
    stream.write("\n======\nTotals\n======\n")
    stream.write("Ran: %s tests in %.4f sec.\n" % (
        count_tests('status', '.*'), total_seconds(elapsed_time)))
    # Status breakdown, one line per outcome kind.
    for template, pattern in (
            (" - Passed: %s\n", '^success$'),
            (" - Skipped: %s\n", '^skip$'),
            (" - Expected Fail: %s\n", '^xfail$'),
            (" - Unexpected Success: %s\n", '^uxsuccess$'),
            (" - Failed: %s\n", '^fail$')):
        stream.write(template % count_tests('status', pattern))
    stream.write("Sum of execute time for each test: %.4f sec.\n" % run_time())

    # we could have no results, especially as we filter out the process-codes
    if not RESULTS:
        return
    stream.write("\n==============\nWorker Balance\n==============\n")
    for worker_id in range(max(RESULTS.keys()) + 1):
        if worker_id not in RESULTS:
            stream.write(
                " - WARNING: missing Worker %s! "
                "Race in testr accounting.\n" % worker_id)
        else:
            num, elapsed = worker_stats(worker_id)
            stream.write(" - Worker %s (%s tests) => %ss\n" %
                         (worker_id, num, elapsed))
def parse_args():
    """Build and evaluate the command line options for subunit-trace.

    :returns: the parsed argparse.Namespace
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--no-failure-debug', '-n', action='store_true',
                        dest='print_failures', help='Disable printing failure '
                        'debug information in realtime')
    parser.add_argument('--fails', '-f', action='store_true',
                        # BUG FIX: help text typo "proccesed" -> "processed"
                        dest='post_fails', help='Print failure debug '
                        'information after the stream is processed')
    parser.add_argument('--failonly', action='store_true',
                        dest='failonly', help="Don't print success items",
                        # Any value of TRACE_FAILONLY in the environment
                        # (even empty) enables fail-only mode by default.
                        default=(
                            os.environ.get('TRACE_FAILONLY', False)
                            is not False))
    parser.add_argument('--diff-threshold', '-t', dest='threshold',
                        help="Threshold to use for displaying percent change "
                             "from the avg run time. If one is not specified "
                             "the percent change will always be displayed")
    parser.add_argument('--no-summary', action='store_true',
                        # BUG FIX: garbled help text "after  completes"
                        help="Don't print the summary of the test run after "
                             "it completes")
    return parser.parse_args()
def main():
    """Entry point: trace the subunit stream on stdin and report outcomes.

    Exits 1 when no tests ran or any test failed, 0 otherwise.
    """
    args = parse_args()
    stream = subunit.ByteStreamToStreamResult(
        sys.stdin, non_subunit_name='stdout')
    # BUG FIX: args.threshold was parsed but never used — previously the
    # --diff-threshold option had no effect. Fall back to '0' (the
    # show_outcome default) when the option is not given, since
    # show_outcome calls float(threshold).
    outcomes = testtools.StreamToDict(
        functools.partial(show_outcome, sys.stdout,
                          print_failures=args.print_failures,
                          failonly=args.failonly,
                          threshold=args.threshold or '0'))
    summary = testtools.StreamSummary()
    result = testtools.CopyStreamResult([outcomes, summary])
    result = testtools.StreamResultRouter(result)
    # Route non-test content (attached files) straight to stdout.
    cat = subunit.test_results.CatFiles(sys.stdout)
    result.add_rule(cat, 'test_id', test_id=None)
    start_time = datetime.datetime.utcnow()
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    stop_time = datetime.datetime.utcnow()
    elapsed_time = stop_time - start_time
    if count_tests('status', '.*') == 0:
        print("The test run didn't actually run any tests")
        exit(1)
    if args.post_fails:
        print_fails(sys.stdout)
    if not args.no_summary:
        print_summary(sys.stdout, elapsed_time)
    exit(0 if summary.wasSuccessful() else 1)


if __name__ == '__main__':
    main()
|
|
#!/usr/bin/env python
"""Tests for hunts output plugins."""
import csv
import StringIO
import sys
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import email_alerts
from grr.lib import flags
from grr.lib import hunts
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
class OutputpluginsTest(test_lib.FlowTestsBaseclass):
  """Shared base class for the hunt output plugin tests below.

  Also used by TestLoader as the base_class selector for test discovery.
  """
  pass
class EmailPluginTest(OutputpluginsTest):
  """Tests email hunt output plugins."""

  def RunHunt(self, plugin_name, plugin_args):
    # Start a GenericHunt running GetFile on /tmp/evil.txt with the given
    # output plugin attached, drive it with mocked clients, then run the
    # cron flow so the output plugin actually processes the results.
    with hunts.GRRHunt.StartHunt(
        hunt_name="GenericHunt",
        flow_runner_args=rdfvalue.FlowRunnerArgs(flow_name="GetFile"),
        flow_args=rdfvalue.GetFileArgs(
            pathspec=rdfvalue.PathSpec(
                path="/tmp/evil.txt", pathtype=rdfvalue.PathSpec.PathType.OS)),
        regex_rules=[rdfvalue.ForemanAttributeRegex(
            attribute_name="GRR client",
            attribute_regex="GRR")],
        output_plugins=[rdfvalue.OutputPlugin(
            plugin_name=plugin_name,
            plugin_args=plugin_args)],
        client_rate=0, token=self.token) as hunt:
      hunt.Run()
      hunt.StartClients(hunt.session_id, self.client_ids)

    # Run the hunt.
    client_mock = test_lib.SampleHuntMock()
    test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)

    # Stop the hunt now.
    hunt.GetRunner().Stop()

    # Run cron flow that executes actual output plugins
    for _ in test_lib.TestFlowHelper("ProcessHuntResultsCronFlow",
                                     token=self.token):
      pass

    return hunt.urn

  def setUp(self):
    super(EmailPluginTest, self).setUp()
    # Set up 40 clients.
    self.client_ids = self.SetupClients(40)

  def testEmailPlugin(self):
    def SendEmail(address, sender, title, message, **_):
      # Capture outgoing mail instead of actually sending it.
      self.email_messages.append(dict(address=address, sender=sender,
                                      title=title, message=message))

    with utils.Stubber(email_alerts, "SendEmail", SendEmail):
      self.email_messages = []
      email_alerts.SendEmail = SendEmail

      email_address = "notify@%s" % config_lib.CONFIG["Logging.domain"]
      # email_limit=10 caps the number of notification mails sent.
      hunt_urn = self.RunHunt("EmailPlugin", rdfvalue.EmailPluginArgs(
          email=email_address, email_limit=10))

      hunt_obj = aff4.FACTORY.Open(hunt_urn, age=aff4.ALL_TIMES,
                                   mode="rw", token=self.token)
      # Schedule a second batch of 40 clients on the same hunt.
      self.client_ids = self.SetupClients(40)
      hunt_obj.StartClients(hunt_obj.session_id, self.client_ids)

      # Run the hunt.
      client_mock = test_lib.SampleHuntMock()
      test_lib.TestHuntHelper(client_mock, self.client_ids, False, self.token)

      # Run cron flow that executes actual output plugins
      for _ in test_lib.TestFlowHelper("ProcessHuntResultsCronFlow",
                                       token=self.token):
        pass

      # Stop the hunt now.
      hunt_obj.GetRunner().Stop()

      hunt_obj = aff4.FACTORY.Open(hunt_urn, age=aff4.ALL_TIMES,
                                   token=self.token)
      started, finished, errors = hunt_obj.GetClientsCounts()
      self.assertEqual(started, 40)
      self.assertEqual(finished, 40)
      self.assertEqual(errors, 20)

      collection = aff4.FACTORY.Open(hunt_urn.Add("Results"),
                                     mode="r", token=self.token)
      self.assertEqual(len(collection), 20)

      # Due to the limit there should only be 10 messages.
      self.assertEqual(len(self.email_messages), 10)

      for msg in self.email_messages:
        self.assertEqual(msg["address"], email_address)
        self.assertTrue(
            "%s got a new result" % hunt_obj.session_id.Add("Results")
            in msg["title"])
        self.assertTrue("fs/os/tmp/evil.txt" in msg["message"])

      # Once the limit is hit, the plugin announces it is going quiet.
      self.assertTrue("sending of emails will be disabled now"
                      in self.email_messages[-1]["message"])
class CSVOutputPluginTest(OutputpluginsTest):
  """Tests CSV hunt output plugins."""

  def RunHunt(self, plugin_args=None, responses=None,
              process_responses_separately=False):
    # Feed `responses` through a CSVOutputPlugin attached to a GenericHunt
    # and return (hunt_urn, plugin).
    if responses is None:
      responses = []

    with hunts.GRRHunt.StartHunt(
        hunt_name="GenericHunt",
        flow_runner_args=rdfvalue.FlowRunnerArgs(flow_name="GetFile"),
        flow_args=rdfvalue.GetFileArgs(pathspec=rdfvalue.PathSpec(
            path="/tmp/evil.txt", pathtype=rdfvalue.PathSpec.PathType.OS)),
        regex_rules=[
            rdfvalue.ForemanAttributeRegex(attribute_name="GRR client",
                                           attribute_regex="GRR"),
        ],
        client_rate=0, token=self.token) as hunt:
      hunt_urn = hunt.urn

    plugin_def = rdfvalue.OutputPlugin(
        plugin_name="CSVOutputPlugin",
        plugin_args=plugin_args)
    plugin = plugin_def.GetPluginForHunt(hunt)

    # We don't want to test the whole output plugins subsystem as it's
    # tested in its own tests. We only want to test logic specific to
    # ColumnIOHuntOutputPlugin.
    messages = []
    for response in responses:
      messages.append(rdfvalue.GrrMessage(source=self.client_id,
                                          payload=response))

    if process_responses_separately:
      for message in messages:
        plugin.ProcessResponses([message])
    else:
      plugin.ProcessResponses(messages)

    plugin.Flush()

    return (hunt_urn, plugin)

  def setUp(self):
    super(CSVOutputPluginTest, self).setUp()
    # A single test client is the source of all fake responses.
    self.client_id = self.SetupClients(1)[0]

  def testCSVPluginWithValuesOfSameType(self):
    # 10 StatEntry responses -> a single ExportedFile.csv with 10 rows.
    responses = []
    for i in range(10):
      responses.append(rdfvalue.StatEntry(
          aff4path=self.client_id.Add("/fs/os/foo/bar").Add(str(i)),
          pathspec=rdfvalue.PathSpec(path="/foo/bar"),
          st_mode=33184,
          st_ino=1063090,
          st_dev=64512L,
          st_nlink=1 + i,
          st_uid=139592,
          st_gid=5000,
          st_size=0,
          st_atime=1336469177,
          st_mtime=1336129892,
          st_ctime=1336129892))
    hunt_urn, _ = self.RunHunt(plugin_args=rdfvalue.CSVOutputPluginArgs(
        output_dir=rdfvalue.RDFURN("aff4:/tmp/csv")), responses=responses)

    plugin_output_files = list(aff4.FACTORY.Open(
        "aff4:/tmp/csv", token=self.token).ListChildren())
    self.assertListEqual(plugin_output_files,
                         [rdfvalue.RDFURN("aff4:/tmp/csv/ExportedFile.csv")])

    output_file = aff4.FACTORY.Open(
        plugin_output_files[0], aff4_type="AFF4Image", token=self.token)
    contents = output_file.Read(sys.maxint)
    parsed_output = list(csv.DictReader(StringIO.StringIO(contents)))

    self.assertEqual(len(parsed_output), 10)
    for i in range(10):
      # Metadata columns come from the client; value columns from StatEntry.
      self.assertEqual(parsed_output[i]["metadata.client_urn"], self.client_id)
      self.assertEqual(parsed_output[i]["metadata.hostname"], "Host-0")
      self.assertEqual(parsed_output[i]["metadata.mac_address"], "aabbccddee00")
      self.assertEqual(parsed_output[i]["metadata.source_urn"],
                       hunt_urn.Add("Results"))
      self.assertEqual(parsed_output[i]["urn"],
                       self.client_id.Add("/fs/os/foo/bar").Add(str(i)))
      self.assertEqual(parsed_output[i]["st_mode"], "33184")
      self.assertEqual(parsed_output[i]["st_ino"], "1063090")
      self.assertEqual(parsed_output[i]["st_dev"], "64512")
      self.assertEqual(parsed_output[i]["st_nlink"], str(1 + i))
      self.assertEqual(parsed_output[i]["st_uid"], "139592")
      self.assertEqual(parsed_output[i]["st_gid"], "5000")
      self.assertEqual(parsed_output[i]["st_size"], "0")
      self.assertEqual(parsed_output[i]["st_atime"], "2012-05-08 09:26:17")
      self.assertEqual(parsed_output[i]["st_mtime"], "2012-05-04 11:11:32")
      self.assertEqual(parsed_output[i]["st_ctime"], "2012-05-04 11:11:32")
      self.assertEqual(parsed_output[i]["st_blksize"], "0")
      self.assertEqual(parsed_output[i]["st_rdev"], "0")
      self.assertEqual(parsed_output[i]["symlink"], "")

  def testCSVPluginWithValuesOfMultipleTypes(self):
    # Mixed StatEntry/Process responses -> one CSV file per exported type.
    hunt_urn, _ = self.RunHunt(
        plugin_args=rdfvalue.CSVOutputPluginArgs(
            output_dir=rdfvalue.RDFURN("aff4:/tmp/csv")),
        responses=[
            rdfvalue.StatEntry(
                aff4path=self.client_id.Add("/fs/os/foo/bar"),
                pathspec=rdfvalue.PathSpec(path="/foo/bar")),
            rdfvalue.Process(pid=42)],
        process_responses_separately=True)

    plugin_output_files = sorted(list(aff4.FACTORY.Open(
        "aff4:/tmp/csv", token=self.token).ListChildren()))
    self.assertListEqual(plugin_output_files,
                         [rdfvalue.RDFURN("aff4:/tmp/csv/ExportedFile.csv"),
                          rdfvalue.RDFURN("aff4:/tmp/csv/ExportedProcess.csv")])

    output_file = aff4.FACTORY.Open(
        plugin_output_files[0], aff4_type="AFF4Image", token=self.token)
    parsed_output = list(csv.DictReader(
        StringIO.StringIO(output_file.Read(sys.maxint))))
    self.assertEqual(len(parsed_output), 1)
    self.assertEqual(parsed_output[0]["metadata.client_urn"], self.client_id)
    self.assertEqual(parsed_output[0]["metadata.hostname"], "Host-0")
    self.assertEqual(parsed_output[0]["metadata.mac_address"], "aabbccddee00")
    self.assertEqual(parsed_output[0]["metadata.source_urn"],
                     hunt_urn.Add("Results"))
    self.assertEqual(parsed_output[0]["urn"],
                     self.client_id.Add("/fs/os/foo/bar"))

    output_file = aff4.FACTORY.Open(
        plugin_output_files[1], aff4_type="AFF4Image", token=self.token)
    parsed_output = list(csv.DictReader(
        StringIO.StringIO(output_file.Read(sys.maxint))))
    self.assertEqual(len(parsed_output), 1)
    self.assertEqual(parsed_output[0]["metadata.client_urn"], self.client_id)
    self.assertEqual(parsed_output[0]["metadata.hostname"], "Host-0")
    self.assertEqual(parsed_output[0]["metadata.mac_address"], "aabbccddee00")
    self.assertEqual(parsed_output[0]["metadata.source_urn"],
                     hunt_urn.Add("Results"))
    self.assertEqual(parsed_output[0]["pid"], "42")

  def testCSVPluginGeneratesTemporaryNameIfOutputDirIsNotSpecified(self):
    # Without output_dir the plugin must pick its own output location.
    _, plugin = self.RunHunt(responses=[rdfvalue.Process(pid=42)])

    self.assertTrue("ExportedProcess" in plugin.state.files_by_type)
    output_file = aff4.FACTORY.Open(
        plugin.state.files_by_type["ExportedProcess"].urn,
        aff4_type="AFF4Image", token=self.token)
    parsed_output = list(csv.DictReader(
        StringIO.StringIO(output_file.Read(sys.maxint))))
    self.assertEqual(len(parsed_output), 1)
class TestLoader(test_lib.GRRTestLoader):
  """Loads every test case deriving from OutputpluginsTest."""
  # Discovery filter: only subclasses of this base class are collected.
  base_class = OutputpluginsTest
def main(argv):
  # Delegate to the GRR test runner with our discovery loader.
  test_lib.GrrTestProgram(argv=argv, testLoader=TestLoader())


if __name__ == "__main__":
  flags.StartMain(main)
|
|
#!/usr/bin/env python
import argparse
import sys
import os
from pprint import pprint
import re
import logging
from six import StringIO
# Rendered snippet lines longer than this trigger a warning.
MAX_LINE_WIDTH=79

# Source-file extensions scanned for tagged snippet regions.
FILE_EXTENSIONS = [
    "swift",
    "strings",
    "cs",
    "txt",
    "shader"
]

# Document extensions whose files get snippets rendered into them.
TARGET_FILE_EXTENSIONS = [
    "asciidoc",
    "md"
]
def build_file_list(starting_dir, extension):
    """Return the paths of all files under starting_dir with an extension.

    :param starting_dir: directory tree to walk recursively
    :param extension: extension without the leading dot, e.g. "swift"
    :returns: list of matching file paths, in os.walk order
    """
    suffix = "." + extension
    found_files = []
    for (path, dirs, files) in os.walk(starting_dir):
        for filename in files:
            if filename.endswith(suffix):
                # os.path.join instead of manual separator concatenation.
                found_files.append(os.path.join(path, filename))
    return found_files
def tag_source_file(path):
    """Parse a source file into tagged lines.

    :param path: path of the source file to scan
    :returns: a list of tuples (line_text, list_of_tags, (path, line_num)),
        where line_num is the 0-based line number. Snippet-control lines
        ("// BEGIN x" / "// END x") and hidden-comment lines ("//-", "/*-",
        "-*/") are not included in the output.
    """
    # Use this to store snapshots of current_tags
    from copy import copy
    # Regexes for detecting when tags start and end
    begin_re = re.compile(r".*?\/\/ BEGIN (.*).*")
    end_re = re.compile(r".*?\/\/ END (.*).*")
    tagged_lines = []   # the list of tagged lines
    current_tags = []   # the list of tags that currently apply
    # BUG FIX: the file handle was previously never closed.
    with open(path, "r") as source:
        for line_num, line in enumerate(source):
            # Comments containing "//-", "/*-" or "-*/" must not be rendered.
            if "/*-" in line or "-*/" in line or "//-" in line:
                continue
            begin_match = begin_re.search(line)
            end_match = end_re.search(line)
            if begin_match:
                # Entered a tag: add it to the active set.
                tag = begin_match.group(1)
                if tag in current_tags:
                    logging.warning(
                        "{0}:{1}: \"{2}\" was entered twice without exiting it".format(path, line_num, tag))
                current_tags.append(tag)
            elif end_match:
                # Left a tag: remove it from the active set.
                tag = end_match.group(1)
                if tag in current_tags:
                    current_tags.remove(tag)
                else:
                    # BUG FIX: .remove() previously ran unconditionally and
                    # raised ValueError on an unmatched END tag.
                    logging.warning(
                        "{0}:{1}: \"{2}\" was exited, but had not yet been entered".format(path, line_num, tag))
            else:
                # Ordinary line: record it with a snapshot of active tags.
                tagged_lines.append((line, copy(current_tags), (path, line_num)))
    # TODO: Error if we left a file with an unclosed tag
    return tagged_lines
def parse_snippet_command(command):
    """Parse a '// snip: ...' command line.

    :param command: the raw command line, e.g. "// snip: a,b except c"
    :returns: the tuple
        (tags_to_include, tags_to_exclude, tags_to_highlight, tags_to_isolate)
    """
    # Split the command into usable tokens. list() keeps this working on
    # Python 3 too, where filter() returns a lazy iterator.
    tokens = list(filter(None, re.split(r'\/\/|:|,| |\n', command)))
    if tokens[0] != "snip":
        # BUG FIX: the previous message was garbled mid-sentence.
        logging.fatal("First token of a snippet command must be 'snip'")

    # The parser modes: which tag list subsequent tokens are appended to.
    INCLUDE_TAGS = 0
    EXCLUDE_TAGS = 1
    HIGHLIGHT_TAGS = 2
    ISOLATE_TAGS = 3
    mode = INCLUDE_TAGS

    # The useful output of this function
    tags_to_include = []
    tags_to_exclude = []
    tags_to_highlight = []
    tags_to_isolate = []
    buckets = {
        INCLUDE_TAGS: tags_to_include,
        EXCLUDE_TAGS: tags_to_exclude,
        HIGHLIGHT_TAGS: tags_to_highlight,
        ISOLATE_TAGS: tags_to_isolate,
    }

    # Interpret the list of tokens: keywords switch the mode, anything
    # else is a tag name collected into the current mode's list.
    for token in tokens[1:]:
        if token == "except":
            mode = EXCLUDE_TAGS
        elif token == "highlighting":
            mode = HIGHLIGHT_TAGS
        elif token == "isolating":
            mode = ISOLATE_TAGS
        else:
            buckets[mode].append(token)

    if len(tags_to_isolate) > 1:
        logging.warning(
            "Command {0}: 'isolating' should only have one tag in its list".format(command))

    return (tags_to_include, tags_to_exclude, tags_to_highlight, tags_to_isolate)
def render_snippet(tags, include, exclude, highlight, isolate,
                   max_line_width=None):
    """Render tagged source lines into an indented snippet string.

    Searches 'tags' and keeps every line that matches any tag in 'include'
    (or whose innermost tag is in 'isolate') and matches none in 'exclude'.
    Lines whose tags intersect 'highlight' are prefixed with '>' instead of
    two spaces.

    :param tags: list of (line_text, tag_list, (path, line_num)) tuples
    :param max_line_width: warn for rendered lines longer than this;
        defaults to the module-level MAX_LINE_WIDTH (new, backward-compatible
        parameter).
    :returns: (rendered_text, warnings) where warnings is a list of
        (line_num, message); rendered_text is None when nothing matched.
    """
    if max_line_width is None:
        max_line_width = MAX_LINE_WIDTH
    has_content = False
    highlighted_lines = []
    snippet_contents = StringIO()
    for candidate_line in tags:
        line = candidate_line[0]
        is_highlighted = set(candidate_line[1]).intersection(highlight)
        # If its LAST tag is the same as any of the isolating tags, include it
        if set(candidate_line[1][-1:]).intersection(isolate):
            snippet_contents.write(line)
            highlighted_lines.append(is_highlighted)
            has_content = True
        # Otherwise include it if it has wanted tags and no unwanted ones
        elif (set(candidate_line[1]).intersection(include)
              and not set(candidate_line[1]).intersection(exclude)):
            snippet_contents.write(line)
            highlighted_lines.append(is_highlighted)
            has_content = True
    if not has_content:
        return (None, [])

    import textwrap
    rendered_snippet = textwrap.dedent(snippet_contents.getvalue())

    # Build a list of (highlighted, contents) tuples per line.
    # BUG FIX: str.split replaces the Python-2-only 'from string import
    # split'; behavior is identical.
    contents = []
    for line_num, line_text in enumerate(rendered_snippet.split("\n")):
        hl = (line_num < len(highlighted_lines)
              and bool(highlighted_lines[line_num]))
        contents.append((hl, line_text))

    # Remove leading and consecutive blank lines.
    final_contents = []
    last_line_was_blank = True  # so that initial blank lines are dropped
    empty_line = re.compile(r"^\s*$")  # the line contains only whitespace
    for line in contents:
        this_line_blank = empty_line.match(line[1])
        if last_line_was_blank and this_line_blank:
            continue
        last_line_was_blank = this_line_blank
        final_contents.append(line)

    rendered_snippet = StringIO()
    warnings = []
    for (line_num, line) in enumerate(final_contents):
        # '>' marks highlighted lines; two spaces indent everything else.
        if line[0] == True:
            rendered_snippet.write("> {0}\n".format(line[1]))
        else:
            rendered_snippet.write("  {0}\n".format(line[1]))
        if len(line[1]) > max_line_width:
            warnings.append((line_num, "exceeds max line length ({0})".format(len(line[1]))))
    return (rendered_snippet.getvalue(), warnings)
def render_file(file_path, tags, language):
    """Return the text of the file with its snippets (re)rendered.

    Any previously-rendered snippet bodies are stripped first, keeping the
    '// snip:' command lines; each command is then expanded again from
    'tags'.

    :param file_path: the document to process
    :param tags: tagged lines as produced by tag_source_file()
    :param language: language name for the AsciiDoc [source,...] block
    :returns: the new document contents as a string
    """
    # First, clean the file of any already-rendered snippets, preserving
    # the tag line itself.
    snip_with_code = re.compile(
        r"(//.*snip:.*\n)(\[.*\]\n)*----\n(.*\n)*?----\n")
    # BUG FIX: the input file handle was previously never closed.
    with open(file_path, 'r') as source:
        cleaned_contents = re.sub(snip_with_code, r'\1', source.read())

    # Now render snippets in this cleaned content
    cleaned_contents = StringIO(cleaned_contents)
    file_contents = StringIO()
    for line in cleaned_contents:
        # Write back the line
        file_contents.write(line)
        # Expand snippet commands if we find them
        if line.startswith("// snip:"):
            snippet_command = parse_snippet_command(line)
            snippet_content, warnings = render_snippet(
                tags, snippet_command[0], snippet_command[1],
                snippet_command[2], snippet_command[3])
            for warning in warnings:
                # Report the warning at its position in the output document.
                warn_line_num = (warning[0]
                                 + file_contents.getvalue().count("\n") + 3)
                logging.warning("{0}:{1} {2}".format(
                    file_path, warn_line_num, warning[1]))
            if snippet_content:
                file_contents.write("[source,{0}]\n".format(language))
                file_contents.write("----\n")
                # omit the last linebreak and its following 2 spaces
                file_contents.write(snippet_content[0:-3])
                file_contents.write("----\n")
            else:
                logging.warning(
                    "{0}:{1}: no snippet found".format(file_path, line))
    return file_contents.getvalue()
if __name__ == '__main__':
    # Command-line driver: collect tagged snippets from source files, then
    # render them into every target document found.
    parser = argparse.ArgumentParser(description='Renders snippets in AsciiDoc files.')
    parser.add_argument('--only-clean', dest='only_clean', action='store_const',
                        const=True, default=False,
                        help='remove snippets from source documents only')
    parser.add_argument('--lang', dest="language", type=str, default="javascript",
                        help='the language to use for syntax highlighting')
    parser.add_argument('--asciidoc-directory', type=str, default=".",
                        help='the directory containing asciidoc files to process (default = current directory)')
    parser.add_argument('--verbose', dest="verbose", action='store_const', const=True, default=False, help="Be verbose")
    parser.add_argument('source_directory', type=str,
                        help='the directory to search for code snippets')
    args = parser.parse_args()

    tags = []
    # Pull in the tags, unless we're only cleaning
    if args.only_clean == False:
        swift_files = []
        for extension in FILE_EXTENSIONS:
            swift_files.extend(build_file_list(args.source_directory, extension))
        for file in swift_files:
            new_tags = tag_source_file(file)
            # TODO: Warn if a tag was detected in multiple files
            tags += new_tags

    # Process every asciidoc file we found
    asciidoc_files = []
    for extension in TARGET_FILE_EXTENSIONS:
        asciidoc_files.extend(build_file_list(args.asciidoc_directory, extension))
    for file in asciidoc_files:
        if args.verbose:
            # NOTE: Python-2 print statement
            print file
        # Rewrite the document in place with freshly rendered snippets.
        new_contents = render_file(file, tags, args.language)
        new_file = open(file, "w")
        new_file.write(new_contents)
|
|
"""COLORS."""
# --- import --------------------------------------------------------------------------------------
import os
import pathlib
import collections
import numpy as np
from scipy.interpolate import griddata
from ._data import Data
from .. import kit as wt_kit
# --- define --------------------------------------------------------------------------------------
__all__ = ["from_COLORS"]
# --- from function -------------------------------------------------------------------------------
def from_COLORS(
    filepaths,
    name=None,
    cols=None,
    invert_d1=True,
    ignore=("w3", "wa", "dref", "m0", "m1", "m2", "m3", "m4", "m5", "m6"),
    parent=None,
    verbose=True,
):
    """Create data object from COLORS file(s).

    Parameters
    ----------
    filepaths : path-like or list of path-like
        Filepath(s).
        Can be either a local or remote file (http/ftp).
        Can be compressed with gz/bz2, decompression based on file name.
    name : string (optional)
        Unique dataset identifier. If None (default), autogenerated.
    cols : {'v0', 'v1', 'v2'} (optional)
        Format of COLORS dat file. If None, autorecognized. Default is None.
    invert_d1 : boolean (optional)
        Toggle inversion of D1 at import time. Default is True.
    ignore : iterable of strings (optional)
        Columns to ignore during dimensionality discovery.
    parent : WrightTools.Collection (optional)
        Collection to place new data object within. Default is None.
    verbose : bool (optional)
        Toggle talkback. Default is True.

    Returns
    -------
    WrightTools.Data
        Data from COLORS.

    Raises
    ------
    ValueError
        If ``cols`` is not given and the column count of the first file does
        not match any known COLORS format.
    """
    # do we have a list of files or just one file? ------------------------------------------------
    if isinstance(filepaths, list):
        filestrs = [os.fspath(f) for f in filepaths]
        filepaths = [pathlib.Path(f) for f in filepaths]
    else:
        filestrs = [os.fspath(filepaths)]
        filepaths = [pathlib.Path(filepaths)]
    # DataSource transparently handles remote (http/ftp) and compressed files
    ds = np.DataSource(None)
    # define format of dat file -------------------------------------------------------------------
    if not cols:
        # autodetect format from the number of columns in the first file
        f = ds.open(filestrs[0], "rt")
        num_cols = len(np.genfromtxt(f).T)
        f.close()
        if num_cols in (28, 35, 41):
            cols = "v2"
        elif num_cols == 20:
            cols = "v1"
        elif num_cols in (15, 16, 19):
            cols = "v0"
        else:
            # fail fast: previously this fell through silently and later
            # crashed with an opaque NameError on ``axes``
            raise ValueError("unrecognized COLORS format: %d columns" % num_cols)
        if verbose:
            print("cols recognized as", cols, "(%d)" % num_cols)
    # column maps: axis/channel name -> index into the raw array, units, etc.
    if cols == "v2":
        axes = collections.OrderedDict()
        axes["w1"] = {"idx": 1, "units": "nm", "tolerance": 0.5, "label": "1"}
        axes["w2"] = {"idx": 3, "units": "nm", "tolerance": 0.5, "label": "2"}
        axes["w3"] = {"idx": 5, "units": "nm", "tolerance": 0.5, "label": "3"}
        axes["wm"] = {"idx": 7, "units": "nm", "tolerance": 0.5, "label": "m"}
        axes["wa"] = {"idx": 8, "units": "nm", "tolerance": 1.0, "label": "a"}
        axes["d0"] = {"idx": 10, "units": "fs", "tolerance": 25.0, "label": "0"}
        axes["d1"] = {"idx": 12, "units": "fs", "tolerance": 4.0, "label": "1"}
        axes["d2"] = {"idx": 14, "units": "fs", "tolerance": 4.0, "label": "2"}
        axes["m0"] = {"idx": 22, "units": None, "tolerance": 10.0, "label": 0}
        axes["m1"] = {"idx": 23, "units": None, "tolerance": 10.0, "label": 1}
        axes["m2"] = {"idx": 24, "units": None, "tolerance": 10.0, "label": 2}
        axes["m3"] = {"idx": 25, "units": None, "tolerance": 10.0, "label": 3}
        axes["m4"] = {"idx": 26, "units": None, "tolerance": 15.0, "label": 4}
        axes["m5"] = {"idx": 27, "units": None, "tolerance": 15.0, "label": 5}
        axes["m6"] = {"idx": 28, "units": None, "tolerance": 15.0, "label": 6}
        channels = collections.OrderedDict()
        channels["ai0"] = {"idx": 16, "label": "0"}
        channels["ai1"] = {"idx": 17, "label": "1"}
        channels["ai2"] = {"idx": 18, "label": "2"}
        channels["ai3"] = {"idx": 19, "label": "3"}
        channels["ai4"] = {"idx": 20, "label": "4"}
        channels["mc"] = {"idx": 21, "label": "a"}
    elif cols == "v1":
        axes = collections.OrderedDict()
        axes["w1"] = {"idx": 1, "units": "nm", "tolerance": 0.5, "label": "1"}
        axes["w2"] = {"idx": 3, "units": "nm", "tolerance": 0.5, "label": "2"}
        axes["wm"] = {"idx": 5, "units": "nm", "tolerance": 0.5, "label": "m"}
        axes["d1"] = {"idx": 6, "units": "fs", "tolerance": 3.0, "label": "1"}
        axes["d2"] = {"idx": 7, "units": "fs", "tolerance": 3.0, "label": "2"}
        channels = collections.OrderedDict()
        channels["ai0"] = {"idx": 8, "label": "0"}
        channels["ai1"] = {"idx": 9, "label": "1"}
        channels["ai2"] = {"idx": 10, "label": "2"}
        channels["ai3"] = {"idx": 11, "label": "3"}
    elif cols == "v0":
        axes = collections.OrderedDict()
        axes["w1"] = {"idx": 1, "units": "nm", "tolerance": 0.5, "label": "1"}
        axes["w2"] = {"idx": 3, "units": "nm", "tolerance": 0.5, "label": "2"}
        axes["wm"] = {"idx": 5, "units": "nm", "tolerance": 0.5, "label": "m"}
        axes["d1"] = {"idx": 6, "units": "fs", "tolerance": 3.0, "label": "1"}
        axes["d2"] = {"idx": 8, "units": "fs", "tolerance": 3.0, "label": "2"}
        channels = collections.OrderedDict()
        channels["ai0"] = {"idx": 10, "label": "0"}
        channels["ai1"] = {"idx": 11, "label": "1"}
        channels["ai2"] = {"idx": 12, "label": "2"}
        channels["ai3"] = {"idx": 13, "label": "3"}
    else:
        raise ValueError("cols must be one of 'v0', 'v1', 'v2' (got %r)" % (cols,))
    # import full array ---------------------------------------------------------------------------
    arr = []
    for f in filestrs:
        ff = ds.open(f, "rt")
        arr.append(np.genfromtxt(ff).T)
        ff.close()
    arr = np.concatenate(arr, axis=1)
    if invert_d1:
        idx = axes["d1"]["idx"]
        arr[idx] = -arr[idx]
    # recognize dimensionality of data ------------------------------------------------------------
    axes_discover = axes.copy()
    for key in ignore:
        if key in axes_discover:
            axes_discover.pop(key)  # remove dimensions that mess up discovery
    scanned = wt_kit.discover_dimensions(arr, axes_discover)
    # create data object --------------------------------------------------------------------------
    if name is None:
        name = wt_kit.string2identifier(filepaths[0].name)
    kwargs = {"name": name, "kind": "COLORS", "source": filestrs}
    if parent is not None:
        data = parent.create_data(**kwargs)
    else:
        data = Data(**kwargs)
    # grid and fill data --------------------------------------------------------------------------
    # variables
    ndim = len(scanned)
    for i, key in enumerate(scanned.keys()):
        # loop variable renamed so the ``name`` argument is not clobbered
        for axis_name in key.split("="):
            shape = [1] * ndim
            a = scanned[key]
            shape[i] = a.size
            a.shape = tuple(shape)
            units = axes[axis_name]["units"]
            label = axes[axis_name]["label"]
            data.create_variable(name=axis_name, values=a, units=units, label=label)
    for key, dic in axes.items():
        if key not in data.variable_names:
            # constant (unscanned) axis: store its mean as a broadcastable scalar
            c = np.mean(arr[dic["idx"]])
            if not np.isnan(c):
                shape = [1] * ndim
                a = np.array([c])
                a.shape = tuple(shape)
                units = dic["units"]
                label = dic["label"]
                data.create_variable(name=key, values=a, units=units, label=label)
    # channels
    points = tuple(arr[axes[key.split("=")[0]]["idx"]] for key in scanned.keys())
    if len(scanned) == 1:  # 1D data
        (xi,) = scanned.values()
        for key in channels.keys():
            channel = channels[key]
            zi = arr[channel["idx"]]
            grid_i = griddata(points, zi, xi, method="nearest")
            data.create_channel(name=key, values=grid_i)
    else:  # all other dimensionalities
        xi = tuple(np.meshgrid(*scanned.values(), indexing="ij"))
        for key in channels.keys():
            channel = channels[key]
            zi = arr[channel["idx"]]
            fill_value = min(zi)
            grid_i = griddata(points, zi, xi, method="linear", fill_value=fill_value)
            data.create_channel(name=key, values=grid_i)
    # axes
    data.transform(*scanned.keys())
    # return --------------------------------------------------------------------------------------
    if verbose:
        print("data created at {0}".format(data.fullpath))
        print("  axes: {0}".format(data.axis_names))
        print("  shape: {0}".format(data.shape))
    return data
|
|
import copy
from django.http.response import HttpResponse
from django.utils.translation import ugettext as _
from corehq.apps.groups.models import Group
from corehq.apps.reports.cache import request_cache
from corehq.apps.reports.standard.cases.basic import CaseListReport
from corehq.apps.api.es import CaseES
from corehq.apps.reports.standard import CustomProjectReport
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DataTablesColumnGroup
from dimagi.utils.decorators.memoized import memoized
from corehq.elastic import stream_es_query, ES_URLS
from custom.bihar.reports.display import MCHMotherDisplay, MCHChildDisplay
from dimagi.utils.timezones import utils as tz_utils
import pytz
from corehq.apps.reports.tasks import export_all_rows_task
from custom.bihar.utils import get_all_owner_ids_from_group
class MCHBaseReport(CustomProjectReport, CaseListReport):
    """Shared base for the Bihar MCH (mother/child) register reports."""
    ajax_pagination = True
    asynchronous = True
    exportable = True
    exportable_all = True
    emailable = False
    fix_left_col = True
    report_template_path = "bihar/reports/report.html"
    model = None
    fields = [
        'corehq.apps.reports.filters.select.GroupFilter',
        'corehq.apps.reports.filters.select.SelectOpenCloseFilter',
    ]

    @property
    def case_filter(self):
        """ES filter restricting cases to the owners of the selected group."""
        group_id = self.request_params.get('group', '')
        filters = []
        if group_id:
            group = Group.get(group_id)
            owner_ids = get_all_owner_ids_from_group(group)
            if owner_ids:
                # any of the group's owner ids matches
                filters.append(
                    {"or": [{'term': {'owner_id': owner_id}}
                            for owner_id in owner_ids]})
            else:
                # fall back to the group id itself as the owner
                filters.append({'term': {'owner_id': group_id}})
        return {'and': filters} if filters else {}

    @property
    @memoized
    def case_es(self):
        return CaseES(self.domain)

    @property
    @memoized
    def rendered_report_title(self):
        return self.name

    def date_to_json(self, date):
        """Render a UTC datetime in the report timezone as dd/mm/YYYY."""
        if not date:
            return ""
        localized = tz_utils.adjust_datetime_to_timezone(
            date, pytz.utc.zone, self.timezone.zone)
        return localized.strftime('%d/%m/%Y')

    @property
    def get_all_rows(self):
        """All rows for export -- streams every matching case from ES."""
        results = stream_es_query(q=self.es_query, es_url=ES_URLS["cases"],
                                  size=999999, chunksize=100)
        return self.get_cases(
            self.model(self, self.get_case(case)) for case in results)

    def build_query(self, case_type=None, afilter=None, status=None,
                    owner_ids=None, user_ids=None, search_string=None):
        """Assemble the paginated, sorted ES query for this report."""
        def _domain_term():
            return {"term": {"domain.exact": self.domain}}

        subterms = [_domain_term()]
        if afilter:
            subterms.append(afilter)
        if case_type:
            subterms.append({"term": {"type.exact": case_type}})
        if status:
            subterms.append({"term": {"closed": (status == 'closed')}})
        return {
            'query': {
                'filtered': {
                    'query': {"match_all": {}},
                    'filter': {'and': subterms} if subterms else {},
                }
            },
            'sort': self.get_sorting_block(),
            'from': self.pagination.start,
            'size': self.pagination.count,
        }

    @property
    @memoized
    def es_query(self):
        return self.build_query(case_type=self.case_type,
                                afilter=self.case_filter,
                                status=self.case_status)

    @property
    @request_cache("export")
    def export_response(self):
        """Kick off the async full-export task and return an empty response."""
        self.request.datespan = None
        export_all_rows_task.delay(self.__class__, self.__getstate__())
        return HttpResponse()

    @property
    def rows(self):
        hits = self.es_results['hits'].get('hits', [])
        return self.get_cases(
            self.model(self, self.get_case(case)) for case in hits)

    @property
    def export_table(self):
        table = super(MCHBaseReport, self).export_table
        # remove first row from table headers
        table[0][1].pop(0)
        return table
class MotherMCHRegister(MCHBaseReport):
    """Mother MCH register: one export row per pregnancy case, covering ANC
    visits, delivery outcome, post-delivery details and up to four children."""
    name = "Mother MCH register"
    slug = "mother_mch_register"
    default_case_type = "cc_bihar_pregnancy"
    model = MCHMotherDisplay
    @property
    def headers(self):
        # Column layout; group/column order here must stay in sync with the
        # row order yielded by get_cases() below.
        headers = DataTablesHeader(DataTablesColumn(_("CHW Name")),
                                   DataTablesColumn(_("Mother Name"), sortable=False),
                                   DataTablesColumnGroup(
                                       _("Beneficiary Information"),
                                       DataTablesColumn(_("Husband Name"), sortable=False),
                                       DataTablesColumn(_("City/ward/village"), sortable=False),
                                       DataTablesColumn(_("Full address"), sortable=False),
                                       DataTablesColumn(_("MCTS ID"), sortable=False),
                                       DataTablesColumn(_("Mobile number"), sortable=False),
                                       DataTablesColumn(_("Whose Mobile Number"), sortable=False),
                                       DataTablesColumn(_("Mother DOB / AGE"), sortable=False),
                                       DataTablesColumn(_("JSY beneficiary"), sortable=False),
                                       DataTablesColumn(_("Caste"), sortable=False)),
                                   DataTablesColumnGroup(
                                       _("Provider Information"),
                                       DataTablesColumn(_("ASHA Name"), sortable=False),
                                       DataTablesColumn(_("Asha phone"), sortable=False),
                                       DataTablesColumn(_("AWC Code , AWC name"), sortable=False),
                                       DataTablesColumn(_("AWW name"), sortable=False),
                                       DataTablesColumn(_("AWW phone number"), sortable=False),
                                       DataTablesColumn(_("LMP"), sortable=False),
                                       DataTablesColumn(_("EDD"), sortable=False)),
                                   DataTablesColumnGroup(
                                       _("First ANC (within 12 weeks)"),
                                       DataTablesColumn(_("ANC 1 Date"), sortable=False),
                                       DataTablesColumn(_("ANC 1 Blood Pressure"), sortable=False),
                                       DataTablesColumn(_("ANC 1 Weight"), sortable=False),
                                       DataTablesColumn(_("ANC Hb"), sortable=False),
                                       DataTablesColumn(_("ANC1 completed within 12 weeks? "), sortable=False)),
                                   DataTablesColumnGroup(
                                       _("Second ANC (14-26 weeks)"),
                                       DataTablesColumn(_("ANC 2 Date"), sortable=False),
                                       DataTablesColumn(_("ANC 2 Blood Pressure"), sortable=False),
                                       DataTablesColumn(_("ANC 2 Weight"), sortable=False)),
                                   DataTablesColumnGroup(
                                       _("Third ANC (28-34 weeks)"),
                                       DataTablesColumn(_("ANC 3 Date"), sortable=False),
                                       DataTablesColumn(_("ANC 3 Blood Pressure"), sortable=False),
                                       DataTablesColumn(_("ANC 3 Weight"), sortable=False)),
                                   DataTablesColumnGroup(
                                       _("Fourth ANC (34 weeks to Delivery)"),
                                       DataTablesColumn(_("ANC 4 Date"), sortable=False),
                                       DataTablesColumn(_("ANC 4 Blood Pressure"), sortable=False),
                                       DataTablesColumn(_("ANC 4 Weight"), sortable=False),
                                       DataTablesColumn(_("TT1 date"), sortable=False),
                                       DataTablesColumn(_("TT2 date"), sortable=False),
                                       DataTablesColumn(_("TT Booster"), sortable=False),
                                       DataTablesColumn(_("Received date of 100 IFA tablets "), sortable=False),
                                       DataTablesColumn(_("Anemia"), sortable=False),
                                       DataTablesColumn(_("Any complications"), sortable=False),
                                       DataTablesColumn(_("RTI /STI <yes/no>"), sortable=False)),
                                   DataTablesColumnGroup(
                                       _("Pregnancy Outcome"),
                                       DataTablesColumn(_("Date of delivery"), sortable=False),
                                       DataTablesColumn(
                                           _("Place of delivery (home - SBA/Non-SBA) (Hospital - public/private)"), sortable=False),
                                       DataTablesColumn(_("Nature of delivery"), sortable=False),
                                       DataTablesColumn(_("Complications"), sortable=False),
                                       DataTablesColumn(_("Discharge date"), sortable=False),
                                       DataTablesColumn(_("Received date of JSY benefits"), sortable=False),
                                       DataTablesColumn(_("Abortion type"), sortable=False)),
                                   DataTablesColumnGroup(
                                       _("Post Delivery Details"),
                                       DataTablesColumn(
                                           _("First PNC visit (within 48 hours / within 7 days/ after 7 days)"), sortable=False),
                                       DataTablesColumn(_("Complications after delivery"), sortable=False),
                                       DataTablesColumn(_("Type of family planning adopted after delivery"), sortable=False),
                                       DataTablesColumn(_("Checked mother and infant immediate after delivery?"), sortable=False),
                                       DataTablesColumn(_("Infant outcome number code"), sortable=False)),
                                   DataTablesColumnGroup(
                                       _("Child 1 Details"),
                                       DataTablesColumn(_("Name of the child"), sortable=False),
                                       DataTablesColumn(_("Gender"), sortable=False),
                                       DataTablesColumn(_("First weight at birth"), sortable=False),
                                       DataTablesColumn(_("Breastfed within an hour?"), sortable=False)),
                                   DataTablesColumnGroup(
                                       _("Child 2 Details"),
                                       DataTablesColumn(_("Name of the child"), sortable=False),
                                       DataTablesColumn(_("Gender"), sortable=False),
                                       DataTablesColumn(_("First weight at birth"), sortable=False),
                                       DataTablesColumn(_("Breastfed within an hour?"), sortable=False)),
                                   DataTablesColumnGroup(
                                       _("Child 3 Details"),
                                       DataTablesColumn(_("Name of the child"), sortable=False),
                                       DataTablesColumn(_("Gender"), sortable=False),
                                       DataTablesColumn(_("First weight at birth"), sortable=False),
                                       DataTablesColumn(_("Breastfed within an hour?"), sortable=False)),
                                   DataTablesColumnGroup(
                                       _("Child 4 Details"),
                                       DataTablesColumn(_("Name of the child"), sortable=False),
                                       DataTablesColumn(_("Gender"), sortable=False),
                                       DataTablesColumn(_("First weight at birth"), sortable=False),
                                       DataTablesColumn(_("Breastfed within an hour?"), sortable=False),
                                       DataTablesColumn(_("Migrate status "), sortable=False))
                                   )
        return headers
    @classmethod
    def get_cases(self, case_displays):
        """Yield one export row per case display, in ``headers`` order.

        NOTE(review): declared ``@classmethod`` but the first parameter is
        named ``self``; it actually receives the class object.
        """
        for disp in case_displays:
            yield [
                disp.chw_name,
                disp.mother_name,
                disp.husband_name,
                disp.ward_number,
                disp.village,
                disp.mcts_id,
                disp.mobile_number,
                disp.mobile_number_whose,
                disp.dob_age,
                disp.jsy_beneficiary,
                disp.caste,
                disp.asha_name,
                disp.asha_number,
                disp.awc_code_name,
                disp.aww_name,
                disp.aww_number,
                disp.lmp,
                disp.edd,
                disp.anc_date_1,
                disp.blood_pressure_1,
                disp.weight_1,
                disp.hemoglobin,
                disp.anc_completed,
                disp.anc_date_2,
                disp.blood_pressure_2,
                disp.weight_2,
                disp.anc_date_3,
                disp.blood_pressure_3,
                disp.weight_3,
                disp.anc_date_4,
                disp.blood_pressure_4,
                disp.weight_4,
                disp.tt1_date,
                disp.tt2_date,
                disp.tt_booster,
                disp.ifa_tablets,
                disp.anemia,
                # NOTE(review): disp.complications appears twice in this row
                # (here for ANC "Any complications", and again below for the
                # delivery "Complications" column) -- confirm this is intended.
                disp.complications,
                disp.rti_sti,
                disp.add,
                disp.home_sba_assist,
                disp.delivery_nature,
                disp.complications,
                disp.discharge_date,
                disp.jsy_money_date,
                disp.abortion_type,
                disp.first_pnc_time,
                disp.delivery_complications,
                disp.family_planning_type,
                disp.all_pnc_on_time,
                disp.num_children,
                disp.case_name_1,
                disp.gender_1,
                disp.first_weight_1,
                disp.breastfed_hour_1,
                disp.case_name_2,
                disp.gender_2,
                disp.first_weight_2,
                disp.breastfed_hour_2,
                disp.case_name_3,
                disp.gender_3,
                disp.first_weight_3,
                disp.breastfed_hour_3,
                disp.case_name_4,
                disp.gender_4,
                disp.first_weight_4,
                disp.breastfed_hour_4,
                disp.status
            ]
    @property
    def fixed_cols_spec(self):
        # Keep the two leftmost columns pinned (350px) while scrolling.
        return dict(num=2, width=350)
class ChildMCHRegister(MCHBaseReport):
    """Child MCH register: one export row per newborn case, tracking the
    immunization schedule from birth through after-2-years doses."""
    name = "Child MCH register"
    slug = "child_mch_register"
    default_case_type = "cc_bihar_newborn"
    model = MCHChildDisplay
    @property
    def headers(self):
        # Column layout; group/column order here must stay in sync with the
        # row order yielded by get_cases() below.
        headers = DataTablesHeader(DataTablesColumn(_("CHW Name")),
                                   DataTablesColumn(_("Child Name"), sortable=False),
                                   DataTablesColumn(_("Father and Mother Name"), sortable=False),
                                   DataTablesColumnGroup(
                                       _("Beneficiary Information"),
                                       DataTablesColumn(_("Mother's MCTS ID"), sortable=False),
                                       DataTablesColumn(_("Gender"), sortable=False),
                                       DataTablesColumn(_("City/ward/village"), sortable=False),
                                       DataTablesColumn(_("Address"), sortable=False),
                                       DataTablesColumn(_("Mobile number"), sortable=False),
                                       DataTablesColumn(_("Whose Mobile Number"), sortable=False),
                                       DataTablesColumn(_("DOB / AGE"), sortable=False),
                                       DataTablesColumn(_("Place of delivery (home - SBA/Non-SBA) (Hospital - public/private)"), sortable=False),
                                       DataTablesColumn(_("Caste"), sortable=False)),
                                   DataTablesColumnGroup(
                                       _("Provider Information"),
                                       DataTablesColumn(_("ASHA Name"), sortable=False),
                                       DataTablesColumn(_("Asha phone"), sortable=False),
                                       DataTablesColumn(_("AWC Code , AWC name"), sortable=False),
                                       DataTablesColumn(_("AWW name"), sortable=False),
                                       DataTablesColumn(_("AWW phone number"), sortable=False)),
                                   DataTablesColumnGroup(
                                       _("At Birth"),
                                       DataTablesColumn(_("BCG"), sortable=False),
                                       DataTablesColumn(_("OPV0"), sortable=False),
                                       DataTablesColumn(_("Hepatitis-Birth dose "), sortable=False)),
                                   DataTablesColumnGroup(
                                       _("At 6 Weeks"),
                                       DataTablesColumn(_("DPT1"), sortable=False),
                                       DataTablesColumn(_("OPV1"), sortable=False),
                                       DataTablesColumn(_("Hepatitis-B1"), sortable=False)),
                                   DataTablesColumnGroup(
                                       _("At 10 Weeks"),
                                       DataTablesColumn(_("DPT2"), sortable=False),
                                       DataTablesColumn(_("OPV2"), sortable=False),
                                       DataTablesColumn(_("Hepatitis-B2"), sortable=False)),
                                   DataTablesColumnGroup(
                                       _("At 14 Weeks"),
                                       DataTablesColumn(_("DPT3"), sortable=False),
                                       DataTablesColumn(_("OPV3"), sortable=False),
                                       DataTablesColumn(_("Hepatitis-B3"), sortable=False)),
                                   DataTablesColumnGroup(
                                       _("Between 9-12 Months"),
                                       DataTablesColumn(_("Measles (1st dose)"), sortable=False)),
                                   DataTablesColumnGroup(
                                       _("Between 16-24 Months"),
                                       DataTablesColumn(
                                           _("Vitamin A dose-1 "), sortable=False),
                                       DataTablesColumn(_("Measles (2nd dose)/ MR Vaccine"))),
                                   DataTablesColumnGroup(
                                       _("After 2 Years"),
                                       DataTablesColumn(_("DPT Booster"), sortable=False),
                                       DataTablesColumn(_("OPV Booster"), sortable=False),
                                       DataTablesColumn(_("Vitamin A dose-2"), sortable=False),
                                       DataTablesColumn(_("Vitamin A dose-3"), sortable=False),
                                       DataTablesColumn(_("JE Vaccine"), sortable=False))
                                   )
        return headers
    @classmethod
    def get_cases(self, case_displays):
        """Yield one export row per case display, in ``headers`` order.

        NOTE(review): declared ``@classmethod`` but the first parameter is
        named ``self``; it actually receives the class object.
        """
        for disp in case_displays:
            yield [
                disp.chw_name,
                disp.child_name,
                disp.father_mother_name,
                disp.mcts_id,
                disp.gender,
                disp.ward_number,
                disp.village,
                disp.mobile_number,
                disp.mobile_number_whose,
                disp.dob_age,
                disp.home_sba_assist,
                disp.caste,
                disp.asha_name,
                disp.asha_number,
                disp.awc_code_name,
                disp.aww_name,
                disp.aww_number,
                disp.bcg_date,
                disp.opv_0_date,
                disp.hep_b_0_date,
                disp.dpt_1_date,
                disp.opv_1_date,
                disp.hep_b_1_date,
                disp.dpt_2_date,
                disp.opv_2_date,
                disp.hep_b_2_date,
                disp.dpt_3_date,
                disp.opv_3_date,
                disp.hep_b_3_date,
                disp.measles_date,
                disp.vit_a_1_date,
                disp.date_measles_booster,
                disp.dpt_booster_date,
                disp.opv_booster_date,
                disp.vit_a_2_date,
                disp.vit_a_3_date,
                disp.date_je
            ]
    @property
    def fixed_cols_spec(self):
        # Keep the three leftmost columns pinned (450px) while scrolling.
        return dict(num=3, width=450)
|
|
# -*- coding: utf-8 -*-
from future.builtins import filter, int, range, str, super, zip
from future.utils import with_metaclass
from copy import copy
from datetime import date
import datetime
from itertools import dropwhile, takewhile
from locale import localeconv
from re import match
from django import forms
from django.forms.models import BaseInlineFormSet, ModelFormMetaclass
from django.forms.models import inlineformset_factory
from django.utils.datastructures import SortedDict
from django.utils.safestring import mark_safe
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.core.templatetags.mezzanine_tags import thumbnail
from cartridge.shop import checkout
from cartridge.shop.models import Product, ReservableProduct, ProductOption, ProductVariation
from cartridge.shop.models import Cart, CartItem, Order, DiscountCode, SpecialPrice
from cartridge.shop.utils import (make_choices, set_locale, set_shipping,
clear_session)
# User-facing validation messages raised by AddProductForm / CartItemForm.
ADD_PRODUCT_ERRORS = {
    "invalid_options": _("The selected options are currently unavailable."),
    "no_stock": _("The selected options are currently not in stock."),
    "no_stock_quantity": _("The selected quantity is currently unavailable."),
    "period_not_available": _("The whole period is not available."),
    "cannot_edit_reservable_quantity": _("Cannot edit quantity of reservable product."),
}
class AddProductForm(forms.Form):
    """
    A form for adding the given product to the cart or the
    wishlist.
    """

    quantity = forms.IntegerField(label=_("Quantity"), min_value=1)
    sku = forms.CharField(required=False, widget=forms.HiddenInput())

    def __init__(self, *args, **kwargs):
        """
        Handles adding a variation to the cart or wishlist.

        When adding from the product page, the product is provided
        from the view and a set of choice fields for all the
        product options for this product's variations are added to
        the form. When the form is validated, the selected options
        are used to determine the chosen variation.

        A ``to_cart`` boolean keyword arg is also given specifying
        whether the product is being added to a cart or wishlist.
        If a product is being added to the cart, then its stock
        level is also validated.

        When adding to the cart from the wishlist page, a sku is
        given for the variation, so the creation of choice fields
        is skipped.
        """
        self._product = kwargs.pop("product", None)
        self._to_cart = kwargs.pop("to_cart")
        super(AddProductForm, self).__init__(*args, **kwargs)
        # Adding from the wishlist with a sku, bail out. Also guard against
        # being instantiated with no positional data argument at all (the
        # original raised IndexError on ``args[0]`` in that case).
        if args and args[0] is not None and args[0].get("sku", None):
            return
        # Adding from the product page, remove the sku field
        # and build the choice fields for the variations.
        del self.fields["sku"]
        option_fields = ProductVariation.option_fields()
        if not option_fields:
            return
        option_names, option_labels = list(zip(*[(f.name, f.verbose_name)
                                                 for f in option_fields]))
        option_values = list(zip(*self._product.variations.filter(
            unit_price__isnull=False).values_list(*option_names)))
        if option_values:
            for i, name in enumerate(option_names):
                # only offer option values that actually occur on a variation
                values = [_f for _f in set(option_values[i]) if _f]
                if values:
                    field = forms.ChoiceField(label=option_labels[i],
                                              choices=make_choices(values))
                    self.fields[name] = field
        if self._product.content_model == 'reservableproduct':
            # ReservableProduct needs from/to dates and does not need quantity
            self.fields["from_date"] = forms.DateField(input_formats=["%d.%m.%Y"], widget=forms.HiddenInput())
            self.fields["to_date"] = forms.DateField(input_formats=["%d.%m.%Y"], widget=forms.HiddenInput())
            self.fields["quantity"] = forms.IntegerField(min_value=1, initial=1, widget=forms.HiddenInput())

    def clean(self):
        """
        Determine the chosen variation, validate it and assign it as
        an attribute to be used in views.
        """
        if not self.is_valid():
            return
        # Posted data will either be a sku, or product options for
        # a variation.
        data = self.cleaned_data.copy()
        quantity = data.pop("quantity")
        # Reservation dates only exist for reservable products; pop with a
        # default instead of the original bare ``except:``, which could
        # silently swallow unrelated errors.
        from_date = data.pop("from_date", None)
        to_date = data.pop("to_date", None)
        # Ensure the product has a price if adding to cart.
        if self._to_cart:
            data["unit_price__isnull"] = False
        error = None
        if self._product is not None:
            # Chosen options will be passed to the product's
            # variations.
            qs = self._product.variations
        else:
            # A product hasn't been given since we have a direct sku.
            qs = ProductVariation.objects
        try:
            variation = qs.get(**data)
        except ProductVariation.DoesNotExist:
            error = "invalid_options"
        else:
            if self._to_cart:
                if self._product.content_model == 'reservableproduct':
                    # check if available to reserve
                    reservableproduct = ReservableProduct.objects.get(pk=self._product.id)
                    if not reservableproduct.is_available(from_date, to_date):
                        error = "period_not_available"
                    else:
                        # quantity of a reservation is the number of nights
                        self.cleaned_data["quantity"] = (to_date - from_date).days
                else:
                    # Validate stock if adding to cart.
                    if not variation.has_stock():
                        error = "no_stock"
                    elif not variation.has_stock(quantity):
                        error = "no_stock_quantity"
        if error is not None:
            raise forms.ValidationError(ADD_PRODUCT_ERRORS[error])
        self.variation = variation
        return self.cleaned_data
class CartItemForm(forms.ModelForm):
    """
    Per-item form for the cart, used by ``CartItemFormSet`` below to
    let the entire cart be edited at once.
    """

    class Meta:
        model = CartItem
        fields = ("quantity",)

    def clean_quantity(self):
        """
        Reject edits to reservable items and ensure the requested
        quantity change can be satisfied from stock.
        """
        variation = ProductVariation.objects.get(sku=self.instance.sku)
        if variation.product.content_model == 'reservableproduct':
            # reservable quantities are fixed by the reservation period
            raise forms.ValidationError(
                ADD_PRODUCT_ERRORS["cannot_edit_reservable_quantity"])
        quantity = self.cleaned_data["quantity"]
        delta = quantity - self.instance.quantity
        if not variation.has_stock(delta):
            message = ADD_PRODUCT_ERRORS["no_stock_quantity"].rstrip(".")
            raise forms.ValidationError("%s: %s" % (message, quantity))
        return quantity
# Formset controlling edits (including deletions) to every item in the cart.
CartItemFormSet = inlineformset_factory(Cart, CartItem, form=CartItemForm,
                                        can_delete=True, extra=0)
class FormsetForm(object):
    """
    Form mixin that provides template methods for iterating through
    sets of fields by prefix, single fields and finally remaining
    fields that haven't been iterated, with each fieldset made up from
    a copy of the original form, giving access to as_* methods.

    The use case for this is ``OrderForm`` below. It contains a
    handful of fields named with the prefixes ``billing_detail_XXX``
    and ``shipping_detail_XXX``. Using ``FormsetForm`` we can then
    group these into fieldsets in our templates::

        <!-- Fields prefixed with "billing_detail_" -->
        <fieldset>{{ form.billing_detail_fields.as_p }}</fieldset>
        <!-- Fields prefixed with "shipping_detail_" -->
        <fieldset>{{ form.shipping_detail_fields.as_p }}</fieldset>
        <!-- All remaining fields -->
        <fieldset>{{ form.other_fields.as_p }}</fieldset>

    Some other helpers exist for use with an individual field name:

    - ``XXX_field`` returns a fieldset containing the field named XXX
    - ``fields_before_XXX`` returns a fieldset with all fields before
      the field named XXX
    - ``fields_after_XXX`` returns a fieldset with all fields after
      the field named XXX
    """

    def _fieldset(self, field_names):
        """
        Return a subset of fields by making a copy of the form
        containing only the given field names.
        """
        fieldset = copy(self)
        if not hasattr(self, "_fields_done"):
            self._fields_done = []
        fieldset.non_field_errors = lambda *args: None
        # each field is only rendered once across successive fieldset calls
        names = [f for f in field_names if f not in self._fields_done]
        fieldset.fields = SortedDict([(f, self.fields[f]) for f in names])
        self._fields_done.extend(names)
        return fieldset

    def values(self):
        """
        Return pairs of label and value for each field.
        """
        for field in self.fields:
            label = self.fields[field].label
            if label is None:
                # derive a human-readable label from the field name
                label = field[0].upper() + field[1:].replace("_", " ")
            yield (label, self.initial.get(field, self.data.get(field, "")))

    def __getattr__(self, name):
        """
        Dynamic fieldset caller - matches requested attribute name
        against pattern for creating the list of field names to use
        for the fieldset.
        """
        if name == "errors":
            return None
        # raw strings: "\w" in a plain literal is a deprecated escape in py3
        filters = (
            (r"^other_fields$", lambda:
                self.fields.keys()),
            (r"^hidden_fields$", lambda:
                [n for n, f in self.fields.items()
                 if isinstance(f.widget, forms.HiddenInput)]),
            (r"^(\w*)_fields$", lambda name:
                [f for f in self.fields.keys() if f.startswith(name)]),
            (r"^(\w*)_field$", lambda name:
                [f for f in self.fields.keys() if f == name]),
            (r"^fields_before_(\w*)$", lambda name:
                takewhile(lambda f: f != name, self.fields.keys())),
            # dropwhile returns an iterator, which cannot be sliced -- the
            # original ``dropwhile(...)[1:]`` raised TypeError on every
            # ``fields_after_XXX`` access; materialize it first
            (r"^fields_after_(\w*)$", lambda name:
                list(dropwhile(lambda f: f != name, self.fields.keys()))[1:]),
        )
        for filter_exp, filter_func in filters:
            filter_args = match(filter_exp, name)
            if filter_args is not None:
                return self._fieldset(filter_func(*filter_args.groups()))
        raise AttributeError(name)
class DiscountForm(forms.ModelForm):
    """Form for entering and applying a discount code to the cart."""

    class Meta:
        model = Order
        fields = ("discount_code",)

    def __init__(self, request, data=None, initial=None):
        """
        Keep a reference to the request so the cart can be pulled
        from it when the discount code is validated.
        """
        super(DiscountForm, self).__init__(data=data, initial=initial)
        self._request = request

    def clean_discount_code(self):
        """
        Check an entered code against the valid discounts for this
        cart, stashing the matching ``DiscountCode`` on the form.
        """
        code = self.cleaned_data.get("discount_code", "")
        cart = self._request.cart
        if code:
            try:
                self._discount = DiscountCode.objects.get_valid(code=code,
                                                                cart=cart)
            except DiscountCode.DoesNotExist:
                raise forms.ValidationError(
                    _("The discount code entered is invalid."))
        return code

    def set_discount(self):
        """
        Assigns the session variables for the discount.
        """
        discount = getattr(self, "_discount", None)
        if discount is None:
            return
        # Wipe any state left over from a previously entered code.
        clear_session(self._request,
                      "free_shipping", "discount_code", "discount_total")
        total = self._request.cart.calculate_discount(discount)
        if discount.free_shipping:
            set_shipping(self._request, _("Free shipping"), 0)
        else:
            # An earlier code may have granted free shipping, so the
            # shipping session vars need resetting as well.
            clear_session(self._request, "shipping_type", "shipping_total")
        session = self._request.session
        session["free_shipping"] = discount.free_shipping
        session["discount_code"] = discount.code
        session["discount_total"] = str(total)
class OrderForm(FormsetForm, DiscountForm):
    """
    Main Form for the checkout process - ModelForm for the Order Model
    with extra fields for credit card. Used across each step of the
    checkout process with fields being hidden where applicable.
    """

    # Current checkout step, carried in a hidden input so POSTs can be
    # forced back onto the right step (see __init__).
    step = forms.IntegerField(widget=forms.HiddenInput())
    same_billing_shipping = forms.BooleanField(required=False, initial=True,
        label=_("My delivery details are the same as my billing details"))
    remember = forms.BooleanField(required=False, initial=True,
        label=_("Remember my address for next time"))
    # Credit card fields are plain form fields (not on the Order model).
    card_name = forms.CharField(label=_("Cardholder name"))
    card_type = forms.ChoiceField(label=_("Card type"),
        widget=forms.RadioSelect,
        choices=make_choices(settings.SHOP_CARD_TYPES))
    card_number = forms.CharField(label=_("Card number"))
    card_expiry_month = forms.ChoiceField(label=_("Card expiry month"),
        initial="%02d" % date.today().month,
        choices=make_choices(["%02d" % i for i in range(1, 13)]))
    # Year choices are assigned in __init__, relative to the current year.
    card_expiry_year = forms.ChoiceField(label=_("Card expiry year"))
    card_ccv = forms.CharField(label=_("CCV"), help_text=_("A security code, "
        "usually the last 3 digits found on the back of your card."))

    class Meta:
        model = Order
        # All billing/shipping model fields except those hidden via the
        # SHOP_HIDE_BILLING_SHIPPING_FIELDS setting, plus extra fields.
        fields = ([f.name for f in Order._meta.fields if
                   (f.name.startswith("billing_detail") or
                    f.name.startswith("shipping_detail")) and
                   f.name.replace("billing_detail_", "").replace("shipping_detail_", "")
                   not in settings.SHOP_HIDE_BILLING_SHIPPING_FIELDS] +
                  ["additional_instructions", "discount_code", "persons_adults", "persons_childs"])

    def __init__(self, request, step, data=None, initial=None, errors=None):
        """
        Setup for each order form step which does a few things:

        - Calls OrderForm.preprocess on posted data
        - Sets up any custom checkout errors
        - Hides the discount code field if applicable
        - Hides sets of fields based on the checkout step
        - Sets year choices for cc expiry field based on current date
        """

        # ``data`` is usually the POST attribute of a Request object,
        # which is an immutable QueryDict. We want to modify it, so we
        # need to make a copy.
        data = copy(data)

        # Force the specified step in the posted data, which is
        # required to allow moving backwards in steps. Also handle any
        # data pre-processing, which subclasses may override.
        if data is not None:
            data["step"] = step
            data = self.preprocess(data)
        if initial is not None:
            initial["step"] = step

        super(OrderForm, self).__init__(request, data=data, initial=initial)
        # Errors assigned by custom checkout-step handlers; raised in clean().
        self._checkout_errors = errors

        # Hide discount code field if it shouldn't appear in checkout,
        # or if no discount codes are active.
        settings.use_editable()
        if not (settings.SHOP_DISCOUNT_FIELD_IN_CHECKOUT and
                DiscountCode.objects.active().exists()):
            self.fields["discount_code"].widget = forms.HiddenInput()

        # Determine which sets of fields to hide for each checkout step.
        # A ``hidden_filter`` function is defined that's used for
        # filtering out the fields to hide.
        is_first_step = step == checkout.CHECKOUT_STEP_FIRST
        is_last_step = step == checkout.CHECKOUT_STEP_LAST
        is_payment_step = step == checkout.CHECKOUT_STEP_PAYMENT
        hidden_filter = lambda f: False
        if settings.SHOP_CHECKOUT_STEPS_SPLIT:
            if is_first_step:
                # Hide cc fields for billing/shipping if steps are split.
                hidden_filter = lambda f: f.startswith("card_")
            elif is_payment_step:
                # Hide non-cc fields for payment if steps are split.
                hidden_filter = lambda f: not f.startswith("card_")
        elif not settings.SHOP_PAYMENT_STEP_ENABLED:
            # Hide all cc fields if payment step is not enabled.
            hidden_filter = lambda f: f.startswith("card_")
        if settings.SHOP_CHECKOUT_STEPS_CONFIRMATION and is_last_step:
            # Hide all fields for the confirmation step.
            hidden_filter = lambda f: True
        for field in filter(hidden_filter, self.fields):
            self.fields[field].widget = forms.HiddenInput()
            self.fields[field].required = False

        # When billing always equals shipping, hide the shipping fields
        # and the "same as billing" checkbox entirely.
        if settings.SHOP_ALWAYS_SAME_BILLING_SHIPPING:
            for field in self.fields:
                if field == 'same_billing_shipping' or field.startswith('shipping_'):
                    self.fields[field].widget = forms.HiddenInput()
                    self.fields[field].required = False

        # Set year choices for cc expiry, relative to the current year.
        year = now().year
        choices = make_choices(list(range(year, year + 21)))
        self.fields["card_expiry_year"].choices = choices

    @classmethod
    def preprocess(cls, data):
        """
        A preprocessor for the order form data that can be overridden
        by custom form classes. The default preprocessor here handles
        copying billing fields to shipping fields if "same" checked.
        """
        if data.get("same_billing_shipping", "") == "on":
            for field in data:
                bill_field = field.replace("shipping_detail", "billing_detail")
                if field.startswith("shipping_detail") and bill_field in data:
                    data[field] = data[bill_field]
        return data

    def clean_card_expiry_year(self):
        """
        Ensure the card expiry doesn't occur in the past.
        """
        try:
            month = int(self.cleaned_data["card_expiry_month"])
            year = int(self.cleaned_data["card_expiry_year"])
        except ValueError:
            # Haven't reached payment step yet.
            return
        n = now()
        if year == n.year and month < n.month:
            raise forms.ValidationError(_("A valid expiry date is required."))
        return str(year)

    def clean(self):
        """
        Raise ``ValidationError`` if any errors have been assigned
        externally, via one of the custom checkout step handlers.
        """
        if self._checkout_errors:
            raise forms.ValidationError(self._checkout_errors)
        # Validate necessary fields are filled since hideable fields are
        # blank=True in the Order model
        for field in self.fields:
            if (field.startswith("billing_detail") or (field.startswith("shipping_detail") and not settings.SHOP_ALWAYS_SAME_BILLING_SHIPPING)) and len(self.data[field]) == 0 and field.replace("billing_detail_", "").replace("shipping_detail_", "") not in settings.SHOP_HIDE_BILLING_SHIPPING_FIELDS:
                self.errors[field] = [_("This field is required.")]
                raise forms.ValidationError(_("Please fill out all fields."))
        return super(OrderForm, self).clean()
#######################
# ADMIN WIDGETS #
#######################
class ImageWidget(forms.FileInput):
    """
    File input widget that also renders a clickable thumbnail linking to
    the full-size image when a value is present.
    """
    def render(self, name, value, attrs):
        # Start from the stock file-input markup.
        markup = super(ImageWidget, self).render(name, value, attrs)
        if not value:
            return mark_safe(markup)
        full_url = u"%s%s" % (settings.MEDIA_URL, value)
        thumb_url = u"%s%s" % (settings.MEDIA_URL, thumbnail(value, 48, 48))
        # Prepend a thumbnail anchored to the original image.
        markup = (u"<a target='_blank' href='%s'>"
                  u"<img style='margin-right:6px;' src='%s'>"
                  u"</a>%s" % (full_url, thumb_url, markup))
        return mark_safe(markup)
class MoneyWidget(forms.TextInput):
    """
    Text input that right-aligns money values and pads them to the
    locale's number of decimal places.
    """
    def render(self, name, value, attrs):
        try:
            amount = float(value)
        except (TypeError, ValueError):
            # Not a number (e.g. empty bound value) - render unchanged.
            pass
        else:
            set_locale()
            frac_digits = localeconv()["frac_digits"]
            value = ("%%.%sf" % frac_digits) % amount
            attrs["style"] = "text-align:right;"
        return super(MoneyWidget, self).render(name, value, attrs)
class ProductAdminFormMetaclass(ModelFormMetaclass):
    """
    Metaclass for the Product Admin form that dynamically assigns each
    of the types of product options as sets of checkboxes for selecting
    which options to use when creating new product variations.
    """
    def __new__(cls, name, bases, attrs):
        # One checkbox group per configured option type, named
        # "option<id>"; choices are populated later by the form's
        # __init__ from the options currently in the database.
        for option_id, option_label in settings.SHOP_OPTION_TYPE_CHOICES:
            attrs["option%s" % option_id] = forms.MultipleChoiceField(
                label=option_label, required=False,
                widget=forms.CheckboxSelectMultiple)
        return super(ProductAdminFormMetaclass, cls).__new__(
            cls, name, bases, attrs)
class ProductAdminForm(with_metaclass(ProductAdminFormMetaclass,
                                      forms.ModelForm)):
    """
    Admin form for the Product model.
    """

    class Meta:
        model = Product

    def __init__(self, *args, **kwargs):
        """
        Set the choices for each of the fields for product options.
        Also remove the current instance from choices for related and
        upsell products (if enabled).
        """
        super(ProductAdminForm, self).__init__(*args, **kwargs)
        # Fill the dynamic "option<id>" checkbox fields created by the
        # metaclass with the option values currently in use.
        for field_name, values in list(ProductOption.objects.as_fields().items()):
            self.fields[field_name].choices = make_choices(values)
        instance = kwargs.get("instance")
        if instance:
            # A product must never be related to / upsold as itself.
            others = Product.objects.exclude(id=instance.id)
            if settings.SHOP_USE_RELATED_PRODUCTS:
                self.fields["related_products"].queryset = others
            if settings.SHOP_USE_UPSELL_PRODUCTS:
                self.fields["upsell_products"].queryset = others
class ProductVariationAdminForm(forms.ModelForm):
    """
    Ensure the list of images for the variation are specific to the
    variation's product.
    """
    def __init__(self, *args, **kwargs):
        super(ProductVariationAdminForm, self).__init__(*args, **kwargs)
        # Guard against ``instance`` being present but None (as happens
        # for unsaved "add" rows): the previous ``"instance" in kwargs``
        # check would then raise AttributeError on ``None.product``.
        instance = kwargs.get("instance")
        if instance is not None:
            # Limit selectable images to those of this variation's product.
            product_images = self.fields["image"].queryset.filter(
                product=instance.product)
            self.fields["image"].queryset = product_images
class ProductVariationAdminFormset(BaseInlineFormSet):
    """
    Ensure no more than one variation is checked as default.
    """
    def clean(self):
        super(ProductVariationAdminFormset, self).clean()
        # Count forms with validated data that have "default" ticked.
        defaults = 0
        for form in self.forms:
            if hasattr(form, "cleaned_data"):
                if form.cleaned_data.get("default", False):
                    defaults += 1
        if defaults > 1:
            error = _("Only one variation can be checked as the default.")
            raise forms.ValidationError(error)
class DiscountAdminForm(forms.ModelForm):
    """
    Ensure only one discount field is given a value and if not, assign
    the error to the first discount field so that it displays correctly.
    """
    def clean(self):
        discount_fields = [name for name in self.fields
                           if name.startswith("discount_")]
        # Collect only the discount fields that were actually filled in.
        entered = [self.cleaned_data.get(name) for name in discount_fields
                   if self.cleaned_data.get(name)]
        if len(entered) > 1:
            # Attach the error to the first discount field so the admin
            # renders it next to the group of reduction inputs.
            message = _("Please enter a value for only one type of reduction.")
            self._errors[discount_fields[0]] = self.error_class([message])
        return super(DiscountAdminForm, self).clean()
class SpecialPriceAdminForm(forms.ModelForm):
    """
    Special price checks for admin form.
    """
    def clean(self):
        # Placeholder hook: no cross-field checks yet; passes the data
        # through unchanged. NOTE(review): super().clean() is not called,
        # so ModelForm's own clean step is bypassed - confirm intentional.
        return self.cleaned_data
class ReservableProductAvailabilityAdminForm(forms.ModelForm):
    """
    Reservable product availabilities admin form.
    """
    def clean(self):
        # Placeholder hook: no availability validation yet; passes the
        # data through unchanged (ModelForm.clean() is not invoked).
        return self.cleaned_data
|
|
# The MIT License (MIT)
#
# Copyright (c) 2016 Francis T. O'Donovan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Convert vCard-formatted string to the JSON format expected by Name Shark."""
# coding=utf-8
import base64
import json
import collections
import argparse
import vobject
NAMES = collections.namedtuple('Names', ['first_name', 'surname'])
def get_pp_names(fn_field):
    """
    Use probablepeople to extract firstname/surname from vCard 'fn' field.

    Returns ``Names(None, None)`` for any part that could not be
    extracted (including when probablepeople is not installed); callers
    such as ``get_names`` fall back to naive splitting in that case.

    :param fn_field: the input vCard 'fn' field.
    :return: a namedtuple containing the first name and surname.

    >>> get_pp_names('John Smith')  # doctest: +SKIP (needs probablepeople)
    Names(first_name='John', surname='Smith')
    """
    first_name = None
    surname = None

    try:
        import probablepeople as pp  # not python 2.6 compatible

        # Use probablepeople to tag the parts of the name.
        full_name_dict = pp.tag(fn_field)[0]

        if 'GivenName' in full_name_dict:
            # If probablepeople has successfully extracted the first name,
            # use it.
            first_name = full_name_dict['GivenName']

        if 'Surname' in full_name_dict:
            # If probablepeople has successfully extracted the surname,
            # use it.
            surname = full_name_dict['Surname']
    except (ImportError, SyntaxError, TypeError) as error:
        # Best-effort: report and fall through with whatever was found.
        print(error)

    return NAMES(first_name, surname)
def get_names(fn_field):
    """
    Extract the first name and surname from a vCard 'fn' field.

    Tries probablepeople first (via ``get_pp_names``) and falls back to
    naive whitespace splitting for any part it could not determine.

    :param fn_field: the input vCard 'fn' field.
    :return: a namedtuple containing the first name and surname.

    >>> get_names('John Smith')
    Extracting data for John Smith
    Names(first_name='John', surname='Smith')
    """
    names = get_pp_names(fn_field)
    first_name = names.first_name
    surname = names.surname

    try:
        fn_field_split = fn_field.split(' ')
    except (TypeError, AttributeError):
        # Not a string (e.g. None) - treat as a single empty token.
        fn_field_split = ['']

    if first_name is None:
        # If we can't get first name from probablepeople, assume it's the
        # first part of the string.
        first_name = fn_field_split[0]

        if first_name == surname:
            # Single-token name already identified as the surname:
            # don't duplicate it into the first name.
            first_name = ''

    if surname is None:
        # If we can't get surname from probablepeople, assume it's the
        # second part of the string, if that exists.
        surname = fn_field_split[1] if len(fn_field_split) > 1 else ''

    print('Extracting data for ' + first_name + ' ' + surname)

    return NAMES(first_name, surname)
def get_photo(photo):
    """
    Encode a vCard photo field as a JPEG data-URI string.

    :param photo: raw photo bytes from the vCard, or None.
    :return: a 'data:image/jpeg;base64,...' string, or '' when there is
        no photo.
    """
    if photo is None:
        return ''
    encoded = base64.b64encode(photo).decode('utf8')
    return 'data:image/jpeg;base64,' + encoded
def extract_contact_from_component(component):
    """
    Build a Name Shark contact dict from a single vCard component.

    :param component: a parsed vCard component.
    :return: a dict with 'first', 'last', 'photoData' and 'details' keys.
    """
    first, last = get_names(component.getChildValue('fn'))
    photo_data = get_photo(component.getChildValue('photo'))
    if not photo_data:
        print(
            'Warning: Missing photo for ' + first + ' ' + last + '...!',
        )
    return {
        'first': first,
        'last': last,
        'photoData': photo_data,
        'details': '',
    }
def extract_contacts_from_vcard(vcard):
    """
    Extract the contact info for every component in a vCard string.

    :param vcard: the vCard text to convert.
    :return: a list of contact dicts, one per vCard component.
    """
    return [extract_contact_from_component(component)
            for component in vobject.readComponents(vcard)]
def convert_to_nameshark(group_name, contacts):
    """
    Serialise a list of contact dicts into Name Shark's JSON format.

    :param group_name: the Name Shark group to use.
    :param contacts: the list of contact dicts extracted from a vCard.
    :return: the JSON string (sorted keys, 4-space indent).
    """
    payload = {'name': group_name, 'contacts': contacts}
    return json.dumps(payload, sort_keys=True, indent=4)
def vcard_to_nameshark(vcard, group_name):
    """
    Convert vCard-formatted string to the JSON format expected by Name Shark.

    :param vcard: the vCard text to convert.
    :param group_name: the Name Shark group to use.
    :return: JSON version of vCard input.
    """
    return convert_to_nameshark(group_name, extract_contacts_from_vcard(vcard))
def main():
    """
    Command-line entry point: read the vCard file named by the first
    argument and write the converted JSON to '<group>.json'.

    :return: None
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('file', help='the input file')
    parser.add_argument('group', help='the output group name')
    args = parser.parse_args()

    with open(args.file, 'r') as input_file:
        vcard_text = input_file.read()

    json_str = vcard_to_nameshark(vcard_text, args.group)

    with open(args.group + '.json', 'w') as output_file:
        output_file.write(json_str)
# Allow the module to be run directly as a script.
if __name__ == '__main__':
    main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for partitioned_variables.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class PartitionerCreatorsTest(test.TestCase):
  """Tests for the partitioner factories in ``partitioned_variables``:
  fixed_size_partitioner, variable_axis_size_partitioner and
  min_max_variable_partitioner."""

  def testFixedSizePartitioner(self):
    """fixed_size_partitioner splits axis 0 into exactly 5 shards."""
    with self.test_session():
      partitioner = partitioned_variables.fixed_size_partitioner(5, axis=0)
      with variable_scope.variable_scope("root", partitioner=partitioner):
        v0 = variable_scope.get_variable(
            "v0", dtype=dtypes.float32, shape=(10, 10))
        v0_list = v0._get_variable_list()
        v0_part = v0._get_partitions()
        self.assertEqual(len(v0_list), 5)
        self.assertAllEqual(v0_part, (5, 1))

  def testFixedSizePartitionerInt64(self):
    """fixed_size_partitioner also handles non-float dtypes (int64)."""
    with self.test_session():
      partitioner = partitioned_variables.fixed_size_partitioner(4, axis=0)
      with variable_scope.variable_scope("root", partitioner=partitioner):
        v0 = variable_scope.get_variable("v0", dtype=dtypes.int64, shape=[20])
        v0_list = v0._get_variable_list()
        self.assertEqual(len(v0_list), 4)

  def testResourceFixedSizePartitioner(self):
    """Same as testFixedSizePartitioner, but with resource variables."""
    with self.test_session():
      partitioner = partitioned_variables.fixed_size_partitioner(5, axis=0)
      with variable_scope.variable_scope(
          "root", partitioner=partitioner, use_resource=True):
        v0 = variable_scope.get_variable(
            "v0", dtype=dtypes.float32, shape=(10, 10))
        v0_list = v0._get_variable_list()
        v0_part = v0._get_partitions()
        self.assertEqual(len(v0_list), 5)
        self.assertAllEqual(v0_part, (5, 1))

  def _testVariableAxisSizePartitioner(self,
                                       name,
                                       axis,
                                       max_shard_bytes,
                                       expected_axis_shards,
                                       expected_partitions,
                                       max_shards=None):
    """Helper: partition a (4, 8, 16, 32) float32 variable along ``axis``
    and check the shard count and full partition tuple."""
    partitioner = partitioned_variables.variable_axis_size_partitioner(
        axis=axis, max_shard_bytes=max_shard_bytes, max_shards=max_shards)

    with variable_scope.variable_scope("root", partitioner=partitioner):
      v0 = variable_scope.get_variable(
          name, dtype=dtypes.float32, shape=(4, 8, 16, 32))
      v0_list = v0._get_variable_list()
      v0_part = v0._get_partitions()
      self.assertEqual(len(v0_list), expected_axis_shards)
      self.assertAllEqual(v0_part, expected_partitions)

  def testVariableAxisSizePartitioner(self):
    """Exercises variable_axis_size_partitioner over several axes,
    byte budgets, max_shards bounds, and a string dtype."""
    with self.test_session():
      # Create a partitioned variable of shape (4, 8, 16, 32) type float32
      # Bytes per slice along the given axes:

      # 8 * 16 * 32 * sizeof(float32) = 16384 / slice on axis 0
      # 4 * 16 * 32 * sizeof(float32) = 8192 / slice on axis 1
      # 4 * 8 * 32 * sizeof(float32) = 4096 / slice on axis 2
      # 4 * 8 * 16 * sizeof(float32) = 2048 / slice on axis 3

      # Now partition it in different ways...

      # No need to slice: bytes_per_slice * dim0 = 65536 < max_shard_bytes
      self._testVariableAxisSizePartitioner(
          "v0",
          axis=0,
          max_shard_bytes=131072,
          expected_axis_shards=1,
          expected_partitions=(1, 1, 1, 1))

      # Slice exactly once: bytes_per_slice * dim1 = 65536 = max_shard_bytes
      self._testVariableAxisSizePartitioner(
          "v1",
          axis=1,
          max_shard_bytes=65536,
          expected_axis_shards=1,
          expected_partitions=(1, 1, 1, 1))

      # Slice into 2 parts:
      # bytes_per_slice = 4096
      # slices_per_shard = 32768 / 4096 = 8
      # axis_shards = 16 / 8 = 2
      self._testVariableAxisSizePartitioner(
          "v2",
          axis=2,
          max_shard_bytes=32768,
          expected_axis_shards=2,
          expected_partitions=(1, 1, 2, 1))

      # This partitioner makes sure we maximize the number of shards along
      # axis 3. Slice it into 32 parts:
      # bytes_per_slice = 2048
      # slices_per_shard = 2048 / 2048 = 1
      # axis_shards = 32 / 1 = 32
      self._testVariableAxisSizePartitioner(
          "v3a",
          axis=3,
          max_shard_bytes=2048,
          expected_axis_shards=32,
          expected_partitions=(1, 1, 1, 32))

      # This partitioner makes sure we do not go past the bound of allowable
      # number of shards along axis 3.
      # Slice into 32 parts:
      # bytes_per_slice = 2048
      # slices_per_shard = max(1, 1024 / 2048) = 1
      # axis_shards = 32 / 1 = 32
      # Slice into max of 32 parts because: max_shard_bytes < bytes_per_slice
      self._testVariableAxisSizePartitioner(
          "v3b",
          axis=3,
          max_shard_bytes=1024,
          expected_axis_shards=32,
          expected_partitions=(1, 1, 1, 32))

      # Specify max_shards so that it won't affect sharding.
      self._testVariableAxisSizePartitioner(
          "v3c",
          axis=3,
          max_shard_bytes=1024,
          expected_axis_shards=32,
          expected_partitions=(1, 1, 1, 32),
          max_shards=33)

      # Specify max_shards so that it will affect sharding.
      self._testVariableAxisSizePartitioner(
          "v3d",
          axis=3,
          max_shard_bytes=1024,
          expected_axis_shards=2,
          expected_partitions=(1, 1, 1, 2),
          max_shards=2)

      # Use the partitioner with strings
      partitioner_axis3_str = partitioned_variables.variable_axis_size_partitioner(  # pylint: disable=line-too-long
          axis=3,
          max_shard_bytes=32768,
          bytes_per_string_element=8)

      with variable_scope.variable_scope(
          "root", partitioner=partitioner_axis3_str):
        v3str = variable_scope.get_variable(
            "v3str",
            initializer=np.array([""] * 4 * 8 * 16 * 32).reshape(4, 8, 16, 32),
            dtype=dtypes.string,
            shape=(4, 8, 16, 32))
        v3str_list = v3str._get_variable_list()
        v3str_part = v3str._get_partitions()

        # Now the estimated bytes_per_slice = 4*8*16*bytes_per_string_element
        # which is equal to 4096.  Setting a max_shard_bytes of 32768
        # and we should get a split of 4.
        # Slice into 4 parts:
        # bytes_per_slice = 4096
        # slices_per_shard = 32768 / 4096 = 8
        # axis_shards = 32 / 8 = 4
        self.assertEqual(len(v3str_list), 4)
        self.assertAllEqual(v3str_part, (1, 1, 1, 4))

  def _testMinMaxVariablePartitioner(self, max_partitions, axis, min_slice_size,
                                     var_name, var_shape, expected_axis_shards,
                                     expected_partitions):
    """Helper: partition a float32 variable of ``var_shape`` with
    min_max_variable_partitioner and check the resulting sharding."""
    partitioner = partitioned_variables.min_max_variable_partitioner(
        max_partitions=max_partitions, axis=axis, min_slice_size=min_slice_size)
    with variable_scope.variable_scope("root", partitioner=partitioner):
      v0 = variable_scope.get_variable(
          var_name, dtype=dtypes.float32, shape=var_shape)
      v0_list = v0._get_variable_list()
      v0_part = v0._get_partitions()
      self.assertEqual(len(v0_list), expected_axis_shards)
      self.assertAllEqual(v0_part, expected_partitions)

  def testMinMaxVariablePartitioner(self):
    """Exercises min_max_variable_partitioner across slice sizes,
    max_partitions bounds, axes, and shape-limited cases."""
    with self.test_session():
      # Partitioning a variable of shape=[2048] with a minimum of 2K per slice.
      self._testMinMaxVariablePartitioner(
          max_partitions=100,
          axis=0,
          min_slice_size=2 << 10,
          var_name="v0_0",
          var_shape=[2048],
          expected_axis_shards=4,
          expected_partitions=[4])

      # Partitioning a variable of shape=[2048, 1024] with a minimum of 256K per
      # slice.
      self._testMinMaxVariablePartitioner(
          max_partitions=100,
          axis=0,
          min_slice_size=256 << 10,
          var_name="v0",
          var_shape=[2048, 1024],
          expected_axis_shards=32,
          expected_partitions=[32, 1])

      # max_partitions restricts partitioning of the variable.
      self._testMinMaxVariablePartitioner(
          max_partitions=16,
          axis=0,
          min_slice_size=256 << 10,
          var_name="v1_max",
          var_shape=[2048, 1024],
          expected_axis_shards=16,
          expected_partitions=[16, 1])
      self._testMinMaxVariablePartitioner(
          max_partitions=1,
          axis=0,
          min_slice_size=256 << 10,
          var_name="v2_max",
          var_shape=[2048, 1024],
          expected_axis_shards=1,
          expected_partitions=[1, 1])

      # Reducing/Increasing min_slice_size proportionately increases/reduces the
      # number of partitions.
      self._testMinMaxVariablePartitioner(
          max_partitions=100,
          axis=0,
          min_slice_size=128 << 10,
          var_name="v3_slice",
          var_shape=[2048, 1024],
          expected_axis_shards=64,
          expected_partitions=[64, 1])
      self._testMinMaxVariablePartitioner(
          max_partitions=100,
          axis=0,
          min_slice_size=512 << 10,
          var_name="v4_slice",
          var_shape=[2048, 1024],
          expected_axis_shards=16,
          expected_partitions=[16, 1])

      # Partitioning the variable along a different axis.
      self._testMinMaxVariablePartitioner(
          max_partitions=100,
          axis=1,
          min_slice_size=256 << 10,
          var_name="v5_axis",
          var_shape=[64, 1024, 1, 3],
          expected_axis_shards=3,
          expected_partitions=[1, 3, 1, 1])
      self._testMinMaxVariablePartitioner(
          max_partitions=100,
          axis=3,
          min_slice_size=256 << 10,
          var_name="v6_axis",
          var_shape=[64, 1024, 1, 3],
          expected_axis_shards=3,
          expected_partitions=[1, 1, 1, 3])

      # Can not partition the variable more than what its shape allows.
      self._testMinMaxVariablePartitioner(
          max_partitions=100,
          axis=0,
          min_slice_size=256 << 10,
          var_name="v7_shape",
          var_shape=[16, 128, 1024],
          expected_axis_shards=16,
          expected_partitions=[16, 1, 1])
      self._testMinMaxVariablePartitioner(
          max_partitions=100,
          axis=0,
          min_slice_size=256 << 10,
          var_name="v8_shape",
          var_shape=[4, 512, 1024],
          expected_axis_shards=4,
          expected_partitions=[4, 1, 1])
def _IotaInitializer(shape, dtype=dtypes.float32, partition_info=None):
  """Deterministic initializer: values 0..n-1 along the last axis, with
  each leading-axis index i scaling its row by 10**i.

  ``partition_info`` is accepted (and ignored) so this can be passed as
  a partitioned-variable initializer.
  """
  assert dtype == dtypes.float32
  if len(shape) == 1:
    # Note: returns a ``range`` object; consumers treat it as a sequence.
    return range(shape[0])
  else:
    val = _IotaInitializer(shape[1:], dtype)
    return [[(10**i) * v for v in val] for i in range(shape[0])]
class PartitionedVariablesTestCase(test.TestCase):
def _TestSaveSpec(self, slices, expected_specs):
self.assertEqual(len(expected_specs), len(slices))
for i in xrange(len(expected_specs)):
self.assertEquals(expected_specs[i], slices[i]._save_slice_info.spec)
def testVecConstantInit(self):
with self.test_session():
rnd_par = constant_op.constant([1, 2, 3, 4])
vs = partitioned_variables.create_partitioned_variables([4], [4], rnd_par)
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 0).eval()
rnd = rnd_par.eval()
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.int32] * 4, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, ["4 0,1", "4 1,1", "4 2,1", "4 3,1"])
def testConstantInit(self):
with self.test_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
vs = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 1).eval()
rnd = rnd_par.eval()
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.int32] * 2, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, ["2 4 0,2:0,2", "2 4 0,2:2,2"])
def _testNameHelper(self, use_resource=False):
with self.test_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
with variable_scope.variable_scope("hi", use_resource=use_resource):
vs1 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
variables.global_variables_initializer().run()
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
self.assertEqual("hi/PartitionedVariable", var1_name)
self.assertEqual("hi/PartitionedVariable_1", var2_name)
self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
# Test same variable.
with self.test_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
with variable_scope.variable_scope(
"hola", use_resource=use_resource) as vs:
vs1 = partitioned_variables.create_partitioned_variables(
[2, 4], [1, 2], rnd_par, dtype=dtypes.int32)
with variable_scope.variable_scope(
vs, reuse=True, use_resource=use_resource):
vs2 = partitioned_variables.create_partitioned_variables(
[2, 4], [1, 2], rnd_par, dtype=dtypes.int32)
variables.global_variables_initializer().run()
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
self.assertEqual("hola/PartitionedVariable", var1_name)
self.assertEqual("hola/PartitionedVariable", var2_name)
self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
# Test name_scope
with self.test_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
with ops.name_scope("ola"):
vs1 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
variables.global_variables_initializer().run()
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
# Currently, the name scope 'ola' has no effect.
self.assertEqual("PartitionedVariable", var1_name)
self.assertEqual("PartitionedVariable_1", var2_name)
self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
def testName(self):
self._testNameHelper(use_resource=False)
def testResourceName(self):
self._testNameHelper(use_resource=True)
def testRandomInitValue(self):
with self.test_session():
rnd = variables.Variable(random_ops.random_uniform([200, 40]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, 10], rnd.initialized_value())
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 1).eval()
rnd = rnd.eval()
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.float32] * 10, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, [
"200 40 0,200:0,4", "200 40 0,200:4,4", "200 40 0,200:8,4",
"200 40 0,200:12,4", "200 40 0,200:16,4", "200 40 0,200:20,4",
"200 40 0,200:24,4", "200 40 0,200:28,4", "200 40 0,200:32,4",
"200 40 0,200:36,4"
])
def testRandomInitUnevenPartitions(self):
with self.test_session():
rnd = variables.Variable(
random_ops.random_uniform([20, 43], dtype=dtypes.float64))
var_lists = [
partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, i], rnd.initialized_value())
for i in xrange(1, 10)
]
variables.global_variables_initializer().run()
rnd_val = rnd.eval()
# Only check the slice save specs for the first 5 tf.
save_specs = [
# One slice
["20 43 0,20:0,43"],
# Two slices
["20 43 0,20:0,22", "20 43 0,20:22,21"],
# Three slices
["20 43 0,20:0,15", "20 43 0,20:15,14", "20 43 0,20:29,14"],
# Four slices
[
"20 43 0,20:0,11", "20 43 0,20:11,11", "20 43 0,20:22,11",
"20 43 0,20:33,10"
],
# Five slices
[
"20 43 0,20:0,9", "20 43 0,20:9,9", "20 43 0,20:18,9",
"20 43 0,20:27,8", "20 43 0,20:35,8"
]
]
for i, vs in enumerate(var_lists):
var_val = array_ops.concat(vs, 1).eval()
self.assertAllClose(rnd_val, var_val)
self.assertEqual([dtypes.float64] * len(vs),
[v.dtype.base_dtype for v in vs])
if i < len(save_specs):
self._TestSaveSpec(vs, save_specs[i])
def testDegenerate(self):
with self.test_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, 1], rnd.initialized_value())
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 0).eval()
rnd = rnd.eval()
self.assertAllClose(rnd, val)
self._TestSaveSpec(vs, ["10 43 0,10:0,43"])
def testSliceSizeOne(self):
with self.test_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [10, 1], rnd.initialized_value())
variables.global_variables_initializer().run()
val = array_ops.concat(vs, 0).eval()
rnd = rnd.eval()
self.assertAllClose(rnd, val)
self._TestSaveSpec(vs, [
"10 43 0,1:0,43", "10 43 1,1:0,43", "10 43 2,1:0,43",
"10 43 3,1:0,43", "10 43 4,1:0,43", "10 43 5,1:0,43",
"10 43 6,1:0,43", "10 43 7,1:0,43", "10 43 8,1:0,43", "10 43 9,1:0,43"
])
def testIotaInitializer(self):
self.assertAllClose([0., 1., 2., 3.], _IotaInitializer([4]))
self.assertAllClose([[0., 1.], [0., 10.], [0., 100.], [0., 1000.]],
_IotaInitializer([4, 2]))
with self.test_session():
vs = partitioned_variables.create_partitioned_variables([13, 5], [3, 1],
_IotaInitializer)
variables.global_variables_initializer().run()
slice0 = _IotaInitializer([5, 5])
slice1 = _IotaInitializer([4, 5])
slice2 = _IotaInitializer([4, 5])
val = array_ops.concat(vs, 0).eval()
self.assertAllClose(slice0 + slice1 + slice2, val)
self._TestSaveSpec(vs, ["13 5 0,5:0,5", "13 5 5,4:0,5", "13 5 9,4:0,5"])
def testRandomInitializer(self):
# Sanity check that the slices uses a different seed when using a random
# initializer function.
with self.test_session():
var0, var1 = partitioned_variables.create_partitioned_variables(
[20, 12], [1, 2], init_ops.random_uniform_initializer())
variables.global_variables_initializer().run()
val0, val1 = var0.eval().flatten(), var1.eval().flatten()
self.assertTrue(np.linalg.norm(val0 - val1) > 1e-6)
# Negative test that proves that slices have the same values if
# the random initializer uses a seed.
with self.test_session():
var0, var1 = partitioned_variables.create_partitioned_variables(
[20, 12], [1, 2], init_ops.random_uniform_initializer(seed=201))
variables.global_variables_initializer().run()
val0, val1 = var0.eval().flatten(), var1.eval().flatten()
self.assertAllClose(val0, val1)
def testSomeErrors(self):
with self.test_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43]))
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10], [1, 1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 20], [1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [1, 2, 3], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [11, 1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [20, 1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [1, 50], rnd.initialized_value())
  def testControlDepsNone(self):
    """Partitioned-variable reads must not pick up ambient control deps."""
    with self.test_session() as session:
      c = constant_op.constant(1.0)
      with ops.control_dependencies([c]):
        # d get the control dependency.
        d = constant_op.constant(2.0)
        # Partitioned variables do not.
        var_x = variable_scope.get_variable(
            "x",
            shape=[2],
            initializer=init_ops.ones_initializer(),
            partitioner=partitioned_variables.variable_axis_size_partitioner(4))
        # Snapshot the graph so the ops created by the read below can be
        # isolated by set difference.
        ops_before_read = session.graph.get_operations()
        var_x.as_tensor() # Caches the ops for subsequent reads.
        reading_ops = [
            op for op in session.graph.get_operations()
            if op not in ops_before_read
        ]
      self.assertEqual([c.op], d.op.control_inputs)
      # Tests that no control dependencies are added to reading a partitioned
      # variable which is similar to reading a variable.
      for op in reading_ops:
        self.assertEqual([], op.control_inputs)
  def testConcat(self):
    """_concat() must run under the enclosing control-dependency scope."""
    with self.test_session() as session:
      var_x = variable_scope.get_variable(
          "x",
          initializer=constant_op.constant([1., 2.]),
          partitioner=partitioned_variables.variable_axis_size_partitioner(4))
      c = constant_op.constant(1.0)
      with ops.control_dependencies([c]):
        # Snapshot the graph so the ops created by _concat() can be isolated.
        ops_before_concat = session.graph.get_operations()
        value = var_x._concat() # pylint: disable=protected-access
        concat_ops = [
            op for op in session.graph.get_operations()
            if op not in ops_before_concat
        ]
        concat_control_inputs = [
            ci for op in concat_ops for ci in op.control_inputs
        ]
        # Unlike plain reads, concatenation must inherit the control dep on c.
        self.assertTrue(
            c.op in concat_control_inputs,
            "var_x._concat() should get control dependencies from its scope.")
      variables.global_variables_initializer().run()
      # The concatenated value equals the cached as_tensor() view.
      self.assertAllClose(value.eval(), var_x.as_tensor().eval())
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
|
from __future__ import absolute_import, print_function, division
import numpy
import theano
import theano.tensor as T
from six.moves import xrange
from theano.gof import local_optimizer
from theano.sandbox.cuda.basic_ops import as_cuda_ndarray_variable
from theano.misc import strutil
from theano.tensor.nnet.ConvGrad3D import ConvGrad3D
from theano.sandbox.cuda.opt import gpu_optimizer
from theano.sandbox.cuda import (CudaNdarrayType, HostFromGpu,
host_from_gpu, GpuOp)
class GpuConvGrad3D(GpuOp):
    """
    GPU implementation of ConvGrad3D: the gradient of 3D convolution
    with respect to the weights W.
    """
    def make_node(self, V, d, WShape, dCdH):
        """
        Build the Apply node; output is a 5-d CudaNdarray (the weight grad).

        Parameters
        ----------
        V
            Visible.
        d
            Strides.
        WShape
            Shapes of the weights -> shape of this op output.
        dCdH
            Other input with what V will be convolved.
        """
        V_ = as_cuda_ndarray_variable(V)
        d_ = T.as_tensor_variable(d)
        WShape_ = T.as_tensor_variable(WShape)
        dCdH_ = as_cuda_ndarray_variable(dCdH)
        # No broadcastable dimensions in the 5-d weight-gradient output.
        broad = (False,) * 5
        return theano.Apply(self, inputs=[V_, d_, WShape_, dCdH_],
                            outputs=[CudaNdarrayType(dtype=V_.dtype,
                                                     broadcastable=broad)()])
    # NOTE: trailing underscore keeps this from overriding Op.perform; it is a
    # slow pure-Python reference implementation kept for debugging only.
    def perform_(self, node, inputs, output_storage):
        V, d, WShape, dCdH = inputs
        print("GpuConvGrad3D python code (warning not updated to new format)")
        # partial C / partial W[j,z,k,l,m] = sum_i sum_p sum_q sum_r (partial C /partial H[i,j,p,q,r] ) * V[i,z,dr*p+k,dc*q+l,dt*r+m]
        batchSize = dCdH.shape[0]
        outputHeight = dCdH.shape[2]
        outputWidth = dCdH.shape[3]
        outputDur = dCdH.shape[4]
        assert V.shape[0] == batchSize
        dr, dc, dt = d
        dCdW = numpy.zeros(WShape, dtype=V.dtype)
        # block
        for j in xrange(0, WShape[0]):
            for z in xrange(0, WShape[1]):
                for k in xrange(0, WShape[2]):
                    for l in xrange(0, WShape[3]):
                        # threads
                        for m in xrange(0, WShape[4]):
                            # thread
                            for i in xrange(0, batchSize):
                                for p in xrange(0, outputHeight):
                                    for q in xrange(0, outputWidth):
                                        for r in xrange(0, outputDur):
                                            dCdW[j, z, k, l, m] += dCdH[
                                                i, j, p, q, r] * \
                                                V[i, z, dr * p + k,
                                                  dc * q + l,
                                                  dt * r + m]
        output_storage[0][0] = dCdW
    def c_code(self, node, nodename, inputs, outputs, sub):
        """Generate the host-side C code: validate inputs, allocate the output
        and launch the convgrad_rows_stack CUDA kernel."""
        V, d, WShape, dCdH = inputs
        fail = sub['fail']
        dCdW = outputs[0]
        # BUGFIX below (duration check): the error message previously reported
        # filterWidth/vidWidth instead of filterDur/vidDur.
        codeSource =  """
            ///////////// < code generated by GpuConvGrad3D >
            //printf("\t\t\t\tGpuConvGrad3DW c code\\n");
            //Check dimensionality of inputs
            if (CudaNdarray_NDIM(%(dCdH)s) != 5)
            {
                PyErr_Format(PyExc_ValueError, "GpuConvGrad3D: dCdH must be a 5-d CudaNdArray");
                %(fail)s
            }
            if (CudaNdarray_NDIM(%(V)s) != 5)
            {
                PyErr_Format(PyExc_ValueError, "GpuConvGrad3D: V must be a 5-d CudaNdArray");
                %(fail)s
            }
            if (CudaNdarray_NDIM(%(WShape)s) != 1)
            {
                PyErr_Format(PyExc_ValueError, "GpuConvGrad3D: WShape must be a 1-d CudaNdArray");
                %(fail)s
            }
            if (PyArray_NDIM(%(d)s) != 1)
            {
                PyErr_Format(PyExc_ValueError, "GpuConvGrad3D: d must be a 1-d CudaNdArray");
                %(fail)s
            }
            if (PyArray_DIMS(%(d)s)[0] != 3)
            {
                PyErr_Format(PyExc_ValueError, "GpuConvGrad3D: 3 stride lengths arguments expected(for row, col, and time) but %%li were given", PyArray_DIMS(%(d)s)[0]);
                %(fail)s
            }
            { // for fail
            //Read and check sizes of inputs
            const int batchSize = CudaNdarray_HOST_DIMS(%(V)s)[0];
            if (PyArray_DIMS(%(WShape)s)[0] != 5)
            {
                PyErr_Format(PyExc_ValueError, "GpuConvGrad3D: WShape must specify a 5-d shape");
                %(fail)s
            }
            if (!PyArray_ISCONTIGUOUS(%(WShape)s))
            {
                PyErr_Format(PyExc_ValueError, "GpuConvGrad3D: WShape must be contiguous");
                %(fail)s
            }
            { //for fail
            dtype_%(WShape)s * WShape = (dtype_%(WShape)s *) PyArray_DATA(%(WShape)s);
            const int outputChannels =  WShape[0];
            const int inputChannels = CudaNdarray_HOST_DIMS(%(V)s)[4];
            if (WShape[4] != inputChannels)
            {
                PyErr_Format(PyExc_ValueError, "ConvGrad3D: W operates on a %%d channel image but the image has %%d channels",WShape[4],inputChannels);
                %(fail)s
            }
            { //extra scope so fail works
            const int filterHeight = WShape[1];
            const int filterWidth = WShape[2];
            const int filterDur = WShape[3];
            const int vidHeight = CudaNdarray_HOST_DIMS(%(V)s)[1];
            const int vidWidth = CudaNdarray_HOST_DIMS(%(V)s)[2];
            const int vidDur = CudaNdarray_HOST_DIMS(%(V)s)[3];
            if (vidHeight < filterHeight)
            {
                PyErr_Format(PyExc_ValueError, "W has a height of %%i but V is only %%i pixels tall", filterHeight, vidHeight);
                %(fail)s
            }
            if (vidWidth < filterWidth)
            {
                PyErr_Format(PyExc_ValueError, "GpuConvGrad3D: W has a width of %%i but V is only %%i pixels wide", filterWidth, vidWidth);
                %(fail)s
            }
            if (vidDur < filterDur)
            {
                PyErr_Format(PyExc_ValueError, "GpuConvGrad3D: W has a duration of %%i but V is only %%i pixels long", filterDur, vidDur);
                %(fail)s
            }
            { // extra scope so fail works
            //Read and check stride arguments
            const int dr = *(dtype_%(d)s*)PyArray_GETPTR1(%(d)s,0);
            const int dc = *(dtype_%(d)s*)PyArray_GETPTR1(%(d)s,1);
            const int dt = *(dtype_%(d)s*)PyArray_GETPTR1(%(d)s,2);
            if (dr <= 0 || dc <= 0 || dt <= 0)
            {
                PyErr_Format(PyExc_ValueError, "GpuConvGrad3D: Strides must all be positive but are %%i, %%i, %%i",dr,dc,dt);
                %(fail)s
            }
            //Compute correctl sized of output
            const int outputHeight = int( (vidHeight - filterHeight) / dr )+1;
            const int outputWidth = int( (vidWidth - filterWidth) / dc )+1;
            const int outputDur = int( (vidDur - filterDur) / dt ) +1;
            if (CudaNdarray_HOST_DIMS(%(dCdH)s)[0] != batchSize ||
                CudaNdarray_HOST_DIMS(%(dCdH)s)[4] != outputChannels ||
                CudaNdarray_HOST_DIMS(%(dCdH)s)[1] != outputHeight ||
                CudaNdarray_HOST_DIMS(%(dCdH)s)[2] != outputWidth ||
                CudaNdarray_HOST_DIMS(%(dCdH)s)[3] != outputDur)
            {
                PyErr_Format(PyExc_ValueError, "dCdH is the wrong size, expected (%%i,%%i,%%i,%%i,%%i), got (%%i,%%i,%%i,%%i,%%i)", batchSize, outputHeight, outputWidth, outputDur, outputChannels, CudaNdarray_HOST_DIMS(%(dCdH)s)[0], CudaNdarray_HOST_DIMS(%(dCdH)s)[1], CudaNdarray_HOST_DIMS(%(dCdH)s)[2] ,CudaNdarray_HOST_DIMS(%(dCdH)s)[3], CudaNdarray_HOST_DIMS(%(dCdH)s)[4] );
                %(fail)s
            }
            { // extra scope for fail
            npy_intp dims[5];
            dims[0] = outputChannels;
            dims[4] = inputChannels;
            dims[1] = filterHeight;
            dims[2] = filterWidth;
            dims[3] = filterDur;
            if(!(%(dCdW)s) || CudaNdarray_HOST_DIMS(%(dCdW)s)[0]!=dims[0] ||
                  CudaNdarray_HOST_DIMS(%(dCdW)s)[1]!=dims[1] ||
                  CudaNdarray_HOST_DIMS(%(dCdW)s)[2]!=dims[2] ||
                  CudaNdarray_HOST_DIMS(%(dCdW)s)[3]!=dims[3] ||
                  CudaNdarray_HOST_DIMS(%(dCdW)s)[4]!=dims[4] ){
                Py_XDECREF(%(dCdW)s);
                %(dCdW)s = (CudaNdarray*)CudaNdarray_NewDims(5,dims);
                if (!(%(dCdW)s)) {
                    PyErr_Format(PyExc_MemoryError, "GpuConvGrad3D: Could not allocated dCdW");
                    %(fail)s
                }
            }
            { //for fail
            const int dcdhs4 = CudaNdarray_HOST_STRIDES(%(dCdH)s)[4];
            const int dcdhs3 = CudaNdarray_HOST_STRIDES(%(dCdH)s)[3];
            const int dcdhs1 = CudaNdarray_HOST_STRIDES(%(dCdH)s)[1];
            const int dcdhs2 = CudaNdarray_HOST_STRIDES(%(dCdH)s)[2];
            const int dcdhs0 = CudaNdarray_HOST_STRIDES(%(dCdH)s)[0];
            const int vs4 = CudaNdarray_HOST_STRIDES(%(V)s)[4];
            const int vs3 = CudaNdarray_HOST_STRIDES(%(V)s)[3];
            const int vs2 = CudaNdarray_HOST_STRIDES(%(V)s)[2];
            const int vs1 = CudaNdarray_HOST_STRIDES(%(V)s)[1];
            const int vs0 = CudaNdarray_HOST_STRIDES(%(V)s)[0];
            bool out_contiguous = CudaNdarray_is_c_contiguous(%(dCdW)s);
            int version = -1;
            int verbose = 0;
            bool subsample =(dr>1)||(dc>1)||(dt>1);
            bool work_complete = false;
            if(out_contiguous && (version==0||version==-1) && WShape[4]<=512 && !work_complete){
                //conv_rows_stack
                dim3 grid(WShape[0]*WShape[4],WShape[1]*WShape[2]);//outputHeight*outputWidth);
                dim3 threads(WShape[3]);
                int shared_size=0;
                convgrad_rows_stack<<<grid, threads, shared_size>>>(
                    CudaNdarray_DEV_DATA(%(V)s), CudaNdarray_DEV_DATA(%(dCdH)s), CudaNdarray_DEV_DATA(%(dCdW)s),
                    vidHeight, vidWidth, vidDur,
                    filterHeight, filterWidth, filterDur,
                    WShape[0], WShape[1], WShape[2], WShape[3], WShape[4],
                    outputHeight,outputWidth,outputDur,
                    batchSize, outputChannels, inputChannels,
                    dr,dc,dt,
                    vs3,vs2,vs1,vs4,vs0,
                    dcdhs3,dcdhs2,dcdhs1,dcdhs4,dcdhs0);
                CNDA_THREAD_SYNC;
                cudaError_t sts = cudaGetLastError();
                if (cudaSuccess == sts)
                {
                    work_complete = true;
                    if (verbose>1) printf("threads.x=%%i, threads.y=%%i, grid.x=%%i, grid.y=%%i, shared_size=%%i, nb_threads=%%i\\n", threads.x, threads.y, grid.x, grid.y, shared_size, threads.x * threads.y);
                    if (verbose) printf("INFO: used 'conv_rows_stack' version\\n");
                }
                else
                {
                    if (verbose) printf("threads.x=%%i, threads.y=%%i, grid.x=%%i, grid.y=%%i, shared_size=%%i, nb_threads=%%i\\n", threads.x, threads.y, grid.x, grid.y, shared_size, threads.x * threads.y);
                    if (verbose) printf("ERROR: all implementations failed for GpuConv3D! (%%s)",cudaGetErrorString(sts));
                    PyErr_Format(PyExc_RuntimeError, "ERROR: all implementations failed for GpuConvGrad3D! (%%s)",
                                 cudaGetErrorString(sts));
                    %(fail)s
                }
            }
            if(!work_complete){
                PyErr_Format(PyExc_RuntimeError, "ERROR: no implementations executed for this GpuConv3D!");
                %(fail)s
            }
            }}}}} // extra scope for fail
            ///////////// < /code generated by GpuConvGrad3D >
        """
        return strutil.render_string(codeSource, locals())
    def c_support_code_apply(self, node, nodename):
        # This code is not sensitive to the ignore_border flag.
        # It runs for every position in the output z, and then computes the gradient for the
        # input pixels that were downsampled to that z-position.
        # BUGFIX: the kernel previously indexed blockIdx.y with an undefined
        # identifier 'ws1' (the parameter is named 'wsh1'), so the generated
        # CUDA code could never compile.
        codeSource = """
        __global__ void
        //thread block size = WShape[4]
        //grid block size = (WShape[0]*WShape[1],WShape[2]*WShape[3])
        //
        convgrad_rows_stack( float* img, float* dCdH, float* dCdW,
                             int img_len, int img_wid, int img_dur,
                             int dCdW_len, int dCdW_wid, int dCdW_dur,
                             int wsh0, int wsh1, int wsh2, int wsh3, int wsh4,
                             int out_len, int out_wid, int out_dur,
                             int batchSize, int nkern, int nstack,
                             int dr, int dc, int dt,
                             int img_stride_frame, int img_stride_col, int img_stride_row,
                             int img_stride_stack, int img_stride_batch,
                             int dCdW_stride_frame, int dCdW_stride_col, int dCdW_stride_row,
                             int dCdW_stride_stack, int dCdW_stride_nkern)
        {
            int __shared__ kern_id, stack_id;
            float  __shared__ *d_img, *d_kern;
            kern_id= blockIdx.x%nkern;
            stack_id = blockIdx.x/nkern;
            const int dCdW_row = blockIdx.y%wsh1;
            const int dCdW_col = blockIdx.y/wsh1;
            const int dCdW_frame=threadIdx.x;
            img +=stack_id*img_stride_stack;
            dCdH +=kern_id*dCdW_stride_stack;
            float sum = 0.0f;
            for(int i=0;i<batchSize;i++){
                for(int p=0;p<out_len;p++){
                    for(int q=0;q<out_wid;q++){
                        for(int r=0;r<out_dur;r++){
                            sum += dCdH[i*dCdW_stride_nkern+p*dCdW_stride_row+q*dCdW_stride_col+r*dCdW_stride_frame] *
                                   img[i*img_stride_batch+(dr*p+dCdW_row)*img_stride_row+(dc*q+dCdW_col)*img_stride_col+(dt*r+dCdW_frame)*img_stride_frame];
                        }
                    }
                }
            }
            dCdW[kern_id*wsh1*wsh2*wsh3*wsh4+//the good batch
                 stack_id+//the output image
                 dCdW_row*wsh2*wsh3*wsh4+//the output row
                 dCdW_col*wsh3*wsh4 + //the output_col
                 dCdW_frame*wsh4] = sum;
        }
        /*
        #block
        for j in xrange(0,WShape[0]):
            for z in xrange(0,WShape[1]):
                for k in xrange(0,WShape[2]):
                    for l in xrange(0,WShape[3]):
                        #threads
                        for m in xrange(0,WShape[4]):
                            #thread
                            for i in xrange(0,batchSize):
                                for p in xrange(0,outputHeight):
                                    for q in xrange(0,outputWidth):
                                        for r in xrange(0,outputDur):
                                            dCdW[j,z,k,l,m] += dCdH[i,j,p,q,r] * V[i,z,dr*p+k,dc*q+l,dt*r+m]
        */
        """
        return codeSource
# Module-level singleton reused by the optimizer below.
gpu_conv_grad3d = GpuConvGrad3D()
@local_optimizer([ConvGrad3D])
def local_gpu_conv_grad3d(node):
    # Replace a CPU ConvGrad3D with the GPU op when at least one input already
    # comes from the GPU and every output is float32 (the only dtype the CUDA
    # kernel supports); the result is moved back to the host.
    if isinstance(node.op, ConvGrad3D):
        if numpy.any([i.owner and isinstance(i.owner.op, HostFromGpu)
                      for i in node.inputs]):
            if numpy.all([o.type.dtype == 'float32' for o in node.outputs]):
                V, d, WShape, dCdH = node.inputs
                return [host_from_gpu(gpu_conv_grad3d(
                    as_cuda_ndarray_variable(V),
                    d,
                    WShape,
                    as_cuda_ndarray_variable(dCdH)))]
# Not enabled by default as we don't want people to use it.
gpu_optimizer.register("local_gpu_conv_grad3d", local_gpu_conv_grad3d)
|
|
from collections import defaultdict
import random
import sys
import numpy as np
import matplotlib.pyplot as plt
from weather_model import fft, read_daily, simulate_temp, snowfall_rainfall_model, simulate_precip, simulate_wind_speed
from weather_model import wind_chill_model, wind_chill_transaction_probability, rainfall_transaction_probability
from weather_model import snowfall_transaction_probability, wind_speed_transaction_probability
from weather_model import weather_transaction_probability, autocorr
# Monthly weather-record identifiers (YYYYMM), Oct 2011 through Sep 2014,
# used to build the "<YYYYMM>daily.txt" file names read below.
RECORDS = ["201110", "201111", "201112", "201201", "201202", "201203", "201204", "201205", "201206", "201207", "201208",
"201209", "201210", "201211", "201212", "201301", "201302", "201303", "201304", "201305", "201306", "201307", "201308",
"201309", "201310", "201311", "201312", "201401", "201402", "201403", "201404", "201405", "201406", "201407", "201408",
"201409"]
def plot_sim_autocorr(flname, temps, sim_temps):
    """Plot real vs. simulated temperature autocorrelation and save to flname."""
    t_corr = autocorr(temps)
    st_corr = autocorr(sim_temps)
    plt.clf()
    plt.hold(True)
    plt.plot(t_corr, color="c", label="Real")
    plt.plot(st_corr, color="k", label="Simulated")
    plt.legend()
    plt.ylim([-1.0, 1.0])
    plt.xlabel("Time Lag (days)", fontsize=16)
    plt.ylabel("Correlation Coefficient", fontsize=16)
    plt.xlim([0, len(temps)])
    # BUGFIX: savefig's keyword is lowercase 'dpi'; 'DPI' was silently ignored.
    plt.savefig(flname, dpi=300)
def plot_sim_fft(flname, real, simulated):
    """Plot the FFT amplitude spectra of real vs. simulated autocorrelations."""
    real_freq, real_amplitudes, _ = fft(autocorr(real))
    sim_freq, sim_amp, _ = fft(autocorr(simulated))
    n_real_samples = len(real)
    n_sim_samples = len(simulated)
    plt.clf()
    plt.hold(True)
    # BUGFIX: use integer division for the slice bound (plain '/' yields a
    # float under Python 3, which is invalid as a slice index).
    plt.plot(real_freq[1:(n_real_samples+1)//2], real_amplitudes[1:(n_real_samples+1)//2], color="c", label="Real")
    plt.plot(sim_freq[1:(n_sim_samples+1)//2], sim_amp[1:(n_sim_samples+1)//2], color="k", alpha=0.7, label="Simulated")
    plt.xlabel("Frequency (cycles/day)", fontsize=16)
    plt.ylabel("Amplitude", fontsize=16)
    plt.legend(loc="upper right")
    plt.xlim([0, 0.05])
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_sim_vel(flname, vel, sim_vel):
    """Plot real vs. simulated daily temperature changes (dT/dt)."""
    plt.clf()
    plt.hold(True)
    plt.plot(sim_vel, color="k", label="Simulated")
    plt.plot(vel, color="c", label="Real")
    plt.xlabel("Time (Days)", fontsize=16)
    plt.ylabel("dT/dt (F)", fontsize=16)
    plt.legend()
    plt.xlim([0, len(vel)])
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_vel(flname, vel):
    """Plot observed daily temperature changes (dT/dt)."""
    plt.clf()
    plt.plot(vel, color="c")
    plt.xlabel("Time (Days)", fontsize=16)
    plt.ylabel("dT/dt (F)", fontsize=16)
    plt.xlim([0, len(vel)])
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_hist_sim_vel(flname, vel, sim_vel):
    """Histogram real vs. simulated daily temperature changes."""
    plt.clf()
    plt.hold(True)
    # BUGFIX: np.linspace's num argument must be an integer (was 40.0).
    bins = np.linspace(-40.0, 40.0, 40)
    plt.hist(vel, bins=bins, color="c", label="Real")
    plt.hist(sim_vel, bins=bins, color="k", label="Simulated", alpha=0.7)
    plt.ylabel("Occurrences (Days)", fontsize=16)
    plt.xlabel("dT/dt (F)", fontsize=16)
    plt.legend()
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_sim_temps(flname, temps, sim_temps):
    """Plot real vs. simulated daily temperatures."""
    plt.clf()
    plt.hold(True)
    plt.plot(temps, color="c", label="Real")
    plt.plot(sim_temps, color="k", label="Simulated")
    plt.xlabel("Time (Days)", fontsize=16)
    plt.ylabel("Temperature (F)", fontsize=16)
    plt.xlim([0, len(temps)])
    plt.legend(fontsize=14)
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def hist_temps(flname, temps, sim_temps):
    """Histogram real vs. simulated daily temperatures."""
    plt.clf()
    plt.hold(True)
    # BUGFIX: np.linspace's num argument must be an integer (was 30.0).
    bins = np.linspace(-20.0, 100.0, 30)
    plt.hist(temps, bins=bins, color="c", label="Real")
    plt.hist(sim_temps, bins=bins, color="k", alpha=0.7, label="Simulated")
    plt.ylabel("Occurrences (Days)", fontsize=16)
    plt.xlabel("Temperature (F)", fontsize=16)
    plt.legend(loc="upper right")
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_precip(flname, precip, sim_precip):
    """Plot real (column 2 = total precip) vs. simulated daily precipitation."""
    plt.clf()
    plt.hold(True)
    plt.plot(precip[:, 2], color="c", label="Real")
    plt.plot(sim_precip, color="k", label="Simulated")
    plt.xlabel("Time (Days)", fontsize=16)
    plt.ylabel("Total Precipitation (in)", fontsize=16)
    plt.legend()
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_hist_precip(flname, precip, sim_precip):
    """Histogram real vs. simulated daily total precipitation."""
    plt.clf()
    plt.hold(True)
    # BUGFIX: np.linspace's num argument must be an integer (was 20.0).
    bins = np.linspace(0.0, 2.0, 20)
    plt.hist(precip[:, 2], bins=bins, color="c", label="Real")
    plt.hist(sim_precip, bins=bins, color="k", alpha=0.7, label="Simulated")
    plt.ylabel("Occurrences (Days)", fontsize=16)
    plt.xlabel("Total Precipitation (in)", fontsize=16)
    plt.xlim([0.0, 2.0])
    plt.legend()
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_snowfall(flname, precip, sim_snowfall):
    """Plot real (column 0 = snowfall) vs. simulated daily snowfall."""
    plt.clf()
    plt.hold(True)
    plt.plot(precip[:, 0], color="c", alpha=0.7, label="Real")
    plt.plot(sim_snowfall, color="k", alpha=0.7, label="Simulated")
    plt.xlabel("Time (Days)", fontsize=16)
    plt.ylabel("Snowfall (in)", fontsize=16)
    plt.legend()
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_hist_snowfall(flname, precip, sim_snowfall):
    """Histogram real vs. simulated daily snowfall."""
    plt.clf()
    plt.hold(True)
    # BUGFIX: np.linspace's num argument must be an integer (was 32.0).
    bins = np.linspace(0.0, 16.0, 32)
    plt.hist(precip[:, 0], bins=bins, color="c", alpha=0.7, label="Real")
    plt.hist(sim_snowfall, bins=bins, color="k", alpha=0.7, label="Simulated")
    plt.ylabel("Occurrences (Days)", fontsize=16)
    plt.xlabel("Snowfall (in)", fontsize=16)
    #plt.xlim([0.0, 2.0])
    plt.legend()
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_rainfall(flname, precip, sim_rainfall):
    """Plot real (column 1 = rainfall) vs. simulated daily rainfall."""
    plt.clf()
    plt.hold(True)
    plt.plot(precip[:, 1], color="c", alpha=0.7, label="Real")
    plt.plot(sim_rainfall, color="k", alpha=0.7, label="Simulated")
    plt.xlabel("Time (Days)", fontsize=16)
    plt.ylabel("Rainfall (in)", fontsize=16)
    plt.legend()
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_hist_rainfall(flname, precip, sim_rainfall):
    """Histogram real vs. simulated daily rainfall."""
    plt.clf()
    plt.hold(True)
    # BUGFIX: np.linspace's num argument must be an integer (was 30.0).
    bins = np.linspace(0.0, 3.0, 30)
    plt.hist(precip[:, 1], bins=bins, color="c", alpha=0.7, label="Real")
    plt.hist(sim_rainfall, bins=bins, color="k", alpha=0.7, label="Simulated")
    plt.ylabel("Occurrences (Days)", fontsize=16)
    plt.xlabel("Rainfall (in)", fontsize=16)
    #plt.xlim([0.0, 2.0])
    plt.legend()
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_wind_chill_probability(flname):
    """Plot the wind-chill transaction-probability curve over a temperature range."""
    plt.clf()
    # BUGFIX: np.linspace's num argument must be an integer (was 180.0).
    temps = np.linspace(-20.0, 90.0, 180)
    probability = wind_chill_transaction_probability(temps)
    plt.plot(temps, probability, color="c")
    plt.grid(True)
    plt.xlabel("Wind Chill (F)", fontsize=16)
    plt.ylabel("Probability", fontsize=16)
    plt.ylim([0.0, 1.0])
    plt.legend()  # no labeled artists; kept for parity with siblings
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_wind_chill_prob_hist(flname, temps, sim_temps):
    """Histogram wind-chill transaction probabilities, real vs. simulated."""
    plt.clf()
    plt.hold(True)
    wc_prob = wind_chill_transaction_probability(temps)
    sim_wc_prob = wind_chill_transaction_probability(sim_temps)
    # BUGFIX: np.linspace's num argument must be an integer (was 20.0).
    bins = np.linspace(0.0, 1.0, 20)
    plt.hist(wc_prob, bins=bins, color="c", label="Real")
    plt.hist(sim_wc_prob, bins=bins, color="k", alpha = 0.7, label="Simulated")
    plt.ylabel("Frequency", fontsize=16)
    plt.xlabel("Probability", fontsize=16)
    plt.xlim([0.0, 1.0])
    plt.grid(True)
    plt.legend(loc="upper left")
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_snowfall_trans_prob(flname):
    """Plot the snowfall transaction-probability curve."""
    plt.clf()
    # BUGFIX: np.linspace's num argument must be an integer (was 60.0).
    snowfall = np.linspace(0.0, 12.0, 60)
    quality = snowfall_transaction_probability(snowfall)
    plt.plot(snowfall, quality, color="c")
    plt.grid(True)
    plt.xlabel("Snowfall (in)", fontsize=16)
    plt.ylabel("Probability", fontsize=16)
    plt.ylim([0.0, 1.0])
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_hist_snowfall_trans_prob(flname, precip, sim_snowfall):
    """Histogram snowfall transaction probabilities, real vs. simulated."""
    plt.clf()
    plt.hold(True)
    # BUGFIX: np.linspace's num argument must be an integer (was 20.0).
    bins = np.linspace(0.0, 1.0, 20)
    plt.hist(snowfall_transaction_probability(precip[:, 0]), bins=bins, color="c", alpha=0.7, label="Real")
    plt.hist(snowfall_transaction_probability(sim_snowfall), bins=bins, color="k", alpha=0.7, label="Simulated")
    plt.ylabel("Days (Frequency)", fontsize=16)
    plt.xlabel("Probability", fontsize=16)
    plt.xlim([0.0, 1.0])
    plt.grid(True)
    plt.legend(loc="upper left")
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_rainfall_trans_prob(flname):
    """Plot the rainfall transaction-probability curve."""
    plt.clf()
    # BUGFIX: np.linspace's num argument must be an integer (was 60.0).
    rainfall = np.linspace(0.0, 6.0, 60)
    prob = rainfall_transaction_probability(rainfall)
    plt.plot(rainfall, prob, color="c")
    plt.grid(True)
    plt.xlabel("Rainfall (in)", fontsize=16)
    plt.ylabel("Probability", fontsize=16)
    plt.ylim([0.0, 1.0])
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_hist_rainfall_trans_prob(flname, precip, sim_rainfall):
    """Histogram rainfall transaction probabilities, real vs. simulated."""
    plt.clf()
    plt.hold(True)
    # BUGFIX: np.linspace's num argument must be an integer (was 20.0).
    bins = np.linspace(0.0, 1.0, 20)
    plt.hist(rainfall_transaction_probability(precip[:, 1]), bins=bins, color="c", alpha=0.7, label="Real")
    plt.hist(rainfall_transaction_probability(sim_rainfall), bins=bins, color="k", alpha=0.7, label="Simulated")
    plt.ylabel("Days (Frequency)", fontsize=16)
    plt.xlabel("Probability", fontsize=16)
    plt.xlim([0.0, 1.0])
    plt.grid(True)
    plt.legend(loc="upper left")
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_scatter_temp_precip(flname, temp, precip, a=0.4, b=27.0):
    """Scatter rainfall fraction of total precip vs. temperature, with the
    logistic model 1/(1+exp(-a*(T-b))) overlaid."""
    plt.clf()
    percent = []
    for snowfall, rainfall, total_precip in precip:
        if total_precip > 0.0:
            percent.append(rainfall / total_precip)
        else:
            percent.append(0.0)
    # Keep only mixed-precipitation days (0 < fraction < 1). Renamed the
    # comprehension variables, which previously shadowed the function args.
    temp_percent = np.array([(t, p) for t, p in zip(temp, percent) if p > 0.0 and p < 1.0])
    # BUGFIX: np.linspace's num argument must be an integer (was 120.0).
    ts = np.linspace(0.0, 60.0, 120)
    predicted = [1.0 / (1.0 + np.exp(-a * (T - b))) for T in ts]
    plt.scatter(temp_percent[:, 0], temp_percent[:, 1])
    plt.hold(True)
    plt.plot(ts, predicted)
    plt.ylim([0.0, 1.0])
    plt.xlabel("Temperature (F)", fontsize=16)
    plt.ylabel("Rainfall (% of Precip)", fontsize=16)
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_wind_speed(flname, wind_speed, sim_wind_speed=None):
    """Plot real (and optionally simulated) daily wind speeds."""
    plt.clf()
    plt.hold(True)
    plt.plot(wind_speed, color="c", label="Real")
    # BUGFIX: '!= None' on an ndarray is an elementwise comparison and makes
    # 'if' raise a truth-value ambiguity error; use an identity check.
    if sim_wind_speed is not None:
        plt.plot(sim_wind_speed, color="k", label="Simulated")
    plt.xlabel("Time (Days)", fontsize=16)
    plt.ylabel("Wind Speed (mph)", fontsize=16)
    plt.legend()
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_hist_wind_speed(flname, wind_speed, sim_wind_speed=None):
    """Histogram real (and optionally simulated) daily wind speeds."""
    plt.clf()
    plt.hold(True)
    # BUGFIX: np.linspace's num argument must be an integer (was 30.0).
    bins = np.linspace(0.0, 30.0, 30)
    plt.hist(wind_speed, bins=bins, color="c", alpha=0.7, label="Real")
    # BUGFIX: identity check instead of '!= None' (elementwise on ndarrays).
    if sim_wind_speed is not None:
        plt.hist(sim_wind_speed, bins=bins, color="k", alpha=0.7, label="Simulated")
    plt.ylabel("Occurrences (Days)", fontsize=16)
    plt.xlabel("Wind Speed (mph)", fontsize=16)
    plt.legend()
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_wind_speed_fft(flname, real, simulated=None):
    """Plot FFT amplitude spectra of real (and optionally simulated) wind speeds."""
    real_freq, real_amplitudes, _ = fft(real)
    n_real_samples = len(real)
    plt.clf()
    plt.hold(True)
    # BUGFIX: integer division for slice bounds (py3-safe).
    plt.plot(real_freq[1:(n_real_samples+1)//2], real_amplitudes[1:(n_real_samples+1)//2], color="c", label="Real")
    # BUGFIX: identity check instead of '!= None' (elementwise on ndarrays).
    if simulated is not None:
        sim_freq, sim_amp, _ = fft(simulated)
        n_sim_samples = len(simulated)
        plt.plot(sim_freq[1:(n_sim_samples+1)//2], sim_amp[1:(n_sim_samples+1)//2], color="k", alpha=0.7, label="Simulated")
    plt.xlabel("Frequency (cycles/day)", fontsize=16)
    plt.ylabel("Amplitude", fontsize=16)
    plt.legend(loc="upper right")
    plt.xlim([0, 0.05])
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_wind_speed_trans_prob(flname):
    """Plot the wind-speed transaction-probability curve."""
    plt.clf()
    # BUGFIX: np.linspace's num argument must be an integer (was 60.0).
    wind_speed = np.linspace(0.0, 30.0, 60)
    prob = wind_speed_transaction_probability(wind_speed)
    plt.plot(wind_speed, prob, color="c")
    plt.grid(True)
    plt.xlabel("Wind Speed (mph)", fontsize=16)
    plt.ylabel("Probability", fontsize=16)
    plt.ylim([0.0, 1.0])
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_hist_wind_speed_trans_prob(flname, wind_speed, sim_wind_speed):
    """Histogram wind-speed transaction probabilities, real vs. simulated."""
    plt.clf()
    plt.hold(True)
    # BUGFIX: np.linspace's num argument must be an integer (was 20.0).
    bins = np.linspace(0.0, 1.0, 20)
    plt.hist(wind_speed_transaction_probability(sim_wind_speed), bins=bins, color="k", alpha=0.7, label="Simulated")
    plt.hist(wind_speed_transaction_probability(wind_speed), bins=bins, color="c", alpha=0.7, label="Real")
    plt.ylabel("Days (Frequency)", fontsize=16)
    plt.xlabel("Probability", fontsize=16)
    plt.xlim([0.0, 1.0])
    plt.grid(True)
    plt.legend(loc="upper left")
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_weather_probability(flname, wind_chill, wind_speed, snowfall, rainfall, sim_wind_chill, sim_wind_speed, sim_snowfall, sim_rainfall):
    """Plot the combined weather transaction probability over time, real vs. simulated."""
    plt.clf()
    plt.hold(True)
    plt.plot(weather_transaction_probability(wind_chill, wind_speed, snowfall, rainfall), color="c", label="Real")
    plt.plot(weather_transaction_probability(sim_wind_chill, sim_wind_speed, sim_snowfall, sim_rainfall), color="k", alpha=0.7, label="Simulated")
    plt.xlabel("Time (Days)", fontsize=16)
    plt.ylabel("Probability", fontsize=16)
    plt.ylim([0.0, 1.0])
    plt.grid(True)
    plt.legend(loc="lower left")
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
def plot_weather_prob_hist(flname, wind_chill, wind_speed, snowfall, rainfall, sim_wind_chill, sim_wind_speed, sim_snowfall, sim_rainfall):
    """Histogram combined weather transaction probabilities, real vs. simulated."""
    plt.clf()
    plt.hold(True)
    # BUGFIX: np.linspace's num argument must be an integer (was 20.0).
    bins = np.linspace(0.0, 1.0, 20)
    plt.hist(weather_transaction_probability(wind_chill, wind_speed, snowfall, rainfall), bins=bins, color="c", label="Real")
    plt.hist(weather_transaction_probability(sim_wind_chill, sim_wind_speed, sim_snowfall, sim_rainfall), bins=bins, color="k", alpha=0.7, label="Simulated")
    # BUGFIX: axis labels were swapped relative to every sibling histogram;
    # x is the probability bin, y counts occurrences.
    plt.xlabel("Probability", fontsize=16)
    plt.ylabel("Occurrences (Days)", fontsize=16)
    plt.xlim([0.0, 1.0])
    plt.grid(True)
    plt.legend(loc="upper left")
    # BUGFIX: savefig's keyword is lowercase 'dpi'.
    plt.savefig(flname, dpi=300)
# Script driver: usage: script.py <data_dir> <output_dir>
data_dir = sys.argv[1]
output_dir = sys.argv[2]
# Load all monthly daily-record files into a dict keyed by station id.
records = defaultdict(list)
for date in RECORDS:
    read_daily(data_dir + "/" + date + "daily.txt", records)
sbn_data = records["14848"] #records["12815"]
sbn_data.sort()
# Record tuples appear to be (time, temp, snowfall, rainfall, total_precip,
# wind_speed) based on the unpacking below -- TODO confirm against read_daily.
temps = np.array([temp for (t, temp, _, _, _, _) in sbn_data])
# Day-over-day temperature change (dT/dt).
vel = temps[1:] - temps[:len(temps) - 1]
#avg = np.average(temps)
std = np.std(vel)
# Seasonal component: use the FFT coefficient at index 3 (yearly cycle,
# presumably -- verify against weather_model.fft).
freq, ampl, coeff = fft(temps)
fourier_coeff = coeff[3]
avg = 0.5 * np.abs(coeff)[0]
sim_temps = simulate_temp(avg, std, 1.0, len(temps), fourier_coeff)
sim_vel = sim_temps[1:] - sim_temps[:len(sim_temps) - 1]
# Precipitation columns: 0 = snowfall, 1 = rainfall, 2 = total precip.
precip = np.array([(snowfall, rainfall, total_precip) for t, _, snowfall, rainfall, total_precip, _ in sbn_data])
sim_precip, sim_snowfall, sim_rainfall = simulate_precip(1.5 * np.average(precip[:, 2]), temps, precip.shape[0])
wind_speeds = np.array([wind_speed for _, _, _, _, _, wind_speed in sbn_data])
# Gamma-distribution parameters (k, theta) fit by moments for wind speed.
k = np.sqrt(np.var(wind_speeds))
theta = np.average(wind_speeds) / k
freq, ampl, coeff = fft(wind_speeds)
fourier_coeff = coeff[3]
sim_wind_speeds = simulate_wind_speed(0.0, k, theta, len(wind_speeds), fourier_coeff)
wind_chill = wind_chill_model(temps, wind_speeds)
sim_wind_chill = wind_chill_model(sim_temps, sim_wind_speeds)
# Emit every diagnostic figure into output_dir.
hist_temps(output_dir + "/sim_temp_hist.pdf", temps, sim_temps)
plot_sim_temps(output_dir + "/sim_temp.pdf", temps, sim_temps)
plot_sim_vel(output_dir + "/sim_vel.pdf", vel, sim_vel)
plot_hist_sim_vel(output_dir + "/sim_vel_hist.pdf", vel, sim_vel)
plot_sim_autocorr(output_dir + "/sim_temp_autocorr.pdf", temps, sim_temps)
plot_sim_fft(output_dir + "/sim_temp_fft.pdf", temps, sim_temps)
plot_precip(output_dir + "/daily_precip.pdf", precip, sim_precip)
plot_snowfall(output_dir + "/daily_snowfall.pdf", precip, sim_snowfall)
plot_hist_precip(output_dir + "/daily_precip_hist.pdf", precip, sim_precip)
plot_hist_snowfall(output_dir + "/daily_snowfall_hist.pdf", precip, sim_snowfall)
plot_scatter_temp_precip(output_dir + "/daily_temp_precip_scatter.pdf", temps, precip, a=0.2)
plot_rainfall(output_dir + "/daily_rainfall.pdf", precip, sim_rainfall)
plot_hist_rainfall(output_dir + "/daily_rainfall_hist.pdf", precip, sim_rainfall)
plot_wind_speed(output_dir + "/daily_wind_speeds.pdf", wind_speeds, sim_wind_speeds)
plot_hist_wind_speed(output_dir + "/daily_wind_speed_hist.pdf", wind_speeds, sim_wind_speeds)
plot_wind_speed_fft(output_dir + "/daily_wind_speed_fft.pdf", wind_speeds, sim_wind_speeds)
plot_snowfall_trans_prob(output_dir + "/snowfall_trans_prob.pdf")
plot_hist_snowfall_trans_prob(output_dir + "/snowfall_trans_prob_hist.pdf", precip, sim_snowfall)
plot_rainfall_trans_prob(output_dir + "/rainfall_trans_prob.pdf")
plot_hist_rainfall_trans_prob(output_dir + "/rainfall_trans_prob_hist.pdf", precip, sim_rainfall)
plot_scatter_temp_precip(output_dir + "/daily_wind_chill_precip_scatter.pdf", wind_chill, precip, a=0.2, b=20.0)
plot_wind_chill_probability(output_dir + "/wind_chill_trans_prob.pdf")
plot_wind_chill_prob_hist(output_dir + "/wind_chill_trans_prob_hist.pdf", wind_chill, sim_wind_chill)
plot_wind_speed_trans_prob(output_dir + "/wind_speed_trans_prob.pdf")
plot_hist_wind_speed_trans_prob(output_dir + "/wind_speed_trans_prob_hist.pdf", wind_speeds, sim_wind_speeds)
plot_weather_probability(output_dir + "/weather_trans_prob.pdf", wind_chill, wind_speeds, precip[:, 0], precip[:, 1], sim_wind_chill, sim_wind_speeds, sim_snowfall, sim_rainfall)
plot_weather_prob_hist(output_dir + "/weather_trans_prob_hist.pdf", wind_chill, wind_speeds, precip[:, 0], precip[:, 1], sim_wind_chill, sim_wind_speeds, sim_snowfall, sim_rainfall)
|
|
# COMMON.PY
# A common module for all scripts in the Interactions toolbox.
import sys, os, arcpy, operator, traceback, numpy, random
# constants defining neighbour table field names
NEIGH_FROM_FLD = 'ID_FROM'
NEIGH_TO_FLD = 'ID_TO'
# signals if debug messages are to be printed
debugMode = False
try:
from debug import *
except ImportError:
pass
# field type names for new fields
PY_TYPE_TO_OUT = {unicode : 'TEXT', str : 'TEXT', int : 'LONG', float : 'DOUBLE', numpy.float64 : 'DOUBLE'}
IN_TYPE_TO_PY = {'Integer' : int, 'Double' : float, 'String' : unicode, 'SmallInteger' : int, 'OID' : int}
PY_TYPE_TO_STR = {unicode : 'unicode', str : 'str', int : 'int', float : 'float', numpy.float64 : 'float'}
INT_FIELD_DESCRIBE = 'Integer'
NET_FIELDS = [('SourceID', 'SourceID', None), ('SourceOID', 'SourceOID', None), ('PosAlong', 'PosAlong', None), ('SideOfEdge', 'SideOfEdge', None)]
NET_FIELD_MAPPINGS = 'SourceID SourceID #;SourceOID SourceOID #;PosAlong PosAlong #;SideOfEdge SideOfEdge #'
DEFAULT_SPATIAL_EXT = 'shp' # nondatabase default file extension
DEFAULT_TABLE_EXT = 'dbf'
ORIGIN_MARKER = 'O_' # field markers for od attributes of interactions
DESTINATION_MARKER = 'D_'
SHAPE_KEY = 'SHAPE' # auxiliary key for shape information
SHAPE_TYPE = 'SHAPE'
def checkFile(file):
    '''Returns *file* unchanged if arcpy can see it; raises IOError otherwise.

    Python 2 raise syntax; the message is re-encoded cp1250 -> utf8.
    '''
    if not arcpy.Exists(file):
        raise IOError, '%s does not exist' % file.decode('cp1250').encode('utf8')
    return file
def query(target, mssql, *args):
    '''Prepares a mssql query to target such that fields are properly quoted.'''
    rendered = mssql % args
    # Personal geodatabases (.mdb) keep square-bracket field quoting;
    # every other target uses double quotes instead.
    if '.mdb' in target:
        return rendered
    return rendered.replace('[', '"').replace(']', '"')
def inTypeToOut(inType):
    '''Converts an arcpy Describe field type name to an AddField type string.'''
    try:
        return PY_TYPE_TO_OUT[IN_TYPE_TO_PY[inType]]
    except KeyError:
        raise ValueError, 'field of unknown type: ' + str(inType)
def inTypeToPy(inType):
    '''Converts an arcpy Describe field type name to the matching Python type.'''
    try:
        return IN_TYPE_TO_PY[inType]
    except KeyError:
        raise ValueError, 'field of unknown type: ' + str(inType)
def pyTypeToOut(pyType):
    '''Converts a Python type to an arcpy AddField type string.'''
    try:
        return PY_TYPE_TO_OUT[pyType]
    except KeyError:
        raise ValueError, 'field of unknown type: ' + str(pyType)
def fieldType(pythonType):
    '''Returns a string for the passed Python type that may be used in arcpy.AddField as field type.'''
    return pyTypeToOut(pythonType)
def describeToField(describe):
    '''Alias: Describe type name -> AddField type string.'''
    return inTypeToOut(describe)
def typeOfField(layer, field):
    '''Alias for outTypeOfField (AddField type string of a layer field).'''
    return outTypeOfField(layer, field)
def outTypeOfField(layer, field):
    '''AddField type string of the given field of *layer*.'''
    return inTypeToOut(inTypeOfField(layer, field))
def pyTypeOfField(layer, field):
    '''Python type of the given field of *layer*.'''
    return inTypeToPy(inTypeOfField(layer, field))
def inTypeOfField(layer, field):
    '''Describe type name of *field* in *layer*; falls back to a
    case-insensitive match before raising ValueError.'''
    typeList = fieldTypeList(layer)
    if field in typeList:
        return typeList[field]
    else:
        # Retry case-insensitively: add uppercased aliases for every field.
        fldNames = list(typeList.keys())
        for lyrFld in fldNames:
            typeList[lyrFld.upper()] = typeList[lyrFld]
        if field.upper() in typeList:
            return typeList[field.upper()]
        else:
            raise ValueError, u'field %s not found in %s' % (field, layer)
def pyStrOfType(pyType):
    '''Name string of the given Python field type (e.g. int -> 'int').'''
    try:
        return PY_TYPE_TO_STR[pyType]
    except KeyError:
        raise ValueError, 'field of unknown type: ' + str(pyType)
def addFields(layer, fields, strict=False):
  '''Adds (name, python type) fields to the layer.

  Existing fields: in strict mode a ValueError is raised; otherwise a field
  of the same type is kept (with a warning) and a field of a different type
  is deleted and recreated.'''
  fieldList = fieldTypeList(layer)
  for name, fldType in fields:
    if name in fieldList:
      if strict:
        # call-style raise: valid on both Python 2 and 3
        raise ValueError('field %s already exists' % name)
      if inTypeToPy(fieldList[name]) == fldType:
        warning('field %s already exists' % name)
        continue
      warning('field %s already exists with different type %s: will be deleted' % (name, fieldList[name]))
      arcpy.DeleteField_management(layer, [name])
    arcpy.AddField_management(layer, name, pyTypeToOut(fldType))
def featurePath(location, file, ext=DEFAULT_SPATIAL_EXT):
  '''Builds a feature dataset path inside location, appending ext where needed.'''
  joined = os.path.join(location, file)
  return addExt(joined, ext)
def tablePath(location, file, ext=DEFAULT_TABLE_EXT):
  '''Builds a table path inside location, appending ext where needed.'''
  joined = os.path.join(location, file)
  return addExt(joined, ext)
def tableName(location, file, ext=DEFAULT_TABLE_EXT):
  '''Returns the table name for the given location: bare inside a geodatabase,
  with the table extension appended otherwise.'''
  return file if isInDatabase(location) else addTableExt(file, ext)
def addExt(path, ext=DEFAULT_SPATIAL_EXT):
  '''Appends the extension unless the path is in a geodatabase or already has one.'''
  needsExt = not (isInDatabase(path) or hasExt(path))
  return (path + '.' + ext) if needsExt else path
def addFeatureExt(path, ext=DEFAULT_SPATIAL_EXT):
  '''Appends the spatial (shapefile) extension where needed; alias of addExt.'''
  return addExt(path, ext)
def addTableExt(path, ext=DEFAULT_TABLE_EXT):
  '''Appends the table (DBF) extension where needed; alias of addExt.'''
  return addExt(path, ext)
def hasExt(path):
  '''True if the final path component contains a dot, i.e. a file extension.'''
  lastDot = path.rfind('.')
  lastSep = max(path.rfind('/'), path.rfind('\\'))
  return lastDot > lastSep
def parameters(number):
  '''Returns the given number of tool parameters from the tool input as strings.

  Exits the process when the script was started without any tool arguments.'''
  if len(sys.argv) == 1:
    sys.exit(1)
  return [arcpy.GetParameterAsText(i) for i in range(number)]
def setParameter(paramIndex, output):
  '''Passes output back to the tool dialog as the parameter at paramIndex.'''
  arcpy.SetParameterAsText(paramIndex, output)
def count(layer):
  '''Counts the number of features (rows) in the layer.'''
  # GetCount returns a Result whose output is a string, hence the int() cast
  return int(arcpy.GetCount_management(layer).getOutput(0))
def fieldTypeList(layer, type=None):
  '''Returns a dict of field names : types of the specified layer attributes.'''
  if type is None:
    flist = arcpy.ListFields(layer)
  else:
    flist = arcpy.ListFields(layer, '', type)
  return {fld.name: fld.type for fld in flist}
def fieldList(layer, type=None):
  '''Returns a list of field names of the specified layer attributes.'''
  flist = arcpy.ListFields(layer) if type is None else arcpy.ListFields(layer, '', type)
  return [fld.name for fld in flist]
def parseFields(fieldList):
  '''Parses fields passed from the tool input.

  Splits the semicolon-delimited tool value, discards empty entries and
  strips the single quotes the tool wraps around names containing spaces.'''
  return [item.strip("'") for item in fieldList.split(';') if item != '']
def parseStats(stats):
  '''Parses statistics setup: semicolon-separated, space-delimited pairs.'''
  parsed = []
  for item in stats.split(';'):
    parsed.append(item.split(' '))
  return parsed
def statListToFields(statList):
  '''Converts a list of statistics setup to resulting field names (STAT_FIELD).'''
  names = []
  for item in statList:
    names.append(item[1] + '_' + item[0])
  return names
def isView(layer):
  '''True if the dataset is an in-memory view (feature layer or table view).'''
  desc = arcpy.Describe(layer)
  return (desc.dataType in (u'FeatureLayer', u'TableView'))
def isLayer(layer):
  '''True if the dataset is a feature layer.'''
  desc = arcpy.Describe(layer)
  return (desc.dataType == u'FeatureLayer')
def isShapefile(layer):
  '''True for shapefiles and for feature layers backed by a shapefile.'''
  desc = arcpy.Describe(layer)
  return (desc.dataType == u'ShapeFile' or (desc.dataType == u'FeatureLayer' and desc.dataElement.dataType == u'ShapeFile'))
def isTableView(layer):
  '''True if the dataset is a table view.'''
  desc = arcpy.Describe(layer)
  return (desc.dataType == u'TableView')
def isFeatureClass(layer):
  '''True if the dataset is a stored feature class or shapefile (not a view).'''
  desc = arcpy.Describe(layer)
  return (desc.dataType in (u'FeatureClass', u'ShapeFile'))
def hasGeometry(layer):
  '''True if the described data type carries geometry (feature layer/class/shapefile).'''
  desc = arcpy.Describe(layer)
  # removed a leftover Python 2 debug print that echoed dataType three times
  return (desc.dataType in (u'FeatureLayer', u'FeatureClass', u'ShapeFile'))
def toFeatureClass(layer):
  '''Resolves a layer reference to the path of its underlying feature class.

  Raises:
    ValueError: if the described data type is not backed by a feature class.'''
  desc = arcpy.Describe(layer)
  if desc.dataType in (u'FeatureClass', u'ShapeFile'):
    return layer
  elif desc.dataType == u'FeatureLayer':
    return desc.dataElement.catalogPath
  else:
    # the original format string supplied three arguments for two
    # placeholders, which itself raised a TypeError instead of ValueError
    raise ValueError('cannot convert %s (type %s) to feature class' % (layer, desc.dataType))
def selection(source, target, query):
  '''Creates a view named target holding the rows of source matching query.'''
  maker = arcpy.MakeFeatureLayer_management if hasGeometry(source) else arcpy.MakeTableView_management
  maker(source, target, query)
def select(source, target, query):
  '''Copies the rows of source matching query into a new dataset target.'''
  tool = arcpy.Select_analysis if hasGeometry(source) else arcpy.TableSelect_analysis
  tool(source, target, query)
def copy(source, target):
  '''Copies a dataset, using the feature or row variant as appropriate.'''
  tool = arcpy.CopyFeatures_management if hasGeometry(source) else arcpy.CopyRows_management
  tool(source, target)
def multiplyDistance(dist, mult):
  '''Multiplies a linear-unit string such as "100 Meters" by mult, keeping the unit.

  The numeric part is truncated to an integer, matching the tool syntax.'''
  distNum, distUnit = dist.split()
  scaled = int(float(distNum) * mult)
  return '%d %s' % (scaled, distUnit)
def getSource(layer):
  # NOTE(review): despite the name this returns a boolean (whether the
  # dataset is a stored feature class), not a data source path -- looks like
  # an unfinished refactor; confirm against callers before renaming/fixing.
  desc = arcpy.Describe(layer)
  return (desc.dataType == u'FeatureClass')
def isInDatabase(location):
  '''Returns True if the location is inside a file or personal geodatabase, False otherwise.'''
  return any(marker in location for marker in ('.gdb', '.mdb'))
def folder(location):
  '''Returns a system folder in which the specified file is located.

  Walks up out of any geodatabase levels until a plain folder remains.'''
  current = location
  while isInDatabase(current):
    current = os.path.dirname(current)
  return current
def location(path):
  '''Returns the containing location (folder or geodatabase) of a dataset path.

  Paths with an extension are stripped component by component; layers are
  first resolved to their underlying feature class.'''
  if hasExt(path):
    while hasExt(path):
      path = os.path.dirname(path)
    return path
  if isFeatureClass(path):
    return os.path.dirname(path)
  return location(toFeatureClass(path))
def toInt(value, name):
  '''Converts a tool value to int; raises a ValueError naming the parameter on failure.'''
  try:
    return int(str(value))
  except (ValueError, UnicodeEncodeError):
    # call-style raise: valid on both Python 2 and 3
    raise ValueError('invalid ' + name + ' format: must be an integral number, got ' + str(value))
def toFloat(value, name):
  '''Converts a tool value to float, accepting a decimal comma; raises ValueError on failure.'''
  try:
    return float(str(value).replace(',', '.'))
  except (ValueError, UnicodeEncodeError):
    # call-style raise: valid on both Python 2 and 3
    raise ValueError('invalid ' + name + ' format: must be a number, got ' + str(value))
def toBool(value, name):
  '''Converts a tool value to bool: 'true' (any case) and '1' map to True, anything else False.'''
  try:
    return bool(str(value).lower() == 'true' or str(value) == '1')
  except (ValueError, TypeError):
    # call-style raise: valid on both Python 2 and 3
    raise ValueError('invalid ' + name + ' format: must be a number or "true" or "false", got ' + str(value))
def invertQuery(query):
  '''Returns an SQL negation of the query; an empty query stays empty.'''
  if not query:
    return ''
  return 'NOT (' + query + ')'
def inBounds(val, min, max):
  '''Clamps val to the closed interval [min, max].'''
  # note: the min/max parameters shadow the builtins inside this function
  return min if val < min else (max if val > max else val)
def maxKey(dictionary):
  '''Returns a key from the dictionary that corresponds to the highest value.'''
  # dict.iteritems() exists only on Python 2; keying max() by the value
  # lookup is equivalent (first key with the maximal value) and portable
  return max(dictionary, key=dictionary.get)
def constantLambda(value):
  '''Returns a function of one (ignored) argument that always returns value.'''
  def constant(_ignored):
    return value
  return constant
def sublayer(layer, subName):
  '''Returns the first sublayer with the given name within the layer.'''
  return arcpy.mapping.ListLayers(layer, subName)[0]
def getShapeType(layer):
  '''Returns the layer's geometry type in upper case (e.g. POINT, POLYGON).'''
  return arcpy.Describe(layer).shapeType.upper()
def recreate(layer, location, outName, transferFlds):
  '''Creates an empty copy of layer in location named outName, carrying over
  only the fields listed in transferFlds.'''
  created = arcpy.CreateFeatureclass_management(
    location, outName, getShapeType(layer), spatial_reference=layer)
  outPath = addFeatureExt(created.getOutput(0))
  fieldTypes = fieldTypeList(layer)
  for fld in transferFlds:
    arcpy.AddField_management(outPath, fld, inTypeToOut(fieldTypes[fld]))
  return outPath
def createTable(path, useDBF=True):
  '''Creates a table at path; outside a geodatabase the DBF extension is
  appended when useDBF is set.'''
  location = os.path.dirname(path)
  name = os.path.basename(path)
  if useDBF and not isInDatabase(location):
    name = addTableExt(name)
  return arcpy.CreateTable_management(location, name).getOutput(0)
def createFeatureClass(path, shapeType=None, template=None, crs=None):
  '''Creates a feature class at path from a template and/or spatial reference.

  Raises:
    ValueError: when neither a template nor a spatial reference is given.'''
  if not (template or crs):
    # call-style raise: valid on both Python 2 and 3
    raise ValueError('insufficient shape data for {} provided'.format(path))
  if shapeType is None and crs is not None:
    # NOTE(review): getShapeType describes its argument, so this only works
    # if crs is itself a describable dataset -- confirm against callers
    shapeType = getShapeType(crs)
  return addFeatureExt(arcpy.CreateFeatureclass_management(
    os.path.dirname(path), os.path.basename(path),
    geometry_type=shapeType, template=template, spatial_reference=crs).getOutput(0))
def overwrite(val=True):
  '''Globally enables (or disables) overwriting of existing tool outputs.'''
  arcpy.env.overwriteOutput = val
def delete(*args):
  '''Best-effort deletion of any number of datasets; failures are ignored.'''
  for item in args:
    try:
      arcpy.Delete_management(item)
    except Exception:
      # per-item suppression: previously the try wrapped the whole loop, so
      # the first failure silently skipped all remaining deletions
      pass
def addField(layer, name, fldType):
  '''Adds a field of the given Python type, truncating the name to the
  10-character shapefile limit where necessary.'''
  fieldName = name[:10] if isShapefile(layer) else name
  arcpy.AddField_management(layer, fieldName, pyTypeToOut(fldType))
class runtool:
  '''Context manager wrapping an ArcGIS tool run: reads the tool parameters,
  sets the overwrite flag and converts uncaught exceptions into tool errors
  (full tracebacks in debug mode).'''
  def __init__(self, parcount=0, debug=None, overwrite=True):
    global debugMode
    if debug is not None:
      # bug fix: the assignment previously created a function-local variable,
      # so the module-level debugMode flag (read by __exit__ and the
      # messaging helpers) was never actually updated
      debugMode = debug
    arcpy.env.overwriteOutput = overwrite
    if parcount:
      self.params = parameters(parcount)
  def __getitem__(self, index):
    return self.params[index]
  def __enter__(self):
    return self.params
  def __exit__(self, exc_type, exc_value, tb):
    # returning True suppresses the exception after reporting it to ArcPy
    if exc_type is not None:
      if debugMode:
        debug('\n'.join(traceback.format_exception(exc_type, exc_value, tb)))
      else:
        arcpy.AddError(u'{} {}'.format(exc_type, exc_value))
      return True
    else:
      done()
class Messenger:
  '''Object-oriented facade over the module-level messaging functions.

  Subclasses (e.g. MutedMessenger) can override individual channels.'''
  def __init__(self, debugMode=False):
    # the debugMode argument is currently ignored; the module-level flag rules
    pass
  def done(self):
    '''Signals ArcPy that the script has successfully terminated. If the script is running in a debug mode, raises an error to bring the tool dialog up again for debugger's convenience; otherwise just displays the message.'''
    done()
  def message(self, mess):
    '''Adds a simple message to the output log.'''
    message(mess)
  def progress(self, message):
    '''Signals tool progress by setting the progressor label. In debug mode, prints a message instead.'''
    progress(message)
  def progressor(self, message, count):
    '''Returns a step ProgressBar over count items, labelled with message.'''
    return progressor(message, count)
  def debug(self, message):
    '''Displays a debug message (only in debug mode).'''
    debug(message)
  def warning(self, message):
    '''Displays a warning.'''
    warning(message)
  def error(self, message):
    '''Raises an error with the specified message using the ArcPy mechanism.'''
    error(message)
  def getDebugMode(self):
    '''Returns the module-level debug flag.'''
    return debugMode
class MutedMessenger(Messenger):
  '''A Messenger that suppresses messages and progress reporting.'''
  def __init__(self):
    pass
  def message(self, message):
    pass
  def progress(self, message):
    pass
  def progressor(self, message, count):
    # hands out a no-op progress bar so callers may still call move()/end()
    return MutedProgressBar()
class ProgressBar:
  '''ArcPy progress bar control class.'''
  def __init__(self, text, count):
    '''Initializes the progress bar going from 0 to 100 % with the text to display above it and the number of steps to be performed.'''
    self.text = text
    self.count = count
    # per-step increment in per cent; the on-screen bar only advances when an
    # accumulated whole per cent has been crossed
    self.progressBy = 100.0 / self.count
    self.posBy = int(self.progressBy) if int(self.progressBy) >= 1 else 1
    self.position = 0 # progressbar position
    self.posExact = 0 # exact position (noninteger)
    self.progress = 0 # how many of count has passed
    arcpy.SetProgressor('step', self.text, self.position, 100, self.posBy)
  def move(self):
    '''Signals the progress bar that one step has been performed. The progress bar may move forward if the step means at least one per cent difference.'''
    self.progress += 1
    self.posExact += self.progressBy
    if int(self.posExact) > self.position: # if crossed per cent to one more
      self.position = int(self.posExact)
      arcpy.SetProgressorPosition(self.position)
  def end(self):
    '''Ends the counting and resets the progressor.'''
    arcpy.ResetProgressor()
class MutedProgressBar(ProgressBar):
  '''A ProgressBar stand-in that deliberately does nothing (for muted output).'''
  def __init__(self, *args):
    # intentionally skips ProgressBar.__init__, so no arcpy progressor is set up
    pass
  def move(self):
    pass
  def end(self):
    pass
class MessagingClass:
  '''Base class giving subclasses a messenger attribute for tool output.'''
  def __init__(self, messenger=None):
    # a fresh Messenger is created per instance; the previous default
    # (messenger=Messenger(True)) was a single shared instance evaluated at
    # definition time -- the classic mutable-default pitfall
    self.messenger = Messenger(True) if messenger is None else messenger
  def done(self):
    '''Reports successful termination through the messenger.'''
    self.messenger.done()
class PathManager:
  '''Context manager handing out temporary file/layer/field names alongside an
  output path and removing them again on exit.'''
  def __init__(self, outPath, delete=True):
    self.outPath = outPath
    self.location = os.path.dirname(self.outPath)
    self.outName = os.path.basename(self.outPath)
    self.tmpCount = -1
    # bug fix: the delete argument was previously ignored (always True)
    self.delete = delete
  def __enter__(self):
    self.tmpFiles = []
    self.tmpFields = {}
    # seed from the location so temp names are reproducible per output place
    random.seed(self.location)
    return self
  def tmpFile(self):
    '''Reserves and returns a new temporary dataset path (tracked for deletion).'''
    tmp = self._tmpPath()
    self.tmpFiles.append(tmp)
    return tmp
  def tmpLayer(self):
    '''Returns a fresh temporary layer name (not tracked for deletion).'''
    return self._tmpName()
  def tmpField(self, layer, fldType):
    '''Adds a temporary field of fldType to layer and returns its name.'''
    name = self._tmpName().upper()
    arcpy.AddField_management(layer, name, pyTypeToOut(fldType))
    if layer not in self.tmpFields: self.tmpFields[layer] = []
    self.tmpFields[layer].append(name)
    return name
  def _tmpName(self):
    # counter plus a random suffix to dodge clashes with leftovers
    self.tmpCount += 1
    return 'tmp_{:02d}{:04d}'.format(self.tmpCount, int(1e4 * random.random()))
  def _tmpPath(self):
    return featurePath(self.location, self._tmpName())
  def __exit__(self, *args):
    if self.delete:
      progress('deleting temporary files')
      for file in self.tmpFiles:
        try:
          arcpy.Delete_management(file)
        except Exception:
          pass # best-effort cleanup
      progress('deleting temporary fields')
      for layer in self.tmpFields:
        try:
          arcpy.DeleteField_management(layer, self.tmpFields[layer])
        except Exception:
          pass # best-effort cleanup
  def getLocation(self):
    '''Returns the output location (folder or geodatabase).'''
    return self.location
  def getOutputName(self):
    '''Returns the output dataset name.'''
    return self.outName
def warning(text):
  '''Displays a warning to the tool user (prefixed with WARNING: in debug mode).'''
  prefix = u'WARNING: ' if debugMode else u''
  arcpy.AddWarning(prefix + encodeMessage(text))
def debug(*args):
  '''Displays a debug message (only in debug mode).'''
  if not debugMode:
    return
  rendered = ' '.join(encodeMessage(arg) for arg in args)
  arcpy.AddMessage(u'DEBUG: ' + rendered)
def progress(text):
  '''Signals tool progress by setting the progressor label (also logged in debug mode).'''
  label = encodeProgress(text)
  if debugMode:
    arcpy.AddMessage(u'PROGRESS: ' + label)
  arcpy.SetProgressorLabel(label)
def done():
  '''Reports successful termination: logs a progress line in debug mode and
  always resets the progressor caption to "Done.".'''
  if debugMode:
    arcpy.AddMessage('PROGRESS: Done.')
  arcpy.SetProgressor('default', 'Done.')
def message(text):
  '''Signals an ordinary message to the user.'''
  # encodeMessage guards against mixed str/unicode input
  arcpy.AddMessage(encodeMessage(text))
def encodeMessage(text):
  '''Encodes the message to UNICODE (Python 2 semantics), falling back to a
  placeholder when the text cannot be decoded.'''
  try:
    if isinstance(text, unicode):
      return text
    if isinstance(text, str):
      return unicode(text, encoding='utf8')
    return str(text)
  except (UnicodeEncodeError, UnicodeDecodeError):
    return 'unknown message'
def encodeProgress(text):
  '''Capitalizes the first letter, appends an ellipsis and unicode-encodes the text.'''
  capitalized = text[:1].upper() + text[1:]
  return encodeMessage(capitalized + '...')
def progressor(text, count):
  '''Shows a progress label and returns a step ProgressBar over count items.'''
  progress(text)
  bar = ProgressBar(encodeProgress(text), count)
  return bar
def getDebugMode():
  '''Returns the module-level debug flag.'''
  return debugMode
|
|
"""Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Joel Nothman <joel.nothman@gmail.com>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'  # hashes are stored as big-endian unsigned 32-bit integers
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8  # bits per hash word (32)
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
                               left_masks, right_masks):
    """Find the longest prefix match in tree for each query in bin_X
    Most significant bits are considered as the prefix.
    """
    # vectorized binary search over prefix lengths: lo/hi bracket the longest
    # matching prefix for every query, res holds the best length found so far
    hi = np.empty_like(bin_X, dtype=np.intp)
    hi.fill(hash_size)
    lo = np.zeros_like(bin_X, dtype=np.intp)
    res = np.empty_like(bin_X, dtype=np.intp)
    # first probe with the full hash length: queries that match completely
    # are finished immediately (lo == hi stops further refinement)
    left_idx, right_idx = _find_matching_indices(tree, bin_X,
                                                 left_masks[hi],
                                                 right_masks[hi])
    found = right_idx > left_idx
    res[found] = lo[found] = hash_size
    r = np.arange(bin_X.shape[0])
    kept = r[lo < hi]  # indices remaining in bin_X mask
    while kept.shape[0]:
        mid = (lo.take(kept) + hi.take(kept)) // 2
        left_idx, right_idx = _find_matching_indices(tree,
                                                     bin_X.take(kept),
                                                     left_masks[mid],
                                                     right_masks[mid])
        found = right_idx > left_idx
        # a match at length mid: record it and search longer prefixes
        mid_found = mid[found]
        lo[kept[found]] = mid_found + 1
        res[kept[found]] = mid_found
        # no match: the answer is strictly shorter than mid
        hi[kept[~found]] = mid[~found]
        kept = r[lo < hi]
    return res
class ProjectionToHashMixin(object):
    """Turn a transformed real-valued array into a hash"""
    @staticmethod
    def _to_hash(projected):
        """Pack the sign pattern of the projection into 32-bit hash words."""
        if projected.shape[1] % 8 != 0:
            raise ValueError('Require reduced dimensionality to be a multiple '
                             'of 8 for hashing')
        # XXX: perhaps non-copying operation better
        bits = (projected > 0).astype(int)
        packed = np.packbits(bits).view(dtype=HASH_DTYPE)
        return packed.reshape(projected.shape[0], -1)
    def fit_transform(self, X, y=None):
        """Fit the underlying projection on X, then hash-transform X."""
        self.fit(X)
        return self.transform(X)
    def transform(self, X, y=None):
        """Project X with the parent transformer and hash the result."""
        projected = super(ProjectionToHashMixin, self).transform(X)
        return self._to_hash(projected)
class GaussianRandomProjectionHash(ProjectionToHashMixin,
                                   GaussianRandomProjection):
    """Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
    def __init__(self,
                 n_components=8,
                 random_state=None):
        # n_components is the hash length in bits; the mixin packs the sign
        # pattern of each projected sample into 32-bit hash words
        super(GaussianRandomProjectionHash, self).__init__(
            n_components=n_components,
            random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
    """Performs approximate nearest neighbor search using LSH forest.
    LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
    method for vanilla approximate nearest neighbor search methods.
    LSH forest data structure has been implemented using sorted
    arrays and binary search and 32 bit fixed-length hashes.
    Random projection is used as the hash family which approximates
    cosine distance.
    The cosine distance is defined as ``1 - cosine_similarity``: the lowest
    value is 0 (identical point) but it is bounded above by 2 for the farthest
    points. Its value does not depend on the norm of the vector points but
    only on their relative angles.
    Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
    Parameters
    ----------
    n_estimators : int (default = 10)
        Number of trees in the LSH Forest.
    min_hash_match : int (default = 4)
        lowest hash length to be searched when candidate selection is
        performed for nearest neighbors.
    n_candidates : int (default = 10)
        Minimum number of candidates evaluated per estimator, assuming enough
        items meet the `min_hash_match` constraint.
    n_neighbors : int (default = 5)
        Number of neighbors to be returned from query function when
        it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
        Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
    radius_cutoff_ratio : float, optional (default = 0.9)
        A value ranges from 0 to 1. Radius neighbors will be searched until
        the ratio between total neighbors within the radius and the total
        candidates becomes less than this value unless it is terminated by
        hash length reaching `min_hash_match`.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Attributes
    ----------
    hash_functions_ : list of GaussianRandomProjectionHash objects
        Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
        stored in GaussianRandomProjectionHash object and can be obtained
        from ``components_`` attribute.
    trees_ : array, shape (n_estimators, n_samples)
        Each tree (corresponding to a hash function) contains an array of
        sorted hashed values. The array representation may change in future
        versions.
    original_indices_ : array, shape (n_estimators, n_samples)
        Original indices of sorted hashed values in the fitted index.
    References
    ----------
    .. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
           Indexes for Similarity Search", WWW '05 Proceedings of the
           14th international conference on World Wide Web, 651-660,
           2005.
    Examples
    --------
    >>> from sklearn.neighbors import LSHForest
    >>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
    >>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
    >>> lshf = LSHForest()
    >>> lshf.fit(X_train)  # doctest: +NORMALIZE_WHITESPACE
    LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
              n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
              random_state=None)
    >>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
    >>> distances  # doctest: +ELLIPSIS
    array([[ 0.069...,  0.149...],
           [ 0.229...,  0.481...],
           [ 0.004...,  0.014...]])
    >>> indices
    array([[1, 2],
           [2, 0],
           [4, 0]])
    """
    def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
                 n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
                 random_state=None):
        self.n_estimators = n_estimators
        self.radius = radius
        self.random_state = random_state
        self.n_candidates = n_candidates
        self.n_neighbors = n_neighbors
        self.min_hash_match = min_hash_match
        self.radius_cutoff_ratio = radius_cutoff_ratio
    def _compute_distances(self, query, candidates):
        """Computes the cosine distance.
        Distance is from the query to points in the candidates array.
        Returns argsort of distances in the candidates
        array and sorted distances.
        """
        if candidates.shape == (0,):
            # needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
            # (np.int was removed in NumPy 1.24; the builtin int is equivalent)
            return np.empty(0, dtype=int), np.empty(0, dtype=float)
        if sparse.issparse(self._fit_X):
            candidate_X = self._fit_X[candidates]
        else:
            candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
        distances = pairwise_distances(query, candidate_X,
                                       metric='cosine')[0]
        distance_positions = np.argsort(distances)
        distances = distances.take(distance_positions, mode='clip', axis=0)
        return distance_positions, distances
    def _generate_masks(self):
        """Creates left and right masks for all hash lengths."""
        tri_size = MAX_HASH_SIZE + 1
        # Called once on fitting, output is independent of hashes
        left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
        right_mask = left_mask[::-1, ::-1]
        self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
        self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
    def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
        """Performs the Synchronous ascending phase.
        Returns an array of candidates, their distance ranks and
        distances.
        """
        index_size = self._fit_X.shape[0]
        # Number of candidates considered including duplicates
        # XXX: not sure whether this is being calculated correctly wrt
        #      duplicates from different iterations through a single tree
        n_candidates = 0
        candidate_set = set()
        min_candidates = self.n_candidates * self.n_estimators
        while (max_depth > self.min_hash_match and
               (n_candidates < min_candidates or
                len(candidate_set) < n_neighbors)):
            left_mask = self._left_mask[max_depth]
            right_mask = self._right_mask[max_depth]
            for i in range(self.n_estimators):
                start, stop = _find_matching_indices(self.trees_[i],
                                                     bin_queries[i],
                                                     left_mask, right_mask)
                n_candidates += stop - start
                candidate_set.update(
                    self.original_indices_[i][start:stop].tolist())
            max_depth -= 1
        candidates = np.fromiter(candidate_set, count=len(candidate_set),
                                 dtype=np.intp)
        # For insufficient candidates, candidates are filled.
        # Candidates are filled from unselected indices uniformly.
        if candidates.shape[0] < n_neighbors:
            warnings.warn(
                "Number of candidates is not sufficient to retrieve"
                " %i neighbors with"
                " min_hash_match = %i. Candidates are filled up"
                " uniformly from unselected"
                " indices." % (n_neighbors, self.min_hash_match))
            remaining = np.setdiff1d(np.arange(0, index_size), candidates)
            to_fill = n_neighbors - candidates.shape[0]
            candidates = np.concatenate((candidates, remaining[:to_fill]))
        ranks, distances = self._compute_distances(query,
                                                   candidates.astype(int))
        return (candidates[ranks[:n_neighbors]],
                distances[:n_neighbors])
    def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
        """Finds radius neighbors from the candidates obtained.
        Their distances from query are smaller than radius.
        Returns radius neighbors and distances.
        """
        ratio_within_radius = 1
        threshold = 1 - self.radius_cutoff_ratio
        total_candidates = np.array([], dtype=int)
        total_neighbors = np.array([], dtype=int)
        total_distances = np.array([], dtype=float)
        while (max_depth > self.min_hash_match and
               ratio_within_radius > threshold):
            left_mask = self._left_mask[max_depth]
            right_mask = self._right_mask[max_depth]
            candidates = []
            for i in range(self.n_estimators):
                start, stop = _find_matching_indices(self.trees_[i],
                                                     bin_queries[i],
                                                     left_mask, right_mask)
                candidates.extend(
                    self.original_indices_[i][start:stop].tolist())
            candidates = np.setdiff1d(candidates, total_candidates)
            total_candidates = np.append(total_candidates, candidates)
            ranks, distances = self._compute_distances(query, candidates)
            m = np.searchsorted(distances, radius, side='right')
            positions = np.searchsorted(total_distances, distances[:m])
            total_neighbors = np.insert(total_neighbors, positions,
                                        candidates[ranks[:m]])
            total_distances = np.insert(total_distances, positions,
                                        distances[:m])
            ratio_within_radius = (total_neighbors.shape[0] /
                                   float(total_candidates.shape[0]))
            max_depth = max_depth - 1
        return total_neighbors, total_distances
    def fit(self, X, y=None):
        """Fit the LSH forest on the data.
        This creates binary hashes of input data points by getting the
        dot product of input points and hash_function then
        transforming the projection into a binary string array based
        on the sign (positive/negative) of the projection.
        A sorted array of binary hashes is created.
        Parameters
        ----------
        X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        self : object
            Returns self.
        """
        self._fit_X = check_array(X, accept_sparse='csr')
        # Creates a g(p,x) for each tree
        self.hash_functions_ = []
        self.trees_ = []
        self.original_indices_ = []
        rng = check_random_state(self.random_state)
        int_max = np.iinfo(np.int32).max
        for i in range(self.n_estimators):
            # This is g(p,x) for a particular tree.
            # Builds a single tree. Hashing is done on an array of data points.
            # `GaussianRandomProjection` is used for hashing.
            # `n_components` = hash size and n_features = n_dim.
            hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
                                                  rng.randint(0, int_max))
            hashes = hasher.fit_transform(self._fit_X)[:, 0]
            original_index = np.argsort(hashes)
            bin_hashes = hashes[original_index]
            self.original_indices_.append(original_index)
            self.trees_.append(bin_hashes)
            self.hash_functions_.append(hasher)
        self._generate_masks()
        return self
    def _query(self, X):
        """Performs descending phase to find maximum depth."""
        # Calculate hashes of shape (n_samples, n_estimators, [hash_size])
        bin_queries = np.asarray([hasher.transform(X)[:, 0]
                                  for hasher in self.hash_functions_])
        bin_queries = np.rollaxis(bin_queries, 1)
        # descend phase
        depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
                                             self._left_mask, self._right_mask)
                  for tree, tree_queries in zip(self.trees_,
                                                np.rollaxis(bin_queries, 1))]
        return bin_queries, np.max(depths, axis=0)
    def kneighbors(self, X, n_neighbors=None, return_distance=True):
        """Returns n_neighbors of approximate nearest neighbors.
        Parameters
        ----------
        X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single query.
        n_neighbors : int, optional (default = None)
            Number of neighbors required. If not provided, this will
            return the number specified at the initialization.
        return_distance : boolean, optional (default = False)
            Returns the distances of neighbors if set to True.
        Returns
        -------
        dist : array, shape (n_samples, n_neighbors)
            Array representing the cosine distances to each point,
            only present if return_distance=True.
        ind : array, shape (n_samples, n_neighbors)
            Indices of the approximate nearest points in the population
            matrix.
        """
        if not hasattr(self, 'hash_functions_'):
            raise ValueError("estimator should be fitted.")
        if n_neighbors is None:
            n_neighbors = self.n_neighbors
        X = check_array(X, accept_sparse='csr')
        neighbors, distances = [], []
        bin_queries, max_depth = self._query(X)
        for i in range(X.shape[0]):
            neighs, dists = self._get_candidates(X[[i]], max_depth[i],
                                                 bin_queries[i],
                                                 n_neighbors)
            neighbors.append(neighs)
            distances.append(dists)
        if return_distance:
            return np.array(distances), np.array(neighbors)
        else:
            return np.array(neighbors)
    def radius_neighbors(self, X, radius=None, return_distance=True):
        """Finds the neighbors within a given radius of a point or points.
        Return the indices and distances of some points from the dataset
        lying in a ball with size ``radius`` around the points of the query
        array. Points lying on the boundary are included in the results.
        The result points are *not* necessarily sorted by distance to their
        query point.
        LSH Forest being an approximate method, some true neighbors from the
        indexed dataset might be missing from the results.
        Parameters
        ----------
        X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single query.
        radius : float
            Limiting distance of neighbors to return.
            (default is the value passed to the constructor).
        return_distance : boolean, optional (default = False)
            Returns the distances of neighbors if set to True.
        Returns
        -------
        dist : array, shape (n_samples,) of arrays
            Each element is an array representing the cosine distances
            to some points found within ``radius`` of the respective query.
            Only present if ``return_distance=True``.
        ind : array, shape (n_samples,) of arrays
            Each element is an array of indices for neighbors within ``radius``
            of the respective query.
        """
        if not hasattr(self, 'hash_functions_'):
            raise ValueError("estimator should be fitted.")
        if radius is None:
            radius = self.radius
        X = check_array(X, accept_sparse='csr')
        neighbors, distances = [], []
        bin_queries, max_depth = self._query(X)
        for i in range(X.shape[0]):
            neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
                                                       bin_queries[i], radius)
            neighbors.append(neighs)
            distances.append(dists)
        if return_distance:
            return _array_of_arrays(distances), _array_of_arrays(neighbors)
        else:
            return _array_of_arrays(neighbors)
    def partial_fit(self, X, y=None):
        """
        Inserts new data into the already fitted LSH Forest.
        Cost is proportional to new total size, so additions
        should be batched.
        Parameters
        ----------
        X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
            New data point to be inserted into the LSH Forest.
        """
        X = check_array(X, accept_sparse='csr')
        if not hasattr(self, 'hash_functions_'):
            return self.fit(X)
        if X.shape[1] != self._fit_X.shape[1]:
            raise ValueError("Number of features in X and"
                             " fitted array does not match.")
        n_samples = X.shape[0]
        n_indexed = self._fit_X.shape[0]
        for i in range(self.n_estimators):
            bin_X = self.hash_functions_[i].transform(X)[:, 0]
            # gets the position to be added in the tree.
            positions = self.trees_[i].searchsorted(bin_X)
            # adds the hashed value into the tree.
            self.trees_[i] = np.insert(self.trees_[i],
                                       positions, bin_X)
            # add the entry into the original_indices_.
            self.original_indices_[i] = np.insert(self.original_indices_[i],
                                                  positions,
                                                  np.arange(n_indexed,
                                                            n_indexed +
                                                            n_samples))
        # adds the entry into the input_array.
        if sparse.issparse(X) or sparse.issparse(self._fit_X):
            self._fit_X = sparse.vstack((self._fit_X, X))
        else:
            # np.row_stack is a deprecated alias of np.vstack (NumPy >= 2.0)
            self._fit_X = np.vstack((self._fit_X, X))
        return self
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
:py:mod:`feature_test.py` - Feature testing
-------------------------------------------
A simple test that plots a bunch of different light curves.
The output should be similar to this figure:
.. figure:: ../pysyzygy/img/feature_test.png
:width: 600px
:align: center
:height: 100px
:alt: alternate text
:figclass: align-center
'''
from __future__ import division, print_function, absolute_import, unicode_literals
import pysyzygy as ps
import numpy as np
import matplotlib.pyplot as pl
if __name__ == '__main__':
  # Build a 5x5 grid of panels; each panel demonstrates one pysyzygy
  # Transit feature (the first 23 axes are used).
  fig, ax = pl.subplots(5, 5, figsize = (24,16))
  fig.subplots_adjust(wspace = 0, hspace = 0, left = 0.01, right = 0.99, bottom = 0.01, top = 0.99)
  ax = ax.flatten()
  # Hide tick labels everywhere; the plots are qualitative comparisons.
  for axis in ax:
    axis.xaxis.set_ticklabels([])
    axis.yaxis.set_ticklabels([])
  # Different radii
  time = np.linspace(-0.5,0.5,1000)
  for RpRs in [0.1, 0.09, 0.08]:
    trn = ps.Transit(RpRs = RpRs)
    ax[0].plot(time, trn(time), label = 'RpRs = %.2f' % RpRs)
  ax[0].legend(loc = 'lower left', fontsize = 8)
  ax[0].margins(0.,0.2)
  ax[0].annotate('DIFFERENT RADII', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Different periods
  time = np.linspace(-0.5,0.5,1000)
  for per in [10., 20., 30.]:
    trn = ps.Transit(per = per)
    ax[1].plot(time, trn(time), label = 'per = %.0f' % per)
  ax[1].legend(loc = 'lower left', fontsize = 8)
  ax[1].margins(0.,0.2)
  ax[1].annotate('DIFFERENT PERIODS', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Different impact params
  time = np.linspace(-0.5,0.5,1000)
  for b in [0., 0.25, 0.5, 0.75, 0.99]:
    trn = ps.Transit(b = b)
    ax[2].plot(time, trn(time), label = 'b = %.2f' % b)
  ax[2].legend(loc = 'lower left', fontsize = 8)
  ax[2].margins(0.,0.2)
  ax[2].annotate('DIFFERENT IMPACT', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Different densities
  time = np.linspace(-0.5,0.5,1000)
  for rhos in [1.4, 10.0, 0.1]:
    trn = ps.Transit(rhos = rhos)
    ax[3].plot(time, trn(time), label = 'rhos = %.2f' % rhos)
  ax[3].legend(loc = 'lower left', fontsize = 8)
  ax[3].margins(0.,0.2)
  ax[3].annotate('DIFFERENT STELLAR DENSITIES', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Different semi-major axes
  time = np.linspace(-0.5,0.5,1000)
  for aRs in [19.5, 5., 50.]:
    trn = ps.Transit(aRs = aRs)
    ax[4].plot(time, trn(time), label = 'aRs = %.2f' % aRs)
  ax[4].legend(loc = 'lower left', fontsize = 8)
  ax[4].margins(0.,0.2)
  ax[4].annotate('DIFFERENT SEMI-MAJOR AXES', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Different masses
  time = np.linspace(-0.5,0.5,1000)
  for MpMs in [0., 0.1, 1.]:
    trn = ps.Transit(MpMs = MpMs)
    ax[5].plot(time, trn(time), label = 'MpMs = %.2f' % MpMs)
  ax[5].legend(loc = 'lower left', fontsize = 8)
  ax[5].margins(0.,0.2)
  ax[5].annotate('DIFFERENT MASSES', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Different eccentricities
  time = np.linspace(-0.5,0.5,1000)
  for ecc in [0., 0.25, 0.5, 0.75, 0.9]:
    trn = ps.Transit(ecc = ecc)
    ax[6].plot(time, trn(time), label = 'ecc = %.2f' % ecc)
  ax[6].legend(loc = 'lower left', fontsize = 8)
  ax[6].margins(0.,0.2)
  ax[6].annotate('DIFFERENT ECCENTRICITIES', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Different omegas
  time = np.linspace(-0.5,0.5,1000)
  for w in [0., np.pi/8, np.pi/4, np.pi/3, np.pi/2]:
    trn = ps.Transit(aRs = 5, ecc = 0.75, w = w)
    ax[7].plot(time, trn(time), label = 'w = %.2f' % w)
  ax[7].legend(loc = 'lower left', fontsize = 8)
  ax[7].margins(0.,0.2)
  ax[7].annotate('DIFFERENT OMEGAS', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Different esw/ecw
  time = np.linspace(-0.5,0.5,1000)
  for esw, ecw in zip([0., 0.25, -0.25, 0.5], [0., -0.25, 0.5, 0.25]):
    trn = ps.Transit(aRs = 10, esw = esw, ecw = ecw)
    ax[8].plot(time, trn(time), label = 'esw = %.2f, ecw = %.2f' % (esw, ecw))
  ax[8].legend(loc = 'lower left', fontsize = 8)
  ax[8].margins(0.,0.2)
  ax[8].annotate('DIFFERENT esinw/ecosw', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Different limb darkening 1
  time = np.linspace(-0.5,0.5,1000)
  for u1, u2 in zip([0., 0.25, 0.5, 0.75], [0., 0.25, -0.5, -0.25]):
    trn = ps.Transit(aRs = 5, u1 = u1, u2 = u2)
    ax[9].plot(time, trn(time), label = 'u1 = %.2f, u2 = %.2f' % (u1, u2))
  ax[9].legend(loc = 'upper center', fontsize = 8)
  ax[9].margins(0.,0.2)
  ax[9].annotate('DIFFERENT LD', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Different limb darkening 2
  time = np.linspace(-0.5,0.5,1000)
  for q1, q2 in zip([0., 0.25, 0.5, 0.75], [0., 0.5, 0.25, 0.1]):
    trn = ps.Transit(aRs = 5, q1 = q1, q2 = q2)
    ax[10].plot(time, trn(time), label = 'q1 = %.2f, q2 = %.2f' % (q1, q2))
  ax[10].legend(loc = 'upper center', fontsize = 8)
  ax[10].margins(0.,0.2)
  ax[10].annotate('DIFFERENT LD', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Different t0
  time = np.linspace(-0.5,0.5,1000)
  for t0 in [-0.1, 0., 0.1]:
    trn = ps.Transit(t0 = t0)
    ax[11].plot(time, trn(time), label = 't0 = %.2f' % t0)
  ax[11].legend(loc = 'lower left', fontsize = 8)
  ax[11].margins(0.,0.2)
  ax[11].annotate('DIFFERENT t0', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Different times
  time = np.linspace(-0.5,0.5,1000)
  for times in [[-0.3, 0.3], [-0.4, 0., 0.4], [0.]]:
    trn = ps.Transit(times = times)
    ax[12].plot(time, trn(time), label = 'times = %s' % times)
  ax[12].legend(loc = 'lower left', fontsize = 8)
  ax[12].margins(0.,0.2)
  ax[12].annotate('DIFFERENT TRANSIT TIMES', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Different exposure times (1765.5 s is presumably the Kepler
  # long-cadence exposure -- TODO confirm)
  time = np.linspace(-0.5,0.5,1000)
  for exptime in [(1765.5/86400.), 10 * (1765.5/86400.), 0.2 * (1765.5/86400.)]:
    trn = ps.Transit(aRs = 5, exptime = exptime)
    ax[13].plot(time, trn(time), label = 'exptime = %.3f' % exptime)
  ax[13].legend(loc = 'lower left', fontsize = 8)
  ax[13].margins(0.,0.2)
  ax[13].annotate('DIFFERENT EXP TIMES', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Different number of exposure points
  time = np.linspace(-0.5,0.5,1000)
  for exppts in [4, 10, 100]:
    trn = ps.Transit(aRs = 5, exptime = 0.1, exppts = exppts)
    ax[14].plot(time, trn(time), label = 'exppts = %d' % exppts)
  ax[14].legend(loc = 'lower left', fontsize = 8)
  ax[14].margins(0.,0.2)
  ax[14].annotate('DIFFERENT NUMBER OF EXP PTS', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Different binning method
  time = np.linspace(-0.5,0.5,1000)
  for binmethod in [ps.RIEMANN, ps.TRAPEZOID]:
    trn = ps.Transit(aRs = 5, binmethod = binmethod, exppts = 4, exptime = 0.1)
    ax[15].plot(time, trn(time), label = 'binmethod = %d' % binmethod)
  ax[15].legend(loc = 'lower left', fontsize = 8)
  ax[15].margins(0.,0.2)
  ax[15].annotate('DIFFERENT BIN METHOD', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Different integration method (curves should be identical)
  time = np.linspace(-0.5,0.5,1000)
  for intmethod in [ps.SMARTINT, ps.SLOWINT]:
    trn = ps.Transit(aRs = 5, intmethod = intmethod, exppts = 4, exptime = 0.1)
    ax[16].plot(time, trn(time), label = 'intmethod = %d' % intmethod)
  ax[16].legend(loc = 'lower left', fontsize = 8)
  ax[16].margins(0.,0.2)
  ax[16].annotate('SHOULD BE IDENTICAL', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Different kepler solver (curves should be identical)
  time = np.linspace(-0.5,0.5,1000)
  for kepsolver in [ps.MDFAST, ps.NEWTON]:
    trn = ps.Transit(aRs = 5, ecc = 0.75, w = np.pi/10, kepsolver = kepsolver)
    ax[17].plot(time, trn(time), label = 'kepsolver = %d' % kepsolver)
  ax[17].legend(loc = 'lower left', fontsize = 8)
  ax[17].margins(0.,0.2)
  ax[17].annotate('SHOULD BE IDENTICAL', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Binned/unbinned flux
  time = np.linspace(-0.5,0.5,1000)
  for param in ['binned', 'unbinned']:
    trn = ps.Transit(exptime = 0.1)
    ax[18].plot(time, trn(time, param = param), label = 'param = %s' % param)
  ax[18].legend(loc = 'lower left', fontsize = 8)
  ax[18].margins(0.,0.2)
  ax[18].annotate('BINNED/UNBINNED', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Mean anomaly / eccentric anomaly / true anomaly
  time = np.linspace(-6,6,1000)
  trn = ps.Transit(per = 5., ecc = 0.4, fullorbit = True, maxpts = 20000)
  for param in ['M', 'E', 'f']:
    ax[19].plot(time, trn(time, param = param), label = 'param = %s' % param)
  ax[19].legend(loc = 'lower left', fontsize = 8)
  ax[19].margins(0.,0.2)
  ax[19].annotate('ANOMALIES', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # radius / impact parameter
  time = np.linspace(-6,6,1000)
  trn = ps.Transit(per = 5., ecc = 0.4, fullorbit = True, maxpts = 20000)
  for param in ['r', 'b']:
    ax[20].plot(time, trn(time, param = param), label = 'param = %s' % param)
  ax[20].legend(loc = 'lower left', fontsize = 8)
  ax[20].margins(0.,0.2)
  ax[20].annotate('r and b', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # xyz projections of the orbit
  time = np.linspace(-5,5,1000)
  trn = ps.Transit(ecc = 0.5, w = np.pi/8, b = 0.5, per = 5., fullorbit = True, maxpts = 20000)
  ax[21].plot(trn(time, 'x'), trn(time, 'y'), label = 'XY')
  ax[21].plot(trn(time, 'x'), trn(time, 'z'), label = 'XZ')
  ax[21].plot(trn(time, 'y'), trn(time, 'z'), label = 'YZ')
  ax[21].legend(loc = 'lower left', fontsize = 8)
  ax[21].margins(0.,0.2)
  ax[21].annotate('ORBIT', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Different omega, top view orbit
  time = np.linspace(-5,5,1000)
  for w in np.arange(4) * np.pi / 4:
    trn = ps.Transit(w = w, ecc = 0.5, b = 0., per = 5., fullorbit = True, maxpts = 20000)
    ax[22].plot(trn(time, 'x'), trn(time, 'z'), label = r'w = %.0f$^\circ$' % (w * 180./np.pi))
  ax[22].axvline(0, alpha = 0.5)
  ax[22].axhline(0, alpha = 0.5)
  ax[22].legend(loc = 'lower left', fontsize = 8)
  ax[22].margins(0.,0.2)
  ax[22].annotate('TOP VIEW', xy = (0.05, 0.9), xycoords = 'axes fraction')
  # Show the plot
  pl.show()
|
|
#-*- coding: utf-8 -*-
# Copyright 2010 Bastian Bowe
#
# This file is part of JayDeBeApi.
# JayDeBeApi is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# JayDeBeApi is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with JayDeBeApi. If not, see
# <http://www.gnu.org/licenses/>.
import datetime
import exceptions
import time
import re
import sys
# Hook used by connect(); bound to the Jython or JPype implementation
# at import time by _prepare_jython()/_prepare_jpype().
_jdbc_connect = None
# Factory producing Java byte[] arrays; lazily bound by the backend.
_java_array_byte = None
def _jdbc_connect_jython(jclassname, *args):
    """Open a JDBC connection when running under Jython.

    jclassname: fully qualified name of the JDBC driver class.
    args: forwarded to java.sql.DriverManager.getConnection().
    """
    # Build the java.sql.Types name -> constant mapping once, on the
    # first connection, and initialise the type converters from it.
    if _converters is None:
        from java.sql import Types
        types = Types
        types_map = {}
        # Only ALL_CAPS attributes of java.sql.Types are constants.
        const_re = re.compile('[A-Z][A-Z_]*$')
        for i in dir(types):
            if const_re.match(i):
                types_map[i] = getattr(types, i)
        _init_converters(types_map)
    # register driver for DriverManager
    __import__(jclassname)
    global _java_array_byte
    if _java_array_byte is None:
        import jarray
        # The nested def rebinds the module-level name declared global
        # above, so subsequent calls skip this branch.
        def _java_array_byte(data):
            return jarray.array(data, 'b')
    from java.sql import DriverManager
    return DriverManager.getConnection(*args)
def _prepare_jython():
    """Select the Jython implementation of the JDBC connect hook."""
    global _jdbc_connect
    _jdbc_connect = _jdbc_connect_jython
def _jdbc_connect_jpype(jclassname, *args):
    """Open a JDBC connection from CPython through JPype.

    Starts (or attaches to) the JVM on demand, then delegates to
    java.sql.DriverManager.getConnection(*args).
    """
    import jpype
    if not jpype.isJVMStarted():
        jpype.startJVM(jpype.getDefaultJVMPath())
    if not jpype.isThreadAttachedToJVM():
        jpype.attachThreadToJVM()
    # Initialise the java.sql.Types -> converter mapping on first use.
    if _converters is None:
        types = jpype.java.sql.Types
        types_map = {}
        for i in types.__javaclass__.getClassFields():
            types_map[i.getName()] = i.getStaticAttribute()
        _init_converters(types_map)
    global _java_array_byte
    if _java_array_byte is None:
        # Rebinds the module-level name declared global above.
        def _java_array_byte(data):
            return jpype.JArray(jpype.JByte, 1)(data)
    # register driver for DriverManager
    jpype.JClass(jclassname)
    return jpype.java.sql.DriverManager.getConnection(*args)
def _prepare_jpype():
    """Select the JPype implementation of the JDBC connect hook."""
    global _jdbc_connect
    _jdbc_connect = _jdbc_connect_jpype
# Pick the backend at import time: Jython runs on the JVM directly,
# CPython goes through JPype.
if sys.platform.lower().startswith('java'):
    _prepare_jython()
else:
    _prepare_jpype()
# Required DB-API 2.0 module globals (PEP 249): API level, thread
# safety level (1 = threads may share the module, not connections)
# and the parameter marker style ('qmark' = '?' placeholders).
apilevel = '2.0'
threadsafety = 1
paramstyle = 'qmark'
class DBAPITypeObject(object):
    """DB-API 2.0 type object (PEP 249).

    Compares equal to any of the SQL type names it was constructed
    with, e.g. ``STRING == 'VARCHAR'`` holds.
    """
    def __init__(self, *values):
        self.values = values
    def __cmp__(self, other):
        # Python 2 comparison protocol (ignored by Python 3).
        if other in self.values:
            return 0
        if other < self.values:
            return 1
        else:
            return -1
    def __eq__(self, other):
        # Explicit equality so the PEP 249 comparison semantics also
        # work on Python 3, where __cmp__ is no longer consulted.
        return other in self.values
    def __ne__(self, other):
        return other not in self.values
    def __hash__(self):
        # Defining __eq__ would otherwise make instances unhashable
        # on Python 3; hash on the immutable value tuple.
        return hash(self.values)
# DB-API 2.0 type objects (PEP 249): each compares equal to the JDBC
# type names it groups, so cursor descriptions can be tested against
# these singletons.
STRING = DBAPITypeObject("CHARACTER", "CHAR", "VARCHAR",
                         "CHARACTER VARYING", "CHAR VARYING", "STRING",)
TEXT = DBAPITypeObject("CLOB", "CHARACTER LARGE OBJECT",
                       "CHAR LARGE OBJECT", "XML",)
BINARY = DBAPITypeObject("BLOB", "BINARY LARGE OBJECT",)
NUMBER = DBAPITypeObject("INTEGER", "INT", "SMALLINT", "BIGINT",)
FLOAT = DBAPITypeObject("FLOAT", "REAL", "DOUBLE", "DECFLOAT")
DECIMAL = DBAPITypeObject("DECIMAL", "DEC", "NUMERIC", "NUM",)
DATE = DBAPITypeObject("DATE",)
TIME = DBAPITypeObject("TIME",)
DATETIME = DBAPITypeObject("TIMESTAMP",)
# ROWID matches no type name (empty tuple).
ROWID = DBAPITypeObject(())
# DB-API 2.0 Module Interface Exceptions
class Error(exceptions.StandardError):
    """Base class of all other DB-API 2.0 error exceptions (PEP 249)."""
    pass
class Warning(exceptions.StandardError):
    """DB-API 2.0 warning exception (PEP 249); not a subclass of Error."""
    pass
class InterfaceError(Error):
    """Error related to the database interface, not the database (PEP 249)."""
    pass
class DatabaseError(Error):
    """Base class for errors related to the database itself (PEP 249)."""
    pass
class InternalError(DatabaseError):
    """Internal database error, e.g. invalid cursor state (PEP 249)."""
    pass
class OperationalError(DatabaseError):
    """Error related to the database's operation (PEP 249)."""
    pass
class ProgrammingError(DatabaseError):
    """Programming error, e.g. SQL syntax or misuse (PEP 249)."""
    pass
class IntegrityError(DatabaseError):
    """Relational integrity violation, e.g. foreign key (PEP 249)."""
    pass
class DataError(DatabaseError):
    """Error due to problems with the processed data (PEP 249)."""
    pass
class NotSupportedError(DatabaseError):
    """An unsupported API method or database feature was used (PEP 249)."""
    pass
# DB-API 2.0 Type Objects and Constructors
def _java_sql_blob(data):
    """Wrap a byte sequence as a Java byte[] for BLOB parameters."""
    return _java_array_byte(data)
# DB-API 2.0 Binary constructor (PEP 249).
Binary = _java_sql_blob
def _str_func(func):
def to_str(*parms):
return str(func(*parms))
return to_str
# DB-API 2.0 Date/Time/Timestamp constructors (PEP 249): build the
# corresponding datetime object and render it as a string.
Date = _str_func(datetime.date)
Time = _str_func(datetime.time)
Timestamp = _str_func(datetime.datetime)
def DateFromTicks(ticks):
    """DB-API 2.0: build a Date from a POSIX timestamp (PEP 249).

    Uses argument unpacking instead of the Python-2-only apply()
    builtin (removed in Python 3); behavior is identical on Python 2.
    """
    return Date(*time.localtime(ticks)[:3])
def TimeFromTicks(ticks):
    """DB-API 2.0: build a Time from a POSIX timestamp (PEP 249).

    Uses argument unpacking instead of the Python-2-only apply()
    builtin (removed in Python 3); behavior is identical on Python 2.
    """
    return Time(*time.localtime(ticks)[3:6])
def TimestampFromTicks(ticks):
    """DB-API 2.0: build a Timestamp from a POSIX timestamp (PEP 249).

    Uses argument unpacking instead of the Python-2-only apply()
    builtin (removed in Python 3); behavior is identical on Python 2.
    """
    return Timestamp(*time.localtime(ticks)[:6])
# DB-API 2.0 Module Interface connect constructor
def connect(jclassname, *args):
    """DB-API 2.0 connect constructor (PEP 249).

    jclassname: fully qualified JDBC driver class name.
    args: forwarded to java.sql.DriverManager.getConnection()
        (typically the JDBC url and credentials).
    """
    jconn = _jdbc_connect(jclassname, *args)
    return Connection(jconn)
# DB-API 2.0 Connection Object
class Connection(object):
    """DB-API 2.0 Connection wrapping a java.sql.Connection."""
    # Underlying java.sql.Connection instance.
    jconn = None
    def __init__(self, jconn):
        self.jconn = jconn
    def close(self):
        """Close the underlying JDBC connection."""
        self.jconn.close()
    def commit(self):
        """Commit the current transaction."""
        return self.jconn.commit()
    def rollback(self):
        """Roll back the current transaction."""
        return self.jconn.rollback()
    def cursor(self):
        """Return a new Cursor bound to this connection."""
        return Cursor(self)
# DB-API 2.0 Cursor Object
# DB-API 2.0 Cursor Object
class Cursor(object):
    """DB-API 2.0 Cursor backed by a JDBC PreparedStatement/ResultSet.

    Create cursors via Connection.cursor(); the module declares
    threadsafety == 1, so do not share a cursor between threads.
    """
    # Rows affected by the last executemany(); -1 when unknown.
    rowcount = -1
    # java.sql.ResultSetMetaData of the open result set, if any.
    _meta = None
    # java.sql.PreparedStatement of the last execute*() call, if any.
    _prep = None
    # java.sql.ResultSet currently being fetched from, if any.
    _rs = None
    # Cached DB-API description tuples built from _meta.
    _description = None
    def __init__(self, connection):
        self._connection = connection
    @property
    def description(self):
        """Sequence of 7-item column descriptions per PEP 249.

        Returns None when no result set is open.
        """
        if self._description:
            return self._description
        m = self._meta
        if m:
            count = m.getColumnCount()
            self._description = []
            for col in range(1, count + 1):
                size = m.getColumnDisplaySize(col)
                col_desc = (m.getColumnName(col),
                            m.getColumnTypeName(col),
                            size,
                            size,
                            m.getPrecision(col),
                            m.getScale(col),
                            m.isNullable(col),
                            )
                self._description.append(col_desc)
            return self._description
    # optional callproc(self, procname, *parameters) unsupported
    def close(self):
        """Close the cursor and detach it from its connection."""
        self._close_last()
        # Bug fix: the attribute set in __init__ is _connection; the
        # original assigned a new 'connection' attribute instead and
        # therefore never released the reference.
        self._connection = None
    def _close_last(self):
        """Close the resultset and reset collected meta data.
        """
        if self._rs:
            self._rs.close()
        if self._prep:
            self._prep.close()
        self._rs = None
        # Also drop the closed statement so it cannot be reused.
        self._prep = None
        self._meta = None
        self._description = None
    # TODO: this is a possible way to close the open result sets
    # but I'm not sure when __del__ will be called
    __del__ = _close_last
    def _set_stmt_parms(self, prep_stmt, parameters):
        # Bind positional parameters; JDBC parameter indices are 1-based.
        for i in range(len(parameters)):
            prep_stmt.setObject(i + 1, parameters[i])
    def execute(self, operation, parameters=None):
        """Prepare and execute *operation* with optional parameters."""
        if not parameters:
            parameters = ()
        self._close_last()
        self._prep = self._connection.jconn.prepareStatement(operation)
        self._set_stmt_parms(self._prep, parameters)
        is_rs = self._prep.execute()
        self.update_count = self._prep.getUpdateCount()
        if is_rs:
            self._rs = self._prep.getResultSet()
            self._meta = self._rs.getMetaData()
        # self._prep.getWarnings() ???
    def executemany(self, operation, seq_of_parameters):
        """Execute *operation* once per parameter set as a JDBC batch."""
        self._close_last()
        self._prep = self._connection.jconn.prepareStatement(operation)
        for parameters in seq_of_parameters:
            self._set_stmt_parms(self._prep, parameters)
            self._prep.addBatch()
        update_counts = self._prep.executeBatch()
        # self._prep.getWarnings() ???
        self.rowcount = sum(update_counts)
        self._close_last()
    def fetchone(self):
        """Return the next row as a tuple, or None when exhausted."""
        # raises AttributeError if no result set is open
        if not self._rs.next():
            return None
        row = []
        for col in range(1, self._meta.getColumnCount() + 1):
            sqltype = self._meta.getColumnType(col)
            # TODO: Oracle 11 will read a oracle.sql.TIMESTAMP
            # which can't be converted to string easyly
            v = self._rs.getObject(col)
            if v:
                converter = _converters.get(sqltype)
                if converter:
                    v = converter(v)
            row.append(v)
        return tuple(row)
    def fetchmany(self, size=None):
        """Return up to *size* rows (default: self.arraysize)."""
        if size is None:
            size = self.arraysize
        # TODO: handle SQLException if not supported by db
        self._rs.setFetchSize(size)
        rows = []
        row = None
        # range instead of the Python-2-only xrange; size is small.
        for i in range(size):
            row = self.fetchone()
            if row is None:
                break
            else:
                rows.append(row)
        # reset fetch size
        if row:
            # TODO: handle SQLException if not supported by db
            self._rs.setFetchSize(0)
        return rows
    def fetchall(self):
        """Return all remaining rows as a list of tuples."""
        rows = []
        while True:
            row = self.fetchone()
            if row is None:
                break
            else:
                rows.append(row)
        return rows
    # optional nextset() unsupported
    arraysize = 1
    def setinputsizes(self, sizes):
        """No-op, as permitted by PEP 249."""
        pass
    def setoutputsize(self, size, column):
        """No-op, as permitted by PEP 249."""
        pass
def _to_datetime(java_val):
#d = datetime.datetime.strptime(str(java_val)[:19], "%Y-%m-%d %H:%M:%S")
#if not isinstance(java_val, basestring):
# d = d.replace(microsecond=int(str(java_val.getNanos())[:6]))
#return str(d)
return str(java_val)
def _to_date(java_val):
#d = datetime.datetime.strptime(str(java_val)[:10], "%Y-%m-%d")
#return d.strftime("%Y-%m-%d")
return str(java_val)
def _to_string(java_val):
    """Convert a value (used for BINARY columns) to a Python string."""
    return str(java_val)
def _java_to_py(java_method):
    """Return a converter calling *java_method* on non-primitive values.

    Values that already are Python primitives pass through unchanged.
    NOTE: relies on the Python 2 builtins basestring/long; this module
    targets Python 2 / Jython.
    """
    def to_py(java_val):
        if isinstance(java_val, (basestring, int, long, float, bool)):
            return java_val
        return getattr(java_val, java_method)()
    return to_py
# Numeric converters: unwrap java.lang.Number objects via their
# doubleValue()/intValue() accessors.
_to_double = _java_to_py('doubleValue')
_to_int = _java_to_py('intValue')
def _init_converters(types_map):
    """Prepares the converters for conversion of java types to python
    objects.
    types_map: Mapping of java.sql.Types field name to java.sql.Types
    field constant value"""
    global _converters
    _converters = {}
    # Translate each type *name* into its numeric constant and key the
    # converter table by that constant.
    for name, converter in _DEFAULT_CONVERTERS.items():
        _converters[types_map[name]] = converter
# Mapping from java.sql.Types field to converter method
# (populated lazily by _init_converters once the JVM is available).
_converters = None
# Default converters keyed by java.sql.Types field *name*; translated
# to the numeric constants by _init_converters.
_DEFAULT_CONVERTERS = {
    # see
    # http://download.oracle.com/javase/1.4.2/docs/api/java/sql/Types.html
    # for possible keys
    'TIMESTAMP': _to_datetime,
    'DATE': _to_date,
    'BINARY': _to_string,
    'DECIMAL': _to_double,
    'NUMERIC': _to_double,
    'DOUBLE': _to_double,
    'FLOAT': _to_double,
    'INTEGER': _to_int,
    'SMALLINT': _to_int,
    'BOOLEAN': _java_to_py('booleanValue'),
}
|
|
"""Test homekit_controller stateless triggers."""
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.model.services import ServicesTypes
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.homekit_controller.const import DOMAIN
from homeassistant.setup import async_setup_component
from tests.common import (
assert_lists_same,
async_get_device_automations,
async_mock_service,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
from tests.components.homekit_controller.common import setup_test_component
# pylint: disable=redefined-outer-name
@pytest.fixture
def calls(hass):
    """Track calls to a mock service."""
    # Each triggered automation appends its service call to this list.
    return async_mock_service(hass, "test", "automation")
def create_remote(accessory):
    """Define characteristics for a button (that is in a group)."""
    service_label = accessory.add_service(ServicesTypes.SERVICE_LABEL)
    namespace = service_label.add_char(CharacteristicsTypes.SERVICE_LABEL_NAMESPACE)
    namespace.value = 1
    # Four stateless buttons, each linked back to the service label.
    for index in range(4):
        button = accessory.add_service(ServicesTypes.STATELESS_PROGRAMMABLE_SWITCH)
        button.linked.append(service_label)
        event = button.add_char(CharacteristicsTypes.INPUT_EVENT)
        event.value = 0
        event.perms = ["pw", "pr", "ev"]
        name = button.add_char(CharacteristicsTypes.NAME)
        name.value = f"Button {index + 1}"
        label_index = button.add_char(CharacteristicsTypes.SERVICE_LABEL_INDEX)
        label_index.value = index
    # Battery service so the component exposes a battery sensor too.
    battery = accessory.add_service(ServicesTypes.BATTERY_SERVICE)
    battery.add_char(CharacteristicsTypes.BATTERY_LEVEL)
def create_button(accessory):
    """Define a button (that is not in a group)."""
    button = accessory.add_service(ServicesTypes.STATELESS_PROGRAMMABLE_SWITCH)
    event = button.add_char(CharacteristicsTypes.INPUT_EVENT)
    event.value = 0
    event.perms = ["pw", "pr", "ev"]
    name = button.add_char(CharacteristicsTypes.NAME)
    name.value = "Button 1"
    # Battery service so the component exposes a battery sensor too.
    battery = accessory.add_service(ServicesTypes.BATTERY_SERVICE)
    battery.add_char(CharacteristicsTypes.BATTERY_LEVEL)
def create_doorbell(accessory):
    """Define a doorbell (that is not in a group)."""
    doorbell = accessory.add_service(ServicesTypes.DOORBELL)
    event = doorbell.add_char(CharacteristicsTypes.INPUT_EVENT)
    event.value = 0
    event.perms = ["pw", "pr", "ev"]
    name = doorbell.add_char(CharacteristicsTypes.NAME)
    name.value = "Doorbell"
    # Battery service so the component exposes a battery sensor too.
    battery = accessory.add_service(ServicesTypes.BATTERY_SERVICE)
    battery.add_char(CharacteristicsTypes.BATTERY_LEVEL)
async def test_enumerate_remote(hass, utcnow):
    """Test that remote is correctly enumerated."""
    await setup_test_component(hass, create_remote)
    entity_registry = await hass.helpers.entity_registry.async_get_registry()
    entry = entity_registry.async_get("sensor.testdevice_battery")
    device_registry = await hass.helpers.device_registry.async_get_registry()
    device = device_registry.async_get(entry.device_id)
    # The battery sensor contributes a battery_level device trigger.
    expected = [
        {
            "device_id": device.id,
            "domain": "sensor",
            "entity_id": "sensor.testdevice_battery",
            "platform": "device",
            "type": "battery_level",
        }
    ]
    # Each of the remote's 4 buttons exposes three press subtypes.
    for button in ("button1", "button2", "button3", "button4"):
        for subtype in ("single_press", "double_press", "long_press"):
            expected.append(
                {
                    "device_id": device.id,
                    "domain": "homekit_controller",
                    "platform": "device",
                    "type": button,
                    "subtype": subtype,
                }
            )
    triggers = await async_get_device_automations(hass, "trigger", device.id)
    assert_lists_same(triggers, expected)
async def test_enumerate_button(hass, utcnow):
    """Test that a button is correctly enumerated."""
    await setup_test_component(hass, create_button)
    entity_registry = await hass.helpers.entity_registry.async_get_registry()
    entry = entity_registry.async_get("sensor.testdevice_battery")
    device_registry = await hass.helpers.device_registry.async_get_registry()
    device = device_registry.async_get(entry.device_id)
    # The battery sensor contributes a battery_level device trigger.
    expected = [
        {
            "device_id": device.id,
            "domain": "sensor",
            "entity_id": "sensor.testdevice_battery",
            "platform": "device",
            "type": "battery_level",
        }
    ]
    # A single ungrouped button exposes the three press subtypes.
    for subtype in ("single_press", "double_press", "long_press"):
        expected.append(
            {
                "device_id": device.id,
                "domain": "homekit_controller",
                "platform": "device",
                "type": "button1",
                "subtype": subtype,
            }
        )
    triggers = await async_get_device_automations(hass, "trigger", device.id)
    assert_lists_same(triggers, expected)
async def test_enumerate_doorbell(hass, utcnow):
    """Test that a doorbell is correctly enumerated."""
    await setup_test_component(hass, create_doorbell)
    entity_registry = await hass.helpers.entity_registry.async_get_registry()
    entry = entity_registry.async_get("sensor.testdevice_battery")
    device_registry = await hass.helpers.device_registry.async_get_registry()
    device = device_registry.async_get(entry.device_id)
    # The battery sensor contributes a battery_level device trigger.
    expected = [
        {
            "device_id": device.id,
            "domain": "sensor",
            "entity_id": "sensor.testdevice_battery",
            "platform": "device",
            "type": "battery_level",
        }
    ]
    # The doorbell exposes the three press subtypes under type "doorbell".
    for subtype in ("single_press", "double_press", "long_press"):
        expected.append(
            {
                "device_id": device.id,
                "domain": "homekit_controller",
                "platform": "device",
                "type": "doorbell",
                "subtype": subtype,
            }
        )
    triggers = await async_get_device_automations(hass, "trigger", device.id)
    assert_lists_same(triggers, expected)
async def test_handle_events(hass, utcnow, calls):
    """Test that events are handled."""
    helper = await setup_test_component(hass, create_remote)
    entity_registry = await hass.helpers.entity_registry.async_get_registry()
    entry = entity_registry.async_get("sensor.testdevice_battery")
    device_registry = await hass.helpers.device_registry.async_get_registry()
    device = device_registry.async_get(entry.device_id)
    # Two automations: button1/single_press and button2/long_press, each
    # recording its trigger info via the mocked test.automation service.
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "alias": "single_press",
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": device.id,
                        "type": "button1",
                        "subtype": "single_press",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": (
                                "{{ trigger.platform}} - "
                                "{{ trigger.type }} - {{ trigger.subtype }}"
                            )
                        },
                    },
                },
                {
                    "alias": "long_press",
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": device.id,
                        "type": "button2",
                        "subtype": "long_press",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": (
                                "{{ trigger.platform}} - "
                                "{{ trigger.type }} - {{ trigger.subtype }}"
                            )
                        },
                    },
                },
            ]
        },
    )
    # Make sure first automation (only) fires for single press
    # (INPUT_EVENT value 0 is presumably single press -- per the
    # assertions below).
    helper.pairing.testing.update_named_service(
        "Button 1", {CharacteristicsTypes.INPUT_EVENT: 0}
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data["some"] == "device - button1 - single_press"
    # Make sure automation doesn't trigger for long press
    helper.pairing.testing.update_named_service(
        "Button 1", {CharacteristicsTypes.INPUT_EVENT: 1}
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
    # Make sure automation doesn't trigger for double press
    helper.pairing.testing.update_named_service(
        "Button 1", {CharacteristicsTypes.INPUT_EVENT: 2}
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
    # Make sure second automation fires for long press
    helper.pairing.testing.update_named_service(
        "Button 2", {CharacteristicsTypes.INPUT_EVENT: 2}
    )
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert calls[1].data["some"] == "device - button2 - long_press"
    # Turn the automations off
    await hass.services.async_call(
        "automation",
        "turn_off",
        {"entity_id": "automation.long_press"},
        blocking=True,
    )
    await hass.services.async_call(
        "automation",
        "turn_off",
        {"entity_id": "automation.single_press"},
        blocking=True,
    )
    # Make sure event no longer fires
    helper.pairing.testing.update_named_service(
        "Button 2", {CharacteristicsTypes.INPUT_EVENT: 2}
    )
    await hass.async_block_till_done()
    assert len(calls) == 2
|
|
# coding: utf-8
from __future__ import unicode_literals
import json
from .base_object import BaseObject
from boxsdk.config import API
from boxsdk.exception import BoxAPIException
class Item(BaseObject):
    """Box API endpoint for interacting with files and folders."""
    def _get_accelerator_upload_url(self, file_id=None):
        """
        Make an API call to get the Accelerator upload url for either upload a new file or updating an existing file.
        :param file_id:
            Box id of the file to be uploaded. Not required for new file uploads.
        :type file_id:
            `unicode` or None
        :return:
            The Accelerator upload url or None if cannot get the Accelerator upload url.
        :rtype:
            `unicode` or None
        """
        endpoint = '{0}/content'.format(file_id) if file_id else 'content'
        url = '{0}/files/{1}'.format(API.BASE_API_URL, endpoint)
        # Accelerator support is best-effort: any API error simply means
        # no accelerated upload url is available.
        try:
            response_json = self._session.options(
                url=url,
                expect_json_response=True,
            ).json()
            return response_json.get('upload_url', None)
        except BoxAPIException:
            return None
    def _preflight_check(self, size, name=None, file_id=None, parent_id=None):
        """
        Make an API call to check if certain file can be uploaded to Box or not.
        (https://developers.box.com/docs/#files-preflight-check)
        :param size:
            The size of the file to be uploaded in bytes. Specify 0 for unknown file sizes.
        :type size:
            `int`
        :param name:
            The name of the file to be uploaded. This is optional if `file_id` is specified,
            but required for new file uploads.
        :type name:
            `unicode`
        :param file_id:
            Box id of the file to be uploaded. Not required for new file uploads.
        :type file_id:
            `unicode`
        :param parent_id:
            The ID of the parent folder. Required only for new file uploads.
        :type parent_id:
            `unicode`
        :raises:
            :class:`BoxAPIException` when preflight check fails.
        """
        endpoint = '{0}/content'.format(file_id) if file_id else 'content'
        url = '{0}/files/{1}'.format(API.BASE_API_URL, endpoint)
        data = {'size': size}
        if name:
            data['name'] = name
        if parent_id:
            data['parent'] = {'id': parent_id}
        # An OPTIONS request performs the check without uploading data;
        # failure surfaces as a BoxAPIException from the session.
        self._session.options(
            url=url,
            expect_json_response=False,
            data=json.dumps(data),
        )
    def update_info(self, data, etag=None):
        """Baseclass override.
        :param etag:
            If specified, instruct the Box API to perform the update only if
            the current version's etag matches.
        :type etag:
            `unicode` or None
        :return:
            The updated object.
            Return a new object of the same type, without modifying the original object passed as self.
            Construct the new object with all the default attributes that are returned from the endpoint.
        :rtype:
            :class:`BaseObject`
        """
        # pylint:disable=arguments-differ
        # If-Match makes the update conditional on the item version.
        headers = {'If-Match': etag} if etag is not None else None
        return super(Item, self).update_info(data, headers=headers)
    def rename(self, name):
        """
        Rename the item to a new name.
        :param name:
            The new name, you want the item to be renamed to.
        :type name:
            `unicode`
        """
        data = {
            'name': name,
        }
        return self.update_info(data)
    def get(self, fields=None, etag=None):
        """Base class override.
        :param etag:
            If specified, instruct the Box API to get the info only if the current version's etag doesn't match.
        :type etag:
            `unicode` or None
        :returns:
            Information about the file or folder.
        :rtype:
            `dict`
        :raises: :class:`BoxAPIException` if the specified etag matches the latest version of the item.
        """
        # pylint:disable=arguments-differ
        # If-None-Match skips the fetch when the caller's copy is current.
        headers = {'If-None-Match': etag} if etag is not None else None
        return super(Item, self).get(fields=fields, headers=headers)
    def copy(self, parent_folder):
        """Copy the item to the given folder.
        :param parent_folder:
            The folder to which the item should be copied.
        :type parent_folder:
            :class:`Folder`
        """
        url = self.get_url('copy')
        data = {
            'parent': {'id': parent_folder.object_id}
        }
        box_response = self._session.post(url, data=json.dumps(data))
        response = box_response.json()
        # Wrap the API response in a new object of the same concrete
        # type (File or Folder) as self.
        return self.__class__(
            session=self._session,
            object_id=response['id'],
            response_object=response,
        )
    def move(self, parent_folder):
        """
        Move the item to the given folder.
        :param parent_folder:
            The parent `Folder` object, where the item will be moved to.
        :type parent_folder:
            `Folder`
        """
        data = {
            'parent': {'id': parent_folder.object_id}
        }
        return self.update_info(data)
    def get_shared_link(self, access=None, etag=None, unshared_at=None, allow_download=None, allow_preview=None, password=None):
        """Get a shared link for the item with the given access permissions.
        :param access:
            Determines who can access the shared link. May be open, company, or collaborators. If no access is
            specified, the default access will be used.
        :type access:
            `unicode` or None
        :param etag:
            If specified, instruct the Box API to create the link only if the current version's etag matches.
        :type etag:
            `unicode` or None
        :param unshared_at:
            The date on which this link should be disabled. May only be set if the current user is not a free user
            and has permission to set expiration dates.
        :type unshared_at:
            :class:`datetime.date` or None
        :param allow_download:
            Whether or not the item being shared can be downloaded when accessed via the shared link.
            If this parameter is None, the default setting will be used.
        :type allow_download:
            `bool` or None
        :param allow_preview:
            Whether or not the item being shared can be previewed when accessed via the shared link.
            If this parameter is None, the default setting will be used.
        :type allow_preview:
            `bool` or None
        :param password:
            The password required to view this link. If no password is specified then no password will be set.
            Please notice that this is a premium feature, which might not be available to your app.
        :type password:
            `unicode` or None
        :returns:
            The URL of the shared link.
        :rtype:
            `unicode`
        :raises: :class:`BoxAPIException` if the specified etag doesn't match the latest version of the item.
        """
        # An empty shared_link dict requests a link with default access.
        data = {
            'shared_link': {} if not access else {
                'access': access
            }
        }
        if unshared_at is not None:
            data['shared_link']['unshared_at'] = unshared_at.isoformat()
        # Only send a permissions object when the caller set at least one
        # of the two flags, so server defaults apply otherwise.
        if allow_download is not None or allow_preview is not None:
            data['shared_link']['permissions'] = permissions = {}
            if allow_download is not None:
                permissions['can_download'] = allow_download
            if allow_preview is not None:
                permissions['can_preview'] = allow_preview
        if password is not None:
            data['shared_link']['password'] = password
        item = self.update_info(data, etag=etag)
        return item.shared_link['url']
    def remove_shared_link(self, etag=None):
        """Delete the shared link for the item.
        :param etag:
            If specified, instruct the Box API to delete the link only if the current version's etag matches.
        :type etag:
            `unicode` or None
        :returns:
            Whether or not the update was successful.
        :rtype:
            `bool`
        :raises: :class:`BoxAPIException` if the specified etag doesn't match the latest version of the item.
        """
        # Setting shared_link to None removes the link server-side.
        data = {'shared_link': None}
        item = self.update_info(data, etag=etag)
        return item.shared_link is None
    def delete(self, params=None, etag=None):
        """Delete the item.
        :param params:
            Additional parameters to send with the request.
        :type params:
            `dict`
        :param etag:
            If specified, instruct the Box API to delete the item only if the current version's etag matches.
        :type etag:
            `unicode` or None
        :returns:
            Whether or not the delete was successful.
        :rtype:
            `bool`
        :raises: :class:`BoxAPIException` if the specified etag doesn't match the latest version of the item.
        """
        headers = {'If-Match': etag} if etag is not None else None
        return super(Item, self).delete(params, headers)
|
|
# Copyright 2010-2012 the SGC project developers.
# See the LICENSE file at the top-level directory of this distribution
# and at http://program.sambull.org/sgc/license.html.
"""
Scroll box. A container widget that provides scroll bars to be able to
view a larger widget.
"""
import pygame.mouse
from pygame.locals import *
from pygame import draw
from ._locals import *
from ._locals import special_case, modal_widgets
from .base_widget import Simple
class ScrollBox(Simple):
    """
    Scroll Box

    Container that displays a child widget larger than itself.  Thin
    scroll bars are drawn along the bottom/right edges when needed, and
    pop-up drag handles appear when the cursor moves near a bar.
    """
    _can_focus = True
    _default_size = (300, 200)
    _surf_flags = SRCALPHA
    # 'widget' is the child being scrolled; 'col' colours bars and handles.
    _settings_default = {"widget": None, "col": (118, 45, 215)}
    # Scroll bar sprites; created in _create_handles() only when the child
    # exceeds the box in that dimension.
    _scroll_x = _scroll_y = None
    # Pop-up drag handle widgets paired with each scroll bar.
    _handle_x = _handle_y = None
    def _config(self, **kwargs):
        """
        widget: Widget that should be displayed in scroll box.
        col: ``tuple`` (r,g,b) Colour used for scroll bars and handles.
        """
        if "widget" in kwargs:
            self._settings["widget"] = kwargs["widget"]
            self._settings["widget"]._parent = self
            self._settings["widget"].pos = (0,0)
            # Child size is known now, so (re)create the scroll bars.
            self._create_handles()
        if "col" in kwargs:
            self._settings["col"] = kwargs["col"]
    def _create_handles(self):
        # Create scroll bars and handles
        self._scroll_x = self._scroll_y = None
        self._handle_x = self._handle_y = None
        # Horizontal bar: its length is proportional to visible/total width.
        if self._settings["widget"].rect.w > self.rect.w:
            ratio = float(self.rect.w) / self._settings["widget"].rect.w
            self._scroll_x = Simple((self.rect.w * ratio, 3))
            self._scroll_x._parent = self
            self._scroll_x.image.fill(self._settings["col"])
            self._scroll_x.pos = (0, self.rect.h - 3)
            self._handle_x = _ScrollHandleH(widget=self)
        # Vertical bar: same scheme, for height.
        if self._settings["widget"].rect.h > self.rect.h:
            ratio = float(self.rect.h) / self._settings["widget"].rect.h
            self._scroll_y = Simple((3, self.rect.h * ratio))
            self._scroll_y._parent = self
            self._scroll_y.image.fill(self._settings["col"])
            self._scroll_y.pos = (self.rect.w - 3, 0)
            self._handle_y = _ScrollHandleV(widget=self)
    def update(self, time):
        """Update scroll box each frame."""
        self._settings["widget"].update(time)
        # Repaint: transparent background, child at its scrolled offset,
        # then the scroll bars blitted on top.
        self.image.fill((255,255,255,0))
        self.image.blit(self._settings["widget"].image,
                        self._settings["widget"].pos)
        pos = pygame.mouse.get_pos()
        if self._scroll_y is not None:
            self.image.blit(self._scroll_y.image, self._scroll_y.pos)
            r = self._scroll_y.rect_abs
            # Add scroll handles when cursor moves near scroll bar
            if not self._handle_y.active() and \
                    r.inflate(20, 5).collidepoint(pos):
                # Position to left if handle would be off-screen.
                edge = (r.right + self._handle_y.rect.w)
                if edge < get_screen().rect.w:
                    self._handle_y.rect.x = r.right
                else:
                    self._handle_y.rect.right = r.left
                self._handle_y.update_pos(pos[1])
                self._handle_y.add()
        if self._scroll_x is not None:
            self.image.blit(self._scroll_x.image, self._scroll_x.pos)
            r = self._scroll_x.rect_abs
            # Same proximity test and placement for the horizontal handle.
            if not self._handle_x.active() and \
                    r.inflate(5, 20).collidepoint(pos):
                edge = (r.bottom + self._handle_x.rect.h)
                if edge < get_screen().rect.h:
                    self._handle_x.rect.y = r.bottom
                else:
                    self._handle_x.rect.bottom = r.top
                self._handle_x.update_pos(pos[0])
                self._handle_x.add()
    def _event(self, event):
        """Respond to events."""
        # Forward every event to the child widget first.
        self._settings["widget"]._event(event)
        # Mouse wheel / tilt buttons scroll by a fixed 10px step.
        if event.type == MOUSEBUTTONDOWN:
            if event.button == 4: # Scroll up
                self.scroll(y=-10)
            elif event.button == 5: # Scroll down
                self.scroll(y=10)
            elif event.button == 6: # Scroll left
                self.scroll(x=-10)
            elif event.button == 7: # Scroll right
                self.scroll(x=10)
    def scroll(self, x=None, y=None):
        """Scroll by x and y coordinates."""
        if x is not None and self._scroll_x is not None:
            # Set scroll bar position
            r = self._scroll_x.rect
            r.x = max(min(r.x + x, self.rect.w - r.w), 0)
            # Set widget's position
            # (bar travel 0..1 is mapped onto the child's scrollable range)
            ratio = r.x / float(self.rect.w - r.w)
            max_w = self._settings["widget"].rect.w - self.rect.w
            self._settings["widget"].rect.x = -max_w * ratio
        if y is not None and self._scroll_y is not None:
            r = self._scroll_y.rect
            r.y = max(min(r.y + y, self.rect.h - r.h), 0)
            ratio = r.y / float(self.rect.h - r.h)
            max_h = self._settings["widget"].rect.h - self.rect.h
            self._settings["widget"].rect.y = -max_h * ratio
    def _change_focus(self, forward=True):
        # Focus handling is delegated entirely to the child widget.
        return self._settings["widget"]._change_focus(forward)
    def _focus_enter(self, focus):
        self._settings["widget"]._focus_enter(focus)
    def _focus_exit(self):
        self._settings["widget"]._focus_exit()
class _ScrollHandle(Simple):
    """
    Scroll bar to manipulate scroll box.
    To be inherited from by _ScrollHandle[V/H], not to be used directly.
    Uses lots of getattr() and other tricks to provide inheritable functions.
    Subclasses define ``xy`` ("x" or "y") plus a default size; every method
    then operates on the matching axis via getattr()/setattr().
    """
    _can_focus = True
    _layered = True
    # Axis coordinate where the current drag started; None when not dragging.
    _drag = None
    def _config(self, **kwargs):
        """
        widget: Scroll box that this handle should be synced to.
        """
        if "init" in kwargs:
            # Hover zone: the handle hides once the cursor leaves this rect.
            self._rect2 = self.rect_abs.inflate(20, 20)
        if "widget" in kwargs:
            self._parent_view = kwargs["widget"]
    def _draw_base(self):
        # Coloured border (scroll box colour) with a light grey inner face.
        img = self._images["image"]
        img.fill(self._parent_view._settings["col"])
        img.fill((200,200,200), self.rect.inflate(-4, -4))
        # Draw line in center
        r = self.rect
        start_pos = (3, r.centery) if self.xy == "y" else (r.centerx, 3)
        end_pos = (r.w-4, r.centery) if self.xy == "y" else (r.centerx, r.h-4)
        draw.line(img, (100,100,100), start_pos, end_pos)
        # Draw arrows
        if self.xy == "y":
            points1 = ((3, r.h/4), (r.centerx, r.h/5-1), (r.w-3, r.h/4))
            points2 = ((3, r.h*.75), (r.centerx, r.h*.8), (r.w-3, r.h*.75))
        else:
            points1 = ((r.w/4, 3), (r.w/5-1, r.centery), (r.w/4, r.h-3))
            points2 = ((r.w*.75, 3), (r.w*.8, r.centery), (r.w*.75, r.h-3))
        draw.polygon(img, (50,50,50), points1)
        draw.polygon(img, (50,50,50), points2)
    def update_pos(self, xy):
        """
        Change position of scroll handle.
        Args:
          xy: Integer to move the scroll handle to, along the correct axis.
        """
        scroll_bar = getattr(self._parent_view, "_scroll_%s" % self.xy)
        if scroll_bar is not None:
            # Clamp the requested position to the scroll bar's extent.
            r = scroll_bar.rect_abs
            a,b = (r.bottom, r.top) if self.xy == "y" else (r.right, r.left)
            xy = min(a, max(xy, b))
            setattr(self.rect, "center%s" % self.xy, xy)
            # Keep the hover zone centred on the handle.
            self._rect2.center = self.rect.center
    def update(self, time):
        # Move handle to cursor when cursor not hovering over.
        if not self.rect.collidepoint(pygame.mouse.get_pos()):
            self.update_pos(pygame.mouse.get_pos()[0 if self.xy == "x" else 1])
        # Hide handle when cursor moves too far.
        if self._drag is None and \
                not self._rect2.collidepoint(pygame.mouse.get_pos()):
            self.remove()
    def _event(self, event):
        # Index into event.pos/event.rel for this handle's axis.
        index = 1 if self.xy == "y" else 0
        if event.type == MOUSEBUTTONDOWN and event.button == 1 and \
                self.rect.collidepoint(event.pos):
            # Initialise drag
            center = getattr(self.rect_abs, "center%s" % self.xy)
            self._offset = event.pos[index] - center
            self._drag = event.pos[index]
        elif self._drag is not None:
            if event.type == MOUSEMOTION:
                # Move scroll handle and bar
                self.update_pos(event.pos[index] - self._offset)
                kwarg = {self.xy: event.rel[index]}
                self._parent_view.scroll(**kwarg)
            elif event.type == MOUSEBUTTONUP and event.button == 1:
                # Move scroll box up when clicked
                # (press/release within 5px counts as a click, not a drag)
                if -5 < (self._drag - event.pos[index]) < 5:
                    center = getattr(self.rect_abs, "center%s" % self.xy)
                    if event.pos[index] < center:
                        kwarg = {self.xy: -40}
                        self._parent_view.scroll(**kwarg)
                    else:
                        kwarg = {self.xy: 40}
                        self._parent_view.scroll(**kwarg)
                # Or stop moving and set final position after drag
                else:
                    self.update_pos(event.pos[index] - self._offset)
                self._drag = None
    def add(self, order=None, fade=True):
        # Only add if child of modal widget or no modal widget.
        try:
            modal = modal_widgets.sprites()[-1]
            parent = self._parent_view
            # Walk up the parent chain looking for the active modal widget;
            # the while/else returns early (no add) if it is never found.
            while parent:
                if parent is modal:
                    break
                parent = parent._parent
            else:
                return
        except IndexError: pass
        # IndexError above means no modal widget exists, so adding is allowed.
        special_case.add(self)
        super(_ScrollHandle, self).add(order, fade)
    def remove(self, fade=True):
        special_case.discard(self)
        super(_ScrollHandle, self).remove(fade)
class _ScrollHandleV(_ScrollHandle):
    # Vertical handle: tall and thin, operating on the y axis.
    _default_size = (12,50)
    xy = "y"
class _ScrollHandleH(_ScrollHandle):
    # Horizontal handle: wide and short, operating on the x axis.
    _default_size = (50,12)
    xy = "x"
|
|
# coding=utf-8
"""
The CPUCollector collects CPU utilization metric using /proc/stat.
#### Dependencies
* /proc/stat
"""
import diamond.collector
import os
import time
from diamond.collector import str_to_bool
try:
import psutil
psutil # workaround for pyflakes issue #13
except ImportError:
psutil = None
class CPUCollector(diamond.collector.Collector):
    """Collects CPU utilization metrics.

    Prefers reading jiffy counters from /proc/stat; falls back to psutil
    when /proc/stat is not readable (e.g. non-Linux platforms).
    """
    # Kernel CPU time counter source.
    PROC = '/proc/stat'
    # Seconds slept between samples for the aggregate CPU% calculation.
    INTERVAL = 1
    # Wrap-around limits passed to derivative() for each counter field.
    MAX_VALUES = {
        'user': diamond.collector.MAX_COUNTER,
        'nice': diamond.collector.MAX_COUNTER,
        'system': diamond.collector.MAX_COUNTER,
        'idle': diamond.collector.MAX_COUNTER,
        'iowait': diamond.collector.MAX_COUNTER,
        'irq': diamond.collector.MAX_COUNTER,
        'softirq': diamond.collector.MAX_COUNTER,
        'steal': diamond.collector.MAX_COUNTER,
        'guest': diamond.collector.MAX_COUNTER,
        'guest_nice': diamond.collector.MAX_COUNTER,
    }
    def get_default_config_help(self):
        """Return help text for this collector's configuration options."""
        config_help = super(CPUCollector, self).get_default_config_help()
        config_help.update({
            'percore': 'Collect metrics per cpu core or just total',
            'simple': 'only return aggregate CPU% metric',
            'normalize': 'for cpu totals, divide by the number of CPUs',
            'include_aggregate': 'Include aggregate CPU% even if not in simple mode.'
        })
        return config_help
    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(CPUCollector, self).get_default_config()
        config.update({
            'enabled': 'True',
            'path': 'cpu',
            'percore': 'True',
            'xenfix': None,
            'simple': 'False',
            'normalize': 'False',
            'include_aggregate': 'False',
            'prefix_total': 'True'
        })
        return config
    def collect(self):
        """
        Collector cpu stats
        """
        def cpu_time_list():
            """
            get cpu time list

            Reads the first four jiffy fields of the aggregate 'cpu' line
            of /proc/stat: [user, nice, system, idle].
            """
            statFile = open(self.PROC, "r")
            timeList = statFile.readline().split(" ")[2:6]
            for i in range(len(timeList)):
                timeList[i] = int(timeList[i])
            statFile.close()
            return timeList
        def cpu_delta_time(interval):
            """
            Get before and after cpu times for usage calc
            """
            pre_check = cpu_time_list()
            time.sleep(interval)
            post_check = cpu_time_list()
            for i in range(len(pre_check)):
                post_check[i] -= pre_check[i]
            return post_check
        if os.access(self.PROC, os.R_OK):
            #If simple only return aggregate CPU% metric
            simple = str_to_bool(self.config['simple'])
            include_aggregate = str_to_bool(self.config['include_aggregate'])
            if simple or include_aggregate:
                # Busy% = 100 - idle share; idle is the last entry of the
                # [user, nice, system, idle] delta list.
                dt = cpu_delta_time(self.INTERVAL)
                cpuPct = 100 - (dt[len(dt) - 1] * 100.00 / sum(dt))
                self.publish('percent', str('%.4f' % cpuPct))
                if simple:
                    return True
            results = {}
            # Open file
            file = open(self.PROC)
            ncpus = -1 # dont want to count the 'cpu'(total) cpu.
            for line in file:
                if not line.startswith('cpu'):
                    continue
                ncpus += 1
                elements = line.split()
                cpu = elements[0]
                if cpu == 'cpu':
                    cpu = 'total'
                elif not str_to_bool(self.config['percore']):
                    # Skip per-core lines unless 'percore' is enabled.
                    continue
                results[cpu] = {}
                # /proc/stat fields are positional and older kernels emit
                # fewer columns, so each field is guarded by a length check.
                if len(elements) >= 2:
                    results[cpu]['user'] = elements[1]
                if len(elements) >= 3:
                    results[cpu]['nice'] = elements[2]
                if len(elements) >= 4:
                    results[cpu]['system'] = elements[3]
                if len(elements) >= 5:
                    results[cpu]['idle'] = elements[4]
                if len(elements) >= 6:
                    results[cpu]['iowait'] = elements[5]
                if len(elements) >= 7:
                    results[cpu]['irq'] = elements[6]
                if len(elements) >= 8:
                    results[cpu]['softirq'] = elements[7]
                if len(elements) >= 9:
                    results[cpu]['steal'] = elements[8]
                if len(elements) >= 10:
                    results[cpu]['guest'] = elements[9]
                if len(elements) >= 11:
                    results[cpu]['guest_nice'] = elements[10]
            # Close File
            file.close()
            metrics = {}
            for cpu in results.keys():
                stats = results[cpu]
                for s in stats.keys():
                    # Get Metric Name
                    prefix = str_to_bool(self.config['prefix_total'])
                    if prefix or cpu != 'total':
                        metric_name = '.'.join([cpu, s])
                    else:
                        metric_name = s
                    # Get actual data
                    if (str_to_bool(self.config['normalize'])
                            and cpu == 'total' and ncpus > 0):
                        # Report the total as a per-CPU average.
                        metrics[metric_name] = self.derivative(
                            metric_name,
                            long(stats[s]),
                            self.MAX_VALUES[s]) / ncpus
                    else:
                        metrics[metric_name] = self.derivative(
                            metric_name,
                            long(stats[s]),
                            self.MAX_VALUES[s])
            # Check for a bug in xen where the idle time is doubled for guest
            # See https://bugzilla.redhat.com/show_bug.cgi?id=624756
            if self.config['xenfix'] is None or self.config['xenfix'] is True:
                if os.path.isdir('/proc/xen'):
                    total = 0
                    for metric_name in metrics.keys():
                        if 'cpu0.' in metric_name:
                            total += int(metrics[metric_name])
                    # cpu0 summing to more than 110% implies the doubled-idle
                    # bug: halve all idle metrics and cache the decision in
                    # self.config['xenfix'] for subsequent collections.
                    if total > 110:
                        self.config['xenfix'] = True
                        for mname in metrics.keys():
                            if '.idle' in mname:
                                metrics[mname] = float(metrics[mname]) / 2
                    elif total > 0:
                        self.config['xenfix'] = False
                else:
                    self.config['xenfix'] = False
            # Publish Metric Derivative
            for metric_name in metrics.keys():
                self.publish(metric_name,
                             metrics[metric_name])
            return True
        else:
            # /proc/stat unreadable: use psutil if it imported successfully.
            if not psutil:
                self.log.error('Unable to import psutil')
                self.log.error('No cpu metrics retrieved')
                return None
            cpu_time = psutil.cpu_times(True)
            cpu_count = len(cpu_time)
            total_time = psutil.cpu_times()
            for i in range(0, len(cpu_time)):
                metric_name = 'cpu' + str(i)
                self.publish(metric_name + '.user',
                             self.derivative(metric_name + '.user',
                                             cpu_time[i].user,
                                             self.MAX_VALUES['user']))
                # 'nice' is not present on every platform psutil supports.
                if hasattr(cpu_time[i], 'nice'):
                    self.publish(metric_name + '.nice',
                                 self.derivative(metric_name + '.nice',
                                                 cpu_time[i].nice,
                                                 self.MAX_VALUES['nice']))
                self.publish(metric_name + '.system',
                             self.derivative(metric_name + '.system',
                                             cpu_time[i].system,
                                             self.MAX_VALUES['system']))
                self.publish(metric_name + '.idle',
                             self.derivative(metric_name + '.idle',
                                             cpu_time[i].idle,
                                             self.MAX_VALUES['idle']))
            # Aggregate totals, averaged over the CPU count.
            metric_name = 'total'
            self.publish(metric_name + '.user',
                         self.derivative(metric_name + '.user',
                                         total_time.user,
                                         self.MAX_VALUES['user'])
                         / cpu_count)
            if hasattr(total_time, 'nice'):
                self.publish(metric_name + '.nice',
                             self.derivative(metric_name + '.nice',
                                             total_time.nice,
                                             self.MAX_VALUES['nice'])
                             / cpu_count)
            self.publish(metric_name + '.system',
                         self.derivative(metric_name + '.system',
                                         total_time.system,
                                         self.MAX_VALUES['system'])
                         / cpu_count)
            self.publish(metric_name + '.idle',
                         self.derivative(metric_name + '.idle',
                                         total_time.idle,
                                         self.MAX_VALUES['idle'])
                         / cpu_count)
            return True
        # Unreachable: both branches above return.
        return None
|
|
from common import*
from net.mask_rcnn.lib.rcnn_nms import *
from net.mask_rcnn.lib.rcnn_target import *
from net.mask_rcnn.lib.rcnn_loss import *
from net.mask_rcnn.lib.rpn_nms import *
from net.mask_rcnn.lib.rpn_target import *
from net.mask_rcnn.lib.rpn_loss import *
from net.mask_rcnn.model.simple.configuration import *
from net.mask_rcnn.model.simple.feature import FeatureNet
from net.mask_rcnn.model.simple.rpn import RpnNet, rpn_bases, rpn_windows
from net.mask_rcnn.model.simple.crop import CropNet
from net.mask_rcnn.model.simple.rcnn import RcnnNet
from net.mask_rcnn.model.simple.fcn import FcnNet
#---------------------------------------------------------------------------
# https://github.com/longcw/faster_rcnn_pytorch
# https://github.com/longcw/faster_rcnn_pytorch/blob/master/faster_rcnn/faster_rcnn.py
class FasterRcnnNet(object):
    """Faster R-CNN with an additional FCN mask branch (Mask R-CNN style).

    Composes the feature extractor, RPN, RoI crop, RCNN box head and FCN
    mask head.  Set ``mode`` to ``'train'`` or ``'eval'`` before calling
    :meth:`forward`; ``forward`` stashes every intermediate tensor on
    ``self`` so that :meth:`loss` can be called afterwards (train mode
    only).  ``save``/``load`` persist per-sub-net state dicts.
    """
    # Current phase: 'train' builds sampling targets, 'eval' does not.
    mode = 'train'

    def __init__(self, cfg):
        """Build all sub-nets on the GPU from configuration ``cfg``."""
        super(FasterRcnnNet, self).__init__()
        self.cfg = cfg

        # inference sub-nets (all moved to the GPU immediately)
        self.feature_net = FeatureNet(out_channels=32).cuda()
        self.rpn_net = RpnNet (cfg, in_channels=32).cuda()
        self.crop_net = CropNet(cfg, in_channels=32, out_channels=24).cuda()
        self.rcnn_net = RcnnNet(cfg, in_channels=24).cuda()
        self.fcn_net = FcnNet (cfg, in_channels=24).cuda()

        # anchor (window) bases; sanity-check cfg against the actual nets
        self.bases = rpn_bases(cfg)
        self.cfg.check(feature_net = self.feature_net, fcn_net = self.fcn_net)

        # Collect every '*_net' attribute so sub-nets and their parameters
        # can be handled generically by forward()/save()/load().
        d = self.__dict__
        d = { k : d[k] for k,v in d.items() if '_net' in k }
        names = list(d.keys())
        nets = list(d.values())
        l = [ list(net.parameters()) for net in nets ]
        params = [ item for sublist in l for item in sublist ]
        self.names = names
        self.nets = nets
        self.params = params

    def forward(self, x, annotation=None):
        """Run detection on image batch ``x``.

        In 'train' mode ``annotation`` is required and RPN/RCNN sampling
        targets are built; in 'eval' mode the target fields are set to
        None.  Returns the post-NMS detections; all intermediates are
        stored on ``self`` for a subsequent loss() call.
        """
        if self.mode=='train':
            for net in self.nets :
                net.train()
        elif self.mode=='eval':
            for net in self.nets :
                net.eval()
        else:
            # BUG FIX: this previously referenced the non-existent
            # 'self.model', raising AttributeError instead of ValueError.
            raise ValueError('forward: invalid mode = %s?'%self.mode)
        cfg = self.cfg
        mode = self.mode
        bases = self.bases

        # Backbone features -> RPN scores/deltas -> proposal rois.
        f = self.feature_net(x)
        rpn_s, rpn_d = self.rpn_net(f)
        windows, inside_inds = rpn_windows(x, f, bases, cfg)
        rois, roi_scores = rpn_nms(x, rpn_s, rpn_d, windows, inside_inds, cfg, mode)
        if mode=='train':
            # Sample anchors/rois and build classification/regression targets.
            rpn_label_inds, rpn_labels, rpn_target_inds, rpn_targets = rpn_target(windows, inside_inds, annotation, cfg)
            sampled_rois, rcnn_labels, rcnn_target_inds, rcnn_targets = rcnn_target(rois, annotation, cfg)
        elif mode=='eval':
            sampled_rois = rois
            rpn_label_inds, rpn_labels, rpn_target_inds, rpn_targets = None, None, None, None,
            rcnn_labels, rcnn_target_inds, rcnn_targets = None, None, None

        # Heads: crop features per roi, classify/regress boxes, predict masks.
        crops = self.crop_net(f,sampled_rois)
        rcnn_s, rcnn_d = self.rcnn_net(crops)
        masks = self.fcn_net(crops)
        dets = rcnn_nms(x, rcnn_s, rcnn_d, sampled_rois, cfg)

        # Stash everything for loss() and for debugging/inspection.
        self.inputs = x
        self.features = f
        self.rpn_scores_flat = rpn_s
        self.rpn_deltas_flat = rpn_d
        self.rcnn_scores_flat = rcnn_s
        self.rcnn_deltas_flat = rcnn_d
        self.windows = windows
        self.inside_inds = inside_inds
        self.rois = rois
        self.roi_scores = roi_scores
        self.sampled_rois = sampled_rois
        self.dets = dets
        self.masks = masks
        self.rpn_label_inds = rpn_label_inds
        self.rpn_labels = rpn_labels
        self.rpn_target_inds = rpn_target_inds
        self.rpn_targets = rpn_targets
        self.rcnn_labels = rcnn_labels
        self.rcnn_target_inds = rcnn_target_inds
        self.rcnn_targets = rcnn_targets
        return dets

    def loss(self, x, annotation):
        """Total loss (RPN cls+reg plus RCNN cls+reg) from the tensors
        stashed by forward(); only valid in 'train' mode."""
        if self.mode=='train': #this is always in training mode!
            cfg = self.cfg
            rpn_scores_flat = self.rpn_scores_flat
            rpn_deltas_flat = self.rpn_deltas_flat
            rcnn_scores_flat = self.rcnn_scores_flat
            rcnn_deltas_flat = self.rcnn_deltas_flat
            rpn_label_inds = self.rpn_label_inds
            rpn_labels = self.rpn_labels
            rpn_target_inds = self.rpn_target_inds
            rpn_targets = self.rpn_targets
            rcnn_labels = self.rcnn_labels
            rcnn_target_inds = self.rcnn_target_inds
            rcnn_targets = self.rcnn_targets
            rpn_cls_loss, rpn_reg_loss = rpn_loss(rpn_scores_flat, rpn_deltas_flat, rpn_label_inds, rpn_labels, rpn_target_inds, rpn_targets)
            rcnn_cls_loss, rcnn_reg_loss = rcnn_loss(rcnn_scores_flat, rcnn_deltas_flat, rcnn_labels, rcnn_target_inds, rcnn_targets)
            total_loss = rpn_cls_loss + rpn_reg_loss + rcnn_cls_loss + rcnn_reg_loss
            self.rpn_cls_loss = rpn_cls_loss
            self.rpn_reg_loss = rpn_reg_loss
            self.rcnn_cls_loss = rcnn_cls_loss
            self.rcnn_reg_loss = rcnn_reg_loss
            return total_loss
        else:
            # BUG FIX: was 'self.model' (AttributeError) instead of the
            # intended ValueError message.
            raise ValueError('loss: invalid mode = %s?'%self.mode)

    def save(self, model_dir):
        """Save config, anchor bases, per-net source dumps and weights."""
        os.makedirs(model_dir, exist_ok=True)
        #save header
        with open(model_dir +'/faster_rcnn_net.txt', 'w') as f:
            f.write('%s\n\n'%str(type(self )))
            f.write(inspect.getsource(self.__init__)+'\n')
            f.write(inspect.getsource(self.forward )+'\n')
            f.write('%s\n\n'%('-'*100))
            f.write(str(self))
            f.write('\n')
        self.cfg.save(model_dir +'/cfg')
        np.savetxt(model_dir +'/bases.txt',self.bases,fmt='%0.1f')
        #save each sub-net
        state_dicts = {}
        num_nets = len(self.nets)
        for n in range(num_nets):
            net = self.nets[n]
            name = self.names[n]
            state_dicts[name] = net.state_dict()
            with open(model_dir +'/%s.txt'%name, 'w') as f:
                f.write('%s\n\n'%str(type(net )))
                f.write(inspect.getsource(net.__init__)+'\n')
                f.write(inspect.getsource(net.forward )+'\n')
                f.write('%s\n\n'%('-'*100))
                f.write(str(net))
                f.write('\n')
        # NOTE: filename 'state_dics.pth' (sic) kept so load() and any
        # previously written checkpoints remain compatible.
        torch.save(state_dicts, model_dir +'/state_dics.pth')

    def load(self, model_dir):
        """Restore all sub-net weights previously written by save()."""
        state_dicts = torch.load( model_dir +'/state_dics.pth')
        num_nets = len(self.nets)
        for n in range(num_nets):
            net = self.nets[n]
            name = self.names[n]
            net.load_state_dict(state_dicts[name])
#-----------------------------------------------------------------------------
if __name__ == '__main__':
    # Smoke test on a single hard-coded dummy image + annotation.
    print( '%s: calling main function ... ' % os.path.basename(__file__))
    #dummy data
    annotation_file = '/media/ssd/[data]/dummy/object-det-debug/bird_and_flower/annotations/000.txt'
    img_file = '/media/ssd/[data]/dummy/object-det-debug/bird_and_flower/images/000.jpg'
    annotation = load_annotation(annotation_file, img_file)
    img = cv2.imread(img_file)
    x, a = default_transform(img, annotation)
    #parameters
    cfg = Configuration() #default configuration
    #change configurations here ...
    cfg.rpn.train_fg_thresh_low = 0.8
    cfg.rpn.scales=[64,128,256]
    cfg.rpn.ratios=[1,0.5]
    faster_rcnn_net = FasterRcnnNet(cfg)
    x = Variable(x).cuda()
    # One forward pass in train mode (builds targets from the annotation)
    # and one in eval mode (no annotation required).
    faster_rcnn_net.mode = 'train'
    faster_rcnn_net.forward(x, annotation)
    faster_rcnn_net.mode = 'eval'
    faster_rcnn_net.forward(x)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2011, Nicolas Clairon
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California, Berkeley nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from mongokit import *
class StructureTestCase(unittest.TestCase):
    """Tests of mongokit's `structure` validation for SchemaDocument and
    Document (Python 2 API: `unicode` type in structures; the Document
    tests require a running MongoDB instance)."""
    def setUp(self):
        # Live MongoDB connection; tests work in the test.mongokit collection.
        self.connection = Connection()
        self.col = self.connection['test']['mongokit']
    def tearDown(self):
        # Drop the working collection so each test starts clean.
        self.connection['test'].drop_collection('mongokit')
    def test_no_structure(self):
        # Declaring a SchemaDocument with no structure must not raise.
        failed = False
        try:
            class MyDoc(SchemaDocument): pass
        except StructureError:
            failed = True
        self.assertEqual(failed, False)
    def test_empty_structure(self):
        # An empty structure yields an empty document.
        class MyDoc(SchemaDocument):
            structure = {}
        assert MyDoc() == {}
    def test_structure_not_dict(self):
        # A non-dict structure raises StructureError at class creation time.
        failed = False
        try:
            class MyDoc(SchemaDocument):
                structure = 3
        except StructureError:
            failed = True
        self.assertEqual(failed, True)
    def test_load_with_dict(self):
        # A document initialised from a plain dict keeps its values and
        # validates against the structure.
        doc = {"foo":1, "bla":{"bar":u"spam"}}
        class MyDoc(SchemaDocument):
            structure = {"foo":int, "bla":{"bar":unicode}}
        mydoc = MyDoc(doc)
        assert mydoc == doc
        mydoc.validate()
    def test_simple_structure(self):
        # Declared fields default to None in a fresh document.
        class MyDoc(SchemaDocument):
            structure = {
                "foo":unicode,
                "bar":int
            }
        assert MyDoc() == {"foo":None, "bar":None}
    def test_missed_field(self):
        # A loaded dict missing declared fields fails validation.
        doc = {"foo":u"arf"}
        class MyDoc(SchemaDocument):
            structure = {
                "foo":unicode,
                "bar":{"bla":int}
            }
        mydoc = MyDoc(doc)
        self.assertRaises(StructureError, mydoc.validate)
    def test_unknown_field(self):
        # Setting a key that is absent from the structure fails validation.
        class MyDoc(SchemaDocument):
            structure = {
                "foo":unicode,
            }
        mydoc = MyDoc()
        mydoc["bar"] = 4
        self.assertRaises(StructureError, mydoc.validate)
    def test_None(self):
        # A None type accepts unicode and int values, but a byte str is
        # rejected with AuthorizedTypeError.
        class MyDoc(SchemaDocument):
            structure = {
                "foo":None,
                "bar":{
                    "bla":None
                }
            }
        mydoc = MyDoc()
        mydoc['foo'] = u'bla'
        mydoc.validate()
        mydoc['foo'] = 3
        mydoc['bar']['bla'] = 2
        mydoc.validate()
        mydoc['foo'] = 'arf'
        self.assertRaises(AuthorizedTypeError, mydoc.validate)
    def test_big_nested_structure(self):
        # Deep nesting flattens into dotted _namespaces; type-keyed dicts
        # are rendered as $<type> entries.
        class MyDoc(SchemaDocument):
            structure = {
                "1":{
                    "2":{
                        "3":{
                            "4":{
                                "5":{
                                    "6":{
                                        "7":int,
                                        "8":{
                                            unicode:{int:int}
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        mydoc = MyDoc()
        assert mydoc._namespaces == ['1', '1.2', '1.2.3', '1.2.3.4', '1.2.3.4.5', '1.2.3.4.5.6', '1.2.3.4.5.6.8', '1.2.3.4.5.6.8.$unicode', '1.2.3.4.5.6.8.$unicode.$int', '1.2.3.4.5.6.7']
        mydoc['1']['2']['3']['4']['5']['6']['7'] = 8
        # Wrong value type under the {unicode:{int:int}} pattern.
        mydoc['1']['2']['3']['4']['5']['6']['8'] = {u"bla":{3:u"bla"}}
        self.assertRaises(SchemaTypeError, mydoc.validate)
        # Wrong key type (int key where unicode is expected).
        mydoc['1']['2']['3']['4']['5']['6']['8'] = {9:{3:10}}
        self.assertRaises(SchemaTypeError, mydoc.validate)
        mydoc['1']['2']['3']['4']['5']['6']['8'] = {u"bla":{3:4}}
        mydoc.validate()
    def test_big_nested_structure_mongo_document(self):
        # Same nesting checks via a registered Document bound to the
        # collection (this structure uses {unicode:{unicode:int}}).
        class MyDoc(Document):
            structure = {
                "1":{
                    "2":{
                        "3":{
                            "4":{
                                "5":{
                                    "6":{
                                        "7":int,
                                        "8":{
                                            unicode:{unicode:int}
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        self.connection.register([MyDoc])
        mydoc = self.col.MyDoc()
        assert mydoc._namespaces == ['1', '1.2', '1.2.3', '1.2.3.4', '1.2.3.4.5', '1.2.3.4.5.6', '1.2.3.4.5.6.8', '1.2.3.4.5.6.8.$unicode', '1.2.3.4.5.6.8.$unicode.$unicode', '1.2.3.4.5.6.7']
        mydoc['1']['2']['3']['4']['5']['6']['7'] = 8
        mydoc['1']['2']['3']['4']['5']['6']['8'] = {u"bla":{"3":u"bla"}}
        self.assertRaises(SchemaTypeError, mydoc.validate)
        mydoc['1']['2']['3']['4']['5']['6']['8'] = {"9":{"3":10}}
        self.assertRaises(SchemaTypeError, mydoc.validate)
        mydoc['1']['2']['3']['4']['5']['6']['8'] = {u"bla":{u"3":4}}
        mydoc.validate()
    def test_dot_notation(self):
        # use_dot_notation exposes structure fields as attributes, with the
        # same validation rules as item access.
        class MyDoc(SchemaDocument):
            use_dot_notation = True
            structure = {
                "foo":int,
                "bar":unicode
            }
        mydoc = MyDoc()
        mydoc.foo = "3"
        self.assertRaises(SchemaTypeError, mydoc.validate)
        mydoc.foo = 3
        assert mydoc['foo'] == 3
        assert mydoc == {'foo':3, 'bar':None}
        mydoc.validate()
        mydoc.bar = u"bar"
        assert mydoc == {'foo':3, 'bar':'bar'}
        mydoc.validate()
    def test_dot_notation_nested(self):
        # Dot notation traverses nested structures; attributes that are not
        # in the structure live on the object, not in the document.
        class MyDoc(SchemaDocument):
            use_dot_notation = True
            structure = {
                "foo":{
                    "bar":unicode
                }
            }
        mydoc = MyDoc()
        mydoc.foo.bar = 3
        self.assertRaises(SchemaTypeError, mydoc.validate)
        mydoc.foo.bar = u"bar"
        assert mydoc.foo.bar == u'bar'
        mydoc.foo.bla = 2
        assert mydoc.foo.bla == 2
        assert mydoc['foo'] == {"bar":"bar"}, mydoc
        assert mydoc['foo']['bar'] == 'bar'
        assert mydoc == {'foo':{'bar':'bar'}}
        mydoc.validate()
    def test_document_dot_notation_nested(self):
        # As above but for a registered Document: nested dicts stay DotedDict
        # even after a save()/find_one() round trip.
        class MyDoc(Document):
            use_dot_notation = True
            structure = {
                "foo":{
                    "bar":unicode
                }
            }
        self.connection.register([MyDoc])
        mydoc = self.col.MyDoc()
        mydoc.foo.bar = u"bar"
        self.assertEqual(mydoc.foo.bar, u'bar')
        mydoc.foo.bla = 2
        assert isinstance(mydoc.foo, DotedDict), type(mydoc.foo)
        self.assertEqual(mydoc.foo.bla, 2)
        self.assertEqual(mydoc['foo'], {"bar":"bar"})
        self.assertEqual(mydoc['foo']['bar'], 'bar')
        self.assertEqual(mydoc, {'foo':{'bar':'bar'}})
        mydoc.save()
        fetched_doc = self.col.MyDoc.find_one()
        assert isinstance(fetched_doc.foo, DotedDict), type(fetched_doc.foo)
        self.assertEqual(fetched_doc.foo.bar, "bar")
    def test_dot_notation_field_not_in_structure(self):
        # Assigning attributes outside the structure must not alter the
        # underlying document (basicConfig is set up because mongokit
        # presumably logs on such access — not asserted here).
        class MyDoc(SchemaDocument):
            use_dot_notation = True
            structure = {
                "foo":{
                    "bar":unicode,
                },
                "spam":int,
            }
        import logging
        logging.basicConfig()
        mydoc = MyDoc()
        mydoc.eggs = 4
        assert mydoc == {'foo':{'bar':None}, 'spam':None}
        assert mydoc.eggs == 4
        try:
            mydoc.not_found
        except AttributeError, e:
            print str(e)
        mydoc.foo.eggs = 4
        assert mydoc == {'foo':{'bar':None}, 'spam':None}, mydoc
        mydoc.validate()
    def test_field_changed(self):
        # Re-registering a Document with a changed structure makes documents
        # saved under the old structure fail validation, even after an
        # unvalidated re-save.
        class MyDoc(Document):
            structure = {
                'foo':int,
                'bar':unicode,
            }
        self.connection.register([MyDoc])
        doc = self.col.MyDoc()
        doc['foo'] = 3
        doc.save()
        class MyDoc(Document):
            structure = {
                'foo':int,
                'arf': unicode,
            }
        self.connection.register([MyDoc])
        fetched_doc = self.col.MyDoc.find_one()
        self.assertRaises(StructureError, fetched_doc.validate)
        fetched_doc['foo'] = 2
        fetched_doc.save(validate=False)
        fetched_doc = self.col.MyDoc.find_one()
        self.assertRaises(StructureError, fetched_doc.validate)
    def test_exception_bad_structure(self):
        # Using a callable (not a type) as a structure value raises TypeError
        # with a descriptive message at class creation time.
        import datetime
        failed = False
        try:
            class MyDoc(SchemaDocument):
                structure = {
                    'topic': unicode,
                    'when': datetime.datetime.utcnow,
                }
        except TypeError, e:
            assert str(e).startswith("MyDoc: <built-in method utcnow of type object at "), str(e)
            assert str(e).endswith("is not a type")
            failed = True
        self.assertEqual(failed, True)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` acting like a lower triangular matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"LinearOperatorLowerTriangular",
]
@tf_export("linalg.LinearOperatorLowerTriangular")
class LinearOperatorLowerTriangular(linear_operator.LinearOperator):
"""`LinearOperator` acting like a [batch] square lower triangular matrix.
This operator acts like a [batch] lower triangular matrix `A` with shape
`[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `N x N` matrix.
`LinearOperatorLowerTriangular` is initialized with a `Tensor` having
dimensions `[B1,...,Bb, N, N]`. The upper triangle of the last two
dimensions is ignored.
```python
# Create a 2 x 2 lower-triangular linear operator.
tril = [[1., 2.], [3., 4.]]
operator = LinearOperatorLowerTriangular(tril)
# The upper triangle is ignored.
operator.to_dense()
==> [[1., 0.]
[3., 4.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor
# Create a [2, 3] batch of 4 x 4 linear operators.
tril = tf.random_normal(shape=[2, 3, 4, 4])
operator = LinearOperatorLowerTriangular(tril)
```
#### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, N], with b >= 0
x.shape = [B1,...,Bb] + [N, R], with R >= 0.
```
#### Performance
Suppose `operator` is a `LinearOperatorLowerTriangular` of shape `[N, N]`,
and `x.shape = [N, R]`. Then
* `operator.matmul(x)` involves `N^2 * R` multiplications.
* `operator.solve(x)` involves `N * R` size `N` back-substitutions.
* `operator.determinant()` involves a size `N` `reduce_prod`.
If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
  def __init__(self,
               tril,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name="LinearOperatorLowerTriangular"):
    r"""Initialize a `LinearOperatorLowerTriangular`.

    Args:
      tril: Shape `[B1,...,Bb, N, N]` with `b >= 0`, `N >= 0`.
        The lower triangular part of `tril` defines this operator. The strictly
        upper triangle is ignored.
      is_non_singular: Expect that this operator is non-singular.
        This operator is non-singular if and only if its diagonal elements are
        all non-zero.
      is_self_adjoint: Expect that this operator is equal to its hermitian
        transpose. This operator is self-adjoint only if it is diagonal with
        real-valued diagonal entries. In this case it is advised to use
        `LinearOperatorDiag`.
      is_positive_definite: Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`. Note that we do not require the operator to be
        self-adjoint to be positive-definite. See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
      is_square: Expect that this operator acts like square [batch] matrices.
      name: A name for this `LinearOperator`.

    Raises:
      ValueError: If `is_square` is `False`.
    """
    # A triangular operator is N x N by construction, so `is_square=False`
    # is contradictory and rejected up front.
    if is_square is False:
      raise ValueError(
          "Only square lower triangular operators supported at this time.")
    is_square = True
    with ops.name_scope(name, values=[tril]):
      self._tril = ops.convert_to_tensor(tril, name="tril")
      self._check_tril(self._tril)
      # Zero out the strictly upper triangle; only the lower band (including
      # the diagonal) defines the operator.
      self._tril = array_ops.matrix_band_part(tril, -1, 0)
      # Cache the diagonal: it drives _determinant, _log_abs_determinant and
      # the _assert_non_singular check below.
      self._diag = array_ops.matrix_diag_part(self._tril)
      super(LinearOperatorLowerTriangular, self).__init__(
          dtype=self._tril.dtype,
          graph_parents=[self._tril],
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
          name=name)
def _check_tril(self, tril):
"""Static check of the `tril` argument."""
allowed_dtypes = [
dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.complex64,
dtypes.complex128,
]
dtype = tril.dtype
if dtype not in allowed_dtypes:
raise TypeError(
"Argument tril must have dtype in %s. Found: %s"
% (allowed_dtypes, dtype))
if tril.get_shape().ndims is not None and tril.get_shape().ndims < 2:
raise ValueError(
"Argument tril must have at least 2 dimensions. Found: %s"
% tril)
  def _shape(self):
    # Static shape, identical to the defining `tril` tensor: [B1,...,Bb, N, N].
    return self._tril.get_shape()
  def _shape_tensor(self):
    # Dynamic (graph-time) shape of the defining `tril` tensor.
    return array_ops.shape(self._tril)
  def _assert_non_singular(self):
    # A triangular matrix is singular iff some diagonal entry is zero, so it
    # suffices to check the cached diagonal.
    return linear_operator_util.assert_no_entries_with_modulus_zero(
        self._diag,
        message="Singular operator: Diagonal contained zero values.")
  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    # Delegate to the broadcasting matmul helper; `adjoint` / `adjoint_arg`
    # request the hermitian transpose of the operator / of `x` respectively.
    return linear_operator_util.matmul_with_broadcast(
        self._tril, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)
  def _determinant(self):
    # For a triangular matrix, det = product of diagonal entries; reduce over
    # the last axis so batch dimensions are preserved.
    return math_ops.reduce_prod(self._diag, reduction_indices=[-1])
  def _log_abs_determinant(self):
    # log|det| = sum of log|diag| for a triangular matrix; reduce over the
    # last axis so batch dimensions are preserved.
    return math_ops.reduce_sum(
        math_ops.log(math_ops.abs(self._diag)), reduction_indices=[-1])
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
return linear_operator_util.matrix_triangular_solve_with_broadcast(
self._tril, rhs, lower=True, adjoint=adjoint)
  def _to_dense(self):
    # `self._tril` already has its strict upper triangle zeroed in __init__,
    # so it *is* the dense representation.
    return self._tril
  def _add_to_tensor(self, x):
    # Dense elementwise addition; relies on broadcasting against `x`.
    return self._tril + x
|
|
# -*- coding: iso-8859-1 -*-
"""Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (func_*, co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), istraceback(),
isframe(), iscode(), isbuiltin(), isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues() - get info about function arguments
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
"""
# This module is in the public domain. No warranties.
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__date__ = '1 Jan 2001'
import sys, os, types, string, re, dis, imp, tokenize, linecache
from operator import attrgetter
# ----------------------------------------------------------- type-checking
def ismodule(object):
    """Return true if the object is a module.
    Module objects provide these attributes:
        __doc__         documentation string
        __file__        filename (missing for built-in modules)"""
    # Plain isinstance test; built-in modules qualify too.
    return isinstance(object, types.ModuleType)
def isclass(object):
    """Return true if the object is a class.
    Class objects provide these attributes:
        __doc__         documentation string
        __module__      name of module in which this class was defined"""
    # Old-style (classic) classes are types.ClassType; new-style classes are
    # detected duck-typed via the presence of __bases__.
    return isinstance(object, types.ClassType) or hasattr(object, '__bases__')
def ismethod(object):
    """Return true if the object is an instance method.
    Instance method objects provide these attributes:
        __doc__         documentation string
        __name__        name with which this method was defined
        im_class        class object in which this method belongs
        im_func         function object containing implementation of method
        im_self         instance to which this method is bound, or None"""
    # In Python 2, both bound and unbound methods are types.MethodType.
    return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
    """Return true if the object is a method descriptor.
    But not if ismethod() or isclass() or isfunction() are true.
    This is new in Python 2.2, and, for example, is true of int.__add__.
    An object passing this test has a __get__ attribute but not a __set__
    attribute, but beyond that the set of attributes varies.  __name__ is
    usually sensible, and __doc__ often is.
    Methods implemented via descriptors that also pass one of the other
    tests return false from the ismethoddescriptor() test, simply because
    the other tests promise more -- you can, e.g., count on having the
    im_func attribute (etc) when an object passes ismethod()."""
    # Mutual exclusion: the other predicates promise strictly more.
    if ismethod(object) or isclass(object) or isfunction(object):
        return False
    # A method descriptor exposes __get__ but, unlike a data descriptor,
    # no __set__.
    return hasattr(object, "__get__") and not hasattr(object, "__set__")
def isdatadescriptor(object):
    """Return true if the object is a data descriptor.
    Data descriptors have both a __get__ and a __set__ attribute.  Examples are
    properties (defined in Python) and getsets and members (defined in C).
    Typically, data descriptors will also have __name__ and __doc__ attributes
    (properties, getsets, and members have both of these attributes), but this
    is not guaranteed."""
    # Both slots must be present; an object with only __get__ is a non-data
    # descriptor (see ismethoddescriptor above).
    return (hasattr(object, "__set__") and hasattr(object, "__get__"))
if hasattr(types, 'MemberDescriptorType'):
    # CPython and equivalent
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.
        Member descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.MemberDescriptorType)
else:
    # Other implementations
    # NOTE: this platform exposes no distinct member-descriptor type, so the
    # predicate is conservatively always false.
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.
        Member descriptors are specialized descriptors defined in extension
        modules."""
        return False
if hasattr(types, 'GetSetDescriptorType'):
    # CPython and equivalent
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.
        getset descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.GetSetDescriptorType)
else:
    # Other implementations
    # NOTE: this platform exposes no distinct getset-descriptor type, so the
    # predicate is conservatively always false.
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.
        getset descriptors are specialized descriptors defined in extension
        modules."""
        return False
def isfunction(object):
    """Return true if the object is a user-defined function.
    Function objects provide these attributes:
        __doc__         documentation string
        __name__        name with which this function was defined
        func_code       code object containing compiled function bytecode
        func_defaults   tuple of any default values for arguments
        func_doc        (same as __doc__)
        func_globals    global namespace in which this function was defined
        func_name       (same as __name__)"""
    # Built-in functions are deliberately excluded (see isbuiltin below).
    return isinstance(object, types.FunctionType)
def istraceback(object):
    """Return true if the object is a traceback.
    Traceback objects provide these attributes:
        tb_frame        frame object at this level
        tb_lasti        index of last attempted instruction in bytecode
        tb_lineno       current line number in Python source code
        tb_next         next inner traceback object (called by this level)"""
    return isinstance(object, types.TracebackType)
def isframe(object):
    """Return true if the object is a frame object.
    Frame objects provide these attributes:
        f_back          next outer frame object (this frame's caller)
        f_builtins      built-in namespace seen by this frame
        f_code          code object being executed in this frame
        f_exc_traceback traceback if raised in this frame, or None
        f_exc_type      exception type if raised in this frame, or None
        f_exc_value     exception value if raised in this frame, or None
        f_globals       global namespace seen by this frame
        f_lasti         index of last attempted instruction in bytecode
        f_lineno        current line number in Python source code
        f_locals        local namespace seen by this frame
        f_restricted    0 or 1 if frame is in restricted execution mode
        f_trace         tracing function for this frame, or None"""
    return isinstance(object, types.FrameType)
def iscode(object):
    """Return true if the object is a code object.
    Code objects provide these attributes:
        co_argcount     number of arguments (not including * or ** args)
        co_code         string of raw compiled bytecode
        co_consts       tuple of constants used in the bytecode
        co_filename     name of file in which this code object was created
        co_firstlineno  number of first line in Python source code
        co_flags        bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
        co_lnotab       encoded mapping of line numbers to bytecode indices
        co_name         name with which this code object was defined
        co_names        tuple of names of local variables
        co_nlocals      number of local variables
        co_stacksize    virtual machine stack space required
        co_varnames     tuple of names of arguments and local variables"""
    return isinstance(object, types.CodeType)
def isbuiltin(object):
    """Return true if the object is a built-in function or method.
    Built-in functions and methods provide these attributes:
        __doc__         documentation string
        __name__        original name of this function or method
        __self__        instance to which a method is bound, or None"""
    return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
    """Return true if the object is any kind of function or method."""
    # Accept built-ins, plain functions, bound/unbound methods, and method
    # descriptors such as int.__add__.
    checks = (isbuiltin, isfunction, ismethod, ismethoddescriptor)
    return any(check(object) for check in checks)
def getmembers(object, predicate=None):
    """Return all members of an object as (name, value) pairs sorted by name.
    Optionally, only return members that satisfy a given predicate."""
    # getattr (rather than a __dict__ lookup) is used so descriptors resolve
    # exactly as they would for ordinary attribute access.
    pairs = [(key, getattr(object, key)) for key in dir(object)]
    if predicate:
        pairs = [(key, value) for key, value in pairs if predicate(value)]
    # dir() yields unique names, so sorting pairs orders them by name.
    return sorted(pairs)
def classify_class_attrs(cls):
    """Return list of attribute-descriptor tuples.
    For each name in dir(cls), the return list contains a 4-tuple
    with these elements:
        0. The name (a string).
        1. The kind of attribute this is, one of these strings:
               'class method'    created via classmethod()
               'static method'   created via staticmethod()
               'property'        created via property()
               'method'          any other flavor of method
               'data'            not a method
        2. The class which defined this attribute (a class).
        3. The object as obtained directly from the defining class's
           __dict__, not via getattr.  This is especially important for
           data attributes:  C.data is just a data object, but
           C.__dict__['data'] may be a data descriptor with additional
           info, like a __doc__ string.
    """
    mro = getmro(cls)
    names = dir(cls)
    result = []
    for name in names:
        # Get the object associated with the name.
        # Getting an obj from the __dict__ sometimes reveals more than
        # using getattr.  Static and class methods are dramatic examples.
        if name in cls.__dict__:
            obj = cls.__dict__[name]
        else:
            obj = getattr(cls, name)
        # Figure out where it was defined.
        homecls = getattr(obj, "__objclass__", None)
        if homecls is None:
            # search the dicts.
            # Walk the MRO; the first class whose __dict__ holds the name is
            # taken as the defining class.
            for base in mro:
                if name in base.__dict__:
                    homecls = base
                    break
        # Get the object again, in order to get it from the defining
        # __dict__ instead of via getattr (if possible).
        if homecls is not None and name in homecls.__dict__:
            obj = homecls.__dict__[name]
        # Also get the object via getattr.
        # The getattr view is used for the method/data classification below,
        # since descriptors (e.g. staticmethod wrappers) unwrap via getattr.
        obj_via_getattr = getattr(cls, name)
        # Classify the object.
        if isinstance(obj, staticmethod):
            kind = "static method"
        elif isinstance(obj, classmethod):
            kind = "class method"
        elif isinstance(obj, property):
            kind = "property"
        elif (ismethod(obj_via_getattr) or
              ismethoddescriptor(obj_via_getattr)):
            kind = "method"
        else:
            kind = "data"
        result.append((name, kind, homecls, obj))
    return result
# ----------------------------------------------------------- class helpers
def _searchbases(cls, accum):
# Simulate the "classic class" search order.
if cls in accum:
return
accum.append(cls)
for base in cls.__bases__:
_searchbases(base, accum)
def getmro(cls):
    "Return tuple of base classes (including cls) in method resolution order."
    # New-style classes carry a precomputed MRO.
    if hasattr(cls, "__mro__"):
        return cls.__mro__
    # Classic classes: emulate the depth-first, left-to-right search.
    collected = []
    _searchbases(cls, collected)
    return tuple(collected)
# -------------------------------------------------- source code extraction
def indentsize(line):
    """Return the indent size, in spaces, at the start of a line of text."""
    # Use str methods instead of the deprecated ``string`` module functions;
    # behavior is identical (expandtabs defaults to tab size 8).
    expline = line.expandtabs()
    return len(expline) - len(expline.lstrip())
def getdoc(object):
    """Get the documentation string for an object.
    All tabs are expanded to spaces.  To clean up docstrings that are
    indented to line up with blocks of code, any whitespace than can be
    uniformly removed from the second line onwards is removed."""
    try:
        doc = object.__doc__
    except AttributeError:
        return None
    # Non-string __doc__ values (e.g. a property misused as a docstring)
    # are treated as missing documentation.
    if not isinstance(doc, types.StringTypes):
        return None
    try:
        lines = string.split(string.expandtabs(doc), '\n')
    except UnicodeError:
        return None
    else:
        # Find minimum indentation of any non-blank lines after first line.
        margin = sys.maxint
        for line in lines[1:]:
            content = len(string.lstrip(line))
            if content:
                indent = len(line) - content
                margin = min(margin, indent)
        # Remove indentation.
        # The first line is special: its indentation comes from the quote
        # position, so it is stripped outright.
        if lines:
            lines[0] = lines[0].lstrip()
        if margin < sys.maxint:
            for i in range(1, len(lines)): lines[i] = lines[i][margin:]
        # Remove any trailing or leading blank lines.
        while lines and not lines[-1]:
            lines.pop()
        while lines and not lines[0]:
            lines.pop(0)
        return string.join(lines, '\n')
def getfile(object):
    """Work out which source or compiled file an object was defined in."""
    if ismodule(object):
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('arg is a built-in module')
    if isclass(object):
        # Resolve a class through the module it was defined in.
        object = sys.modules.get(object.__module__)
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('arg is a built-in class')
    # Successively unwrap: method -> function -> code; traceback/frame -> code.
    if ismethod(object):
        object = object.im_func
    if isfunction(object):
        object = object.func_code
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        return object.co_filename
    raise TypeError('arg is not a module, class, method, '
                    'function, traceback, frame, or code object')
def getmoduleinfo(path):
    """Get the module name, suffix, mode, and module type for a given file.

    Returns None (implicitly) when no registered import suffix matches.
    """
    filename = os.path.basename(path)
    # NOTE: the tuple-unpacking lambda parameter below is Python 2-only
    # syntax; the negated length sorts longest suffixes first.
    suffixes = map(lambda (suffix, mode, mtype):
                   (-len(suffix), suffix, mode, mtype), imp.get_suffixes())
    suffixes.sort() # try longest suffixes first, in case they overlap
    for neglen, suffix, mode, mtype in suffixes:
        if filename[neglen:] == suffix:
            return filename[:neglen], suffix, mode, mtype
def getmodulename(path):
    """Return the module name for a given file, or None."""
    info = getmoduleinfo(path)
    if not info:
        # No recognized import suffix matched the file name.
        return None
    return info[0]
def getsourcefile(object):
    """Return the Python source file an object was defined in, if it exists."""
    filename = getfile(object)
    # Map compiled files back to their source file name.
    if string.lower(filename[-4:]) in ('.pyc', '.pyo'):
        filename = filename[:-4] + '.py'
    for suffix, mode, kind in imp.get_suffixes():
        if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix:
            # Looks like a binary file.  We want to only return a text file.
            return None
    if os.path.exists(filename):
        return filename
    # only return a non-existent filename if the module has a PEP 302 loader
    if hasattr(getmodule(object, filename), '__loader__'):
        return filename
def getabsfile(object, _filename=None):
    """Return an absolute path to the source or compiled file for an object.
    The idea is for each object to have a unique origin, so this routine
    normalizes the result as much as possible."""
    filename = _filename
    if filename is None:
        # Prefer the source file; fall back to the compiled file.
        filename = getsourcefile(object) or getfile(object)
    return os.path.normcase(os.path.abspath(filename))
# Cache mapping normalized file names -> module names, filled in lazily by
# getmodule() below.
modulesbyfile = {}
# Cache mapping module names -> the __file__ under which they were last seen,
# used by getmodule() to skip modules already indexed in modulesbyfile.
_filesbymodname = {}
def getmodule(object, _filename=None):
    """Return the module an object was defined in, or None if not found."""
    if ismodule(object):
        return object
    if hasattr(object, '__module__'):
        return sys.modules.get(object.__module__)
    # Try the filename to modulename cache
    if _filename is not None and _filename in modulesbyfile:
        return sys.modules.get(modulesbyfile[_filename])
    # Try the cache again with the absolute file name
    try:
        file = getabsfile(object, _filename)
    except TypeError:
        return None
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Update the filename to module name cache and check yet again
    # Copy sys.modules in order to cope with changes while iterating
    for modname, module in sys.modules.items():
        if ismodule(module) and hasattr(module, '__file__'):
            f = module.__file__
            if f == _filesbymodname.get(modname, None):
                # Have already mapped this module, so skip it
                continue
            _filesbymodname[modname] = f
            f = getabsfile(module)
            # Always map to the name the module knows itself by
            modulesbyfile[f] = modulesbyfile[
                os.path.realpath(f)] = module.__name__
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Check the main module
    main = sys.modules['__main__']
    if not hasattr(object, '__name__'):
        return None
    if hasattr(main, object.__name__):
        mainobject = getattr(main, object.__name__)
        if mainobject is object:
            return main
    # Check builtins
    # Falls through (returning None) if the object is nowhere to be found.
    builtin = sys.modules['__builtin__']
    if hasattr(builtin, object.__name__):
        builtinobject = getattr(builtin, object.__name__)
        if builtinobject is object:
            return builtin
def findsource(object):
    """Return the entire source file and starting line number for an object.
    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list.  An IOError
    is raised if the source code cannot be retrieved."""
    file = getsourcefile(object) or getfile(object)
    module = getmodule(object, file)
    if module:
        # Pass the module globals so linecache can use a PEP 302 loader.
        lines = linecache.getlines(file, module.__dict__)
    else:
        lines = linecache.getlines(file)
    if not lines:
        raise IOError('could not get source code')
    if ismodule(object):
        return lines, 0
    if isclass(object):
        name = object.__name__
        pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
        # make some effort to find the best matching class definition:
        # use the one with the least indentation, which is the one
        # that's most probably not inside a function definition.
        candidates = []
        for i in range(len(lines)):
            match = pat.match(lines[i])
            if match:
                # if it's at toplevel, it's already the best one
                if lines[i][0] == 'c':
                    return lines, i
                # else add whitespace to candidate list
                candidates.append((match.group(1), i))
        if candidates:
            # this will sort by whitespace, and by line number,
            # less whitespace first
            candidates.sort()
            return lines, candidates[0][1]
        else:
            raise IOError('could not find class definition')
    # Successively unwrap: method -> function -> code; traceback/frame -> code.
    if ismethod(object):
        object = object.im_func
    if isfunction(object):
        object = object.func_code
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise IOError('could not find function definition')
        lnum = object.co_firstlineno - 1
        # Scan backwards for the `def`, a lambda, or a decorator line, since
        # co_firstlineno may point past decorators.
        pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
        while lnum > 0:
            if pat.match(lines[lnum]): break
            lnum = lnum - 1
        return lines, lnum
    raise IOError('could not find code object')
def getcomments(object):
    """Get lines of comments immediately preceding an object's source code.

    Returns None when source can't be found.
    """
    try:
        lines, lnum = findsource(object)
    except (IOError, TypeError):
        return None
    if ismodule(object):
        # Look for a comment block at the top of the file.
        start = 0
        # Skip a leading shebang line, then blank/empty-comment lines.
        if lines and lines[0][:2] == '#!': start = 1
        while start < len(lines) and string.strip(lines[start]) in ('', '#'):
            start = start + 1
        if start < len(lines) and lines[start][:1] == '#':
            comments = []
            end = start
            while end < len(lines) and lines[end][:1] == '#':
                comments.append(string.expandtabs(lines[end]))
                end = end + 1
            return string.join(comments, '')
    # Look for a preceding block of comments at the same indentation.
    elif lnum > 0:
        indent = indentsize(lines[lnum])
        end = lnum - 1
        if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \
            indentsize(lines[end]) == indent:
            comments = [string.lstrip(string.expandtabs(lines[end]))]
            if end > 0:
                # Extend the block upwards while lines remain comments at the
                # same indentation.
                end = end - 1
                comment = string.lstrip(string.expandtabs(lines[end]))
                while comment[:1] == '#' and indentsize(lines[end]) == indent:
                    comments[:0] = [comment]
                    end = end - 1
                    if end < 0: break
                    comment = string.lstrip(string.expandtabs(lines[end]))
            # Trim bare '#' lines from both ends of the block.
            while comments and string.strip(comments[0]) == '#':
                comments[:1] = []
            while comments and string.strip(comments[-1]) == '#':
                comments[-1:] = []
            return string.join(comments, '')
class EndOfBlock(Exception): pass  # raised by BlockFinder.tokeneater to stop tokenizing at the end of a block
class BlockFinder:
    """Provide a tokeneater() method to detect the end of a code block."""
    def __init__(self):
        self.indent = 0        # current INDENT depth within the block
        self.islambda = False  # True once a `lambda` introduces the block
        self.started = False   # True once def/class/lambda has been seen
        self.passline = False  # True while skipping to the end of a line
        self.last = 1          # last line (1-based) known to be in the block
    # NOTE: the tuple-unpacking parameters below are Python 2-only syntax.
    def tokeneater(self, type, token, (srow, scol), (erow, ecol), line):
        if not self.started:
            # look for the first "def", "class" or "lambda"
            if token in ("def", "class", "lambda"):
                if token == "lambda":
                    self.islambda = True
                self.started = True
            self.passline = True    # skip to the end of the line
        elif type == tokenize.NEWLINE:
            self.passline = False   # stop skipping when a NEWLINE is seen
            self.last = srow
            if self.islambda:       # lambdas always end at the first NEWLINE
                raise EndOfBlock
        elif self.passline:
            pass
        elif type == tokenize.INDENT:
            self.indent = self.indent + 1
            self.passline = True
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # the end of matching indent/dedent pairs end a block
            # (note that this only works for "def"/"class" blocks,
            #  not e.g. for "if: else:" or "try: finally:" blocks)
            if self.indent <= 0:
                raise EndOfBlock
        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level end the previous
            # block as well, except the pseudo-tokens COMMENT and NL.
            raise EndOfBlock
def getblock(lines):
    """Extract the block of code at the top of the given list of lines."""
    blockfinder = BlockFinder()
    try:
        # Python 2 tokenize API: a readline callable plus a token callback.
        # EndOfBlock (raised by the callback) marks normal termination;
        # IndentationError can occur on a truncated final block.
        tokenize.tokenize(iter(lines).next, blockfinder.tokeneater)
    except (EndOfBlock, IndentationError):
        pass
    return lines[:blockfinder.last]
def getsourcelines(object):
    """Return a list of source lines and starting line number for an object.
    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of the lines
    corresponding to the object and the line number indicates where in the
    original source file the first line of code was found.  An IOError is
    raised if the source code cannot be retrieved."""
    lines, lnum = findsource(object)
    # Modules return the whole file (0); other objects return just their
    # block and a 1-based starting line number.
    if ismodule(object): return lines, 0
    else: return getblock(lines[lnum:]), lnum + 1
def getsource(object):
    """Return the text of the source code for an object.
    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a single string.  An
    IOError is raised if the source code cannot be retrieved."""
    lines, lnum = getsourcelines(object)
    # str.join replaces the deprecated string.join() module function.
    return ''.join(lines)
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
    """Recursive helper function for getclasstree()."""
    # Sort in place (callers rely on no copy being made) by module then name.
    classes.sort(key=attrgetter('__module__', '__name__'))
    results = []
    for klass in classes:
        results.append((klass, klass.__bases__))
        if klass in children:
            # Subclasses appear as a nested list right after their parent.
            results.append(walktree(children[klass], children, klass))
    return results
def getclasstree(classes, unique=0):
    """Arrange the given list of classes into a hierarchy of nested lists.
    Where a nested list appears, it contains classes derived from the class
    whose entry immediately precedes the list.  Each entry is a 2-tuple
    containing a class and a tuple of its base classes.  If the 'unique'
    argument is true, exactly one entry appears in the returned structure
    for each class in the given list.  Otherwise, classes using multiple
    inheritance and their descendants will appear multiple times."""
    children = {}
    roots = []
    for klass in classes:
        if klass.__bases__:
            # Register the class under each of its bases.
            for parent in klass.__bases__:
                children.setdefault(parent, []).append(klass)
                if unique and parent in classes:
                    break
        elif klass not in roots:
            roots.append(klass)
    # Bases that were referenced but not listed become additional roots.
    for parent in children:
        if parent not in classes:
            roots.append(parent)
    return walktree(roots, children, None)
# ------------------------------------------------ argument list extraction
# These constants are from Python's compile.h.
# Flag bits found in code objects' co_flags; getargs() below uses CO_VARARGS
# and CO_VARKEYWORDS to detect * and ** parameters.
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8
def getargs(co):
    """Get information about the arguments accepted by a code object.
    Three things are returned: (args, varargs, varkw), where 'args' is
    a list of argument names (possibly containing nested lists), and
    'varargs' and 'varkw' are the names of the * and ** arguments or None."""
    if not iscode(co):
        raise TypeError('arg is not a code object')
    if not sys.platform.startswith('java'):
        # Jython doesn't have co_code
        code = co.co_code
    nargs = co.co_argcount
    names = co.co_varnames
    args = list(names[:nargs])
    step = 0
    # The following acrobatics are for anonymous (tuple) arguments.
    # A Python 2 anonymous tuple argument shows up as a varname that is empty
    # or starts with '.'; its real structure is recovered by scanning the
    # UNPACK_*/STORE_FAST bytecode that unpacks it at function entry.
    for i in range(nargs):
        if args[i][:1] in ('', '.'):
            stack, remain, count = [], [], []
            while step < len(code):
                # Python 2: co_code is a str, so each opcode byte needs ord().
                op = ord(code[step])
                step = step + 1
                if op >= dis.HAVE_ARGUMENT:
                    opname = dis.opname[op]
                    # Two-byte little-endian argument (pre-3.6 wordcode).
                    value = ord(code[step]) + ord(code[step+1])*256
                    step = step + 2
                    if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'):
                        remain.append(value)
                        count.append(value)
                    elif opname == 'STORE_FAST':
                        stack.append(names[value])
                        # Special case for sublists of length 1: def foo((bar))
                        # doesn't generate the UNPACK_TUPLE bytecode, so if
                        # `remain` is empty here, we have such a sublist.
                        if not remain:
                            stack[0] = [stack[0]]
                            break
                        else:
                            remain[-1] = remain[-1] - 1
                            while remain[-1] == 0:
                                remain.pop()
                                size = count.pop()
                                stack[-size:] = [stack[-size:]]
                                if not remain: break
                                remain[-1] = remain[-1] - 1
                            if not remain: break
            args[i] = stack[0]
    varargs = None
    if co.co_flags & CO_VARARGS:
        # *args follows the positional arguments in co_varnames.
        varargs = co.co_varnames[nargs]
        nargs = nargs + 1
    varkw = None
    if co.co_flags & CO_VARKEYWORDS:
        # **kwargs follows *args (if any) in co_varnames.
        varkw = co.co_varnames[nargs]
    return args, varargs, varkw
def getargspec(func):
    """Get the names and default values of a function's arguments.
    A tuple of four things is returned: (args, varargs, varkw, defaults).
    'args' is a list of the argument names (it may contain nested lists).
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'defaults' is an n-tuple of the default values of the last n arguments.
    """
    if ismethod(func):
        # Operate on the underlying function object of a (un)bound method.
        func = func.im_func
    if not isfunction(func):
        raise TypeError('arg is not a Python function')
    args, varargs, varkw = getargs(func.func_code)
    return args, varargs, varkw, func.func_defaults
def getargvalues(frame):
    """Get information about arguments passed into a particular frame.
    A tuple of four things is returned: (args, varargs, varkw, locals).
    'args' is a list of the argument names (it may contain nested lists).
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'locals' is the locals dictionary of the given frame."""
    # The argument names come from the frame's code object; the values can be
    # looked up by the caller in frame.f_locals.
    args, varargs, varkw = getargs(frame.f_code)
    return args, varargs, varkw, frame.f_locals
def joinseq(seq):
    """Format a sequence of strings as a parenthesized, comma-separated group.

    A one-element sequence gets a trailing comma, mirroring tuple syntax.
    """
    if len(seq) == 1:
        return '(' + seq[0] + ',)'
    else:
        # str.join replaces the deprecated string.join() module function.
        return '(' + ', '.join(seq) + ')'
def strseq(object, convert, join=joinseq):
    """Recursively walk a sequence, stringifying each element."""
    if type(object) in (list, tuple):
        # A list comprehension replaces the old map()-with-default-arguments
        # trick; it is clearer and keeps the recursion explicit.
        return join([strseq(o, convert, join) for o in object])
    else:
        return convert(object)
def formatargspec(args, varargs=None, varkw=None, defaults=None,
                  formatarg=str,
                  formatvarargs=lambda name: '*' + name,
                  formatvarkw=lambda name: '**' + name,
                  formatvalue=lambda value: '=' + repr(value),
                  join=joinseq):
    """Format an argument spec from the 4 values returned by getargspec.
    The first four arguments are (args, varargs, varkw, defaults).  The
    other four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings.  The ninth
    argument is an optional function to format the sequence of arguments."""
    specs = []
    if defaults:
        # Defaults align with the *last* len(defaults) positional arguments.
        firstdefault = len(args) - len(defaults)
    for i in range(len(args)):
        spec = strseq(args[i], formatarg, join)
        if defaults and i >= firstdefault:
            spec = spec + formatvalue(defaults[i - firstdefault])
        specs.append(spec)
    if varargs is not None:
        specs.append(formatvarargs(varargs))
    if varkw is not None:
        specs.append(formatvarkw(varkw))
    # str.join replaces the deprecated string.join() module function.
    return '(' + ', '.join(specs) + ')'
def formatargvalues(args, varargs, varkw, locals,
                    formatarg=str,
                    formatvarargs=lambda name: '*' + name,
                    formatvarkw=lambda name: '**' + name,
                    formatvalue=lambda value: '=' + repr(value),
                    join=joinseq):
    """Format an argument spec from the 4 values returned by getargvalues.
    The first four arguments are (args, varargs, varkw, locals).  The
    next four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings.  The ninth
    argument is an optional function to format the sequence of arguments."""
    def convert(name, locals=locals,
                formatarg=formatarg, formatvalue=formatvalue):
        # Render "name=value" using the frame's locals dictionary.
        return formatarg(name) + formatvalue(locals[name])
    specs = []
    for i in range(len(args)):
        specs.append(strseq(args[i], convert, join))
    if varargs:
        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
    if varkw:
        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
    # str.join replaces the deprecated string.join() module function.
    return '(' + ', '.join(specs) + ')'
# -------------------------------------------------- stack frame extraction
def getframeinfo(frame, context=1):
    """Get information about a frame or traceback object.
    A tuple of five things is returned: the filename, the line number of
    the current line, the function name, a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of context
    to return, which are centered around the current line."""
    if istraceback(frame):
        # A traceback records the line where the exception occurred, which
        # may differ from the frame's current line.
        lineno = frame.tb_lineno
        frame = frame.tb_frame
    else:
        lineno = frame.f_lineno
    if not isframe(frame):
        raise TypeError('arg is not a frame or traceback object')
    filename = getsourcefile(frame) or getfile(frame)
    if context > 0:
        # Center the context window on the current line (0-based start).
        start = lineno - 1 - context//2
        try:
            lines, lnum = findsource(frame)
        except IOError:
            lines = index = None
        else:
            # Clamp the window to the bounds of the file.
            start = max(start, 1)
            start = max(0, min(start, len(lines) - context))
            lines = lines[start:start+context]
            index = lineno - 1 - start
    else:
        lines = index = None
    return (filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
    """Get the line number from a frame object, allowing for optimization."""
    # FrameType.f_lineno is now a descriptor that grovels co_lnotab
    return frame.f_lineno
def getouterframes(frame, context=1):
    """Return records for *frame* and every frame above (calling) it.

    Each record is a tuple of (frame object, filename, line number,
    function name, list of context lines, index within that list)."""
    records = []
    current = frame
    while current is not None:
        records.append((current,) + getframeinfo(current, context))
        current = current.f_back
    return records
def getinnerframes(tb, context=1):
    """Return records for a traceback's frame and all lower frames.

    Each record is a tuple of (frame object, filename, line number,
    function name, list of context lines, index within that list)."""
    records = []
    current = tb
    while current is not None:
        records.append((current.tb_frame,) + getframeinfo(current, context))
        current = current.tb_next
    return records
# Alias: currentframe([depth]) returns the caller's frame via the
# CPython-specific sys._getframe hook.
currentframe = sys._getframe
def stack(context=1):
    """Return a list of records for the stack above the caller's frame."""
    caller = sys._getframe(1)
    return getouterframes(caller, context)
def trace(context=1):
    """Return a list of records for the stack below the current exception."""
    exc_traceback = sys.exc_info()[2]
    return getinnerframes(exc_traceback, context)
|
|
# coding: utf-8
"""
.. _U.S. 1976 Standard Atmosphere: http://ntrs.nasa.gov/search.jsp?R=19770009539
COESA model, based on the `U.S. 1976 Standard Atmosphere`_.
"""
from __future__ import absolute_import, division
import numpy as np
from scipy import constants, interpolate
from skaero.atmosphere import util
# Constants and values : the following parameters are extracted from Notes
# reference (mainly Chap. 1.2.). Naming is consistent. WARNING : Some of these
# values are not exactly consistent with the 2010 CODATA Recommended Values of
# the Fundamental Physical constants that you can find for example in the
# scipy.constants module

# gas constant
Rs = 8.31432  # N m / (mol K), WARNING : different from the 2010 CODATA
# Recommended Values of the Fundamental Physical Constants

# set of geopotential heights from table 2 of Notes reference
H = np.array([0.0, 11.0, 20.0, 32.0, 47.0, 51.0, 71.00, 84.85205]) * 1e3  # m

# set of molecular-scale temperature gradients from table 2 of Notes reference
LM = np.array([-6.5, 0.0, 1.0, 2.8, 0.0, -2.8, -2.0, 0.0]) * 1e-3  # K / m
# kind="zero" gives a piecewise-constant lookup: the gradient of the
# atmospheric layer containing a given altitude.
f_LM = interpolate.interp1d(H, LM, kind="zero")

# K, standard sea-level temperature
T_0 = 288.15  # K

# mean molecular-weight at sea-level
M_0 = 28.9644e-3  # kg / mol

# set of geopotential heights from table 8 of Notes reference
H2 = np.array(
    [
        0.0,
        79005.7,
        79493.3,
        79980.8,
        80468.2,
        80955.7,
        81443.0,
        81930.2,
        82417.3,
        82904.4,
        83391.4,
        83878.4,
        84365.2,
        84852.05,
    ]
)  # m

# set of molecular weight ratios from table 8 of Notes reference
M_o_M0 = np.array(
    [
        1.0,
        1.0,
        0.999996,
        0.999989,
        0.999971,
        0.999941,
        0.999909,
        0.999870,
        0.999829,
        0.999786,
        0.999741,
        0.999694,
        0.999641,
        0.999579,
    ]
)  # -
# Linear interpolation of the molecular-weight ratio M/M0 versus altitude.
f_M_o_M0 = interpolate.interp1d(H2, M_o_M0)

# set of pressures and temperatures (initialization)
P = np.array([constants.atm])  # Pa
TM = np.array([T_0])  # K

# Integrate upward layer by layer to obtain the base temperature and
# pressure at the bottom of each of the 8 standard layers.
for k in range(1, len(H)):
    # from eq. [23] of Notes reference
    TM = np.append(TM, TM[-1] + f_LM(H[k - 1]) * (H[k] - H[k - 1]))
    if f_LM(H[k - 1]) == 0.0:
        # from eq. [33b] of Notes reference (isothermal layer)
        P = np.append(
            P, P[-1] * np.exp(-constants.g * M_0 * (H[k] - H[k - 1]) / (Rs * TM[-2]))
        )
    else:
        # from eq. [33a] of Notes reference (constant non-zero gradient)
        P = np.append(
            P,
            P[-1] * (TM[-2] / (TM[-1])) ** (constants.g * M_0 / (Rs * f_LM(H[k - 1]))),
        )

# Piecewise-constant lookups of the layer-base values for any altitude:
# temperature, pressure, and the base altitude of the containing layer.
f_TM = interpolate.interp1d(H, TM, kind="zero")
f_P = interpolate.interp1d(H, P, kind="zero")
f_H = interpolate.interp1d(H, H, kind="zero")
def table(x, kind="geopotential"):
    """Computes table of COESA atmosphere properties.

    Returns temperature, pressure and density COESA values at the given
    altitude.

    Parameters
    ----------
    x : array_like
        Geopotential or geometric altitude (depending on kind) given in meters.
    kind : str
        Specifies the kind of altitude x ('geopotential' or 'geometric').
        Default is 'geopotential'.

    Returns
    -------
    h : array_like
        Geopotential altitude in meters (x converted if kind='geometric').
    T : array_like
        Temperature in Kelvin.
    p : array_like
        Pressure in Pascal.
    rho : array_like
        Density in kilograms per cubic meter.

    Raises
    ------
    ValueError
        If *kind* is not recognized, or the altitude lies outside the
        model's 0..86 km validity range.

    Note
    ----
    Based on the U.S. 1976 Standard Atmosphere.
    """
    # check the kind of altitude and raise an exception if necessary
    if kind == "geopotential":
        alt = x
    elif kind == "geometric":
        alt = util.geometric_to_geopotential(x)
    else:
        raise ValueError(
            "%s is unsupported: Use either geopotential or " "geometric." % kind
        )
    h = np.asarray(alt)
    # check if altitude is out of bound and raise an exception if necessary
    if (h < H[0]).any() or (h > H[-1]).any():
        raise ValueError(
            "the given altitude x is out of bound, this module is "
            "currently only valid for a geometric altitude between 0. and 86000. m"
        )
    # K, molecule-scale temperature from eq. [23] of Notes reference
    tm = f_TM(h) + f_LM(h) * (h - f_H(h))
    # K, absolute temperature from eq. [22] of Notes reference
    T = tm * f_M_o_M0(h)
    if h.shape:  # if h is not a 0-d array (like a scalar)
        # Pa, initialization of the pressure vector
        p = np.zeros(len(h))
        # points of h for which the molecular-scale temperature gradient is
        # zero
        zero_gradient = f_LM(h) == 0.0
        # points of h for which the molecular-scale temperature gradient is not
        # zero
        not_zero_gradient = f_LM(h) != 0.0
        # Pa, pressure from eq. [33b] of Notes reference
        p[zero_gradient] = f_P(h[zero_gradient]) * np.exp(
            -constants.g
            * M_0
            * (h[zero_gradient] - f_H(h[zero_gradient]))
            / (Rs * f_TM(h[zero_gradient]))
        )
        # Pa, pressure from eq. [33a] of Notes reference
        p[not_zero_gradient] = f_P(h[not_zero_gradient]) * (
            f_TM(h[not_zero_gradient])
            / (
                f_TM(h[not_zero_gradient])
                + f_LM(h[not_zero_gradient])
                * (h[not_zero_gradient] - f_H(h[not_zero_gradient]))
            )
        ) ** (constants.g * M_0 / (Rs * f_LM(h[not_zero_gradient])))
    else:
        # Scalar path: the same two equations without boolean masking.
        if f_LM(h) == 0:
            # Pa, pressure from eq. [33b] of Notes reference
            p = f_P(h) * np.exp(-constants.g * M_0 * (h - f_H(h)) / (Rs * f_TM(h)))
        else:
            # Pa, pressure from eq. [33a] of Notes reference
            p = f_P(h) * (f_TM(h) / (f_TM(h) + f_LM(h) * (h - f_H(h)))) ** (
                constants.g * M_0 / (Rs * f_LM(h))
            )
    rho = p * M_0 / (Rs * tm)  # kg / m^3, mass density
    return alt, T, p, rho
def temperature(x, kind="geopotential"):
    """Return COESA air temperature for a given altitude.

    Parameters
    ----------
    x : array_like
        Geopotential or geometric altitude in meters, depending on *kind*.
    kind : str
        Either 'geopotential' (default) or 'geometric'.

    Returns
    -------
    T : array_like
        Temperature in Kelvin.

    Note
    ----
    Based on the U.S. 1976 Standard Atmosphere.
    """
    _, T, _, _ = table(x, kind)
    return T
def pressure(x, kind="geopotential"):
    """Return COESA absolute pressure for a given altitude.

    Parameters
    ----------
    x : array_like
        Geopotential or geometric altitude in meters, depending on *kind*.
    kind : str
        Either 'geopotential' (default) or 'geometric'.

    Returns
    -------
    p : array_like
        Pressure in Pascal.

    Note
    ----
    Based on the U.S. 1976 Standard Atmosphere.
    """
    _, _, p, _ = table(x, kind)
    return p
def density(x, kind="geopotential"):
    """Return COESA air mass density for a given altitude.

    Parameters
    ----------
    x : array_like
        Geopotential or geometric altitude in meters, depending on *kind*.
    kind : str
        Either 'geopotential' (default) or 'geometric'.

    Returns
    -------
    rho : array_like
        Density in kilograms per cubic meter.

    Note
    ----
    Based on the U.S. 1976 Standard Atmosphere.
    """
    _, _, _, rho = table(x, kind)
    return rho
|
|
"""cdbfunctions.py
Developer: Noelle Todd
Last Updated: August 30, 2014
This module consists of functions which will be called by the user
interface, in order to insert, delete, update, etc. data in the database.
This module is still in its early testing stages; many more functions will
be added or edited in the following weeks.
"""
import sqlalchemy
from sqlalchemy import Column, DateTime, String, Integer, ForeignKey, func
from sqlalchemy import desc
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from datetime import datetime, timedelta, date
from cdbtabledef import Household, Person, Volunteer, Visit
class volunteerData:
    """Value object holding a volunteer's data for inserting into or
    selecting from the database.

    Attributes:
        firstname, lastname -- volunteer's name, stored as str
        color -- display color associated with the volunteer
        phone -- phone number as a string, or None when not provided
        active -- whether the volunteer is currently active
    """

    def __init__(self, firstname, lastname, color, phone=None, active=True):
        self.firstname = str(firstname)
        self.lastname = str(lastname)
        self.color = color
        # Bug fix: str(None) previously stored the literal string "None";
        # keep a missing phone number as None instead.
        self.phone = str(phone) if phone is not None else None
        self.active = active
class newClientData:
    """Value object holding a new client's data for insertion into the
    database.

    Attributes:
        firstname, lastname -- client's name, stored as str
        dob -- date of birth (datetime/date)
        phone -- phone number as a string, or None when not provided
        dateJoined -- when the client joined; defaults to construction time
    """

    def __init__(self, firstname, lastname, dob, phone=None,
                 dateJoined=None):
        self.firstname = str(firstname)
        self.lastname = str(lastname)
        self.dob = dob
        # Bug fix: str(None) previously stored the literal string "None".
        self.phone = str(phone) if phone is not None else None
        # Bug fix: datetime.now() as a default argument is evaluated only
        # once, at import time; resolve it per instantiation instead.
        self.dateJoined = dateJoined if dateJoined is not None else datetime.now()
class oldClientData:
    """Value object for updating an existing client and for returning a
    client's information.

    Attributes:
        id -- the client's database id
        firstname, lastname -- client's name, stored as str
        dob -- date of birth
        age -- whole years, derived from dob via the module-level age()
        phone -- phone number as a string, or None when not provided
        dateJoined -- when the client joined; defaults to construction time
    """

    def __init__(self, id, firstname, lastname, dob, phone=None,
                 dateJoined=None):
        self.id = id
        self.firstname = str(firstname)
        self.lastname = str(lastname)
        self.dob = dob
        self.age = age(dob)
        # Bug fix: str(None) previously stored the literal string "None".
        self.phone = str(phone) if phone is not None else None
        # Bug fix: datetime.now() as a default argument is evaluated only
        # once, at import time; resolve it per instantiation instead.
        self.dateJoined = dateJoined if dateJoined is not None else datetime.now()
class houseData:
    """Container for a household's data, used when inserting a household,
    updating one, or returning household information.
    """

    def __init__(self, street, city='Troy', state='NY', zip='12180',
                 dateVerified=None, apt=None):
        # Address fields first, then verification metadata.
        self.street = street
        self.apt = apt
        self.city = city
        self.state = state
        self.zip = zip
        self.dateVerified = dateVerified
class visitData:
    """Container for the data needed to insert a visit.

    Attributes:
        Vol_ID -- id of the attending volunteer
        visitDate -- when the visit happened; defaults to construction time
        notes -- optional free-form notes about the visit
    """

    def __init__(self, Vol_ID, visitDate=None, notes=None):
        self.Vol_ID = Vol_ID
        # Bug fix: datetime.now() as a default argument is evaluated only
        # once, at import time; resolve it per instantiation instead.
        self.visitDate = visitDate if visitDate is not None else datetime.now()
        self.notes = notes
class visitDataReturn:
    """Row-like record describing one visit, as returned by list_visits."""

    def __init__(self, visitDate, clientname, volname, notes=None,
                 vid=None):
        self.date = visitDate
        self.visitor = clientname
        self.volunteer = volname
        self.visitID = vid
        self.notes = notes
#functions for inserts
def insert_household(s, street, dateverified=None, Apt=None,
                     City='Troy', State='NY', Zip='12180'):
    """Create and commit a new Household row.

    Returns the new Household object; its ``id`` is used when inserting
    household members.
    """
    household = Household(
        street_address=street,
        apt=Apt,
        city=City,
        state=State,
        zip=Zip,
        date_verified=dateverified,
    )
    s.add(household)
    s.commit()
    return household
def insert_person(s, firstname, lastname, dob, newhouse,
                  datejoined=None, phonenum=None):
    """Create and commit a new Person row belonging to household *newhouse*.

    The person's age is derived from *dob* via the module-level age().
    Returns the new Person object.
    """
    # Bug fix: datetime.now() as a default argument is evaluated only once,
    # at import time; resolve it per call instead.
    if datejoined is None:
        datejoined = datetime.now()
    newpers = Person(first_name=firstname, last_name=lastname, DOB=dob,
                     date_joined=datejoined, phone=phonenum)
    newpers.HH_ID = newhouse
    newpers.age = age(dob)
    s.add(newpers)
    s.commit()
    return newpers
def insert_volunteer(s, firstname, lastname, phonenum=None, active=True, color='light blue'):
    """Create and commit a new row in the Volunteer table."""
    volunteer = Volunteer(
        first_name=firstname,
        last_name=lastname,
        phone=phonenum,
        active=active,
        color=color,
    )
    s.add(volunteer)
    s.commit()
def insert_visit(s, Vol_id, pers_id, house_id, date_of_visit=None,
                 notes=None):
    """Create and commit a new row in the Visits table recording a visit
    by person *pers_id* of household *house_id*, attended by volunteer
    *Vol_id*.
    """
    # Bug fix: datetime.now() as a default argument is evaluated only once,
    # at import time; resolve it per call instead.
    if date_of_visit is None:
        date_of_visit = datetime.now()
    new_visit = Visit(I_ID=pers_id, HH_ID=house_id, Vol_ID=Vol_id,
                      date=date_of_visit, visit_notes=notes)
    s.add(new_visit)
    s.commit()
#functions for updating records
def update_household(s, HH_ID, street, city, state, zip, apt=None,
                     date_verified=None):
    """Overwrite the Household row with id *HH_ID* and commit."""
    record = s.query(Household).filter(Household.id == HH_ID).one()
    record.street_address = street
    record.apt = apt
    record.city = city
    record.state = state
    record.zip = zip
    record.date_verified = date_verified
    s.commit()
def update_person(s, I_ID, firstname, lastname, dob, phonenum=None):
    """Overwrite the Person row with id *I_ID* and commit.

    The stored age is recomputed from *dob*.
    """
    record = s.query(Person).filter(Person.id == I_ID).one()
    record.first_name = firstname
    record.last_name = lastname
    record.DOB = dob
    record.age = age(dob)
    record.phone = phonenum
    s.commit()
def update_visit(s, vis_id, date_of_visit=None,
                 notes=None):
    """Overwrite the date and notes of the Visit row with id *vis_id*
    and commit.
    """
    # Bug fix: datetime.now() as a default argument is evaluated only once,
    # at import time; resolve it per call instead.
    if date_of_visit is None:
        date_of_visit = datetime.now()
    visit = s.query(Visit).filter(Visit.id == vis_id).one()
    visit.date = date_of_visit
    visit.visit_notes = notes
    s.commit()
def update_volunteer(s, vol_id, firstname, lastname, phonenum, active, color):
    """Overwrite the Volunteer row with id *vol_id* and commit."""
    record = s.query(Volunteer).filter(Volunteer.id == vol_id).one()
    record.first_name = firstname
    record.last_name = lastname
    record.color = color
    record.phone = phonenum
    record.active = active
    s.commit()
#delete functions
def delete_household(s, HH_ID):
    """Remove the Household row with id *HH_ID* from the database."""
    target = s.query(Household).filter(Household.id == HH_ID).one()
    s.delete(target)
    s.commit()
def delete_person(s, I_ID):
    """Remove the Person row with id *I_ID* from the database."""
    target = s.query(Person).filter(Person.id == I_ID).one()
    s.delete(target)
    s.commit()
def delete_volunteer(s, Vol_ID):
    """Delete a volunteer who has never participated in a visit;
    otherwise "deactivate" the volunteer instead.

    Bug fix: the previous implementation always deleted the row,
    contradicting this documented contract and leaving Visit rows
    pointing at a missing volunteer.
    """
    vol = s.query(Volunteer).filter(Volunteer.id == Vol_ID).one()
    has_visits = s.query(Visit).filter(Visit.Vol_ID == Vol_ID).first() is not None
    if has_visits:
        # Keep the row for referential integrity; just mark inactive.
        vol.active = False
    else:
        s.delete(vol)
    s.commit()
def delete_visit(s, Vi_ID):
    """Remove the Visit row with id *Vi_ID* from the database."""
    target = s.query(Visit).filter(Visit.id == Vi_ID).one()
    s.delete(target)
    s.commit()
#helper functions
def age(dob):
    """Return a person's age in whole years as of today, given *dob*.

    The raw year difference is reduced by one when this year's birthday
    has not yet occurred.
    """
    today = datetime.now()
    had_birthday_this_year = (today.month, today.day) >= (dob.month, dob.day)
    return today.year - dob.year - (0 if had_birthday_this_year else 1)
def list_visits(s, I_ID):
    """Return the visit history for the household of person *I_ID*.

    The result is a list of visitDataReturn objects, most recent first.
    """
    person = s.query(Person).filter(Person.id == I_ID).one()
    household = s.query(Household).filter(Household.id == person.HH_ID).one()
    # All visits for the household, newest first, joined with the visiting
    # person and the attending volunteer.
    history = s.query(Visit, Person, Volunteer).\
        filter(Visit.HH_ID == household.id).\
        filter(Visit.I_ID == Person.id).\
        filter(Visit.Vol_ID == Volunteer.id).\
        order_by(desc(Visit.date)).all()
    results = []
    for row in history:
        client_name = row.Person.first_name + " " + row.Person.last_name
        volunteer_name = row.Volunteer.first_name + " " +\
            row.Volunteer.last_name
        results.append(visitDataReturn(row.Visit.date, client_name,
                                       volunteer_name,
                                       notes=row.Visit.visit_notes,
                                       vid=row.Visit.id))
    return results
def get_age_breakdown(members):
    """Count household members by age group.

    Returns a dict with keys 'infants' (age < 2), 'children' (2-17),
    'adults' (18-64), 'seniors' (65 and over), and 'total'.
    """
    counts = {'infants': 0, 'children': 0, 'adults': 0, 'seniors': 0}
    for member in members:
        if member.age < 2:
            group = 'infants'
        elif member.age < 18:
            group = 'children'
        elif member.age < 65:
            group = 'adults'
        else:
            group = 'seniors'
        counts[group] += 1
    counts['total'] = sum(counts.values())
    return counts
def generate_report(s, duration):
    """Write a CSV report of all visits within the last *duration*.

    The file is named "<month>-<day>-<year>-report.csv" in the current
    directory and contains one row per visit: the visitor's first and
    last name, the household's senior/adult/child/infant counts and
    city, and the visit date.

    Note: this function closes the session *s* when it is done.
    """
    import csv

    # Bug fix: datetime.now() was previously called twice; compute it once
    # and reuse it for both the filename and the cutoff date.
    today = datetime.now()
    filename = "%s-%s-%s-report.csv" % (today.month, today.day, today.year)
    # Earliest date included in the report, stringified because Visit.date
    # is compared as a string in the query below.
    cutoff = str(today - duration)

    # One joined query over Person, Household and Visit.
    select = sqlalchemy.sql.select([Person.first_name, Person.last_name,
                                    Household.seniors, Household.adults,
                                    Household.children, Household.infants,
                                    Household.city, Visit.date])\
        .where(Visit.I_ID == Person.id)\
        .where(Visit.HH_ID == Household.id)\
        .where(Visit.date >= cutoff)
    records = s.execute(select)

    # Bug fix: use a context manager so the file is closed even if a
    # write fails part-way through.
    with open(filename, 'w', newline='') as csvfile:
        outcsv = csv.writer(csvfile)
        outcsv.writerow(records.keys())
        outcsv.writerows(records)
    s.close()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for network offering
"""
#Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from integration.lib.utils import *
from integration.lib.base import *
from integration.lib.common import *
from marvin import remoteSSHClient
import datetime
class Services:
    """Test network offering Services.

    ``Services().services`` is a plain dict of static test data: account
    credentials, offering definitions, rule parameters, and deployment
    settings consumed by the test cases below.
    """

    def __init__(self):
        self.services = {
            "account": {
                "email": "test@test.com",
                "firstname": "Test",
                "lastname": "User",
                "username": "test",
                # Random characters are appended for unique
                # username
                "password": "fr3sca",
            },
            "service_offering": {
                "name": "Tiny Instance",
                "displaytext": "Tiny Instance",
                "cpunumber": 1,
                "cpuspeed": 100,  # in MHz
                "memory": 64,  # In MBs
            },
            # Offering with every supported service on the virtual router.
            "network_offering": {
                "name": 'Network offering-VR services',
                "displaytext": 'Network offering-VR services',
                "guestiptype": 'Isolated',
                "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Firewall,Lb,UserData,StaticNat',
                "traffictype": 'GUEST',
                "availability": 'Optional',
                "serviceProviderList": {
                    "Dhcp": 'VirtualRouter',
                    "Dns": 'VirtualRouter',
                    "SourceNat": 'VirtualRouter',
                    "PortForwarding": 'VirtualRouter',
                    "Vpn": 'VirtualRouter',
                    "Firewall": 'VirtualRouter',
                    "Lb": 'VirtualRouter',
                    "UserData": 'VirtualRouter',
                    "StaticNat": 'VirtualRouter',
                },
            },
            # Same as above but with load balancing provided by Netscaler.
            "network_offering_netscaler": {
                "name": 'Network offering-netscaler',
                "displaytext": 'Network offering-netscaler',
                "guestiptype": 'Isolated',
                "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Firewall,Lb,UserData,StaticNat',
                "traffictype": 'GUEST',
                "availability": 'Optional',
                "serviceProviderList": {
                    "Dhcp": 'VirtualRouter',
                    "Dns": 'VirtualRouter',
                    "SourceNat": 'VirtualRouter',
                    "PortForwarding": 'VirtualRouter',
                    "Vpn": 'VirtualRouter',
                    "Firewall": 'VirtualRouter',
                    "Lb": 'Netscaler',
                    "UserData": 'VirtualRouter',
                    "StaticNat": 'VirtualRouter',
                },
            },
            "network": {
                "name": "Test Network",
                "displaytext": "Test Network",
            },
            "lbrule": {
                "name": "SSH",
                "alg": "leastconn",
                # Algorithm used for load balancing
                "privateport": 22,
                "publicport": 2222,
                "openfirewall": False,
            },
            "lbrule_port_2221": {
                "name": "SSH",
                "alg": "leastconn",
                # Algorithm used for load balancing
                "privateport": 22,
                "publicport": 2221,
                "openfirewall": False,
            },
            "natrule": {
                "privateport": 22,
                "publicport": 22,
                "protocol": "TCP"
            },
            "natrule_port_66": {
                "privateport": 22,
                "publicport": 66,
                "protocol": "TCP"
            },
            "fw_rule": {
                "startport": 1,
                "endport": 6000,
                "cidr": '55.55.0.0/11',
                # Any network (For creating FW rule)
            },
            "virtual_machine": {
                "displayname": "Test VM",
                "username": "root",
                "password": "password",
                "ssh_port": 22,
                "hypervisor": 'XenServer',
                # Hypervisor type should be same as
                # hypervisor type of cluster
                "privateport": 22,
                "publicport": 22,
                "protocol": 'TCP',
            },
            "ostypeid": '9958b10f-9e5d-4ef1-908d-a047372d823b',
            # Cent OS 5.3 (64 bit)
            "sleep": 60,
            "timeout": 10,
            "mode": 'advanced'
        }
class TestNOVirtualRouter(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestNOVirtualRouter,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostypeid"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [
cls.service_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = []
return
def tearDown(self):
try:
self.account.delete(self.apiclient)
interval = list_configurations(
self.apiclient,
name='account.cleanup.interval'
)
# Sleep to ensure that all resources are deleted
time.sleep(int(interval[0].value) * 2)
#Clean up, terminate the created network offerings
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
    def test_01_network_off_without_conserve_mode(self):
        """Test Network offering with Conserve mode off and VR - All services
        """
        # Validate the following
        # 1. Create a Network from the above network offering and deploy a VM.
        # 2. On source NAT ipaddress, we should NOT be allowed to add a
        #    LB rules
        # 3. On source NAT ipaddress, we should be NOT be allowed to add
        #    PF rule
        # 4. On an ipaddress that has PF rules, we should NOT be allowed to
        #    add a LB rules.
        # 5. On an ipaddress that has Lb rules, we should NOT allow PF rules
        #    to be programmed.
        # 6. We should be allowed to program multiple PF rules on the same Ip
        #    address on different public ports.
        # 7. We should be allowed to program multiple LB rules on the same Ip
        #    address for different public port ranges.
        # 8. On source NAT ipaddress, we should be allowed to Enable VPN.
        # 9. On SOurce NAT ipaddress, we will be allowed to add firewall rule

        # Create a network offering with all virtual router services enabled
        self.debug(
            "Creating n/w offering with all services in VR & conserve mode:off"
        )
        self.network_offering = NetworkOffering.create(
            self.api_client,
            self.services["network_offering"],
            conservemode=False
        )
        # Register for teardown so the offering is removed after the test.
        self.cleanup.append(self.network_offering)

        self.debug("Created n/w offering with ID: %s" %
                   self.network_offering.id)
        # Enable Network offering
        self.network_offering.update(self.apiclient, state='Enabled')

        # Creating network using the network offering created
        self.debug("Creating network with network offering: %s" %
                   self.network_offering.id)
        self.network = Network.create(
            self.apiclient,
            self.services["network"],
            accountid=self.account.account.name,
            domainid=self.account.account.domainid,
            networkofferingid=self.network_offering.id,
            zoneid=self.zone.id
        )
        self.debug("Created network with ID: %s" % self.network.id)

        self.debug("Deploying VM in account: %s" % self.account.account.name)

        # Spawn an instance in that network
        virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.account.name,
            domainid=self.account.account.domainid,
            serviceofferingid=self.service_offering.id,
            networkids=[str(self.network.id)]
        )
        self.debug("Deployed VM in network: %s" % self.network.id)

        # Deploying a VM implicitly acquires a source NAT IP; look it up.
        src_nat_list = PublicIPAddress.list(
            self.apiclient,
            associatednetworkid=self.network.id,
            account=self.account.account.name,
            domainid=self.account.account.domainid,
            listall=True,
            issourcenat=True,
        )
        self.assertEqual(
            isinstance(src_nat_list, list),
            True,
            "List Public IP should return a valid source NAT"
        )
        self.assertNotEqual(
            len(src_nat_list),
            0,
            "Length of response from listPublicIp should not be 0"
        )

        src_nat = src_nat_list[0]

        self.debug("Trying to create LB rule on source NAT IP: %s" %
                   src_nat.ipaddress)
        # Create Load Balancer rule with source NAT
        # (conserve mode off: LB on the source NAT IP must be rejected)
        with self.assertRaises(Exception):
            LoadBalancerRule.create(
                self.apiclient,
                self.services["lbrule"],
                ipaddressid=src_nat.id,
                accountid=self.account.account.name
            )

        self.debug(
            "Trying to create a port forwarding rule in source NAT: %s" %
            src_nat.ipaddress)
        # Create NAT rule
        # (conserve mode off: PF on the source NAT IP must be rejected)
        with self.assertRaises(Exception):
            NATRule.create(
                self.apiclient,
                virtual_machine,
                self.services["natrule"],
                ipaddressid=src_nat.id
            )

        self.debug("Associating public IP for network: %s" % self.network.id)
        ip_with_nat_rule = PublicIPAddress.create(
            self.apiclient,
            accountid=self.account.account.name,
            zoneid=self.zone.id,
            domainid=self.account.account.domainid,
            networkid=self.network.id
        )
        self.debug("Associated %s with network %s" % (
            ip_with_nat_rule.ipaddress.ipaddress,
            self.network.id
        ))
        self.debug("Creating PF rule for IP address: %s" %
                   ip_with_nat_rule.ipaddress.ipaddress)
        NATRule.create(
            self.apiclient,
            virtual_machine,
            self.services["natrule"],
            ipaddressid=ip_with_nat_rule.ipaddress.id
        )

        self.debug("Trying to create LB rule on IP with NAT: %s" %
                   ip_with_nat_rule.ipaddress.ipaddress)

        # Create Load Balancer rule on IP already having NAT rule
        # (must be rejected when conserve mode is off)
        with self.assertRaises(Exception):
            LoadBalancerRule.create(
                self.apiclient,
                self.services["lbrule"],
                ipaddressid=ip_with_nat_rule.ipaddress.id,
                accountid=self.account.account.name
            )

        # Multiple PF rules on the same IP but different public ports are OK.
        self.debug("Creating PF rule with public port: 66")

        nat_rule = NATRule.create(
            self.apiclient,
            virtual_machine,
            self.services["natrule_port_66"],
            ipaddressid=ip_with_nat_rule.ipaddress.id
        )

        # Check if NAT rule created successfully
        nat_rules = NATRule.list(
            self.apiclient,
            id=nat_rule.id
        )

        self.assertEqual(
            isinstance(nat_rules, list),
            True,
            "List NAT rules should return valid list"
        )

        self.debug("Associating public IP for network: %s" % self.network.id)
        ip_with_lb_rule = PublicIPAddress.create(
            self.apiclient,
            accountid=self.account.account.name,
            zoneid=self.zone.id,
            domainid=self.account.account.domainid,
            networkid=self.network.id
        )
        self.debug("Associated %s with network %s" % (
            ip_with_lb_rule.ipaddress.ipaddress,
            self.network.id
        ))
        self.debug("Creating LB rule for IP address: %s" %
                   ip_with_lb_rule.ipaddress.ipaddress)

        LoadBalancerRule.create(
            self.apiclient,
            self.services["lbrule"],
            ipaddressid=ip_with_lb_rule.ipaddress.id,
            accountid=self.account.account.name
        )
        self.debug("Trying to create PF rule on IP with LB rule: %s" %
                   ip_with_nat_rule.ipaddress.ipaddress)

        # PF on an IP that already has an LB rule must be rejected.
        with self.assertRaises(Exception):
            NATRule.create(
                self.apiclient,
                virtual_machine,
                self.services["natrule"],
                ipaddressid=ip_with_lb_rule.ipaddress.id
            )

        # Multiple LB rules on the same IP but different public ports are OK.
        self.debug("Creating LB rule with public port: 2221")
        lb_rule = LoadBalancerRule.create(
            self.apiclient,
            self.services["lbrule_port_2221"],
            ipaddressid=ip_with_lb_rule.ipaddress.id,
            accountid=self.account.account.name
        )

        # Check if NAT rule created successfully
        lb_rules = LoadBalancerRule.list(
            self.apiclient,
            id=lb_rule.id
        )

        self.assertEqual(
            isinstance(lb_rules, list),
            True,
            "List LB rules should return valid list"
        )

        self.debug("Creating firewall rule on source NAT: %s" %
                   src_nat.ipaddress)
        # Create Firewall rule on source NAT
        fw_rule = FireWallRule.create(
            self.apiclient,
            ipaddressid=src_nat.id,
            protocol='TCP',
            cidrlist=[self.services["fw_rule"]["cidr"]],
            startport=self.services["fw_rule"]["startport"],
            endport=self.services["fw_rule"]["endport"]
        )
        self.debug("Created firewall rule: %s" % fw_rule.id)

        fw_rules = FireWallRule.list(
            self.apiclient,
            id=fw_rule.id
        )
        self.assertEqual(
            isinstance(fw_rules, list),
            True,
            "List fw rules should return a valid firewall rules"
        )

        self.assertNotEqual(
            len(fw_rules),
            0,
            "Length of fw rules response should not be zero"
        )
        return
def test_02_network_off_with_conserve_mode(self):
"""Test Network offering with Conserve mode ON and VR - All services
"""
# Validate the following
# 1. Create a Network from the above network offering and deploy a VM.
# 2. On source NAT ipaddress, we should be allowed to add a LB rules
# 3. On source NAT ipaddress, we should be allowed to add a PF rules
# 4. On source NAT ipaddress, we should be allowed to add a Firewall
# rules
# 5. On an ipaddress that has Lb rules, we should be allowed to
# program PF rules.
# 6. We should be allowed to program multiple PF rules on the same Ip
# address on different public ports.
# 7. We should be allowed to program multiple LB rules on the same Ip
# address for different public port ranges.
# 8. On source NAT ipaddress, we should be allowed to Enable VPN
# access.
# Create a network offering with all virtual router services enabled
self.debug(
"Creating n/w offering with all services in VR & conserve mode:off"
)
self.network_offering = NetworkOffering.create(
self.api_client,
self.services["network_offering"],
conservemode=True
)
self.cleanup.append(self.network_offering)
self.debug("Created n/w offering with ID: %s" %
self.network_offering.id)
# Enable Network offering
self.network_offering.update(self.apiclient, state='Enabled')
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
self.network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.account.name,
domainid=self.account.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % self.network.id)
self.debug("Deploying VM in account: %s" % self.account.account.name)
# Spawn an instance in that network
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.account.name,
domainid=self.account.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(self.network.id)]
)
self.debug("Deployed VM in network: %s" % self.network.id)
src_nat_list = PublicIPAddress.list(
self.apiclient,
associatednetworkid=self.network.id,
account=self.account.account.name,
domainid=self.account.account.domainid,
listall=True,
issourcenat=True,
)
self.assertEqual(
isinstance(src_nat_list, list),
True,
"List Public IP should return a valid source NAT"
)
self.assertNotEqual(
len(src_nat_list),
0,
"Length of response from listPublicIp should not be 0"
)
src_nat = src_nat_list[0]
self.debug("Trying to create LB rule on source NAT IP: %s" %
src_nat.ipaddress)
# Create Load Balancer rule with source NAT
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
ipaddressid=src_nat.id,
accountid=self.account.account.name
)
self.debug("Created LB rule on source NAT: %s" % src_nat.ipaddress)
lb_rules = LoadBalancerRule.list(
self.apiclient,
id=lb_rule.id
)
self.assertEqual(
isinstance(lb_rules, list),
True,
"List lb rules should return a valid lb rules"
)
self.assertNotEqual(
len(lb_rules),
0,
"Length of response from listLbRules should not be 0"
)
self.debug(
"Trying to create a port forwarding rule in source NAT: %s" %
src_nat.ipaddress)
#Create NAT rule
nat_rule = NATRule.create(
self.apiclient,
virtual_machine,
self.services["natrule"],
ipaddressid=src_nat.id
)
self.debug("Created PF rule on source NAT: %s" % src_nat.ipaddress)
nat_rules = NATRule.list(
self.apiclient,
id=nat_rule.id
)
self.assertEqual(
isinstance(nat_rules, list),
True,
"List NAT should return a valid port forwarding rules"
)
self.assertNotEqual(
len(nat_rules),
0,
"Length of response from listLbRules should not be 0"
)
self.debug("Creating firewall rule on source NAT: %s" %
src_nat.ipaddress)
#Create Firewall rule on source NAT
fw_rule = FireWallRule.create(
self.apiclient,
ipaddressid=src_nat.id,
protocol='TCP',
cidrlist=[self.services["fw_rule"]["cidr"]],
startport=self.services["fw_rule"]["startport"],
endport=self.services["fw_rule"]["endport"]
)
self.debug("Created firewall rule: %s" % fw_rule.id)
fw_rules = FireWallRule.list(
self.apiclient,
id=fw_rule.id
)
self.assertEqual(
isinstance(fw_rules, list),
True,
"List fw rules should return a valid firewall rules"
)
self.assertNotEqual(
len(fw_rules),
0,
"Length of fw rules response should not be zero"
)
self.debug("Associating public IP for network: %s" % self.network.id)
public_ip = PublicIPAddress.create(
self.apiclient,
accountid=self.account.account.name,
zoneid=self.zone.id,
domainid=self.account.account.domainid,
networkid=self.network.id
)
self.debug("Associated %s with network %s" % (
public_ip.ipaddress.ipaddress,
self.network.id
))
self.debug("Creating PF rule for IP address: %s" %
public_ip.ipaddress.ipaddress)
NATRule.create(
self.apiclient,
virtual_machine,
self.services["natrule"],
ipaddressid=public_ip.ipaddress.id
)
self.debug("Trying to create LB rule on IP with NAT: %s" %
public_ip.ipaddress.ipaddress)
# Create Load Balancer rule on IP already having NAT rule
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
ipaddressid=public_ip.ipaddress.id,
accountid=self.account.account.name
)
self.debug("Creating PF rule with public port: 66")
nat_rule = NATRule.create(
self.apiclient,
virtual_machine,
self.services["natrule_port_66"],
ipaddressid=public_ip.ipaddress.id
)
# Check if NAT rule created successfully
nat_rules = NATRule.list(
self.apiclient,
id=nat_rule.id
)
self.assertEqual(
isinstance(nat_rules, list),
True,
"List NAT rules should return valid list"
)
self.debug("Creating LB rule with public port: 2221")
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule_port_2221"],
ipaddressid=public_ip.ipaddress.id,
accountid=self.account.account.name
)
# Check if NAT rule created successfully
lb_rules = LoadBalancerRule.list(
self.apiclient,
id=lb_rule.id
)
self.assertEqual(
isinstance(lb_rules, list),
True,
"List LB rules should return valid list"
)
# User should be able to enable VPN on source NAT
self.debug("Created VPN with source NAT IP: %s" % src_nat.ipaddress)
# Assign VPN to source NAT
vpn = Vpn.create(
self.apiclient,
src_nat.id,
account=self.account.account.name,
domainid=self.account.account.domainid
)
vpns = Vpn.list(
self.apiclient,
publicipid=src_nat.id,
listall=True,
)
self.assertEqual(
isinstance(vpns, list),
True,
"List VPNs should return a valid VPN list"
)
self.assertNotEqual(
len(vpns),
0,
"Length of list VPN response should not be zero"
)
return
class TestNOWithNetscaler(cloudstackTestCase):
    @classmethod
    def setUpClass(cls):
        """One-time class setup.

        Discovers the zone, domain and template to run against, pins the
        VM deployment data to them, and creates the service offering
        shared by every test in this class.  Resources listed in
        ``cls._cleanup`` are destroyed once in ``tearDownClass``.
        """
        cls.api_client = super(
            TestNOWithNetscaler,
            cls
        ).getClsTestClient().getApiClient()
        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client, cls.services)
        cls.zone = get_zone(cls.api_client, cls.services)
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostypeid"]
        )
        # Pin VM deployment data to the discovered zone/template so
        # VirtualMachine.create() below deploys in the right place.
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"]
        )
        cls._cleanup = [
            cls.service_offering,
        ]
        return
    @classmethod
    def tearDownClass(cls):
        """Destroy the class-level resources created in ``setUpClass``."""
        try:
            #Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            # Surface cleanup failures loudly so leaked resources are noticed.
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def setUp(self):
        """Per-test setup: fresh API/DB clients and a dedicated admin account."""
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        # Each test runs in its own account so resources are isolated and
        # can be removed wholesale in tearDown().
        self.account = Account.create(
            self.apiclient,
            self.services["account"],
            admin=True,
            domainid=self.domain.id
        )
        self.cleanup = []
        return
    def tearDown(self):
        """Delete the test account and everything created by the test."""
        try:
            self.account.delete(self.apiclient)
            # Account deletion is asynchronous; wait out two cleanup
            # intervals so dependent resources are actually gone before
            # the network offerings in self.cleanup are removed.
            interval = list_configurations(
                self.apiclient,
                name='account.cleanup.interval'
            )
            # Sleep to ensure that all resources are deleted
            time.sleep(int(interval[0].value) * 2)
            #Clean up, terminate the created network offerings
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def test_01_network_off_without_conserve_mode(self):
        """Test Nw off with Conserve mode off, VR-All services, LB-netscaler

        With conserve mode OFF, a public IP may carry only one service,
        so every attempt to stack a second service on an IP must fail.
        """
        # Validate the following
        # 1. Create a Network from the above network offering and deploy a VM.
        # 2. On source NAT ipaddress, we should NOT be allowed to add LB rule
        # 3. On source NAT ipaddress, we should NOT be allowed to add PF rule
        # 4. On an ipaddress that has PF rules, we should NOT be allowed to
        #    add a LB rules.
        # 5. On an ipaddress that has Lb rules , we should NOT allow firewall
        #    rules to be programmed.
        # 6. On an ipaddress that has Lb rules , we should NOT allow PF rules
        #    to be programmed.
        # 7. We should be allowed to program multiple PF rules on the same Ip
        #    address on different public ports.
        # 8. We should be allowed to program multiple LB rules on the same Ip
        #    address for different public port ranges.
        # 9. On source NAT ipaddress, we should NOT be allowed to Enable VPN.
        # Create a network offering with all virtual router services enabled
        # NOTE(review): debug text says "conserve mode:ON" but this test
        # creates the offering with conservemode=False — message is stale.
        self.debug(
            "Creating n/w offering with all services in VR & conserve mode:ON"
        )
        self.network_offering = NetworkOffering.create(
            self.api_client,
            self.services["network_offering_netscaler"],
            conservemode=False
        )
        self.cleanup.append(self.network_offering)
        self.debug("Created n/w offering with ID: %s" %
                   self.network_offering.id)
        # Enable Network offering
        self.network_offering.update(self.apiclient, state='Enabled')
        # Creating network using the network offering created
        self.debug("Creating network with network offering: %s" %
                   self.network_offering.id)
        self.network = Network.create(
            self.apiclient,
            self.services["network"],
            accountid=self.account.account.name,
            domainid=self.account.account.domainid,
            networkofferingid=self.network_offering.id,
            zoneid=self.zone.id
        )
        self.debug("Created network with ID: %s" % self.network.id)
        self.debug("Deploying VM in account: %s" % self.account.account.name)
        # Spawn an instance in that network
        virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.account.name,
            domainid=self.account.account.domainid,
            serviceofferingid=self.service_offering.id,
            networkids=[str(self.network.id)]
        )
        self.debug("Deployed VM in network: %s" % self.network.id)
        # Deploying the first VM implicitly allocates the source NAT IP.
        src_nat_list = PublicIPAddress.list(
            self.apiclient,
            associatednetworkid=self.network.id,
            account=self.account.account.name,
            domainid=self.account.account.domainid,
            listall=True,
            issourcenat=True,
        )
        self.assertEqual(
            isinstance(src_nat_list, list),
            True,
            "List Public IP should return a valid source NAT"
        )
        self.assertNotEqual(
            len(src_nat_list),
            0,
            "Length of response from listPublicIp should not be 0"
        )
        src_nat = src_nat_list[0]
        self.debug("Trying to create LB rule on source NAT IP: %s" %
                   src_nat.ipaddress)
        # Create Load Balancer rule with source NAT
        # (step 2: must be rejected with conserve mode off)
        with self.assertRaises(Exception):
            LoadBalancerRule.create(
                self.apiclient,
                self.services["lbrule"],
                ipaddressid=src_nat.id,
                accountid=self.account.account.name
            )
        self.debug(
            "Trying to create a port forwarding rule in source NAT: %s" %
            src_nat.ipaddress)
        #Create NAT rule (step 3: must be rejected with conserve mode off)
        with self.assertRaises(Exception):
            NATRule.create(
                self.apiclient,
                virtual_machine,
                self.services["natrule"],
                ipaddressid=src_nat.id
            )
        self.debug("Creating firewall rule on source NAT: %s" %
                   src_nat.ipaddress)
        #Create Firewall rule on source NAT
        fw_rule = FireWallRule.create(
            self.apiclient,
            ipaddressid=src_nat.id,
            protocol='TCP',
            cidrlist=[self.services["fw_rule"]["cidr"]],
            startport=self.services["fw_rule"]["startport"],
            endport=self.services["fw_rule"]["endport"]
        )
        self.debug("Created firewall rule: %s" % fw_rule.id)
        fw_rules = FireWallRule.list(
            self.apiclient,
            id=fw_rule.id
        )
        self.assertEqual(
            isinstance(fw_rules, list),
            True,
            "List fw rules should return a valid firewall rules"
        )
        self.assertNotEqual(
            len(fw_rules),
            0,
            "Length of fw rules response should not be zero"
        )
        self.debug("Associating public IP for network: %s" % self.network.id)
        ip_with_nat_rule = PublicIPAddress.create(
            self.apiclient,
            accountid=self.account.account.name,
            zoneid=self.zone.id,
            domainid=self.account.account.domainid,
            networkid=self.network.id
        )
        self.debug("Associated %s with network %s" % (
            ip_with_nat_rule.ipaddress.ipaddress,
            self.network.id
        ))
        self.debug("Creating PF rule for IP address: %s" %
                   ip_with_nat_rule.ipaddress.ipaddress)
        NATRule.create(
            self.apiclient,
            virtual_machine,
            self.services["natrule"],
            ipaddressid=ip_with_nat_rule.ipaddress.id
        )
        self.debug("Trying to create LB rule on IP with NAT: %s" %
                   ip_with_nat_rule.ipaddress.ipaddress)
        # Create Load Balancer rule on IP already having NAT rule
        # (step 4: must be rejected)
        with self.assertRaises(Exception):
            LoadBalancerRule.create(
                self.apiclient,
                self.services["lbrule"],
                ipaddressid=ip_with_nat_rule.ipaddress.id,
                accountid=self.account.account.name
            )
        # Step 7: a second PF rule on a different public port is allowed.
        self.debug("Creating PF rule with public port: 66")
        nat_rule = NATRule.create(
            self.apiclient,
            virtual_machine,
            self.services["natrule_port_66"],
            ipaddressid=ip_with_nat_rule.ipaddress.id
        )
        # Check if NAT rule created successfully
        nat_rules = NATRule.list(
            self.apiclient,
            id=nat_rule.id
        )
        self.assertEqual(
            isinstance(nat_rules, list),
            True,
            "List NAT rules should return valid list"
        )
        self.debug("Associating public IP for network: %s" % self.network.id)
        ip_with_lb_rule = PublicIPAddress.create(
            self.apiclient,
            accountid=self.account.account.name,
            zoneid=self.zone.id,
            domainid=self.account.account.domainid,
            networkid=self.network.id
        )
        self.debug("Associated %s with network %s" % (
            ip_with_lb_rule.ipaddress.ipaddress,
            self.network.id
        ))
        self.debug("Creating LB rule for IP address: %s" %
                   ip_with_lb_rule.ipaddress.ipaddress)
        LoadBalancerRule.create(
            self.apiclient,
            self.services["lbrule"],
            ipaddressid=ip_with_lb_rule.ipaddress.id,
            accountid=self.account.account.name,
            networkid=self.network.id
        )
        self.debug("Trying to create PF rule on IP with LB rule: %s" %
                   ip_with_nat_rule.ipaddress.ipaddress)
        # Step 6: PF rule on an IP that already has an LB rule must fail.
        with self.assertRaises(Exception):
            NATRule.create(
                self.apiclient,
                virtual_machine,
                self.services["natrule"],
                ipaddressid=ip_with_lb_rule.ipaddress.id
            )
        self.debug("Trying to create FW rule on IP with LB rule")
        # Step 5: firewall rule must be rejected as well.
        # NOTE(review): this targets src_nat.id (which already has a FW
        # rule) rather than ip_with_lb_rule — presumably intentional since
        # a duplicate FW rule also raises; confirm against test intent.
        with self.assertRaises(Exception):
            FireWallRule.create(
                self.apiclient,
                ipaddressid=src_nat.id,
                protocol='TCP',
                cidrlist=[self.services["fw_rule"]["cidr"]],
                startport=self.services["fw_rule"]["startport"],
                endport=self.services["fw_rule"]["endport"]
            )
        # Step 8: a second LB rule on a different public port is allowed.
        self.debug("Creating LB rule with public port: 2221")
        lb_rule = LoadBalancerRule.create(
            self.apiclient,
            self.services["lbrule_port_2221"],
            ipaddressid=ip_with_lb_rule.ipaddress.id,
            accountid=self.account.account.name,
            networkid=self.network.id
        )
        # Check if NAT rule created successfully
        lb_rules = LoadBalancerRule.list(
            self.apiclient,
            id=lb_rule.id
        )
        self.assertEqual(
            isinstance(lb_rules, list),
            True,
            "List LB rules should return valid list"
        )
        # Step 9: VPN must NOT be allowed on the source NAT IP with
        # conserve mode off (the original comment claimed the opposite).
        self.debug("Enabling VPN on source NAT IP: %s" % src_nat.ipaddress)
        # Assign VPN to source NAT
        with self.assertRaises(Exception):
            Vpn.create(
                self.apiclient,
                src_nat.id,
                account=self.account.account.name,
                domainid=self.account.account.domainid
            )
        return
    def test_02_network_off_with_conserve_mode_netscaler(self):
        """Test NW off with Conserve mode ON, LB-Netscaler and VR-All services

        With conserve mode ON the source NAT IP may also carry PF,
        firewall and VPN services, but LB (on Netscaler) still cannot
        share an IP with NAT/PF/firewall services.
        """
        # Validate the following
        # 1. Create a Network from the above network offering and deploy a VM.
        # 2. On source NAT ipaddress, we should NOT be allowed to add LB rule
        # 3. On source NAT ipaddress, we should be allowed to add PF rule and
        #    Firewall rules.
        # 4. On an ipaddress that has PF rules, we should NOT be allowed to
        #    add a LB rules.
        # 5. On an ipaddress that has Lb rules , we should NOT allow firewall
        #    rules to be programmed.
        # 6. On an ipaddress that has Lb rules , we should NOT allow PF rules
        #    to be programmed.
        # 7. We should be allowed to program multiple PF rules on the same Ip
        #    address on different public ports.
        # 8. We should be allowed to program multiple LB rules on the same Ip
        #    address for different public port ranges.
        # 9. On source NAT ipaddress, we should be allowed to Enable VPN.
        # Create a network offering with all virtual router services enabled
        self.debug(
            "Creating n/w offering with all services in VR & conserve mode:ON"
        )
        self.network_offering = NetworkOffering.create(
            self.api_client,
            self.services["network_offering_netscaler"],
            conservemode=True
        )
        self.cleanup.append(self.network_offering)
        self.debug("Created n/w offering with ID: %s" %
                   self.network_offering.id)
        # Enable Network offering
        self.network_offering.update(self.apiclient, state='Enabled')
        # Creating network using the network offering created
        self.debug("Creating network with network offering: %s" %
                   self.network_offering.id)
        self.network = Network.create(
            self.apiclient,
            self.services["network"],
            accountid=self.account.account.name,
            domainid=self.account.account.domainid,
            networkofferingid=self.network_offering.id,
            zoneid=self.zone.id
        )
        self.debug("Created network with ID: %s" % self.network.id)
        self.debug("Deploying VM in account: %s" % self.account.account.name)
        # Spawn an instance in that network
        virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.account.name,
            domainid=self.account.account.domainid,
            serviceofferingid=self.service_offering.id,
            networkids=[str(self.network.id)]
        )
        self.debug("Deployed VM in network: %s" % self.network.id)
        # Deploying the first VM implicitly allocates the source NAT IP.
        src_nat_list = PublicIPAddress.list(
            self.apiclient,
            associatednetworkid=self.network.id,
            account=self.account.account.name,
            domainid=self.account.account.domainid,
            listall=True,
            issourcenat=True,
        )
        self.assertEqual(
            isinstance(src_nat_list, list),
            True,
            "List Public IP should return a valid source NAT"
        )
        self.assertNotEqual(
            len(src_nat_list),
            0,
            "Length of response from listPublicIp should not be 0"
        )
        src_nat = src_nat_list[0]
        self.debug("Trying to create LB rule on source NAT IP: %s" %
                   src_nat.ipaddress)
        # Create Load Balancer rule with source NAT
        # (step 2: LB on source NAT is rejected even in conserve mode)
        with self.assertRaises(Exception):
            LoadBalancerRule.create(
                self.apiclient,
                self.services["lbrule"],
                ipaddressid=src_nat.id,
                accountid=self.account.account.name
            )
        self.debug(
            "Trying to create a port forwarding rule in source NAT: %s" %
            src_nat.ipaddress)
        #Create NAT rule (step 3: allowed in conserve mode)
        nat_rule = NATRule.create(
            self.apiclient,
            virtual_machine,
            self.services["natrule"],
            ipaddressid=src_nat.id
        )
        self.debug("Created PF rule on source NAT: %s" % src_nat.ipaddress)
        nat_rules = NATRule.list(
            self.apiclient,
            id=nat_rule.id
        )
        self.assertEqual(
            isinstance(nat_rules, list),
            True,
            "List NAT should return a valid port forwarding rules"
        )
        # NOTE(review): message says "listLbRules" but this checks the
        # NAT-rule listing — copy/paste artifact in the assertion text.
        self.assertNotEqual(
            len(nat_rules),
            0,
            "Length of response from listLbRules should not be 0"
        )
        self.debug("Creating firewall rule on source NAT: %s" %
                   src_nat.ipaddress)
        #Create Firewall rule on source NAT (step 3: allowed)
        fw_rule = FireWallRule.create(
            self.apiclient,
            ipaddressid=src_nat.id,
            protocol='TCP',
            cidrlist=[self.services["fw_rule"]["cidr"]],
            startport=self.services["fw_rule"]["startport"],
            endport=self.services["fw_rule"]["endport"]
        )
        self.debug("Created firewall rule: %s" % fw_rule.id)
        fw_rules = FireWallRule.list(
            self.apiclient,
            id=fw_rule.id
        )
        self.assertEqual(
            isinstance(fw_rules, list),
            True,
            "List fw rules should return a valid firewall rules"
        )
        self.assertNotEqual(
            len(fw_rules),
            0,
            "Length of fw rules response should not be zero"
        )
        self.debug("Associating public IP for network: %s" % self.network.id)
        ip_with_nat_rule = PublicIPAddress.create(
            self.apiclient,
            accountid=self.account.account.name,
            zoneid=self.zone.id,
            domainid=self.account.account.domainid,
            networkid=self.network.id
        )
        self.debug("Associated %s with network %s" % (
            ip_with_nat_rule.ipaddress.ipaddress,
            self.network.id
        ))
        self.debug("Creating PF rule for IP address: %s" %
                   ip_with_nat_rule.ipaddress.ipaddress)
        NATRule.create(
            self.apiclient,
            virtual_machine,
            self.services["natrule"],
            ipaddressid=ip_with_nat_rule.ipaddress.id
        )
        self.debug("Trying to create LB rule on IP with NAT: %s" %
                   ip_with_nat_rule.ipaddress.ipaddress)
        # Create Load Balancer rule on IP already having NAT rule
        # (step 4: must be rejected)
        with self.assertRaises(Exception):
            LoadBalancerRule.create(
                self.apiclient,
                self.services["lbrule"],
                ipaddressid=ip_with_nat_rule.ipaddress.id,
                accountid=self.account.account.name
            )
        # Step 7: second PF rule on a different public port is allowed.
        self.debug("Creating PF rule with public port: 66")
        nat_rule = NATRule.create(
            self.apiclient,
            virtual_machine,
            self.services["natrule_port_66"],
            ipaddressid=ip_with_nat_rule.ipaddress.id
        )
        # Check if NAT rule created successfully
        nat_rules = NATRule.list(
            self.apiclient,
            id=nat_rule.id
        )
        self.assertEqual(
            isinstance(nat_rules, list),
            True,
            "List NAT rules should return valid list"
        )
        self.debug("Associating public IP for network: %s" % self.network.id)
        ip_with_lb_rule = PublicIPAddress.create(
            self.apiclient,
            accountid=self.account.account.name,
            zoneid=self.zone.id,
            domainid=self.account.account.domainid,
            networkid=self.network.id
        )
        self.debug("Associated %s with network %s" % (
            ip_with_lb_rule.ipaddress.ipaddress,
            self.network.id
        ))
        self.debug("Creating LB rule for IP address: %s" %
                   ip_with_lb_rule.ipaddress.ipaddress)
        LoadBalancerRule.create(
            self.apiclient,
            self.services["lbrule"],
            ipaddressid=ip_with_lb_rule.ipaddress.id,
            accountid=self.account.account.name,
            networkid=self.network.id
        )
        self.debug("Trying to create PF rule on IP with LB rule: %s" %
                   ip_with_nat_rule.ipaddress.ipaddress)
        # Step 6: PF rule on an IP with an LB rule must fail.
        with self.assertRaises(Exception):
            NATRule.create(
                self.apiclient,
                virtual_machine,
                self.services["natrule"],
                ipaddressid=ip_with_lb_rule.ipaddress.id
            )
        self.debug("Trying to create FW rule on IP with LB rule")
        # Step 5: firewall rule must be rejected.
        # NOTE(review): targets src_nat.id (which already carries a FW
        # rule), not ip_with_lb_rule — confirm against test intent.
        with self.assertRaises(Exception):
            FireWallRule.create(
                self.apiclient,
                ipaddressid=src_nat.id,
                protocol='TCP',
                cidrlist=[self.services["fw_rule"]["cidr"]],
                startport=self.services["fw_rule"]["startport"],
                endport=self.services["fw_rule"]["endport"]
            )
        # Step 8: second LB rule on a different public port is allowed.
        self.debug("Creating LB rule with public port: 2221")
        lb_rule = LoadBalancerRule.create(
            self.apiclient,
            self.services["lbrule_port_2221"],
            ipaddressid=ip_with_lb_rule.ipaddress.id,
            accountid=self.account.account.name,
            networkid=self.network.id
        )
        # Check if NAT rule created successfully
        lb_rules = LoadBalancerRule.list(
            self.apiclient,
            id=lb_rule.id
        )
        self.assertEqual(
            isinstance(lb_rules, list),
            True,
            "List LB rules should return valid list"
        )
        # User should be able to enable VPN on source NAT (step 9)
        self.debug("Created VPN with source NAT IP: %s" % src_nat.ipaddress)
        # Assign VPN to source NAT
        vpn = Vpn.create(
            self.apiclient,
            src_nat.id,
            account=self.account.account.name,
            domainid=self.account.account.domainid
        )
        vpns = Vpn.list(
            self.apiclient,
            publicipid=src_nat.id,
            listall=True,
        )
        self.assertEqual(
            isinstance(vpns, list),
            True,
            "List VPNs should return a valid VPN list"
        )
        # NOTE(review): "VNP" typo in the assertion message below is a
        # runtime string and is left untouched here.
        self.assertNotEqual(
            len(vpns),
            0,
            "Length of list VNP response should not be zero"
        )
        return
class TestNetworkUpgrade(cloudstackTestCase):
    @classmethod
    def setUpClass(cls):
        """One-time class setup.

        Discovers zone/domain/template, creates the shared service
        offering, and creates + enables the conserve-mode VR network
        offering that every upgrade test starts from.
        """
        cls.api_client = super(
            TestNetworkUpgrade,
            cls
        ).getClsTestClient().getApiClient()
        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client, cls.services)
        cls.zone = get_zone(cls.api_client, cls.services)
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostypeid"]
        )
        # Pin VM deployment data to the discovered zone/template.
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"]
        )
        cls.network_offering = NetworkOffering.create(
            cls.api_client,
            cls.services["network_offering"],
            conservemode=True
        )
        # Enable Network offering
        cls.network_offering.update(cls.api_client, state='Enabled')
        cls._cleanup = [
            cls.service_offering,
            cls.network_offering
        ]
        return
    @classmethod
    def tearDownClass(cls):
        """Destroy the class-level resources created in ``setUpClass``."""
        try:
            #Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            # Surface cleanup failures loudly so leaked resources are noticed.
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def setUp(self):
        """Per-test setup: fresh API/DB clients and a dedicated admin account."""
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        # Each test runs in its own account so resources are isolated and
        # can be removed wholesale in tearDown().
        self.account = Account.create(
            self.apiclient,
            self.services["account"],
            admin=True,
            domainid=self.domain.id
        )
        self.cleanup = []
        return
    def tearDown(self):
        """Delete the test account and everything created by the test."""
        try:
            self.account.delete(self.apiclient)
            # Account deletion is asynchronous; wait out two cleanup
            # intervals before removing the network offerings created
            # by the test (still referenced until cleanup completes).
            interval = list_configurations(
                self.apiclient,
                name='account.cleanup.interval'
            )
            # Sleep to ensure that all resources are deleted
            time.sleep(int(interval[0].value) * 2)
            #Clean up, terminate the created network offerings
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def test_01_nwupgrade_netscaler_conserve_on(self):
        """Test Nw upgrade to netscaler lb service and conserve mode ON

        Upgrading to a Netscaler-LB offering must fail while LB and PF
        rules coexist on one IP, and succeed once the LB rule is removed
        (PF + VPN on the source NAT are acceptable with conserve mode ON).
        """
        # Validate the following
        # 1. Upgrade a network with VR and conserve mode ON TO
        #    A network that has Lb provided by "Netscaler" and all other
        #    services provided by "VR" and Conserve mode ON
        # 2. Have PF and LB rules on the same ip address. Upgrade network
        #    should fail.
        # 3. Have SourceNat,PF and VPN on the same IP address. Upgrade of
        #    network should succeed.
        # Creating network using the network offering created
        self.debug("Creating network with network offering: %s" %
                   self.network_offering.id)
        self.network = Network.create(
            self.apiclient,
            self.services["network"],
            accountid=self.account.account.name,
            domainid=self.account.account.domainid,
            networkofferingid=self.network_offering.id,
            zoneid=self.zone.id
        )
        self.debug("Created network with ID: %s" % self.network.id)
        self.debug("Deploying VM in account: %s" % self.account.account.name)
        # Spawn an instance in that network
        virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.account.name,
            domainid=self.account.account.domainid,
            serviceofferingid=self.service_offering.id,
            networkids=[str(self.network.id)]
        )
        self.debug("Deployed VM in network: %s" % self.network.id)
        # Deploying the first VM implicitly allocates the source NAT IP.
        src_nat_list = PublicIPAddress.list(
            self.apiclient,
            associatednetworkid=self.network.id,
            account=self.account.account.name,
            domainid=self.account.account.domainid,
            listall=True,
            issourcenat=True,
        )
        self.assertEqual(
            isinstance(src_nat_list, list),
            True,
            "List Public IP should return a valid source NAT"
        )
        self.assertNotEqual(
            len(src_nat_list),
            0,
            "Length of response from listPublicIp should not be 0"
        )
        src_nat = src_nat_list[0]
        self.debug("Trying to create LB rule on source NAT IP: %s" %
                   src_nat.ipaddress)
        # Create Load Balancer rule with source NAT (allowed on VR LB
        # with conserve mode ON; will block the Netscaler upgrade below)
        lb_rule = LoadBalancerRule.create(
            self.apiclient,
            self.services["lbrule"],
            ipaddressid=src_nat.id,
            accountid=self.account.account.name
        )
        self.debug("Created LB rule on source NAT: %s" % src_nat.ipaddress)
        lb_rules = LoadBalancerRule.list(
            self.apiclient,
            id=lb_rule.id
        )
        self.assertEqual(
            isinstance(lb_rules, list),
            True,
            "List lb rules should return a valid lb rules"
        )
        self.assertNotEqual(
            len(lb_rules),
            0,
            "Length of response from listLbRules should not be 0"
        )
        self.debug(
            "Trying to create a port forwarding rule in source NAT: %s" %
            src_nat.ipaddress)
        #Create NAT rule
        nat_rule = NATRule.create(
            self.apiclient,
            virtual_machine,
            self.services["natrule"],
            ipaddressid=src_nat.id
        )
        self.debug("Created PF rule on source NAT: %s" % src_nat.ipaddress)
        nat_rules = NATRule.list(
            self.apiclient,
            id=nat_rule.id
        )
        self.assertEqual(
            isinstance(nat_rules, list),
            True,
            "List NAT should return a valid port forwarding rules"
        )
        # NOTE(review): message says "listLbRules" but this checks the
        # NAT-rule listing — copy/paste artifact in the assertion text.
        self.assertNotEqual(
            len(nat_rules),
            0,
            "Length of response from listLbRules should not be 0"
        )
        # Create a network offering with all virtual router services enabled
        self.debug(
            "Creating n/w offering with all services in VR & conserve mode:ON LB- Netscaler"
        )
        ns_lb_offering = NetworkOffering.create(
            self.api_client,
            self.services["network_offering_netscaler"],
            conservemode=True
        )
        self.cleanup.append(ns_lb_offering)
        ns_lb_offering.update(self.apiclient, state='Enabled')
        #Stop all the VMs associated with network to update cidr
        self.debug("Stopping the VM: %s" % virtual_machine.name)
        virtual_machine.stop(self.apiclient)
        self.debug("Updating network offering for network: %s" %
                   self.network.id)
        # Step 2: LB + PF on the same IP -> upgrade must fail.
        with self.assertRaises(Exception):
            self.network.update(
                self.apiclient,
                networkofferingid=ns_lb_offering.id,
                changecidr=True
            )
        self.debug("Network upgrade failed!")
        self.debug("Deleting LB Rule: %s" % lb_rule.id)
        lb_rule.delete(self.apiclient)
        self.debug("LB rule deleted")
        # Assign VPN to source NAT
        self.debug("Enabling VPN on source NAT")
        vpn = Vpn.create(
            self.apiclient,
            src_nat.id,
            account=self.account.account.name,
            domainid=self.account.account.domainid
        )
        vpns = Vpn.list(
            self.apiclient,
            publicipid=src_nat.id,
            listall=True,
        )
        self.assertEqual(
            isinstance(vpns, list),
            True,
            "List VPNs should return a valid VPN list"
        )
        self.assertNotEqual(
            len(vpns),
            0,
            "Length of list VPN response should not be zero"
        )
        # Step 3: SourceNat + PF + VPN only -> upgrade must succeed.
        self.debug("Upgrading the network: %s" % self.network.id)
        self.network.update(
            self.apiclient,
            networkofferingid=ns_lb_offering.id,
            changecidr=True
        )
        networks = Network.list(
            self.apiclient,
            id=self.network.id,
            listall=True
        )
        self.assertEqual(
            isinstance(networks, list),
            True,
            "List Networks should return a valid list for given network ID"
        )
        self.assertNotEqual(
            len(networks),
            0,
            "Length of list networks should not be 0"
        )
        network = networks[0]
        self.assertEqual(
            network.networkofferingid,
            ns_lb_offering.id,
            "Network offering ID should match with new offering ID"
        )
        return
    def test_02_nwupgrade_netscaler_conserve_off(self):
        """Test Nw upgrade to netscaler lb service and conserve mode OFF

        With the target offering's conserve mode OFF, the upgrade must
        fail both while LB+PF share an IP AND while SourceNat+PF+VPN
        share the source NAT IP (multiple services per IP not allowed).
        """
        # Validate the following
        # 1. Upgrade a network with VR and conserve mode ON TO
        #    A network that has Lb provided by "Netscaler" and all other
        #    services provided by "VR" and Conserve mode OFF
        # 2. Have PF and LB rules on the same ip address. Upgrade network
        #    should fail.
        # 3. Have SourceNat,PF and VPN on the same IP address. Upgrade of
        #    network should fail.
        # Creating network using the network offering created
        self.debug("Creating network with network offering: %s" %
                   self.network_offering.id)
        self.network = Network.create(
            self.apiclient,
            self.services["network"],
            accountid=self.account.account.name,
            domainid=self.account.account.domainid,
            networkofferingid=self.network_offering.id,
            zoneid=self.zone.id
        )
        self.debug("Created network with ID: %s" % self.network.id)
        self.debug("Deploying VM in account: %s" % self.account.account.name)
        # Spawn an instance in that network
        virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.account.name,
            domainid=self.account.account.domainid,
            serviceofferingid=self.service_offering.id,
            networkids=[str(self.network.id)]
        )
        self.debug("Deployed VM in network: %s" % self.network.id)
        # Deploying the first VM implicitly allocates the source NAT IP.
        src_nat_list = PublicIPAddress.list(
            self.apiclient,
            associatednetworkid=self.network.id,
            account=self.account.account.name,
            domainid=self.account.account.domainid,
            listall=True,
            issourcenat=True,
        )
        self.assertEqual(
            isinstance(src_nat_list, list),
            True,
            "List Public IP should return a valid source NAT"
        )
        self.assertNotEqual(
            len(src_nat_list),
            0,
            "Length of response from listPublicIp should not be 0"
        )
        src_nat = src_nat_list[0]
        self.debug("Trying to create LB rule on source NAT IP: %s" %
                   src_nat.ipaddress)
        # Create Load Balancer rule with source NAT (the source network
        # still has conserve mode ON, so this is allowed here)
        lb_rule = LoadBalancerRule.create(
            self.apiclient,
            self.services["lbrule"],
            ipaddressid=src_nat.id,
            accountid=self.account.account.name
        )
        self.debug("Created LB rule on source NAT: %s" % src_nat.ipaddress)
        lb_rules = LoadBalancerRule.list(
            self.apiclient,
            id=lb_rule.id
        )
        self.assertEqual(
            isinstance(lb_rules, list),
            True,
            "List lb rules should return a valid lb rules"
        )
        self.assertNotEqual(
            len(lb_rules),
            0,
            "Length of response from listLbRules should not be 0"
        )
        self.debug(
            "Trying to create a port forwarding rule in source NAT: %s" %
            src_nat.ipaddress)
        #Create NAT rule
        nat_rule = NATRule.create(
            self.apiclient,
            virtual_machine,
            self.services["natrule"],
            ipaddressid=src_nat.id
        )
        self.debug("Created PF rule on source NAT: %s" % src_nat.ipaddress)
        nat_rules = NATRule.list(
            self.apiclient,
            id=nat_rule.id
        )
        self.assertEqual(
            isinstance(nat_rules, list),
            True,
            "List NAT should return a valid port forwarding rules"
        )
        # NOTE(review): message says "listLbRules" but this checks the
        # NAT-rule listing — copy/paste artifact in the assertion text.
        self.assertNotEqual(
            len(nat_rules),
            0,
            "Length of response from listLbRules should not be 0"
        )
        # Create a network offering with all virtual router services enabled
        # NOTE(review): debug text says "conserve mode:ON" but this
        # offering is created with conservemode=False — message is stale.
        self.debug(
            "Creating n/w offering with all services in VR & conserve mode:ON LB- Netscaler"
        )
        ns_lb_offering = NetworkOffering.create(
            self.api_client,
            self.services["network_offering_netscaler"],
            conservemode=False
        )
        self.cleanup.append(ns_lb_offering)
        ns_lb_offering.update(self.apiclient, state='Enabled')
        #Stop all the VMs associated with network to update cidr
        self.debug("Stopping the VM: %s" % virtual_machine.name)
        virtual_machine.stop(self.apiclient)
        self.debug("Updating network offering for network: %s" %
                   self.network.id)
        # Step 2: LB + PF on the same IP -> upgrade must fail.
        with self.assertRaises(Exception):
            self.network.update(
                self.apiclient,
                networkofferingid=ns_lb_offering.id,
                changecidr=True
            )
        self.debug("Network upgrade failed!")
        self.debug("Deleting LB Rule: %s" % lb_rule.id)
        lb_rule.delete(self.apiclient)
        self.debug("LB rule deleted")
        # Assign VPN to source NAT
        self.debug("Enabling VPN on source NAT")
        vpn = Vpn.create(
            self.apiclient,
            src_nat.id,
            account=self.account.account.name,
            domainid=self.account.account.domainid
        )
        vpns = Vpn.list(
            self.apiclient,
            publicipid=src_nat.id,
            listall=True,
        )
        self.assertEqual(
            isinstance(vpns, list),
            True,
            "List VPNs should return a valid VPN list"
        )
        self.assertNotEqual(
            len(vpns),
            0,
            "Length of list VPN response should not be zero"
        )
        # Step 3: SourceNat + PF + VPN -> upgrade must STILL fail because
        # the target offering has conserve mode OFF.
        self.debug("Upgrading the network: %s" % self.network.id)
        with self.assertRaises(Exception):
            self.network.update(
                self.apiclient,
                networkofferingid=ns_lb_offering.id,
                changecidr=True
            )
        return
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importmulti RPC.
Test importmulti by generating keys on node0, importing the scriptPubKeys and
addresses on node1 and then testing the address info for the different address
variants.
- `get_key()` and `get_multisig()` are called to generate keys on node0 and
return the privkeys, pubkeys and all variants of scriptPubKey and address.
- `test_importmulti()` is called to send an importmulti call to node1, test
success, and (if unsuccessful) test the error code and error message returned.
- `test_address()` is called to call getaddressinfo for an address on node1
and test the values returned."""
from test_framework.script import (
CScript,
OP_NOP,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.descriptors import descsum_create
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
from test_framework.wallet_util import (
get_key,
get_multisig,
test_address,
)
class ImportMultiTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"]]
self.setup_clean_chain = True
    def skip_test_if_missing_module(self):
        """Skip the whole test if the build has no wallet support."""
        self.skip_if_no_wallet()
    def setup_network(self):
        """Start the nodes without connecting them (no default topology)."""
        self.setup_nodes()
def test_importmulti(self, req, success, error_code=None, error_message=None, warnings=None):
"""Run importmulti and assert success"""
if warnings is None:
warnings = []
result = self.nodes[1].importmulti([req])
observed_warnings = []
if 'warnings' in result[0]:
observed_warnings = result[0]['warnings']
assert_equal("\n".join(sorted(warnings)), "\n".join(sorted(observed_warnings)))
assert_equal(result[0]['success'], success)
if error_code is not None:
assert_equal(result[0]['error']['code'], error_code)
assert_equal(result[0]['error']['message'], error_message)
    def run_test(self):
        """Exercise importmulti across address/script/key/descriptor variants.

        Keys are generated on node0; imports and getaddressinfo checks run
        against node1 (key-origin and legacy-import checks also use node0).
        The sections below are strictly order-dependent: later checks reuse
        wallet state (watchonly_address, timestamps, the "noprivkeys" wallet)
        created by earlier sections.
        """
        self.log.info("Mining blocks...")
        self.nodes[0].generate(1)
        self.nodes[1].generate(1)
        timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
        node0_address1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
        # Check only one address
        assert_equal(node0_address1['ismine'], True)
        # Node 1 sync test
        assert_equal(self.nodes[1].getblockcount(), 1)
        # Address Test - before import
        address_info = self.nodes[1].getaddressinfo(node0_address1['address'])
        assert_equal(address_info['iswatchonly'], False)
        assert_equal(address_info['ismine'], False)
        # RPC importmulti -----------------------------------------------
        # Bitcoin Address (implicit non-internal)
        self.log.info("Should import an address")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
                               "timestamp": "now"},
                              success=True)
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=True,
                     ismine=False,
                     timestamp=timestamp,
                     ischange=False)
        # Remembered for the timestamp-replacement and restart checks below.
        watchonly_address = key.p2pkh_addr
        watchonly_timestamp = timestamp
        self.log.info("Should not import an invalid address")
        self.test_importmulti({"scriptPubKey": {"address": "not valid address"},
                               "timestamp": "now"},
                              success=False,
                              error_code=-5,
                              error_message='Invalid address \"not valid address\"')
        # ScriptPubKey + internal
        self.log.info("Should import a scriptPubKey with internal flag")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": key.p2pkh_script,
                               "timestamp": "now",
                               "internal": True},
                              success=True)
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=True,
                     ismine=False,
                     timestamp=timestamp,
                     ischange=True)
        # ScriptPubKey + internal + label
        self.log.info("Should not allow a label to be specified when internal is true")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": key.p2pkh_script,
                               "timestamp": "now",
                               "internal": True,
                               "label": "Example label"},
                              success=False,
                              error_code=-8,
                              error_message='Internal addresses should not have a label')
        # Nonstandard scriptPubKey + !internal
        self.log.info("Should not import a nonstandard scriptPubKey without internal flag")
        # Built from the PREVIOUS key's P2PKH script with OP_NOP appended,
        # which makes it nonstandard; reused by several later sections.
        nonstandardScriptPubKey = key.p2pkh_script + CScript([OP_NOP]).hex()
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
                               "timestamp": "now"},
                              success=False,
                              error_code=-8,
                              error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=False,
                     ismine=False,
                     timestamp=None)
        # Address + Public key + !Internal(explicit)
        self.log.info("Should import an address with public key")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
                               "timestamp": "now",
                               "pubkeys": [key.pubkey],
                               "internal": False},
                              success=True,
                              warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=True,
                     ismine=False,
                     timestamp=timestamp)
        # ScriptPubKey + Public key + internal
        self.log.info("Should import a scriptPubKey with internal and with public key")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": key.p2pkh_script,
                               "timestamp": "now",
                               "pubkeys": [key.pubkey],
                               "internal": True},
                              success=True,
                              warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=True,
                     ismine=False,
                     timestamp=timestamp)
        # Nonstandard scriptPubKey + Public key + !internal
        self.log.info("Should not import a nonstandard scriptPubKey without internal and with public key")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
                               "timestamp": "now",
                               "pubkeys": [key.pubkey]},
                              success=False,
                              error_code=-8,
                              error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=False,
                     ismine=False,
                     timestamp=None)
        # Address + Private key + !watchonly
        self.log.info("Should import an address with private key")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
                               "timestamp": "now",
                               "keys": [key.privkey]},
                              success=True)
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=False,
                     ismine=True,
                     timestamp=timestamp)
        self.log.info("Should not import an address with private key if is already imported")
        self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
                               "timestamp": "now",
                               "keys": [key.privkey]},
                              success=False,
                              error_code=-4,
                              error_message='The wallet already contains the private key for this address or script ("' + key.p2pkh_script + '")')
        # Address + Private key + watchonly
        self.log.info("Should import an address with private key and with watchonly")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
                               "timestamp": "now",
                               "keys": [key.privkey],
                               "watchonly": True},
                              success=True,
                              warnings=["All private keys are provided, outputs will be considered spendable. If this is intentional, do not specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=False,
                     ismine=True,
                     timestamp=timestamp)
        # ScriptPubKey + Private key + internal
        self.log.info("Should import a scriptPubKey with internal and with private key")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": key.p2pkh_script,
                               "timestamp": "now",
                               "keys": [key.privkey],
                               "internal": True},
                              success=True)
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=False,
                     ismine=True,
                     timestamp=timestamp)
        # Nonstandard scriptPubKey + Private key + !internal
        self.log.info("Should not import a nonstandard scriptPubKey without internal and with private key")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
                               "timestamp": "now",
                               "keys": [key.privkey]},
                              success=False,
                              error_code=-8,
                              error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=False,
                     ismine=False,
                     timestamp=None)
        # P2SH address
        multisig = get_multisig(self.nodes[0])
        self.nodes[1].generate(100)
        self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
        self.nodes[1].generate(1)
        timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
        self.log.info("Should import a p2sh")
        self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
                               "timestamp": "now"},
                              success=True)
        test_address(self.nodes[1],
                     multisig.p2sh_addr,
                     isscript=True,
                     iswatchonly=True,
                     timestamp=timestamp)
        p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
        assert_equal(p2shunspent['spendable'], False)
        assert_equal(p2shunspent['solvable'], False)
        # P2SH + Redeem script
        multisig = get_multisig(self.nodes[0])
        self.nodes[1].generate(100)
        self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
        self.nodes[1].generate(1)
        timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
        self.log.info("Should import a p2sh with respective redeem script")
        self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
                               "timestamp": "now",
                               "redeemscript": multisig.redeem_script},
                              success=True,
                              warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     multisig.p2sh_addr, timestamp=timestamp, iswatchonly=True, ismine=False, solvable=True)
        p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
        assert_equal(p2shunspent['spendable'], False)
        assert_equal(p2shunspent['solvable'], True)
        # P2SH + Redeem script + Private Keys + !Watchonly
        multisig = get_multisig(self.nodes[0])
        self.nodes[1].generate(100)
        self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
        self.nodes[1].generate(1)
        timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
        self.log.info("Should import a p2sh with respective redeem script and private keys")
        self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
                               "timestamp": "now",
                               "redeemscript": multisig.redeem_script,
                               "keys": multisig.privkeys[0:2]},
                              success=True,
                              warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     multisig.p2sh_addr,
                     timestamp=timestamp,
                     ismine=False,
                     iswatchonly=True,
                     solvable=True)
        p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
        assert_equal(p2shunspent['spendable'], False)
        assert_equal(p2shunspent['solvable'], True)
        # P2SH + Redeem script + Private Keys + Watchonly
        multisig = get_multisig(self.nodes[0])
        self.nodes[1].generate(100)
        self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
        self.nodes[1].generate(1)
        timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
        self.log.info("Should import a p2sh with respective redeem script and private keys")
        self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
                               "timestamp": "now",
                               "redeemscript": multisig.redeem_script,
                               "keys": multisig.privkeys[0:2],
                               "watchonly": True},
                              success=True)
        test_address(self.nodes[1],
                     multisig.p2sh_addr,
                     iswatchonly=True,
                     ismine=False,
                     solvable=True,
                     timestamp=timestamp)
        # Address + Public key + !Internal + Wrong pubkey
        self.log.info("Should not import an address with the wrong public key as non-solvable")
        key = get_key(self.nodes[0])
        wrong_key = get_key(self.nodes[0]).pubkey
        self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
                               "timestamp": "now",
                               "pubkeys": [wrong_key]},
                              success=True,
                              warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=True,
                     ismine=False,
                     solvable=False,
                     timestamp=timestamp)
        # ScriptPubKey + Public key + internal + Wrong pubkey
        self.log.info("Should import a scriptPubKey with internal and with a wrong public key as non-solvable")
        key = get_key(self.nodes[0])
        wrong_key = get_key(self.nodes[0]).pubkey
        self.test_importmulti({"scriptPubKey": key.p2pkh_script,
                               "timestamp": "now",
                               "pubkeys": [wrong_key],
                               "internal": True},
                              success=True,
                              warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=True,
                     ismine=False,
                     solvable=False,
                     timestamp=timestamp)
        # Address + Private key + !watchonly + Wrong private key
        self.log.info("Should import an address with a wrong private key as non-solvable")
        key = get_key(self.nodes[0])
        wrong_privkey = get_key(self.nodes[0]).privkey
        self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
                               "timestamp": "now",
                               "keys": [wrong_privkey]},
                              success=True,
                              warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=True,
                     ismine=False,
                     solvable=False,
                     timestamp=timestamp)
        # ScriptPubKey + Private key + internal + Wrong private key
        self.log.info("Should import a scriptPubKey with internal and with a wrong private key as non-solvable")
        key = get_key(self.nodes[0])
        wrong_privkey = get_key(self.nodes[0]).privkey
        self.test_importmulti({"scriptPubKey": key.p2pkh_script,
                               "timestamp": "now",
                               "keys": [wrong_privkey],
                               "internal": True},
                              success=True,
                              warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=True,
                     ismine=False,
                     solvable=False,
                     timestamp=timestamp)
        # Importing existing watch only address with new timestamp should replace saved timestamp.
        assert_greater_than(timestamp, watchonly_timestamp)
        self.log.info("Should replace previously saved watch only timestamp.")
        self.test_importmulti({"scriptPubKey": {"address": watchonly_address},
                               "timestamp": "now"},
                              success=True)
        test_address(self.nodes[1],
                     watchonly_address,
                     iswatchonly=True,
                     ismine=False,
                     timestamp=timestamp)
        watchonly_timestamp = timestamp
        # restart nodes to check for proper serialization/deserialization of watch only address
        self.stop_nodes()
        self.start_nodes()
        test_address(self.nodes[1],
                     watchonly_address,
                     iswatchonly=True,
                     ismine=False,
                     timestamp=watchonly_timestamp)
        # Bad or missing timestamps
        self.log.info("Should throw on invalid or missing timestamp values")
        assert_raises_rpc_error(-3, 'Missing required timestamp field for key',
                                self.nodes[1].importmulti, [{"scriptPubKey": key.p2pkh_script}])
        assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string',
                                self.nodes[1].importmulti, [{
                                    "scriptPubKey": key.p2pkh_script,
                                    "timestamp": ""
                                }])
        # Import P2WPKH address as watch only
        self.log.info("Should import a P2WPKH address as watch only")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
                               "timestamp": "now"},
                              success=True)
        test_address(self.nodes[1],
                     key.p2wpkh_addr,
                     iswatchonly=True,
                     solvable=False)
        # Import P2WPKH address with public key but no private key
        self.log.info("Should import a P2WPKH address and public key as solvable but not spendable")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
                               "timestamp": "now",
                               "pubkeys": [key.pubkey]},
                              success=True,
                              warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2wpkh_addr,
                     ismine=False,
                     solvable=True)
        # Import P2WPKH address with key and check it is spendable
        self.log.info("Should import a P2WPKH address with key")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
                               "timestamp": "now",
                               "keys": [key.privkey]},
                              success=True)
        test_address(self.nodes[1],
                     key.p2wpkh_addr,
                     iswatchonly=False,
                     ismine=True)
        # P2WSH multisig address without scripts or keys
        multisig = get_multisig(self.nodes[0])
        self.log.info("Should import a p2wsh multisig as watch only without respective redeem script and private keys")
        self.test_importmulti({"scriptPubKey": {"address": multisig.p2wsh_addr},
                               "timestamp": "now"},
                              success=True)
        # NOTE(review): the P2WSH address was imported, but the check below
        # queries multisig.p2sh_addr -- TODO confirm this is intentional
        # (both wrap the same redeem script) rather than a p2wsh_addr typo.
        test_address(self.nodes[1],
                     multisig.p2sh_addr,
                     solvable=False)
        # Same P2WSH multisig address as above, but now with witnessscript + private keys
        self.log.info("Should import a p2wsh with respective witness script and private keys")
        self.test_importmulti({"scriptPubKey": {"address": multisig.p2wsh_addr},
                               "timestamp": "now",
                               "witnessscript": multisig.redeem_script,
                               "keys": multisig.privkeys},
                              success=True)
        # NOTE(review): same p2sh_addr-vs-p2wsh_addr question as above.
        test_address(self.nodes[1],
                     multisig.p2sh_addr,
                     solvable=True,
                     ismine=True,
                     sigsrequired=2)
        # P2SH-P2WPKH address with no redeemscript or public or private key
        key = get_key(self.nodes[0])
        self.log.info("Should import a p2sh-p2wpkh without redeem script or keys")
        self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
                               "timestamp": "now"},
                              success=True)
        test_address(self.nodes[1],
                     key.p2sh_p2wpkh_addr,
                     solvable=False,
                     ismine=False)
        # P2SH-P2WPKH address + redeemscript + public key with no private key
        self.log.info("Should import a p2sh-p2wpkh with respective redeem script and pubkey as solvable")
        self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
                               "timestamp": "now",
                               "redeemscript": key.p2sh_p2wpkh_redeem_script,
                               "pubkeys": [key.pubkey]},
                              success=True,
                              warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2sh_p2wpkh_addr,
                     solvable=True,
                     ismine=False)
        # P2SH-P2WPKH address + redeemscript + private key
        key = get_key(self.nodes[0])
        self.log.info("Should import a p2sh-p2wpkh with respective redeem script and private keys")
        self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
                               "timestamp": "now",
                               "redeemscript": key.p2sh_p2wpkh_redeem_script,
                               "keys": [key.privkey]},
                              success=True)
        test_address(self.nodes[1],
                     key.p2sh_p2wpkh_addr,
                     solvable=True,
                     ismine=True)
        # P2SH-P2WSH multisig + redeemscript with no private key
        multisig = get_multisig(self.nodes[0])
        self.log.info("Should import a p2sh-p2wsh with respective redeem script but no private key")
        self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_p2wsh_addr},
                               "timestamp": "now",
                               "redeemscript": multisig.p2wsh_script,
                               "witnessscript": multisig.redeem_script},
                              success=True,
                              warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     multisig.p2sh_p2wsh_addr,
                     solvable=True,
                     ismine=False)
        # Test importing of a P2SH-P2WPKH address via descriptor + private key
        key = get_key(self.nodes[0])
        self.log.info("Should not import a p2sh-p2wpkh address from descriptor without checksum and private key")
        self.test_importmulti({"desc": "sh(wpkh(" + key.pubkey + "))",
                               "timestamp": "now",
                               "label": "Descriptor import test",
                               "keys": [key.privkey]},
                              success=False,
                              error_code=-5,
                              error_message="Missing checksum")
        # Test importing of a P2SH-P2WPKH address via descriptor + private key
        key = get_key(self.nodes[0])
        self.log.info("Should import a p2sh-p2wpkh address from descriptor and private key")
        self.test_importmulti({"desc": descsum_create("sh(wpkh(" + key.pubkey + "))"),
                               "timestamp": "now",
                               "label": "Descriptor import test",
                               "keys": [key.privkey]},
                              success=True)
        test_address(self.nodes[1],
                     key.p2sh_p2wpkh_addr,
                     solvable=True,
                     ismine=True,
                     label="Descriptor import test")
        # Test ranged descriptor fails if range is not specified
        xpriv = "tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg"
        addresses = ["2N7yv4p8G8yEaPddJxY41kPihnWvs39qCMf", "2MsHxyb2JS3pAySeNUsJ7mNnurtpeenDzLA"]  # hdkeypath=m/0'/0'/0' and 1'
        addresses += ["fcrt1qrd3n235cj2czsfmsuvqqpr3lu6lg0ju7ksxv08", "fcrt1qfqeppuvj0ww98r6qghmdkj70tv8qpche333g0n"]  # wpkh subscripts corresponding to the above addresses
        desc = "sh(wpkh(" + xpriv + "/0'/0'/*'" + "))"
        self.log.info("Ranged descriptor import should fail without a specified range")
        self.test_importmulti({"desc": descsum_create(desc),
                               "timestamp": "now"},
                              success=False,
                              error_code=-8,
                              error_message='Descriptor is ranged, please specify the range')
        # Test importing of a ranged descriptor with xpriv
        self.log.info("Should import the ranged descriptor with specified range as solvable")
        self.test_importmulti({"desc": descsum_create(desc),
                               "timestamp": "now",
                               "range": 1},
                              success=True)
        for address in addresses:
            test_address(self.nodes[1],
                         address,
                         solvable=True,
                         ismine=True)
        self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": -1},
                              success=False, error_code=-8, error_message='End of range is too high')
        self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [-1, 10]},
                              success=False, error_code=-8, error_message='Range should be greater or equal than 0')
        # NOTE: (2 << 31 + 1) parses as 2 << 32 (addition binds tighter than
        # the shift), i.e. a value deliberately far above the allowed range.
        self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]},
                              success=False, error_code=-8, error_message='End of range is too high')
        self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [2, 1]},
                              success=False, error_code=-8, error_message='Range specified as [begin,end] must not have begin after end')
        self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [0, 1000001]},
                              success=False, error_code=-8, error_message='Range is too large')
        # Test importing a descriptor containing a WIF private key
        wif_priv = "cTe1f5rdT8A8DFgVWTjyPwACsDPJM9ff4QngFxUixCSvvbg1x6sh"
        address = "2MuhcG52uHPknxDgmGPsV18jSHFBnnRgjPg"
        desc = "sh(wpkh(" + wif_priv + "))"
        self.log.info("Should import a descriptor with a WIF private key as spendable")
        self.test_importmulti({"desc": descsum_create(desc),
                               "timestamp": "now"},
                              success=True)
        test_address(self.nodes[1],
                     address,
                     solvable=True,
                     ismine=True)
        # dump the private key to ensure it matches what was imported
        privkey = self.nodes[1].dumpprivkey(address)
        assert_equal(privkey, wif_priv)
        # Test importing of a P2PKH address via descriptor
        key = get_key(self.nodes[0])
        self.log.info("Should import a p2pkh address from descriptor")
        self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
                               "timestamp": "now",
                               "label": "Descriptor import test"},
                              True,
                              warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     solvable=True,
                     ismine=False,
                     label="Descriptor import test")
        # Test import fails if both desc and scriptPubKey are provided
        key = get_key(self.nodes[0])
        self.log.info("Import should fail if both scriptPubKey and desc are provided")
        self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
                               "scriptPubKey": {"address": key.p2pkh_addr},
                               "timestamp": "now"},
                              success=False,
                              error_code=-8,
                              error_message='Both a descriptor and a scriptPubKey should not be provided.')
        # Test import fails if neither desc nor scriptPubKey are present
        key = get_key(self.nodes[0])
        self.log.info("Import should fail if neither a descriptor nor a scriptPubKey are provided")
        self.test_importmulti({"timestamp": "now"},
                              success=False,
                              error_code=-8,
                              error_message='Either a descriptor or scriptPubKey must be provided.')
        # Test importing of a multisig via descriptor
        key1 = get_key(self.nodes[0])
        key2 = get_key(self.nodes[0])
        self.log.info("Should import a 1-of-2 bare multisig from descriptor")
        self.test_importmulti({"desc": descsum_create("multi(1," + key1.pubkey + "," + key2.pubkey + ")"),
                               "timestamp": "now"},
                              success=True,
                              warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        self.log.info("Should not treat individual keys from the imported bare multisig as watchonly")
        test_address(self.nodes[1],
                     key1.p2pkh_addr,
                     ismine=False,
                     iswatchonly=False)
        # Import pubkeys with key origin info
        self.log.info("Addresses should have hd keypath and master key id after import with key origin")
        # NOTE(review): getnewaddress is called twice and the first address is
        # discarded -- TODO confirm this is intentional (e.g. to advance the
        # keypath) rather than a copy/paste duplicate.
        pub_addr = self.nodes[1].getnewaddress()
        pub_addr = self.nodes[1].getnewaddress()
        info = self.nodes[1].getaddressinfo(pub_addr)
        pub = info['pubkey']
        pub_keypath = info['hdkeypath']
        pub_fpr = info['hdmasterfingerprint']
        result = self.nodes[0].importmulti(
            [{
                'desc' : descsum_create("wpkh([" + pub_fpr + pub_keypath[1:] +"]" + pub + ")"),
                "timestamp": "now",
            }]
        )
        assert result[0]['success']
        pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
        assert_equal(pub_import_info['hdmasterfingerprint'], pub_fpr)
        assert_equal(pub_import_info['pubkey'], pub)
        assert_equal(pub_import_info['hdkeypath'], pub_keypath)
        # Import privkeys with key origin info
        priv_addr = self.nodes[1].getnewaddress()
        info = self.nodes[1].getaddressinfo(priv_addr)
        priv = self.nodes[1].dumpprivkey(priv_addr)
        priv_keypath = info['hdkeypath']
        priv_fpr = info['hdmasterfingerprint']
        result = self.nodes[0].importmulti(
            [{
                'desc' : descsum_create("wpkh([" + priv_fpr + priv_keypath[1:] + "]" + priv + ")"),
                "timestamp": "now",
            }]
        )
        assert result[0]['success']
        priv_import_info = self.nodes[0].getaddressinfo(priv_addr)
        assert_equal(priv_import_info['hdmasterfingerprint'], priv_fpr)
        assert_equal(priv_import_info['hdkeypath'], priv_keypath)
        # Make sure the key origin info are still there after a restart
        self.stop_nodes()
        self.start_nodes()
        import_info = self.nodes[0].getaddressinfo(pub_addr)
        assert_equal(import_info['hdmasterfingerprint'], pub_fpr)
        assert_equal(import_info['hdkeypath'], pub_keypath)
        import_info = self.nodes[0].getaddressinfo(priv_addr)
        assert_equal(import_info['hdmasterfingerprint'], priv_fpr)
        assert_equal(import_info['hdkeypath'], priv_keypath)
        # Check legacy import does not import key origin info
        self.log.info("Legacy imports don't have key origin info")
        pub_addr = self.nodes[1].getnewaddress()
        info = self.nodes[1].getaddressinfo(pub_addr)
        pub = info['pubkey']
        result = self.nodes[0].importmulti(
            [{
                'scriptPubKey': {'address': pub_addr},
                'pubkeys': [pub],
                "timestamp": "now",
            }]
        )
        assert result[0]['success']
        pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
        assert_equal(pub_import_info['pubkey'], pub)
        assert 'hdmasterfingerprint' not in pub_import_info
        assert 'hdkeypath' not in pub_import_info
        # Import some public keys to the keypool of a no privkey wallet
        self.log.info("Adding pubkey to keypool of disableprivkey wallet")
        self.nodes[1].createwallet(wallet_name="noprivkeys", disable_private_keys=True)
        wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
        addr1 = self.nodes[0].getnewaddress()
        addr2 = self.nodes[0].getnewaddress()
        pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
        pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
        result = wrpc.importmulti(
            [{
                'desc': descsum_create('wpkh(' + pub1 + ')'),
                'keypool': True,
                "timestamp": "now",
            },
            {
                'desc': descsum_create('wpkh(' + pub2 + ')'),
                'keypool': True,
                "timestamp": "now",
            }]
        )
        assert result[0]['success']
        assert result[1]['success']
        assert_equal(wrpc.getwalletinfo()["keypoolsize"], 2)
        newaddr1 = wrpc.getnewaddress()
        assert_equal(addr1, newaddr1)
        newaddr2 = wrpc.getnewaddress()
        assert_equal(addr2, newaddr2)
        # Import some public keys to the internal keypool of a no privkey wallet
        self.log.info("Adding pubkey to internal keypool of disableprivkey wallet")
        addr1 = self.nodes[0].getnewaddress()
        addr2 = self.nodes[0].getnewaddress()
        pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
        pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
        result = wrpc.importmulti(
            [{
                'desc': descsum_create('wpkh(' + pub1 + ')'),
                'keypool': True,
                'internal': True,
                "timestamp": "now",
            },
            {
                'desc': descsum_create('wpkh(' + pub2 + ')'),
                'keypool': True,
                'internal': True,
                "timestamp": "now",
            }]
        )
        assert result[0]['success']
        assert result[1]['success']
        assert_equal(wrpc.getwalletinfo()["keypoolsize_hd_internal"], 2)
        newaddr1 = wrpc.getrawchangeaddress()
        assert_equal(addr1, newaddr1)
        newaddr2 = wrpc.getrawchangeaddress()
        assert_equal(addr2, newaddr2)
        # Import a multisig and make sure the keys don't go into the keypool
        self.log.info('Imported scripts with pubkeys should not have their pubkeys go into the keypool')
        addr1 = self.nodes[0].getnewaddress()
        addr2 = self.nodes[0].getnewaddress()
        pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
        pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
        result = wrpc.importmulti(
            [{
                'desc': descsum_create('wsh(multi(2,' + pub1 + ',' + pub2 + '))'),
                'keypool': True,
                "timestamp": "now",
            }]
        )
        assert result[0]['success']
        assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
        # Cannot import those pubkeys to keypool of wallet with privkeys
        self.log.info("Pubkeys cannot be added to the keypool of a wallet with private keys")
        wrpc = self.nodes[1].get_wallet_rpc("")
        assert wrpc.getwalletinfo()['private_keys_enabled']
        result = wrpc.importmulti(
            [{
                'desc': descsum_create('wpkh(' + pub1 + ')'),
                'keypool': True,
                "timestamp": "now",
            }]
        )
        assert_equal(result[0]['error']['code'], -8)
        assert_equal(result[0]['error']['message'], "Keys can only be imported to the keypool when private keys are disabled")
        # Make sure ranged imports import keys in order
        self.log.info('Key ranges should be imported in order')
        wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
        assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
        assert_equal(wrpc.getwalletinfo()["private_keys_enabled"], False)
        xpub = "tpubDAXcJ7s7ZwicqjprRaEWdPoHKrCS215qxGYxpusRLLmJuT69ZSicuGdSfyvyKpvUNYBW1s2U3NSrT6vrCYB9e6nZUEvrqnwXPF8ArTCRXMY"
        addresses = [
            'fcrt1qtmp74ayg7p24uslctssvjm06q5phz4yrq5pr5c',  # m/0'/0'/0
            'fcrt1q8vprchan07gzagd5e6v9wd7azyucksq2qs833n',  # m/0'/0'/1
            'fcrt1qtuqdtha7zmqgcrr26n2rqxztv5y8rafj8dxhcg',  # m/0'/0'/2
            'fcrt1qau64272ymawq26t90md6an0ps99qkrsej0z3jm',  # m/0'/0'/3
            'fcrt1qsg97266hrh6cpmutqen8s4s962aryy775fkz0m',  # m/0'/0'/4
        ]
        result = wrpc.importmulti(
            [{
                'desc': descsum_create('wpkh([80002067/0h/0h]' + xpub + '/*)'),
                'keypool': True,
                'timestamp': 'now',
                'range' : [0, 4],
            }]
        )
        for i in range(0, 5):
            addr = wrpc.getnewaddress('', 'bech32')
            assert_equal(addr, addresses[i])
# Standard functional-test entry point: run the test when invoked directly.
if __name__ == '__main__':
    ImportMultiTest().main()
|
|
# python 2.7
from __future__ import absolute_import, unicode_literals
from builtins import str
# builtins
from collections import OrderedDict
from copy import deepcopy
import logging
# plugins
from pypandoc import convert
# custom
import blowdrycss_settings as settings
__author__ = 'chad nelson'
__project__ = 'blowdrycss'
class DataLibrary(object):
"""
DataLibrary is not intended for use outside of this file as each time its' called it rebuilds the dictionaries.
**Attributes:**
| **property_regex_dict** (*dict*)
A regex dictionary for detecting more complex value patterns for a given property.
**Dictionary Contains:**
- The ``key`` is the official CSS property name.
- The ``value`` is a ``set()`` of regex strings.
**Regexes Cases:**
- Hexidecimal (3 digit) -- 'h123', 'h123 bold', 'underline h123 bold'
- Hexidecimal (6 digit) -- 'h123456', 'h123456 underline', 'underline h123456 bold'
- Hexidecimal (3 digit + pseudo-class + importance designator) -- 'h123-hover-i', 'h123-after-i bold',
- Hexidecimal (6 digit + pseudo-class + importance designator) -- 'h12ad56-hover-i', 'h12AD56-hover-i underline'
- Hexidecimal (3 digit + importance designator + pseudo-class) -- 'h1f3-i-hover', 'h1F3-i-hover bold'
- Hexidecimal (6 digit + importance designator + pseudo-class) -- 'h123456-i-hover', 'h123456-i-hover underline'
- Hexidecimal Regex explained
- ``r"(h[0-9a-fA-F]{3} ?)$"`` or ``r"(h[0-9a-fA-F]{6} ?)$"``
- ``h`` -- The substring must begin with an ``h``.
- ``[0-9a-fA-F]`` -- The characters that follow must be a hexidecimal characters.
- ``{3}`` or ``{6}`` -- Limit the number of hexidecimal characters to either 3 or 6 only.
- ``' ?'`` -- The substring may optionally be followed by a space.
| **property_value_as_alias_dict** (*dict*)
Maps valid, unique W3C CSS property values to a W3C CSS property name. This enables the use of unique
property values as standalone class selectors i.e. ``bold`` can be used in place of ``font-weight-bold``.
This makes the syntax succinct while remaining declarative.
**Rules:**
- The property value alias must be unique across all valid W3C defined property values.
- The property value alias must be unique across all property values defined in the dictionary.
- The ``key`` is the official CSS property name.
- The ``value`` is a ``set()`` of valid, unique W3C CSS property values.
**Example:** ::
``'font-weight': {'bold', 'bolder', 'lighter', },``
`Allowed values <https://www.w3.org/TR/CSS21/fonts.html#propdef-font-weight>`__ for font-weight according to W3C.
Based on the standard ``normal`` is a valid property value.
However, ``normal`` is not unique to ``font-weight``. To verify that
`go here <https://www.w3.org/TR/CSS21/propidx.html>`__ and search for ``normal``.
Also, ``100 - 900`` are listed as valid values, but CSS class selectors cannot begin with a digit.
This implies that the numeric values cannot be included. That leaves ``bold, bolder, and lighter``.
| **property_names** (*set*)
The set of all CSS 2.1 property names listed here: http://www.w3.org/TR/CSS21/propidx.html on the W3C website.
| **clashing_alias_dict** (*dict*)
Auto-generated dictionary of clashing aliases. An alias clashes if it exactly equals an
alias associated with another property e.g. One alias for ``border-right`` is ``'br-'``.
However ``background-repeat`` has an identical alias of ``'br-'``. Therefore ``'br-'`` is added to
``clashing_alias_dict`` and is not allowed to be used as an alias.
**Dictionary Contains:**
- The ``key`` is the official CSS property name.
- The ``value`` is a ``set()`` of custom string aliases.
| **property_alias_dict** (*dict*)
Auto-generated dictionary of property aliases.
*Dictionary Contains:*
- The ``key`` is the official CSS property name.
- The ``value`` is a ``set()`` of custom string aliases.
| **alphabetical_clashing_dict** (*dict*)
Alphabetized ordered dictionary of clashing aliases.
**Ordered Dictionary Contains:**
- The ``key`` is the official CSS property name.
- The ``value`` is a ``set()`` of clashing string aliases.
| **alphabetical_property_dict** (*dict*)
Alphabetized ordered dictionary of property aliases.
*Ordered Dictionary Contains:*
- The ``key`` is the official CSS property name.
- The ``value`` is a ``set()`` of custom string aliases.
| **clashing_alias_markdown** (*str*) -- Auto-generated table of clashing aliases in markdown format.
| **property_alias_markdown** (*str*) -- Auto-generated table of property names and aliases in markdown format.
| **clashing_alias_html** (*str*) -- Auto-generated table of clashing aliases in HTML format.
| **property_alias_html** (*str*) -- Auto-generated table of property names and aliases in HTML format.
| **clashing_alias_rst** (*str*) -- Auto-generated table of clashing aliases in reStructuredText format.
| **property_alias_rst** (*str*) -- Auto-generated table of property names and aliases in reStructuredText form.
| **ordered_property_dict** (*dict*)
Sorted property_alias_dict with the longest items first as the
most verbose match is preferred i.e. If ``css_class == 'margin-top'``, then match the
``property_alias_dict`` key that starts with ``margin-top`` not ``margin``.
*Ordered Dictionary Contains:*
- The ``key`` is the official CSS property name.
- The ``value`` is a ``set()`` of custom string aliases.
"""
    def __init__(self) -> None:
        """Build every alias data structure the module exports.

        Pipeline (order matters):
        1. Define literal tables: regex aliases, CSS property-value aliases,
           and the set of official CSS property names.
        2. Auto-generate abbreviation aliases per property name.
        3. Merge in value aliases and user-defined custom aliases.
        4. Detect and strip aliases claimed by more than one property.
        5. Derive alphabetized dicts plus markdown / HTML / reST tables and
           a longest-key-first ordered dict for verbose-first matching.
        """
        # Regex patterns treated as aliases; currently only hex color forms
        # such as 'h0ff', 'h0d24eb', with optional '-i' (!important) suffix.
        self.property_regex_dict = {
            'color': {
                r"(h[0-9a-fA-F]{3} ?)$", r"(h[0-9a-fA-F]{6} ?)$",
                r"(h[0-9a-fA-F]{3}-i ?)$", r"(h[0-9a-fA-F]{6}-i ?)$",
                r"(h[0-9a-fA-F]{3}-.*? ?)$", r"(h[0-9a-fA-F]{6}-.* ?)$",
                r"(h[0-9a-fA-F]{3}-.*?-i ?)$", r"(h[0-9a-fA-F]{6}-.*-i ?)$",
                r"(h[0-9a-fA-F]{3}-i-.*? ?)$", r"(h[0-9a-fA-F]{6}-i-.*? ?)$",
            },
        }
        # User-defined aliases pulled from project settings; merged last so
        # they participate in clash detection like any other alias.
        self.custom_property_alias_dict = settings.custom_property_alias_dict
        # CSS property *values* that may be used directly as encoded class
        # names, keyed by the property they set.
        self.property_value_as_alias_dict = {
            'background-repeat': {'repeat', 'repeat-x', 'repeat-y', 'no-repeat', },
            'color': {
                # Special format cases
                'rgb', 'rgba', 'hsl', 'hsla',
                # SVG 1.1 Color Keyword Reference: http://www.w3.org/TR/SVG/types.html#ColorKeywords
                'aliceblue', 'antiquewhite', 'aqua', 'aquamarine', 'azure',
                'beige', 'bisque', 'black', 'blanchedalmond', 'blue', 'blueviolet', 'brown', 'burlywood',
                'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan',
                'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki',
                'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon',
                'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise',
                'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue',
                'firebrick', 'floralwhite', 'forestgreen', 'fuchsia',
                'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'gray', 'grey', 'green', 'greenyellow',
                'honeydew', 'hotpink',
                'indianred', 'indigo', 'ivory',
                'khaki',
                'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral',
                'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink',
                'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey',
                'lightsteelblue', 'lightyellow', 'lime', 'limegreen', 'linen',
                'magenta', 'maroon', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple',
                'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise',
                'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin',
                'navajowhite', 'navy',
                'oldlace', 'olive', 'olivedrab', 'orange', 'orangered', 'orchid',
                'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff',
                'peru', 'pink', 'plum', 'powderblue', 'purple',
                'red', 'rosybrown', 'royalblue',
                'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'silver', 'skyblue',
                'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue',
                'tan', 'teal', 'thistle', 'tomato', 'turquoise',
                'violet',
                'wheat', 'white', 'whitesmoke',
                'yellow', 'yellowgreen',
            },
            'content': {'open-quote', 'close-quote', 'no-open-quote', 'no-close-quote', },
            'cursor': {'crosshair', 'default', 'pointer', 'move', 'e-resize', 'ne-resize', 'nw-resize', 'n-resize',
                       'se-resize', 'sw-resize', 's-resize', 'w-resize', 'text', 'wait', 'help', 'progress', },
            'direction': {'ltr', 'rtl'},
            'display': {'inline', 'block', 'flex', 'list-item', 'inline-block', 'inline-flex', 'run-in',
                        'table', 'inline-table', 'table-row-group',
                        'table-header-group', 'table-footer-group', 'table-row', 'table-column-group', 'table-column',
                        'table-cell', 'table-caption',
                        'xxsmall', 'xsmall', 'small', 'medium', 'large', 'xlarge', 'xxlarge',
                        'giant', 'xgiant', 'xxgiant', },
            'elevation': {'below', 'level', 'above', 'higher', 'lower', },
            'font-family': {'serif', 'georgia', 'palatino', 'times', 'cambria', 'didot', 'garamond', 'perpetua',
                            'rockwell', 'baskerville',
                            'sans-serif', 'arial', 'helvetica', 'gadget', 'cursive', 'impact', 'charcoal', 'tahoma',
                            'geneva', 'verdana', 'calibri', 'candara', 'futura', 'optima',
                            'monospace', 'courier', 'monaco', 'consolas',
                            'fantasy', 'copperplate', 'papyrus', },
            'font-style': {'italic', 'oblique', },
            'font-variant': {'small-caps', },
            'font-weight': {'bold', 'bolder', 'lighter', },
            'list-style-position': {'inside', 'outside', },
            'list-style-type': {'disc', 'circle', 'square', 'decimal', 'decimal-leading-zero', 'lower-roman',
                                'upper-roman', 'lower-greek', 'lower-latin', 'upper-latin', 'armenian',
                                'georgian', 'lower-alpha', 'upper-alpha', },
            'overflow': {'visible', 'hidden', 'scroll', },
            'pitch': {'x-low', 'low', 'high', 'x-high'},
            'play-during': {'mix', 'repeat', },
            'position': {'static', 'relative', 'absolute', },
            'speak-header': {'once', 'always'},
            'speak-numeral': {'digits', 'continuous', },
            'speak-punctuation': {'code', },
            'speak': {'spell-out', },
            'speech-rate': {'x-slow', 'slow', 'fast', 'x-fast', 'faster', 'slower', },
            'text-decoration': {'underline', 'overline', 'line-through', 'blink', },
            'text-transform': {'capitalize', 'uppercase', 'lowercase', },
            'unicode-bidi': {'embed', 'bidi-override', },
            'vertical-align': {'baseline', 'sub', 'super', 'middle', 'text-top', 'text-bottom', },
            'visibility': {'visible', 'hidden', 'collapse', },
            'volume': {'silent', 'x-soft', 'soft', 'loud', 'x-loud', },
        }
        # Official CSS property names; the key set for every derived dict.
        self.property_names = {
            'all', 'align-items',
            'background', 'background-attachment', 'background-color', 'background-image',
            'background-position', 'background-repeat', 'border', 'border-bottom', 'border-bottom-color',
            'border-bottom-style', 'border-bottom-width', 'border-collapse', 'border-color', 'border-left',
            'border-left-color', 'border-left-style', 'border-left-width', 'border-right', 'border-right-color',
            'border-right-style', 'border-right-width', 'border-spacing', 'border-style', 'border-top',
            'border-radius', 'border-top-left-radius', 'border-top-right-radius', 'border-bottom-right-radius',
            'border-bottom-left-radius',
            'border-top-color', 'border-top-style', 'border-top-width', 'border-width', 'bottom',
            'caption-side', 'clear', 'clip', 'color', 'content', 'counter-increment', 'counter-reset', 'cue',
            'cue-after', 'cue-before', 'cursor', 'direction', 'display', 'elevation', 'empty-cells', 'float',
            'font', 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight', 'height', 'left',
            'letter-spacing', 'line-height', 'list-style', 'list-style-image', 'list-style-position',
            'list-style-type', 'margin', 'margin-bottom', 'margin-left', 'margin-right', 'margin-top', 'max-height',
            'max-width', 'min-height', 'min-width', 'opacity', 'orphans', 'outline', 'outline-color', 'outline-style',
            'outline-width', 'overflow', 'padding', 'padding-bottom', 'padding-left', 'padding-right',
            'padding-top', 'page-break-after', 'page-break-before', 'page-break-inside', 'pause', 'pause-after',
            'pause-before', 'pitch', 'pitch-range', 'play-during', 'position', 'quotes', 'richness', 'right', 'speak',
            'speak-header', 'speak-numeral', 'speak-punctuation', 'speech-rate', 'stress', 'table-layout',
            'text-align', 'text-decoration', 'text-indent', 'text-shadow', 'text-transform', 'top', 'unicode-bidi',
            'vertical-align',
            'visibility', 'voice-family', 'volume', 'white-space', 'widows', 'width', 'word-spacing', 'z-index'
        }
        self.clashing_alias_dict = {}
        self.property_alias_dict = {}
        # Set clashing_alias_dict and property_alias_dict.
        self.autogen_property_alias_dict() # Initialize property_alias_dict
        self.merge_dictionaries() # Merge
        self.set_clashing_aliases() # Set clashing_aliases_dict
        self.remove_clashing_aliases() # Clean property_alias_dict by removing clashing aliases.
        # Alphabetical Property Dictionaries
        self.alphabetical_clashing_dict = OrderedDict(sorted(self.clashing_alias_dict.items(), key=lambda t: t[0]))
        self.alphabetical_property_dict = OrderedDict(sorted(self.property_alias_dict.items(), key=lambda t: t[0]))
        # Generate Markdown Files
        self.clashing_alias_markdown = self.dict_to_markdown(
            h1_text=str('Clashing Aliases'),
            key_title=str('Property Name'),
            value_title=str('Invalid Clashing Aliases'),
            _dict=self.alphabetical_clashing_dict
        )
        self.property_alias_markdown = self.dict_to_markdown(
            h1_text=str('Valid Property Aliases'),
            key_title=str('Property Name'),
            value_title=str('Valid Aliases'),
            _dict=self.alphabetical_property_dict
        )
        # Generate HTML Files
        self.clashing_alias_html = self.dict_to_html(
            h1_text=str('Invalid Clashing Aliases'),
            key_title=str('Property Name'),
            value_title=str('Clashing Aliases'),
            _dict=self.alphabetical_clashing_dict
        )
        self.property_alias_html = self.dict_to_html(
            h1_text=str('Valid Property Aliases'),
            key_title=str('Property Name'),
            value_title=str('Valid Aliases'),
            _dict=self.alphabetical_property_dict
        )
        # Generate reStructuredText
        clashing_html = self.clashing_alias_html.replace(' ', ' ') # Remove 'tab'
        property_html = self.property_alias_html.replace(' ', ' ') # Remove 'tab'
        # convert() is the pandoc-backed HTML -> reST converter imported at module level.
        self.clashing_alias_rst = convert(source=clashing_html, to=str('rst'), format=str('html'))
        self.property_alias_rst = convert(source=property_html, to=str('rst'), format=str('html'))
        # Debug
        logging.debug('\nproperty_alias_dict:\n' + str(self.property_alias_dict))
        logging.debug('\nclashing_alias_markdown:\n' + str(self.clashing_alias_markdown))
        logging.debug('\nclashing_alias_html:\n' + str(self.clashing_alias_html))
        logging.debug('\nproperty_alias_markdown:\n' + str(self.property_alias_markdown))
        # Longest property names first so the most verbose alias match wins.
        self.ordered_property_dict = OrderedDict(
            sorted(self.property_alias_dict.items(), key=lambda t: len(t[0]), reverse=True)
        )
@staticmethod
def get_property_aliases(property_name=''):
"""
Auto-generates and returns a set of aliases based on abbreviation patterns.
**Rules:**
- Property name does not contain a dash:
{First three letters of property_name + ``'-'``}
- Property name contains one dashes:
| 1st word + 1st letter after dash + ``'-'``
| 1st letter of 1st word + 1st letter of 2nd word + ``'-'``, (single dash case)
| 1st letter of 1st word + 1st letter of 2nd word + 1st letter of 3rd word + ``'-'``, (double dash case)
- Append dash '-' at the end of each abbreviation.
- Do not abbreviate words less than or equal to 5 characters in length.
**Examples:**
::
property_name --> {...}
color --> set()
padding --> {'pad-'}
margin-top --> {'margin-t-', 'mt-'}
border-bottom-width --> {'border-b-width', 'bbw-'}
:type property_name: str
:param property_name: A CSS property name.
:return: Return a set() of abbreviation patterns according to the rules defined above.
"""
if len(property_name) <= 5: # Do not abbreviate short words (<= 5 letters).
return set()
aliases = set() # First three letters
if '-' in property_name: # First dash
dash_index1 = property_name.index('-')
suffix = property_name[dash_index1 + 1:]
if '-' in suffix: # Second dash (rare)
dash_index2 = suffix.index('-')
aliases.add( # Three letter abbreviation
property_name[0] + property_name[dash_index1 + 1] + suffix[dash_index2 + 1] + '-'
)
aliases.add(property_name[:dash_index1 + 2] + '-' + suffix[dash_index2 + 1:] + '-')
else:
aliases.add(property_name[0] + property_name[dash_index1 + 1] + '-')
aliases.add(property_name[:dash_index1 + 2] + '-')
else:
aliases.add(property_name[:3] + '-')
return aliases
# TODO: Ask cssutils guys about combining class names for matching properties.
def autogen_property_alias_dict(self):
""" Uses ``self.property_names`` to auto--generate a property aliases. Assigns the result to
``self.property_alias_dict``.
**Note:** The dictionary may contain clashing aliases. More processing is required to remove them.
"""
self.property_alias_dict = {}
for property_name in self.property_names:
abbreviations = self.get_property_aliases(property_name=property_name)
value = abbreviations
self.property_alias_dict[property_name] = value
def merge_dictionaries(self):
""" Merges the ``property_alias_dict`` with ``property_value_as_alias_dict``.
**Note:** All keys in both dictionaries much match.
:raises KeyError: Raises KeyError if property name does not exist as a key in ``property_alias_dict``.
"""
# Merge property values
if self.property_value_as_alias_dict is not None:
for property_name, alias_set in self.property_value_as_alias_dict.items():
try:
self.property_alias_dict[property_name] = self.property_alias_dict[property_name].union(alias_set)
except KeyError:
print('KeyError: property_name ->', property_name, '<- not found in property_alias_dict.')
raise KeyError
# Merge user defined custom property aliases.
if self.custom_property_alias_dict is not None:
for property_name, alias_set in self.custom_property_alias_dict.items():
try:
self.property_alias_dict[property_name] = self.property_alias_dict[property_name].union(alias_set)
except KeyError:
print('KeyError: property_name ->', property_name, '<- not found in custom_property_alias_dict.')
raise KeyError
def set_clashing_aliases(self):
""" Searches ``property_alias_dict`` for duplicate / clashing aliases and adds them to ``clashing_alias_dict``.
"""
clone_dict = self.property_alias_dict
self.clashing_alias_dict = {}
for key1, alias_set1 in self.property_alias_dict.items():
for key2, alias_set2 in clone_dict.items():
intersection = alias_set1.intersection(alias_set2)
if len(intersection) > 0 and key1 != key2: # prevent direct comparison of the same key.
try:
self.clashing_alias_dict[key1] = self.clashing_alias_dict[key1].union(intersection)
except KeyError:
self.clashing_alias_dict[key1] = intersection
logging.debug(msg='\ndatalibrary.clashing_aliases_dict:\n' + str(self.clashing_alias_dict))
def remove_clashing_aliases(self):
""" Removes clashing aliases stored in ``clashing_alias_dict`` from ``property_alias_dict`` and
deep copies the clean dictionary to ``property_alias_dict``.
"""
clean_dict = deepcopy(self.property_alias_dict)
for property_name in self.property_alias_dict:
try:
clashing_aliases = self.clashing_alias_dict[property_name]
for clashing_alias in clashing_aliases:
if clashing_alias in self.property_alias_dict[property_name]: # If clashing_alias found.
clean_dict[property_name].remove(clashing_alias) # Remove it.
except KeyError:
pass
logging.debug(msg='Clashing aliases removed: datalibrary clean_dict\n' + str(clean_dict))
self.property_alias_dict = deepcopy(clean_dict)
@staticmethod
def dict_to_markdown(h1_text='', key_title='', value_title='', _dict=None):
""" Convert a dictionary into a markdown formatted 2-column table.
*Markdown Table Format:*
| ``# h1_text``
| ``key_title | value_title``
| ``--- | ---``
| ``key[0] | value``
| ``key[1] | value``
:type h1_text: str
:type key_title: str
:type value_title: str
:type _dict: dict
:param h1_text: Title for the table.
:param key_title: Key name.
:param value_title: Value stored at Key.
:param _dict: A generic dictionary.
:return: (str) -- Returns a markdown formatted 2-column table based on the key/value pairs in ``_dict``.
"""
# H1 plus table header.
_markdown = '# ' + h1_text + '\n\n' \
'| ' + key_title + ' | ' + value_title + ' |\n| --- | --- |\n'
for key, value in _dict.items():
value_str = ''
if isinstance(value, set):
for v in value:
value_str += "`" + v + "` "
_markdown += '| ' + key + ' | ' + str(value_str) + ' |\n' # Key | Value row(s).
return _markdown
    @staticmethod
    def dict_to_html(h1_text= '', key_title='', value_title='', _dict=None):
        """ Convert a dictionary into an HTML formatted 2-column table.
        *HTML Table Format:*
        ::
            <html>
                <head>
                    <meta charset="UTF-8">
                    <meta name="viewport" content="width=device-width, initial-scale=1.0">
                    <link rel="icon" type="image/x-icon" href="/images/favicon.ico" />
                    <title>value_title - blowdrycss</title>
                    <link rel="stylesheet" type="text/css" href="/css/blowdry.min.css"/>
                </head>
                <body>
                    <table>
                        <thead>
                            <tr>
                                <th>key_title</th>
                                <th>value_title</th>
                            </tr>
                        </thead>
                        <tbody>
                            <tr>
                                <td>key[0]</td>
                                <td>value</td>
                            </tr>
                        </tbody>
                    </table>
                </body>
            </html>
        :type h1_text: str
        :type key_title: str
        :type value_title: str
        :type _dict: dict
        :param h1_text: Title for the table.
        :param key_title: Key name.
        :param value_title: Value stored at Key.
        :param _dict: A generic dictionary.
        :return: (str) -- Returns a HTML formatted 2-column table based on the key/value pairs in ``_dict``.
        """
        # Classes applied to every cell; alternating_bg is appended on even
        # rows to create a striped-table effect.
        common_classes = ' padding-5 border-1px-solid-gray display-inline '
        alternating_bg = ' bgc-hf8f8f8 '
        # NOTE: a few lines below lack a trailing '+' -- adjacent string
        # literals are joined by implicit concatenation; the output is the
        # same either way, so do not "fix" the missing operators.
        _html = str(
            '<html>\n' +
            '\t<head>\n' +
            '\t\t<meta charset="UTF-8">\n' +
            '\t\t<meta name="viewport" content="width=device-width, initial-scale=1.0">\n' +
            '\t\t<link rel="icon" type="image/x-icon" href="/images/favicon.ico" />\n' +
            '\t\t<title>' + value_title + ' - blowdrycss</title>\n' +
            '\t\t<link rel="stylesheet" type="text/css" href="/css/blowdry.min.css" />\n' +
            '\t</head>\n\n' +
            '\t<body>\n' +
            '\t\t<h1>' + h1_text + '</h1>\n' +
            '\t\t<table>\n' +
            '\t\t\t<tbody>\n'
            '\t\t\t\t<tr>\n' +
            '\t\t\t\t\t<td class="' + common_classes + 'talign-center bold">' + key_title + '</td>\n' +
            '\t\t\t\t\t<td class="' + common_classes + 'talign-center bold">' + value_title + '</td>\n' +
            '\t\t\t\t</tr>\n'
        )
        # 1-based row counter -- drives the alternating row background.
        count = 1
        for key, value in _dict.items():
            classes = (common_classes + alternating_bg) if count % 2 == 0 else common_classes # Alternate Style
            value_str = ''
            _html += '\t\t\t\t<tr>\n' # Open Key | Value row.
            if isinstance(value, set):
                vcount = 1
                for v in value:
                    value_str += "<code>" + v + "</code> "
                    # Break the value cell onto a new line after every 5 aliases.
                    value_str += '<br>' if vcount % 5 == 0 else ''
                    vcount += 1
            _html += str(
                '\t\t\t\t\t<td class="' + classes + '">' + key + '</td>\n' +
                '\t\t\t\t\t<td class="' + classes + '">' + str(value_str) + '</td>\n'
                '\t\t\t\t</tr>\n' # Close Key | Value row.
            )
            count += 1
        _html += str(
            '\t\t\t</tbody>\n' +
            '\t\t</table>\n' +
            '\t</body>\n' +
            '</html>\n'
        )
        return _html
# DataLibrary() is not intended for use outside of this file as each time it's called it rebuilds some dictionaries.
__data_library = DataLibrary()
############################################
# Only variables intended for outside use. #
############################################
# Dictionaries
property_regex_dict = __data_library.property_regex_dict
property_alias_dict = __data_library.property_alias_dict
ordered_property_dict = __data_library.ordered_property_dict
# Pseudo Sets - reference: http://www.w3schools.com/css/css_pseudo_classes.asp
# The following pseudo classes are not implemented due to complexity and verbosity.
# 'lang([a-zA-Z\-])',
# 'not(_[a-zA-Z0-9]\-)', 'nth-child([0-9])', 'nth-last-child([0-9])',
# 'nth-last-of-type([0-9])', 'nth-of-type([0-9])',
pseudo_classes = {
    'active', 'checked', 'disabled', 'empty', 'enabled', 'first-child', 'first-of-type', 'focus', 'hover',
    'in-range', 'invalid', 'last-child', 'last-of-type', 'link', 'only-child',
    'optional', 'out-of-range', 'read-only', 'read-write', 'required', 'root', 'target', 'valid', 'visited',
}
pseudo_elements = {'after', 'before', 'first-letter', 'first-line', 'selection', }
# Markdown
clashing_alias_markdown = __data_library.clashing_alias_markdown
property_alias_markdown = __data_library.property_alias_markdown
# HTML
clashing_alias_html = __data_library.clashing_alias_html
property_alias_html = __data_library.property_alias_html
# reStructuredText
clashing_alias_rst = __data_library.clashing_alias_rst
property_alias_rst = __data_library.property_alias_rst
|
|
# Copyright (c) 2013 eBay Inc.
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The QoS specs extension"""
import six
import webob
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import qos_specs as view_qos_specs
from cinder.api import xmlutil
from cinder import exception
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import strutils
from cinder import rpc
from cinder import utils
from cinder.volume import qos_specs
# Module-level logger.
LOG = logging.getLogger(__name__)
# Policy-check helper scoped to the qos_specs_manage volume extension; every
# controller action calls authorize(context) before doing any work.
authorize = extensions.extension_authorizer('volume', 'qos_specs_manage')
def make_qos_specs(elem):
    """Declare the qos_spec XML attributes and nest the specs template."""
    for attribute in ('id', 'name', 'consumer'):
        elem.set(attribute)
    elem.append(SpecsTemplate())
def make_associations(elem):
    """Declare the XML attributes of an association element."""
    for attribute in ('association_type', 'name', 'id'):
        elem.set(attribute)
class SpecsTemplate(xmlutil.TemplateBuilder):
    """XML template serializing the free-form specs dict."""

    def construct(self):
        # Flatten the 'specs' dict into <specs><key>value</key>...</specs>.
        flat_specs = xmlutil.make_flat_dict('specs')
        return xmlutil.MasterTemplate(flat_specs, 1)
class QoSSpecsTemplate(xmlutil.TemplateBuilder):
    """XML template for a collection of qos_spec entities."""

    def construct(self):
        root = xmlutil.TemplateElement('qos_specs')
        spec_elem = xmlutil.SubTemplateElement(root, 'qos_spec',
                                               selector='qos_specs')
        make_qos_specs(spec_elem)
        return xmlutil.MasterTemplate(root, 1)
class QoSSpecsKeyDeserializer(wsgi.XMLDeserializer):
    """Deserializer for the XML <keys> payload used by delete_keys."""

    def _extract_keys(self, key_node):
        # The key names are the tag names of the children of <keys>.
        return [child.tagName for child in key_node.childNodes]

    def default(self, string):
        dom = utils.safe_minidom_parse_string(string)
        key_node = self.find_first_child_named(dom, 'keys')
        if key_node:
            return {'body': {'keys': self._extract_keys(key_node)}}
        LOG.info(_("Unable to parse XML input."))
        msg = _("Unable to parse XML request. "
                "Please provide XML in correct format.")
        raise webob.exc.HTTPBadRequest(explanation=msg)
class AssociationsTemplate(xmlutil.TemplateBuilder):
    """XML template for the list of qos specs associations."""

    def construct(self):
        root = xmlutil.TemplateElement('qos_associations')
        association_elem = xmlutil.SubTemplateElement(root, 'associations',
                                                      selector='qos_associations')
        make_associations(association_elem)
        return xmlutil.MasterTemplate(root, 1)
def _check_specs(context, specs_id):
    """Verify that *specs_id* refers to an existing qos specs entity.

    :raises webob.exc.HTTPNotFound: when the lookup raises NotFound.
    """
    try:
        qos_specs.get_qos_specs(context, specs_id)
    except exception.NotFound as not_found:
        raise webob.exc.HTTPNotFound(explanation=six.text_type(not_found))
class QoSSpecsController(wsgi.Controller):
    """The volume type extra specs API controller for the OpenStack API.

    Every action authorizes the request context first, then delegates to
    ``cinder.volume.qos_specs``; failures are reported both as an error
    notification (``rpc.get_notifier('QoSSpecs')``) and as a webob HTTP
    exception.
    """
    _view_builder_class = view_qos_specs.ViewBuilder

    @staticmethod
    def _notify_qos_specs_error(context, method, payload):
        """Emit an error notification for a failed qos specs operation.

        :param context: request context forwarded to the notifier.
        :param method: event type string, e.g. 'qos_specs.create'.
        :param payload: dict describing the failure.
        """
        rpc.get_notifier('QoSSpecs').error(context,
                                           method,
                                           payload)

    @wsgi.serializers(xml=QoSSpecsTemplate)
    def index(self, req):
        """Returns the list of qos_specs."""
        context = req.environ['cinder.context']
        authorize(context)
        specs = qos_specs.get_all_specs(context)
        return self._view_builder.summary_list(req, specs)

    @wsgi.serializers(xml=QoSSpecsTemplate)
    def create(self, req, body=None):
        """Create a qos specs entity and return its detail view.

        :raises webob.exc.HTTPBadRequest: malformed body, empty name or
            invalid input.
        :raises webob.exc.HTTPConflict: qos specs with that name exists.
        :raises webob.exc.HTTPInternalServerError: backend create failed.
        """
        context = req.environ['cinder.context']
        authorize(context)
        if not self.is_valid_body(body, 'qos_specs'):
            raise webob.exc.HTTPBadRequest()
        specs = body['qos_specs']
        name = specs.get('name', None)
        if name is None or name == "":
            msg = _("Please specify a name for QoS specs.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        try:
            qos_specs.create(context, name, specs)
            spec = qos_specs.get_qos_specs_by_name(context, name)
            notifier_info = dict(name=name, specs=specs)
            # Bug fix: use the lowercase 'qos_specs.create' event type for
            # consistency with every other notification in this controller
            # (previously emitted as 'QoSSpecs.create').
            rpc.get_notifier('QoSSpecs').info(context,
                                              'qos_specs.create',
                                              notifier_info)
        except exception.InvalidInput as err:
            notifier_err = dict(name=name, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.create',
                                         notifier_err)
            raise webob.exc.HTTPBadRequest(explanation=six.text_type(err))
        except exception.QoSSpecsExists as err:
            notifier_err = dict(name=name, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.create',
                                         notifier_err)
            raise webob.exc.HTTPConflict(explanation=six.text_type(err))
        except exception.QoSSpecsCreateFailed as err:
            notifier_err = dict(name=name, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.create',
                                         notifier_err)
            raise webob.exc.HTTPInternalServerError(
                explanation=six.text_type(err))
        return self._view_builder.detail(req, spec)

    @wsgi.serializers(xml=QoSSpecsTemplate)
    def update(self, req, id, body=None):
        """Update key/value entries of an existing qos specs.

        Returns the request body unchanged on success, matching the
        historical API contract.
        """
        context = req.environ['cinder.context']
        authorize(context)
        if not self.is_valid_body(body, 'qos_specs'):
            raise webob.exc.HTTPBadRequest()
        specs = body['qos_specs']
        try:
            qos_specs.update(context, id, specs)
            notifier_info = dict(id=id, specs=specs)
            rpc.get_notifier('QoSSpecs').info(context,
                                              'qos_specs.update',
                                              notifier_info)
        except exception.QoSSpecsNotFound as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.update',
                                         notifier_err)
            raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
        except exception.InvalidQoSSpecs as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.update',
                                         notifier_err)
            raise webob.exc.HTTPBadRequest(explanation=six.text_type(err))
        except exception.QoSSpecsUpdateFailed as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.update',
                                         notifier_err)
            raise webob.exc.HTTPInternalServerError(
                explanation=six.text_type(err))
        return body

    @wsgi.serializers(xml=QoSSpecsTemplate)
    def show(self, req, id):
        """Return a single qos spec item."""
        context = req.environ['cinder.context']
        authorize(context)
        try:
            spec = qos_specs.get_qos_specs(context, id)
        except exception.QoSSpecsNotFound as err:
            raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
        return self._view_builder.detail(req, spec)

    def delete(self, req, id):
        """Deletes an existing qos specs.

        The optional 'force' query parameter disassociates the specs from
        any volume types before deleting.
        """
        context = req.environ['cinder.context']
        authorize(context)
        force = req.params.get('force', None)
        # Convert string to bool type in strict manner.
        force = strutils.bool_from_string(force)
        LOG.debug("Delete qos_spec: %(id)s, force: %(force)s" %
                  {'id': id, 'force': force})
        try:
            qos_specs.delete(context, id, force)
            notifier_info = dict(id=id)
            rpc.get_notifier('QoSSpecs').info(context,
                                              'qos_specs.delete',
                                              notifier_info)
        except exception.QoSSpecsNotFound as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.delete',
                                         notifier_err)
            raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
        except exception.QoSSpecsInUse as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.delete',
                                         notifier_err)
            if force:
                # Force was requested but the disassociation still failed.
                msg = _('Failed to disassociate qos specs.')
                raise webob.exc.HTTPInternalServerError(explanation=msg)
            msg = _('Qos specs still in use.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return webob.Response(status_int=202)

    @wsgi.deserializers(xml=QoSSpecsKeyDeserializer)
    def delete_keys(self, req, id, body):
        """Deletes specified keys in qos specs."""
        context = req.environ['cinder.context']
        authorize(context)
        if not (body and 'keys' in body
                and isinstance(body.get('keys'), list)):
            raise webob.exc.HTTPBadRequest()
        keys = body['keys']
        LOG.debug("Delete_key spec: %(id)s, keys: %(keys)s" %
                  {'id': id, 'keys': keys})
        try:
            qos_specs.delete_keys(context, id, keys)
            notifier_info = dict(id=id)
            # Bug fix: pass the 'QoSSpecs' publisher like every other
            # notification in this controller (was rpc.get_notifier()).
            rpc.get_notifier('QoSSpecs').info(context,
                                              'qos_specs.delete_keys',
                                              notifier_info)
        except exception.QoSSpecsNotFound as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.delete_keys',
                                         notifier_err)
            raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
        except exception.QoSSpecsKeyNotFound as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.delete_keys',
                                         notifier_err)
            raise webob.exc.HTTPBadRequest(explanation=six.text_type(err))
        return webob.Response(status_int=202)

    @wsgi.serializers(xml=AssociationsTemplate)
    def associations(self, req, id):
        """List all associations of given qos specs."""
        context = req.environ['cinder.context']
        authorize(context)
        LOG.debug("Get associations for qos_spec id: %s" % id)
        try:
            associates = qos_specs.get_associations(context, id)
            notifier_info = dict(id=id)
            rpc.get_notifier('QoSSpecs').info(context,
                                              'qos_specs.associations',
                                              notifier_info)
        except exception.QoSSpecsNotFound as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.associations',
                                         notifier_err)
            raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
        except exception.CinderException as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.associations',
                                         notifier_err)
            raise webob.exc.HTTPInternalServerError(
                explanation=six.text_type(err))
        return self._view_builder.associations(req, associates)

    def associate(self, req, id):
        """Associate a qos specs with a volume type."""
        context = req.environ['cinder.context']
        authorize(context)
        type_id = req.params.get('vol_type_id', None)
        if not type_id:
            msg = _('Volume Type id must not be None.')
            notifier_err = dict(id=id, error_message=msg)
            # Bug fix: notify under 'qos_specs.associate' (was the
            # copy-pasted 'qos_specs.delete' event type).
            self._notify_qos_specs_error(context,
                                         'qos_specs.associate',
                                         notifier_err)
            raise webob.exc.HTTPBadRequest(explanation=msg)
        LOG.debug("Associate qos_spec: %(id)s with type: %(type_id)s" %
                  {'id': id, 'type_id': type_id})
        try:
            qos_specs.associate_qos_with_type(context, id, type_id)
            notifier_info = dict(id=id, type_id=type_id)
            rpc.get_notifier('QoSSpecs').info(context,
                                              'qos_specs.associate',
                                              notifier_info)
        except exception.VolumeTypeNotFound as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.associate',
                                         notifier_err)
            raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
        except exception.QoSSpecsNotFound as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.associate',
                                         notifier_err)
            raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
        except exception.InvalidVolumeType as err:
            notifier_err = dict(id=id, error_message=err)
            # Bug fix: the error notification was sent twice here; one
            # notification per failure is enough.
            self._notify_qos_specs_error(context,
                                         'qos_specs.associate',
                                         notifier_err)
            raise webob.exc.HTTPBadRequest(explanation=six.text_type(err))
        except exception.QoSSpecsAssociateFailed as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.associate',
                                         notifier_err)
            raise webob.exc.HTTPInternalServerError(
                explanation=six.text_type(err))
        return webob.Response(status_int=202)

    def disassociate(self, req, id):
        """Disassociate a qos specs from a volume type."""
        context = req.environ['cinder.context']
        authorize(context)
        type_id = req.params.get('vol_type_id', None)
        if not type_id:
            msg = _('Volume Type id must not be None.')
            notifier_err = dict(id=id, error_message=msg)
            # Bug fix: notify under 'qos_specs.disassociate' (was the
            # copy-pasted 'qos_specs.delete' event type).
            self._notify_qos_specs_error(context,
                                         'qos_specs.disassociate',
                                         notifier_err)
            raise webob.exc.HTTPBadRequest(explanation=msg)
        LOG.debug("Disassociate qos_spec: %(id)s from type: %(type_id)s" %
                  {'id': id, 'type_id': type_id})
        try:
            qos_specs.disassociate_qos_specs(context, id, type_id)
            notifier_info = dict(id=id, type_id=type_id)
            rpc.get_notifier('QoSSpecs').info(context,
                                              'qos_specs.disassociate',
                                              notifier_info)
        except exception.VolumeTypeNotFound as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.disassociate',
                                         notifier_err)
            raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
        except exception.QoSSpecsNotFound as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.disassociate',
                                         notifier_err)
            raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
        except exception.QoSSpecsDisassociateFailed as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.disassociate',
                                         notifier_err)
            raise webob.exc.HTTPInternalServerError(
                explanation=six.text_type(err))
        return webob.Response(status_int=202)

    def disassociate_all(self, req, id):
        """Disassociate a qos specs from all volume types."""
        context = req.environ['cinder.context']
        authorize(context)
        LOG.debug("Disassociate qos_spec: %s from all." % id)
        try:
            qos_specs.disassociate_all(context, id)
            notifier_info = dict(id=id)
            rpc.get_notifier('QoSSpecs').info(context,
                                              'qos_specs.disassociate_all',
                                              notifier_info)
        except exception.QoSSpecsNotFound as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.disassociate_all',
                                         notifier_err)
            raise webob.exc.HTTPNotFound(explanation=six.text_type(err))
        except exception.QoSSpecsDisassociateFailed as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.disassociate_all',
                                         notifier_err)
            raise webob.exc.HTTPInternalServerError(
                explanation=six.text_type(err))
        return webob.Response(status_int=202)
class Qos_specs_manage(extensions.ExtensionDescriptor):
    """QoS specs support."""
    name = "Qos_specs_manage"
    alias = "qos-specs"
    namespace = "http://docs.openstack.org/volume/ext/qos-specs/api/v1"
    updated = "2013-08-02T00:00:00+00:00"

    def get_resources(self):
        """Expose the qos-specs resource and its member actions."""
        member_actions = {"associations": "GET",
                          "associate": "GET",
                          "disassociate": "GET",
                          "disassociate_all": "GET",
                          "delete_keys": "PUT"}
        resource = extensions.ResourceExtension(Qos_specs_manage.alias,
                                                QoSSpecsController(),
                                                member_actions=member_actions)
        return [resource]
|
|
#!/usr/bin/env python
# Copyright (C) 2015 Wayne Warren
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Manage JJB Configuration sources, defaults, and access.
from collections import defaultdict
import io
import logging
import os
from six.moves import configparser, StringIO
from six import PY2
from jenkins_jobs import builder
from jenkins_jobs.errors import JJBConfigException
from jenkins_jobs.errors import JenkinsJobsException
# Public API of this module.
__all__ = [
    "JJBConfig"
]

logger = logging.getLogger(__name__)

# Baseline configuration loaded into every JJBConfig before any user config
# file is read; values from the user's ini file override these defaults.
DEFAULT_CONF = """
[job_builder]
keep_descriptions=False
ignore_cache=False
recursive=False
exclude=.*
allow_duplicates=False
allow_empty_variables=False
# other named sections could be used in addition to the implicit [jenkins]
# if you have multiple jenkins servers.
[jenkins]
url=http://localhost:8080/
query_plugins_info=True
[hipchat]
authtoken=dummy
send-as=Jenkins
"""

# Error text used whenever a required config file is missing.
CONFIG_REQUIRED_MESSAGE = ("A valid configuration file is required. "
                           "No configuration file passed.")
class JJBConfig(object):

    def __init__(self, config_filename=None,
                 config_file_required=False,
                 config_section='jenkins'):
        """
        The JJBConfig class is intended to encapsulate and resolve priority
        between all sources of configuration for the JJB library. This allows
        the various sources of configuration to provide a consistent accessor
        interface regardless of where they are used.

        It also allows users of JJB-as-an-API to create minimally valid
        configuration and easily make minor modifications to default values
        without strictly adhering to the confusing setup (see the _setup
        method, the behavior of which largely lived in the cmd.execute method
        previously) necessary for the jenkins-jobs command line tool.

        :arg str config_filename: Name of configuration file on which to base
            this config object.
        :arg bool config_file_required: Allows users of the JJBConfig class to
            decide whether or not it's really necessary for a config file to
            be passed in when creating an instance. This has two effects on
            the behavior of JJBConfig initialization:

            * It determines whether or not we try "local" and "global" config
              files.
            * It determines whether or not failure to read some config file
              will raise an exception or simply print a warning message
              indicating that no config file was found.
        :arg str config_section: Name of the ini section holding the Jenkins
            connection settings (default: "jenkins").
        """
        config_parser = self._init_defaults()

        # Candidate config files; when no explicit filename is given they
        # are tried in order: next to this module, the per-user config,
        # then the system-wide config.
        global_conf = '/etc/jenkins_jobs/jenkins_jobs.ini'
        user_conf = os.path.join(os.path.expanduser('~'), '.config',
                                 'jenkins_jobs', 'jenkins_jobs.ini')
        local_conf = os.path.join(os.path.dirname(__file__),
                                  'jenkins_jobs.ini')
        conf = None
        if config_filename is not None:
            # An explicitly requested file always wins.
            conf = config_filename
        else:
            if os.path.isfile(local_conf):
                conf = local_conf
            elif os.path.isfile(user_conf):
                conf = user_conf
            else:
                conf = global_conf

        if config_file_required and conf is None:
            raise JJBConfigException(CONFIG_REQUIRED_MESSAGE)

        config_fp = None
        if conf is not None:
            try:
                config_fp = self._read_config_file(conf)
            except JJBConfigException:
                if config_file_required:
                    raise JJBConfigException(CONFIG_REQUIRED_MESSAGE)
                else:
                    logger.warning("Config file, {0}, not found. Using "
                                   "default config values.".format(conf))

        if config_fp is not None:
            # ConfigParser.readfp was renamed to read_file in Python 3.
            if PY2:
                config_parser.readfp(config_fp)
            else:
                config_parser.read_file(config_fp)

        self.config_parser = config_parser

        self.ignore_cache = False
        self.flush_cache = False
        self.user = None
        self.password = None
        self.section = config_section
        self.plugins_info = None
        self.timeout = builder._DEFAULT_TIMEOUT
        self.allow_empty_variables = None

        # NOTE: defaultdict(None) behaves exactly like a plain dict (there is
        # no default factory); kept as-is for backward compatibility.
        self.jenkins = defaultdict(None)
        self.builder = defaultdict(None)
        self.yamlparser = defaultdict(None)
        self.hipchat = defaultdict(None)

        self._setup()

    def _init_defaults(self):
        """ Initialize default configuration values using DEFAULT_CONF
        """
        config = configparser.ConfigParser()
        # Load default config always
        if PY2:
            config.readfp(StringIO(DEFAULT_CONF))
        else:
            config.read_file(StringIO(DEFAULT_CONF))
        return config

    def _read_config_file(self, config_filename):
        """ Given path to configuration file, read it in as a ConfigParser
        object and return that object.

        :raises JJBConfigException: if the path does not name a regular file.
        """
        if os.path.isfile(config_filename):
            self.__config_file = config_filename  # remember file we read from
            logger.debug("Reading config from {0}".format(config_filename))
            config_fp = io.open(config_filename, 'r', encoding='utf-8')
        else:
            raise JJBConfigException(
                "A valid configuration file is required. "
                "\n{0} is not valid.".format(config_filename))

        return config_fp

    def _setup(self):
        """Copy raw ConfigParser values onto this object's attributes."""
        config = self.config_parser

        logger.debug("Config: {0}".format(config))

        # check the ignore_cache setting
        if config.has_option(self.section, 'ignore_cache'):
            # FIX: use the module-level ``logger`` (this previously called
            # ``logging.warning`` on the root logger, inconsistent with
            # every other message in this module).
            logger.warning("ignore_cache option should be moved to the "
                           "[job_builder] section in the config file, the "
                           "one specified in the [jenkins] section will be "
                           "ignored in the future")
            self.ignore_cache = config.getboolean(self.section, 'ignore_cache')
        elif config.has_option('job_builder', 'ignore_cache'):
            self.ignore_cache = config.getboolean('job_builder',
                                                  'ignore_cache')

        # check the flush_cache setting
        if config.has_option('job_builder', 'flush_cache'):
            self.flush_cache = config.getboolean('job_builder', 'flush_cache')

        # Jenkins supports access as an anonymous user, which can be used to
        # ensure read-only behaviour when querying the version of plugins
        # installed for test mode to generate XML output matching what will be
        # uploaded. To enable must pass 'None' as the value for user and
        # password to python-jenkins
        #
        # catching 'TypeError' is a workaround for python 2.6 interpolation
        # error
        # https://bugs.launchpad.net/openstack-ci/+bug/1259631
        try:
            self.user = config.get(self.section, 'user')
        except (TypeError, configparser.NoOptionError):
            pass

        try:
            self.password = config.get(self.section, 'password')
        except (TypeError, configparser.NoOptionError):
            pass

        # None -- no timeout, blocking mode; same as setblocking(True)
        # 0.0 -- non-blocking mode; same as setblocking(False) <--- default
        # > 0 -- timeout mode; operations time out after timeout seconds
        # < 0 -- illegal; raises an exception
        # to retain the default must use
        # "timeout=jenkins_jobs.builder._DEFAULT_TIMEOUT" or not set timeout at
        # all.
        try:
            self.timeout = config.getfloat(self.section, 'timeout')
        except ValueError:
            raise JenkinsJobsException("Jenkins timeout config is invalid")
        except (TypeError, configparser.NoOptionError):
            pass

        if (config.has_option(self.section, 'query_plugins_info') and
                not config.getboolean(self.section, "query_plugins_info")):
            logger.debug("Skipping plugin info retrieval")
            self.plugins_info = []

        self.recursive = config.getboolean('job_builder', 'recursive')
        self.excludes = config.get('job_builder', 'exclude').split(os.pathsep)

        # The way we want to do things moving forward:
        self.jenkins['url'] = config.get(self.section, 'url')
        self.jenkins['user'] = self.user
        self.jenkins['password'] = self.password
        self.jenkins['timeout'] = self.timeout

        self.builder['ignore_cache'] = self.ignore_cache
        self.builder['flush_cache'] = self.flush_cache
        self.builder['plugins_info'] = self.plugins_info

        # keep descriptions ? (used by yamlparser)
        keep_desc = False
        if (config and config.has_section('job_builder') and
                config.has_option('job_builder', 'keep_descriptions')):
            keep_desc = config.getboolean('job_builder',
                                          'keep_descriptions')
        self.yamlparser['keep_descriptions'] = keep_desc

        # figure out the include path (used by yamlparser)
        path = ["."]
        if (config and config.has_section('job_builder') and
                config.has_option('job_builder', 'include_path')):
            path = config.get('job_builder',
                              'include_path').split(':')
        self.yamlparser['include_path'] = path

        # allow duplicates?
        allow_duplicates = False
        if config and config.has_option('job_builder', 'allow_duplicates'):
            allow_duplicates = config.getboolean('job_builder',
                                                 'allow_duplicates')
        self.yamlparser['allow_duplicates'] = allow_duplicates

        # allow empty variables?
        self.yamlparser['allow_empty_variables'] = (
            self.allow_empty_variables or
            config and config.has_section('job_builder') and
            config.has_option('job_builder', 'allow_empty_variables') and
            config.getboolean('job_builder', 'allow_empty_variables'))

    def validate(self):
        """Sanity-check the resolved configuration.

        :raises JenkinsJobsException: if only one of user/password is set,
            or if ``plugins_info`` is set but is not a list.
        """
        config = self.config_parser

        # Inform the user as to what is likely to happen, as they may specify
        # a real jenkins instance in test mode to get the plugin info to check
        # the XML generated.
        if self.jenkins['user'] is None and self.jenkins['password'] is None:
            logger.info("Will use anonymous access to Jenkins if needed.")
        elif ((self.jenkins['user'] is not None and
               self.jenkins['password'] is None) or
              (self.jenkins['user'] is None and
               self.jenkins['password'] is not None)):
            raise JenkinsJobsException(
                "Cannot authenticate to Jenkins with only one of User and "
                "Password provided, please check your configuration."
            )

        if (self.builder['plugins_info'] is not None and
                not isinstance(self.builder['plugins_info'], list)):
            raise JenkinsJobsException("plugins_info must contain a list!")

        # Temporary until yamlparser is refactored to query config object
        if self.yamlparser['allow_empty_variables'] is not None:
            config.set('job_builder',
                       'allow_empty_variables',
                       str(self.yamlparser['allow_empty_variables']))

    def get_module_config(self, section, key):
        """ Given a section name and a key value, return the value assigned to
        the key in the JJB .ini file if it exists, otherwise emit a warning
        indicating that the value is not set. Default value returned if no
        value is set in the file will be a blank string.
        """
        result = ''
        try:
            result = self.config_parser.get(
                section, key
            )
        except (configparser.NoSectionError, configparser.NoOptionError,
                JenkinsJobsException) as e:
            logger.warning("You didn't set a " + key +
                           " neither in the yaml job definition nor in" +
                           " the " + section + " section, blank default" +
                           " value will be applied:\n{0}".format(e))
        return result

    def get_plugin_config(self, plugin, key):
        """Return a plugin's config value, preferring the new
        ``[plugin "name"]`` section and falling back on the deprecated
        bare ``[name]`` section.
        """
        value = self.get_module_config('plugin "{}"'.format(plugin), key)

        # Backwards compatibility for users who have not switched to the new
        # plugin configuration format in their config. This code should be
        # removed in future versions of JJB after 2.0.
        if not value:
            value = self.get_module_config(plugin, key)
            logger.warning(
                "Defining plugin configuration using [" + plugin + "] is"
                " deprecated. The recommended way to define plugins now is by"
                " configuring [plugin \"" + plugin + "\"]")

        return value
|
|
# Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import code
import datetime
import os
import socket
import sys
# Public API of this module.
__all__ = [ 'options', 'arguments', 'main' ]

# optparse-style strings; '%prog' is replaced with the program name.
usage="%prog [gem5 options] script.py [script options]"
version="%prog 2.0"
brief_copyright=\
    "gem5 is copyrighted software; use the --copyright option for details."
def parse_options():
    """Build the gem5 option parser and parse the command line (Python 2).

    Also sources the user's optional 'options.py' config hook (via
    ``execfile``) so local option defaults can be installed before parsing.
    Returns an ``(options, arguments)`` pair.
    """
    import config
    from options import OptionParser

    options = OptionParser(usage=usage, version=version,
                           description=brief_copyright)
    option = options.add_option
    group = options.set_group

    # Help options
    option('-B', "--build-info", action="store_true", default=False,
           help="Show build information")
    option('-C', "--copyright", action="store_true", default=False,
           help="Show full copyright information")
    option('-R', "--readme", action="store_true", default=False,
           help="Show the readme")

    # Options for configuring the base simulator
    option('-d', "--outdir", metavar="DIR", default="m5out",
           help="Set the output directory to DIR [Default: %default]")
    option('-r', "--redirect-stdout", action="store_true", default=False,
           help="Redirect stdout (& stderr, without -e) to file")
    option('-e', "--redirect-stderr", action="store_true", default=False,
           help="Redirect stderr to file")
    option("--stdout-file", metavar="FILE", default="simout",
           help="Filename for -r redirection [Default: %default]")
    option("--stderr-file", metavar="FILE", default="simerr",
           help="Filename for -e redirection [Default: %default]")
    option('-i', "--interactive", action="store_true", default=False,
           help="Invoke the interactive interpreter after running the script")
    option("--pdb", action="store_true", default=False,
           help="Invoke the python debugger before running the script")
    option('-p', "--path", metavar="PATH[:PATH]", action='append', split=':',
           help="Prepend PATH to the system path when invoking the script")
    option('-q', "--quiet", action="count", default=0,
           help="Reduce verbosity")
    option('-v', "--verbose", action="count", default=0,
           help="Increase verbosity")

    # Statistics options
    group("Statistics Options")
    option("--stats-file", metavar="FILE", default="stats.txt",
           help="Sets the output file for statistics [Default: %default]")

    # Configuration Options
    group("Configuration Options")
    option("--dump-config", metavar="FILE", default="config.ini",
           help="Dump configuration output file [Default: %default]")
    option("--json-config", metavar="FILE", default="config.json",
           help="Create JSON output of the configuration [Default: %default]")
    option("--dot-config", metavar="FILE", default="config.dot",
           help="Create DOT & pdf outputs of the configuration [Default: %default]")

    # Debugging options
    group("Debugging Options")
    option("--debug-break", metavar="TIME[,TIME]", action='append', split=',',
           help="Cycle to create a breakpoint")
    option("--debug-help", action='store_true',
           help="Print help on trace flags")
    option("--debug-flags", metavar="FLAG[,FLAG]", action='append', split=',',
           help="Sets the flags for tracing (-FLAG disables a flag)")
    option("--remote-gdb-port", type='int', default=7000,
           help="Remote gdb base port (set to 0 to disable listening)")

    # Tracing options
    group("Trace Options")
    option("--trace-start", metavar="TIME", type='int',
           help="Start tracing at TIME (must be in ticks)")
    option("--trace-file", metavar="FILE", default="cout",
           help="Sets the output file for tracing [Default: %default]")
    option("--trace-ignore", metavar="EXPR", action='append', split=':',
           help="Ignore EXPR sim objects")

    # Help options
    group("Help Options")
    option("--list-sim-objects", action='store_true', default=False,
           help="List all built-in SimObjects, their params and default values")

    # load the options.py config file to allow people to set their own
    # default options
    options_file = config.get('options.py')
    if options_file:
        scope = { 'options' : options }
        # NOTE: execfile is Python 2 only.
        execfile(options_file, scope)

    arguments = options.parse_args()

    return options,arguments
def interact(scope):
    """Drop into an interactive console whose namespace is ``scope``.

    Prefers an embedded IPython shell when IPython is importable and falls
    back on the stdlib ``code`` console otherwise.
    """
    banner = "gem5 Interactive Console"

    # Clear argv so the embedded shell doesn't try to parse gem5's options.
    sys.argv = []

    try:
        from IPython.Shell import IPShellEmbed
    except ImportError:
        code.InteractiveConsole(scope).interact(banner)
    else:
        ipshell = IPShellEmbed(banner=banner, user_ns=scope)
        ipshell()
def main(*args):
    """gem5 driver entry point (Python 2 only).

    Accepts either no arguments (options are parsed from sys.argv via
    parse_options) or an already-parsed ``(options, arguments)`` pair.
    Handles informational flags, stdout/stderr redirection, tracing and
    debug setup, then compiles and executes the target script.
    """
    import m5
    import core
    import debug
    import defines
    import event
    import info
    import stats
    import trace

    from util import fatal

    if len(args) == 0:
        options, arguments = parse_options()
    elif len(args) == 2:
        options, arguments = args
    else:
        raise TypeError, "main() takes 0 or 2 arguments (%d given)" % len(args)

    m5.options = options

    def check_tracing():
        # Abort with a fatal error if the binary was built without tracing.
        if defines.TRACING_ON:
            return

        fatal("Tracing is not enabled. Compile with TRACING_ON")

    if not os.path.isdir(options.outdir):
        os.makedirs(options.outdir)

    # These filenames are used only if the redirect_std* options are set
    stdout_file = os.path.join(options.outdir, options.stdout_file)
    stderr_file = os.path.join(options.outdir, options.stderr_file)

    # Print redirection notices here before doing any redirection
    if options.redirect_stdout and not options.redirect_stderr:
        print "Redirecting stdout and stderr to", stdout_file
    else:
        if options.redirect_stdout:
            print "Redirecting stdout to", stdout_file
        if options.redirect_stderr:
            print "Redirecting stderr to", stderr_file

    # Now redirect stdout/stderr as desired
    if options.redirect_stdout:
        redir_fd = os.open(stdout_file, os. O_WRONLY | os.O_CREAT | os.O_TRUNC)
        os.dup2(redir_fd, sys.stdout.fileno())
        if not options.redirect_stderr:
            os.dup2(redir_fd, sys.stderr.fileno())

    if options.redirect_stderr:
        redir_fd = os.open(stderr_file, os. O_WRONLY | os.O_CREAT | os.O_TRUNC)
        os.dup2(redir_fd, sys.stderr.fileno())

    done = False

    # Informational options: print the requested info, then exit below.
    if options.build_info:
        done = True
        print 'Build information:'
        print
        print 'compiled %s' % defines.compileDate;
        print 'build options:'
        keys = defines.buildEnv.keys()
        keys.sort()
        for key in keys:
            val = defines.buildEnv[key]
            print ' %s = %s' % (key, val)
        print

    if options.copyright:
        done = True
        print info.COPYING
        print

    if options.readme:
        done = True
        print 'Readme:'
        print
        print info.README
        print

    if options.debug_help:
        done = True
        check_tracing()
        debug.help()

    if options.list_sim_objects:
        import SimObject
        done = True
        print "SimObjects:"
        objects = SimObject.allClasses.keys()
        objects.sort()
        for name in objects:
            obj = SimObject.allClasses[name]
            print " %s" % obj
            params = obj._params.keys()
            params.sort()
            for pname in params:
                param = obj._params[pname]
                default = getattr(param, 'default', '')
                print " %s" % pname
                if default:
                    print " default: %s" % default
                print " desc: %s" % param.desc
                print
            print

    if done:
        sys.exit(0)

    # setting verbose and quiet at the same time doesn't make sense
    if options.verbose > 0 and options.quiet > 0:
        options.usage(2)

    # NOTE(review): 'verbose' is computed but never used below; the banner
    # is gated on options.verbose directly -- confirm intent.
    verbose = options.verbose - options.quiet
    if options.verbose >= 0:
        print "gem5 Simulator System. http://gem5.org"
        print brief_copyright
        print

        print "gem5 compiled %s" % defines.compileDate;

        print "gem5 started %s" % \
            datetime.datetime.now().strftime("%b %e %Y %X")
        print "gem5 executing on %s" % socket.gethostname()

        print "command line:",
        for argv in sys.argv:
            print argv,
        print

    # check to make sure we can find the listed script
    if not arguments or not os.path.isfile(arguments[0]):
        if arguments and not os.path.isfile(arguments[0]):
            print "Script %s not found" % arguments[0]

        options.usage(2)

    # tell C++ about output directory
    core.setOutputDir(options.outdir)

    # update the system path with elements from the -p option
    sys.path[0:0] = options.path

    # set stats options
    stats.initText(options.stats_file)

    # set debugging options
    debug.setRemoteGDBPort(options.remote_gdb_port)
    for when in options.debug_break:
        debug.schedBreakCycle(int(when))

    if options.debug_flags:
        check_tracing()

        on_flags = []
        off_flags = []
        for flag in options.debug_flags:
            # A leading '-' disables the named flag instead of enabling it.
            off = False
            if flag.startswith('-'):
                flag = flag[1:]
                off = True

            if flag not in debug.flags:
                print >>sys.stderr, "invalid debug flag '%s'" % flag
                sys.exit(1)

            if off:
                debug.flags[flag].disable()
            else:
                debug.flags[flag].enable()

    if options.trace_start:
        check_tracing()
        # Defer enabling tracing until the requested tick via an event.
        e = event.create(trace.enable, event.Event.Trace_Enable_Pri)
        event.mainq.schedule(e, options.trace_start)
    else:
        trace.enable()

    trace.output(options.trace_file)

    for ignore in options.trace_ignore:
        check_tracing()
        trace.ignore(ignore)

    # Set up the script's execution environment: it sees itself as the
    # program being run (its own argv/path), under module name __m5_main__.
    sys.argv = arguments
    sys.path = [ os.path.dirname(sys.argv[0]) ] + sys.path

    filename = sys.argv[0]
    filedata = file(filename, 'r').read()
    filecode = compile(filedata, filename, 'exec')
    scope = { '__file__' : filename,
              '__name__' : '__m5_main__' }

    # we want readline if we're doing anything interactive
    if options.interactive or options.pdb:
        exec "import readline" in scope

    # if pdb was requested, execfile the thing under pdb, otherwise,
    # just do the execfile normally
    if options.pdb:
        import pdb
        import traceback

        pdb = pdb.Pdb()
        try:
            pdb.run(filecode, scope)
        except SystemExit:
            print "The program exited via sys.exit(). Exit status: ",
            print sys.exc_info()[1]
        except:
            traceback.print_exc()
            print "Uncaught exception. Entering post mortem debugging"
            # Walk to the innermost frame for the post-mortem session.
            t = sys.exc_info()[2]
            while t.tb_next is not None:
                t = t.tb_next

            pdb.interaction(t.tb_frame,t)
    else:
        exec filecode in scope

    # once the script is done
    if options.interactive:
        interact(scope)
# Smoke-test entry point: parse the command line and dump the resulting
# options and positional arguments (Python 2 print statements).
if __name__ == '__main__':
    from pprint import pprint

    options, arguments = parse_options()

    print 'opts:'
    pprint(options, indent=4)
    print
    print 'args:'
    pprint(arguments, indent=4)
|
|
import httplib
import urllib
import base64
import socket
from celery.utils.log import get_task_logger
log = get_task_logger(__name__)
from django.utils import simplejson
from django.conf import settings
from celery import Celery
from mixpanel.conf import settings as mp_settings
celery = Celery('mixpanel')
celery.config_from_object(settings)
@celery.task(name="mixpanel.tasks.PeopleTracker", max_retries=mp_settings.MIXPANEL_MAX_RETRIES)
def people_tracker(distinct_id, properties=None, token=None, test=None, throw_retry_error=False):
"""
Track an event occurrence to mixpanel through the API.
``event_name`` is the string for the event/category you'd like to log
this event under
``properties`` is (optionally) a dictionary of key/value pairs
describing the event.
``token`` is (optionally) your Mixpanel api token. Not required if
you've already configured your MIXPANEL_API_TOKEN setting.
``test`` is an optional override to your
`:data:mixpanel.conf.settings.MIXPANEL_TEST_ONLY` setting for determining
if the event requests should actually be stored on the Mixpanel servers.
"""
log.info("Recording people datapoint: <%s>" % distinct_id)
is_test = _is_test(test)
url_params = _build_people_params(distinct_id, properties, is_test)
conn = _get_connection()
try:
result = _send_request(conn, url_params, mp_settings.MIXPANEL_PEOPLE_TRACKING_ENDPOINT)
except FailedEventRequest, exception:
conn.close()
log.info("Event failed. Retrying: user <%s>" % distinct_id)
raise event_tracker.retry(exc=exception,
countdown=mp_settings.MIXPANEL_RETRY_DELAY,
throw=throw_retry_error)
conn.close()
return result
@celery.task(name="mixpanel.tasks.EventTracker", max_retries=mp_settings.MIXPANEL_MAX_RETRIES)
def event_tracker(event_name, properties=None, token=None, test=None, throw_retry_error=False):
    """
    Track an event occurrence to mixpanel through the API.

    ``event_name`` is the string for the event/category you'd like to log
    this event under
    ``properties`` is (optionally) a dictionary of key/value pairs
    describing the event.
    ``token`` is (optionally) your Mixpanel api token. Not required if
    you've already configured your MIXPANEL_API_TOKEN setting.
    ``test`` is an optional override to your
    `:data:mixpanel.conf.settings.MIXPANEL_TEST_ONLY` setting for determining
    if the event requests should actually be stored on the Mixpanel servers.
    """
    log.info("Recording event: <%s>" % event_name)
    # Resolve the test flag and make sure the properties carry an API token.
    is_test = _is_test(test)
    generated_properties = _handle_properties(properties, token)
    url_params = _build_params(event_name, generated_properties, is_test)

    conn = _get_connection()
    try:
        result = _send_request(conn, url_params)
    except FailedEventRequest, exception:  # Python 2 except syntax
        conn.close()
        log.info("Event failed. Retrying: <%s>" % event_name)
        # Re-queue this task after MIXPANEL_RETRY_DELAY seconds; ``throw``
        # controls whether the Retry exception propagates to the caller.
        raise event_tracker.retry(exc=exception,
                                  countdown=mp_settings.MIXPANEL_RETRY_DELAY,
                                  throw=throw_retry_error)
    conn.close()
    return result
@celery.task(name="mixpanel.tasks.FunnelEventTracker", max_retries=mp_settings.MIXPANEL_MAX_RETRIES)
def funnel_event_tracker(funnel, step, goal, properties, token=None, test=None,
                         throw_retry_error=False):
    """
    Track an event occurrence to mixpanel through the API.

    ``funnel`` is the string for the funnel you'd like to log
    this event under
    ``step`` the step in the funnel you're registering
    ``goal`` the end goal of this funnel
    ``properties`` is a dictionary of key/value pairs
    describing the funnel event. A ``distinct_id`` is required.
    ``token`` is (optionally) your Mixpanel api token. Not required if
    you've already configured your MIXPANEL_API_TOKEN setting.
    ``test`` is an optional override to your
    `:data:mixpanel.conf.settings.MIXPANEL_TEST_ONLY` setting for determining
    if the event requests should actually be stored on the Mixpanel servers.
    """
    log.info("Recording funnel: <%s>-<%s>" % (funnel, step))
    # Ensure a token, resolve the test flag, then tag the properties with
    # the funnel metadata (raises InvalidFunnelProperties if distinct_id
    # is missing).
    properties = _handle_properties(properties, token)

    is_test = _is_test(test)
    properties = _add_funnel_properties(properties, funnel, step, goal)

    url_params = _build_params(mp_settings.MIXPANEL_FUNNEL_EVENT_ID,
                               properties, is_test)

    conn = _get_connection()
    try:
        result = _send_request(conn, url_params)
    except FailedEventRequest, exception:  # Python 2 except syntax
        conn.close()
        log.info("Funnel failed. Retrying: <%s>-<%s>" % (funnel, step))
        # Re-queue this task after MIXPANEL_RETRY_DELAY seconds.
        raise funnel_event_tracker.retry(exc=exception,
                                         countdown=mp_settings.MIXPANEL_RETRY_DELAY,
                                         throw=throw_retry_error)
    conn.close()
    return result
class FailedEventRequest(Exception):
    """The attempted recording event failed because of a non-200 HTTP return code"""
    # Raised by _send_request; the celery tasks catch it to schedule a retry.
    pass
class InvalidFunnelProperties(Exception):
    """Required properties were missing from the funnel-tracking call"""
    # Raised by _add_funnel_properties when ``distinct_id`` is absent.
    pass
def _is_test(test):
"""
Determine whether this event should be logged as a test request, meaning
it won't actually be stored on the Mixpanel servers. A return result of
1 means this will be a test, 0 means it won't as per the API spec.
Uses ``:mod:mixpanel.conf.settings.MIXPANEL_TEST_ONLY`` as the default
if no explicit test option is given.
"""
if test == None:
test = mp_settings.MIXPANEL_TEST_ONLY
if test:
return 1
return 0
def _handle_properties(properties, token):
"""
Build a properties dictionary, accounting for the token.
"""
if properties == None:
properties = {}
if not properties.get('token', None):
if token is None:
token = mp_settings.MIXPANEL_API_TOKEN
properties['token'] = token
return properties
def _get_connection():
    """Return an ``httplib.HTTPConnection`` to the Mixpanel API server."""
    server = mp_settings.MIXPANEL_API_SERVER

    # Wish we could use python 2.6's httplib timeout support
    # NOTE(review): this sets a *process-wide* default socket timeout, so it
    # affects every socket created afterwards, not just this connection.
    socket.setdefaulttimeout(mp_settings.MIXPANEL_API_TIMEOUT)
    return httplib.HTTPConnection(server)
def _build_people_params(distinct_id, properties, is_test):
    """
    Build HTTP params to record the given event and properties.

    ``properties`` may contain ``set`` and/or ``increment`` sub-dicts; the
    ``set`` dict is modified *in place* to prefix reserved Mixpanel property
    names with ``$``.
    NOTE(review): assumes ``properties`` is a dict; passing None raises
    TypeError -- confirm against callers.
    """
    params = {'$distinct_id': distinct_id,'$token': mp_settings.MIXPANEL_API_TOKEN}
    if 'set' in properties:
        #adding $ to any reserved mixpanel vars
        for special_prop in mp_settings.MIXPANEL_RESERVED_PEOPLE_PROPERTIES:
            if special_prop in properties['set']:
                properties['set']['${}'.format(special_prop)] = properties['set'][special_prop]
                del properties['set'][special_prop]
        params['$set'] = properties['set']
    if 'increment' in properties:
        params['$add'] = properties['increment']
    # The API expects the JSON payload base64-encoded in a single variable.
    data = base64.b64encode(simplejson.dumps(params))

    data_var = mp_settings.MIXPANEL_DATA_VARIABLE
    url_params = urllib.urlencode({data_var: data, 'test': is_test})

    return url_params
def _build_params(event, properties, is_test):
    """
    Build HTTP params to record the given event and properties.

    The {event, properties} payload is JSON-encoded, base64-encoded and
    URL-encoded into the Mixpanel data variable, alongside the test flag.
    """
    params = {'event': event, 'properties': properties}
    data = base64.b64encode(simplejson.dumps(params))

    data_var = mp_settings.MIXPANEL_DATA_VARIABLE
    url_params = urllib.urlencode({data_var: data, 'test': is_test})

    return url_params
def _send_request(connection, params, endpoint=mp_settings.MIXPANEL_TRACKING_ENDPOINT):
    """
    Send an event with its properties to the api server.

    Returns ``True`` if the event was logged by Mixpanel, ``False`` if the
    server answered 200/OK but did not record it.

    NOTE(review): the default ``endpoint`` is evaluated once at import time;
    later changes to the setting are not picked up here.
    """
    try:
        connection.request('GET', '%s?%s' % (endpoint, params))

        response = connection.getresponse()
    except socket.error, message:  # Python 2 except syntax
        raise FailedEventRequest("The tracking request failed with a socket error. Message: [%s]" % message)

    if response.status != 200 or response.reason != 'OK':
        raise FailedEventRequest("The tracking request failed. Non-200 response code was: %s %s" % (response.status, response.reason))

    # Successful requests will generate a log
    # Mixpanel returns the single character '1' on success, '0' otherwise.
    response_data = response.read()
    if response_data != '1':
        return False

    return True
def _add_funnel_properties(properties, funnel, step, goal):
if not 'distinct_id' in properties:
error_msg = "A ``distinct_id`` must be given to record a funnel event"
raise InvalidFunnelProperties(error_msg)
properties['funnel'] = funnel
properties['step'] = step
properties['goal'] = goal
return properties
|
|
from .academicearth import AcademicEarthCourseIE
from .addanime import AddAnimeIE
from .aftonbladet import AftonbladetIE
from .anitube import AnitubeIE
from .aol import AolIE
from .aparat import AparatIE
from .appletrailers import AppleTrailersIE
from .archiveorg import ArchiveOrgIE
from .ard import ARDIE
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
ArteTVCreativeIE,
ArteTVConcertIE,
ArteTVFutureIE,
ArteTVDDCIE,
ArteTVEmbedIE,
)
from .auengine import AUEngineIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbccouk import BBCCoUkIE
from .blinkx import BlinkxIE
from .bliptv import BlipTVIE, BlipTVUserIE
from .bloomberg import BloombergIE
from .br import BRIE
from .breakcom import BreakIE
from .brightcove import BrightcoveIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .canal13cl import Canal13clIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .cbs import CBSIE
from .ceskatelevize import CeskaTelevizeIE
from .channel9 import Channel9IE
from .chilloutzone import ChilloutzoneIE
from .cinemassacre import CinemassacreIE
from .clipfish import ClipfishIE
from .cliphunter import CliphunterIE
from .clipsyndicate import ClipsyndicateIE
from .cmt import CMTIE
from .cnn import (
CNNIE,
CNNBlogsIE,
)
from .collegehumor import CollegeHumorIE
from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
from .condenast import CondeNastIE
from .criterion import CriterionIE
from .crunchyroll import CrunchyrollIE
from .cspan import CSpanIE
from .d8 import D8IE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
)
from .daum import DaumIE
from .dotsub import DotsubIE
from .dreisat import DreiSatIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .dropbox import DropboxIE
from .ebaumsworld import EbaumsWorldIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .eitb import EitbIE
from .elpais import ElPaisIE
from .engadget import EngadgetIE
from .escapist import EscapistIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .firstpost import FirstpostIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fktv import (
FKTVIE,
FKTVPosteckeIE,
)
from .flickr import FlickrIE
from .fourtube import FourTubeIE
from .franceinter import FranceInterIE
from .francetv import (
PluzzIE,
FranceTvInfoIE,
FranceTVIE,
GenerationQuoiIE,
CultureboxIE,
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
from .gamespot import GameSpotIE
from .gametrailers import GametrailersIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .hark import HarkIE
from .helsinki import HelsinkiIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .huffpost import HuffPostIE
from .hypem import HypemIE
from .ign import IGNIE, OneUPIE
from .imdb import (
ImdbIE,
ImdbListIE
)
from .ina import InaIE
from .infoq import InfoQIE
from .instagram import InstagramIE, InstagramUserIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .ivi import (
IviIE,
IviCompilationIE
)
from .jadorecettepub import JadoreCettePubIE
from .jeuxvideo import JeuxVideoIE
from .jukebox import JukeboxIE
from .justintv import JustinTVIE
from .jpopsukitv import JpopsukiIE
from .kankan import KankanIE
from .keezmovies import KeezMoviesIE
from .khanacademy import KhanAcademyIE
from .kickstarter import KickStarterIE
from .keek import KeekIE
from .kontrtube import KontrTubeIE
from .la7 import LA7IE
from .lifenews import LifeNewsIE
from .liveleak import LiveLeakIE
from .livestream import LivestreamIE, LivestreamOriginalIE
from .lynda import (
LyndaIE,
LyndaCourseIE
)
from .m6 import M6IE
from .macgamestore import MacGameStoreIE
from .mailru import MailRuIE
from .malemotion import MalemotionIE
from .mdr import MDRIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mit import TechTVMITIE, MITIE, OCWMITIE
from .mixcloud import MixcloudIE
from .mpora import MporaIE
from .mofosex import MofosexIE
from .mooshare import MooshareIE
from .mtv import (
MTVIE,
MTVIggyIE,
)
from .muzu import MuzuTVIE
from .myspace import MySpaceIE
from .myspass import MySpassIE
from .myvideo import MyVideoIE
from .naver import NaverIE
from .nba import NBAIE
from .nbc import (
NBCIE,
NBCNewsIE,
)
from .ndr import NDRIE
from .ndtv import NDTVIE
from .newgrounds import NewgroundsIE
from .nfb import NFBIE
from .nhl import NHLIE, NHLVideocenterIE
from .niconico import NiconicoIE
from .ninegag import NineGagIE
from .normalboots import NormalbootsIE
from .novamov import NovaMovIE
from .nowness import NownessIE
from .nowvideo import NowVideoIE
from .ooyala import OoyalaIE
from .orf import ORFIE
from .parliamentliveuk import ParliamentLiveUKIE
from .pbs import PBSIE
from .photobucket import PhotobucketIE
from .playvid import PlayvidIE
from .podomatic import PodomaticIE
from .pornhd import PornHdIE
from .pornhub import PornHubIE
from .pornotube import PornotubeIE
from .prosiebensat1 import ProSiebenSat1IE
from .pyvideo import PyvideoIE
from .radiofrance import RadioFranceIE
from .rbmaradio import RBMARadioIE
from .redtube import RedTubeIE
from .ringtv import RingTVIE
from .ro220 import Ro220IE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rtlnow import RTLnowIE
from .rts import RTSIE
from .rutube import (
RutubeIE,
RutubeChannelIE,
RutubeMovieIE,
RutubePersonIE,
)
from .rutv import RUTVIE
from .savefrom import SaveFromIE
from .servingsys import ServingSysIE
from .sina import SinaIE
from .slideshare import SlideshareIE
from .smotri import (
SmotriIE,
SmotriCommunityIE,
SmotriUserIE,
SmotriBroadcastIE,
)
from .sohu import SohuIE
from .soundcloud import SoundcloudIE, SoundcloudSetIE, SoundcloudUserIE
from .southparkstudios import (
SouthParkStudiosIE,
SouthparkDeIE,
)
from .space import SpaceIE
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE
from .spike import SpikeIE
from .stanfordoc import StanfordOpenClassroomIE
from .statigram import StatigramIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .testurl import TestURLIE
from .tf1 import TF1IE
from .theplatform import ThePlatformIE
from .thisav import ThisAVIE
from .tinypic import TinyPicIE
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .trutube import TruTubeIE
from .tube8 import Tube8IE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .tutv import TutvIE
from .tvigle import TvigleIE
from .tvp import TvpIE
from .udemy import (
UdemyIE,
UdemyCourseIE
)
from .unistra import UnistraIE
from .ustream import UstreamIE, UstreamChannelIE
from .vbox7 import Vbox7IE
from .veehd import VeeHDIE
from .veoh import VeohIE
from .vesti import VestiIE
from .vevo import VevoIE
from .viddler import ViddlerIE
from .videobam import VideoBamIE
from .videodetective import VideoDetectiveIE
from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
from .videopremium import VideoPremiumIE
from .vimeo import (
VimeoIE,
VimeoChannelIE,
VimeoUserIE,
VimeoAlbumIE,
VimeoGroupsIE,
VimeoReviewIE,
)
from .vine import VineIE
from .viki import VikiIE
from .vk import VKIE
from .vube import VubeIE
from .washingtonpost import WashingtonPostIE
from .wat import WatIE
from .wdr import WDRIE
from .weibo import WeiboIE
from .wimp import WimpIE
from .wistia import WistiaIE
from .worldstarhiphop import WorldStarHipHopIE
from .xbef import XBefIE
from .xhamster import XHamsterIE
from .xnxx import XNXXIE
from .xvideos import XVideosIE
from .xtube import XTubeUserIE, XTubeIE
from .yahoo import (
YahooIE,
YahooNewsIE,
YahooSearchIE,
)
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
from .youtube import (
YoutubeIE,
YoutubeChannelIE,
YoutubeFavouritesIE,
YoutubeHistoryIE,
YoutubePlaylistIE,
YoutubeRecommendedIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
YoutubeSearchURLIE,
YoutubeShowIE,
YoutubeSubscriptionsIE,
YoutubeTopListIE,
YoutubeTruncatedURLIE,
YoutubeUserIE,
YoutubeWatchLaterIE,
)
from .zdf import ZDFIE
# Registry of every extractor class defined by the imports above.
# GenericIE is deliberately excluded from the scan and appended last, so it
# is only tried after every specialised extractor has had a chance to match.
_ALL_CLASSES = []
for name, klass in list(globals().items()):
    if name.endswith('IE') and name != 'GenericIE':
        _ALL_CLASSES.append(klass)
_ALL_CLASSES.append(GenericIE)
def gen_extractors():
    """ Return a list of an instance of every supported extractor.
    The order does matter; the first extractor matched is the one handling the URL.
    """
    extractors = []
    for klass in _ALL_CLASSES:
        extractors.append(klass())
    return extractors
def get_info_extractor(ie_name):
    """Returns the info extractor class with the given ie_name"""
    # Extractor classes follow the '<Name>IE' naming convention.
    return globals()['%sIE' % ie_name]
# --------------------------------------------------------------------------
# (boundary between concatenated source files)
# --------------------------------------------------------------------------
#!/usr/bin/env python
# Copyright (c) 2010 Anil Kumar
# All rights reserved.
#
# License: BSD
import os
import re
import array
from PyQt4 import QtGui
from PyQt4.QtGui import *
from PyQt4.QtCore import *
try:
	from PyQt4.Qsci import QsciScintilla, QsciScintillaBase
	from PyQt4.Qsci import QsciLexerCPP, QsciLexerJava
	from PyQt4.Qsci import QsciLexerPython, QsciLexerRuby
	from PyQt4.Qsci import QsciLexerBash, QsciLexerDiff, QsciLexerMakefile
	from PyQt4.Qsci import QsciLexerLua, QsciLexerSQL, QsciLexerTCL, QsciLexerTeX
	from PyQt4.Qsci import QsciLexerHTML, QsciLexerCSS
	from PyQt4.Qsci import QsciLexerPerl, QsciLexerVHDL

	# File-name suffixes (without the leading dot) mapped to the lexer class
	# used for syntax highlighting; consumed by EditorViewBase.lexer_for_file.
	suffix_to_lexer = [
		[['c', 'h', 'cpp', 'hpp', 'cc', 'hh', 'cxx', 'hxx', 'C', 'H', 'h++'], QsciLexerCPP],
		[['java'], QsciLexerJava],
		[['py', 'pyx', 'pxd', 'pxi', 'scons'], QsciLexerPython],
		[['rb', 'ruby'], QsciLexerRuby],
		[['sh', 'bash'], QsciLexerBash],
		[['diff', 'patch'], QsciLexerDiff],
		[['mak', 'mk'], QsciLexerMakefile],
		[['lua'], QsciLexerLua],
		[['sql'], QsciLexerSQL],
		[['tcl', 'tk', 'wish', 'itcl'], QsciLexerTCL],
		[['tex'], QsciLexerTeX],
		[['htm', 'html'], QsciLexerHTML],
		[['css'], QsciLexerCSS],
		[['pl', 'perl'], QsciLexerPerl],
		[['vhdl', 'vhd'], QsciLexerVHDL],
	]
	# Exact file names (no usable suffix) mapped to their lexer class.
	filename_to_lexer = [
		[['Makefile', 'makefile', 'Makefile.am', 'makefile.am', 'Makefile.in', 'makefile.in'], QsciLexerMakefile],
	]

	# Optional tab-width override taken from the environment; None means
	# "keep the widget default".
	seascope_editor_tab_width = None
	try:
		seascope_editor_tab_width = int(os.getenv('SEASCOPE_EDITOR_TAB_WIDTH'))
	# BUGFIX: narrowed from a bare 'except:' which swallowed everything
	# (including KeyboardInterrupt).  int(None) raises TypeError when the
	# variable is unset; a non-numeric value raises ValueError.
	except (TypeError, ValueError):
		pass
except ImportError as e:
	# Parenthesised single-argument prints behave identically on python 2
	# and are valid python 3 (the statement form was python-2-only).
	print(e)
	print("Error: required qscintilla-python package not found")
	raise ImportError
import DialogManager
from FileContextView import *
class EditorViewBase(QsciScintilla):
def __init__(self, parent=None):
QsciScintilla.__init__(self, parent)
self.font = None
self.lexer = None
def set_font(self, font):
if not font:
return
if not self.font:
self.font = QtGui.QFont()
self.font.fromString(font)
# the font metrics here will help
# building the margin width later
self.fm = QtGui.QFontMetrics(self.font)
## set the default font of the editor
## and take the same font for line numbers
self.setFont(self.font)
self.setMarginsFont(self.font)
self.lexer.setFont(self.font,-1)
self.setLexer(self.lexer)
def lpropChanged(self, prop, val):
print 'lpropChanged', prop, val
def setProperty(self, name, val):
name_buff = array.array('c', name + "\0")
val_buff = array.array("c", str(val) + "\0")
address_name_buffer = name_buff.buffer_info()[0]
address_val_buffer = val_buff.buffer_info()[0]
self.SendScintilla(QsciScintillaBase.SCI_SETPROPERTY, address_name_buffer, address_val_buffer)
def getProperty(self, name):
name_buff = array.array('c', name + "\0")
val_buff = array.array("c", str(0) + "\0")
address_name_buffer = name_buff.buffer_info()[0]
address_val_buffer = val_buff.buffer_info()[0]
self.SendScintilla(QsciScintillaBase.SCI_GETPROPERTY, address_name_buffer, address_val_buffer)
return ''.join(val_buff)
def printPropertyAll(self):
sz = self.SendScintilla(QsciScintillaBase.SCI_PROPERTYNAMES, 0, 0)
if not sz:
return
val_buff = array.array("c", (' ' * sz) + "\0")
address_val_buffer = val_buff.buffer_info()[0]
self.SendScintilla(QsciScintillaBase.SCI_PROPERTYNAMES, 0, address_val_buffer)
print '###>'
for p in ''.join(val_buff).splitlines():
v = self.getProperty(p)
print ' %s = %s' % (p, v)
def lexer_for_file(self, filename):
(prefix, ext) = os.path.splitext(filename)
for (el, lxr) in suffix_to_lexer:
if ext in el:
return lxr
for (el, lxr) in filename_to_lexer:
if filename in el:
return lxr
return QsciLexerCPP
def set_lexer(self, filename):
if not self.lexer:
lexerClass = self.lexer_for_file(filename)
self.lexer = lexerClass()
self.setLexer(self.lexer)
self.setProperty("lexer.cpp.track.preprocessor", "0")
is_debug = os.getenv("SEASCOPE_QSCI_LEXER_DEBUG", 0)
if is_debug:
self.lexer.propertyChanged.connect(self.lpropChanged)
self.printPropertyAll()
class EditorView(EditorViewBase):
	"""Read-only source viewer with code marks (bookmarks), line navigation
	and a class-wide right-click popup menu (EditorView.ev_popup)."""

	# Popup menu shared by every EditorView; installed by the application.
	ev_popup = None
	# Emitted with the selected text after a mouse selection is made.
	sig_text_selected = pyqtSignal(str)

	def __init__(self, parent=None):
		EditorViewBase.__init__(self, parent)
		#self.setGeometry(300, 300, 400, 300)

		## Editing line color
		self.setCaretLineVisible(True)
		self.setCaretLineBackgroundColor(QtGui.QColor("#d4feff")) # orig: EEF6FF
		#self.setCaretWidth(2)
		self.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
		# Circle marker used for code marks (see codemark_add/codemark_del).
		self.codemark_marker = self.markerDefine(self.Circle)

	def get_filename(self):
		# NOTE(review): self.filename is only assigned in open_file_begin();
		# calling this before a file is opened raises AttributeError.
		return self.filename

	def ed_settings_1(self):
		"""Alternative visual theme (not applied by default)."""
		## Margins colors
		# line numbers margin
		self.setMarginsBackgroundColor(QtGui.QColor("#333333"))
		self.setMarginsForegroundColor(QtGui.QColor("#CCCCCC"))
		# folding margin colors (foreground,background)
		self.setFoldMarginColors(QtGui.QColor("#888888"),QtGui.QColor("#eeeeee"))
		## Edge Mode shows a red vetical bar at 80 chars
		self.setEdgeMode(QsciScintilla.EdgeLine)
		self.setEdgeColumn(80)
		self.setEdgeColor(QtGui.QColor("#FF0000"))
		## Editing line color
		self.setCaretLineVisible(True)
		self.setCaretLineBackgroundColor(QtGui.QColor("#CDA869"))
		## set tab width
		if seascope_editor_tab_width:
			self.setTabWidth(seascope_editor_tab_width)

	def show_line_number_cb(self, val):
		"""Show/hide the line-number margin, sized for 5-digit numbers.

		Assumes set_font() has already run (self.fm) — true for pages
		created through EditorBook.addFile().
		"""
		if (val):
			width = self.fm.width( "00000" ) + 5
		else:
			width = 0
		self.setMarginWidth(0, width)
		self.setMarginLineNumbers(0, val)

	def show_folds_cb(self, val):
		"""Enable/disable the folding margin; collapse state is reset off."""
		if val:
			#self.setMarginsForegroundColor( QtGui.QColor("#404040") )
			#self.setMarginsBackgroundColor( QtGui.QColor("#888888") )
			## Folding visual : we will use circled tree fold
			self.setFolding(QsciScintilla.CircledTreeFoldStyle)
		else:
			self.setFolding(QsciScintilla.NoFoldStyle)
			self.clearFolds()

	def toggle_folds_cb(self):
		self.foldAll()

	def codemark_add(self, line):
		self.markerAdd(line, self.codemark_marker)

	def codemark_del(self, line):
		self.markerDelete(line, self.codemark_marker)

	def goto_marker(self, is_next):
		"""Move the cursor to the next/previous code mark, if any."""
		(eline, inx) = self.getCursorPosition()
		if is_next:
			val = self.markerFindNext(eline + 1, -1)
		else:
			val = self.markerFindPrevious(eline - 1, -1)
		if val >= 0:
			self.setCursorPosition(val, 0)

	def open_file_begin(self, filename):
		"""First half of file loading: lexer, brace matching, show widget."""
		self.filename = filename
		## Choose a lexer
		self.set_lexer(filename)
		## Braces matching
		self.setBraceMatching(QsciScintilla.SloppyBraceMatch)
		## Render on screen
		self.show()

	def open_file_end(self):
		"""Second half of file loading: scrollbars and focus."""
		self.show()
		self.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
		#self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
		self.setFocus()

	def open_file(self, filename):
		"""Load filename into the view and mark it read-only."""
		self.open_file_begin(filename)
		## Show this file in the editor
		# BUGFIX: close the handle deterministically; the previous
		# open(filename).read() leaked the file object until GC ran.
		f = open(filename)
		try:
			self.setText(f.read())
		finally:
			f.close()
		## Mark read-only
		self.setReadOnly(True)
		self.open_file_end()

	def refresh_file(self, filename):
		"""Reload the already-open file, preserving the cursor position."""
		assert filename == self.filename
		pos = self.getCursorPosition()
		self.open_file(filename)
		self.setCursorPosition(*pos)
		# Re-emit so listeners (e.g. the context view) resynchronise.
		self.cursorPositionChanged.emit(*pos)

	def goto_line(self, line):
		"""Move the cursor to a 1-based line number and make it visible."""
		line = line - 1
		self.setCursorPosition(line, 0)
		self.ensureLineVisible(line)
		self.setFocus()

	def contextMenuEvent(self, ev):
		if not EditorView.ev_popup:
			return
		# Temporarily shrink the popup font while the menu is displayed.
		f = EditorView.ev_popup.font()
		EditorView.ev_popup.setFont(QFont("San Serif", 8))
		EditorView.ev_popup.exec_(QCursor.pos())
		EditorView.ev_popup.setFont(f)

	def mouseReleaseEvent(self, ev):
		super(EditorView, self).mouseReleaseEvent(ev)
		if(self.hasSelectedText()):
			self.query_text = self.selectedText()
			#print 'selectedText ----', self.query_text
			self.sig_text_selected.emit(self.query_text)
class EditorPage(QSplitter):
	"""Splitter pairing the file-context pane (left) with the source editor
	view (right) for one open file."""

	def __init__(self, parent=None):
		QSplitter.__init__(self)
		self.fcv = FileContextView(self)
		self.ev = self.new_editor_view()
		self.addWidget(self.fcv)
		self.addWidget(self.ev)
		# Give nearly all of the space to the editor by default.
		self.setSizes([1, 300])
		# Keep the context view following the cursor, and let it drive
		# navigation back into the editor.
		self.ev.cursorPositionChanged.connect(self.fcv.sig_ed_cursor_changed)
		self.fcv.sig_goto_line.connect(self.ev.goto_line)

	def new_editor_view(self):
		# Factory hook so subclasses can substitute their own view type.
		return EditorView(self)

	def open_file(self, filename):
		"""Load filename into the editor and (re)build its context view."""
		self.ev.open_file(filename)
		self.fcv.run(filename)

	def refresh_file(self):
		"""Re-read the current file from disk and refresh both panes."""
		fname = self.get_filename()
		self.fcv.rerun(fname)
		self.ev.refresh_file(fname)

	def get_filename(self):
		"""Return the path currently shown in the editor view."""
		return self.ev.get_filename()
class EditorBook(QTabWidget):
	"""Tabbed container of EditorPage widgets, one tab per open file.

	Signals consumed by the surrounding application:
	  sig_file_closed(path)           -- a tab was closed
	  sig_history_update(path, line)  -- a position worth recording
	  sig_tab_changed(path)           -- active tab switched ('' when empty)
	  sig_open_dir_view(path)         -- user requested a dir view
	  sig_editor_text_selected(text)  -- text selected in any editor view
	"""
	sig_file_closed = pyqtSignal(str)
	sig_history_update = pyqtSignal(str, int)
	sig_tab_changed = pyqtSignal(str)
	sig_open_dir_view = pyqtSignal(str)
	sig_editor_text_selected = pyqtSignal(str)

	def __init__(self, *args):
		# BUGFIX: apply() is deprecated (and removed in python 3); the
		# direct call with argument unpacking is equivalent.
		QTabWidget.__init__(self, *args)
		self.setMovable(True)
		self.setTabsClosable(True)
		self.tabCloseRequested.connect(self.removeTab)
		self.currentChanged.connect(self.tab_change_cb)
		self.is_show_line = False
		self.is_show_folds = False
		# Text of the last find_cb() search; None until a search is run.
		self.f_text = None
		self.ev_font = "Monospace,10,-1,5,50,0,0,0,0,0"

	def new_editor_page(self):
		# Factory hook: subclasses may supply a specialised page type.
		return EditorPage()

	def addFile(self, fileName):
		"""Open fileName in a new tab and return its EditorPage."""
		ed = self.new_editor_page()
		ed.open_file(fileName)
		ed.ev.set_font(self.ev_font)
		ed.ev.show_line_number_cb(self.is_show_line)
		ed.ev.show_folds_cb(self.is_show_folds)
		# BUGFIX: connect exactly once, at page creation.  This used to
		# happen in show_file_line() on every visit, accumulating duplicate
		# connections and duplicate sig_editor_text_selected emissions.
		ed.ev.sig_text_selected.connect(self.editor_text_selected)
		self.addTab(ed, os.path.basename(fileName))
		return ed

	def search_already_opened_files(self, filename):
		"""Return the page already showing filename, or None."""
		for i in range(self.count()):
			page = self.widget(i)
			if (page.get_filename() == filename):
				return page
		return None

	def removeTab(self, inx):
		ed = self.widget(inx)
		f = ed.ev.get_filename()
		QTabWidget.removeTab(self, inx)
		self.sig_file_closed.emit(f)

	def clear(self):
		while self.count() != 0:
			self.removeTab(0)

	def remove_tab_list(self, inx_list):
		# Close from the highest index down so remaining indexes stay valid.
		for inx in sorted(inx_list, reverse=True):
			self.removeTab(inx)

	def tab_list(self, inx, type):
		"""Return the list of tab indexes selected by 'type' relative to inx."""
		# list(range(...)) keeps the '+' concatenation (case 'other')
		# working on python 3 as well; an unused accumulator was dropped.
		if type == 'all' or type == 'files':
			return list(range(self.count()))
		if type == 'left':
			return list(range(inx))
		if type == 'right':
			return list(range(inx + 1, self.count()))
		if type == 'other':
			return self.tab_list(inx, 'left') + self.tab_list(inx, 'right')
		assert 0

	def close_list_common(self, type):
		"""Confirm with the user, then close the tabs selected by 'type'."""
		inx_list = self.tab_list(self.currentIndex(), type)
		if len(inx_list) == 0:
			return
		if not DialogManager.show_yes_no('Close all %s ?' % type):
			return
		self.remove_tab_list(inx_list)

	def close_all_left_cb(self):
		self.close_list_common('left')

	def close_all_right_cb(self):
		self.close_list_common('right')

	def close_all_other_cb(self):
		self.close_list_common('other')

	def close_all_cb(self):
		self.close_list_common('files')

	def get_current_word(self):
		"""Return the selection, or the word under the cursor (None if empty)."""
		ed = self.currentWidget()
		if not ed:
			return
		if ed.ev.hasSelectedText():
			return ed.ev.selectedText()
		(line, index) = ed.ev.getCursorPosition()
		text = ed.ev.text(line)
		# Go left
		linx = index
		while linx > 0 and ed.ev.isWordCharacter(text[linx - 1]):
			linx = linx - 1
		# Go right
		rinx = index
		while rinx < len(text) and ed.ev.isWordCharacter(text[rinx]):
			rinx = rinx + 1
		text = text[linx:rinx]
		if text == '':
			return None
		return text

	def get_current_file_line(self):
		"""Return (filename, 1-based line) of the cursor, or (None, None)."""
		ed = self.currentWidget()
		if not ed:
			return (None, None)
		(line, inx) = ed.ev.getCursorPosition()
		return (ed.ev.filename, line + 1)

	def get_file_line_list(self):
		"""Return 'file:line' for every tab (current tab repeated last)."""
		fl_list = []
		tlist = list(range(self.count()))
		inx = self.currentIndex()
		if inx >= 0:
			tlist.append(inx)
		for inx in tlist:
			ed = self.widget(inx)
			(line, inx) = ed.ev.getCursorPosition()
			fl_list.append('%s:%d' % (ed.ev.filename, line + 1))
		return fl_list

	def matching_brace_cb(self):
		ed = self.currentWidget()
		if ed:
			ed.ev.moveToMatchingBrace()
			#ed.ev.setFocus()

	def goto_line_cb(self):
		"""Prompt for a line number and jump to it in the current editor."""
		ed = self.currentWidget()
		if not ed:
			return (None, None)
		(line, inx) = ed.ev.getCursorPosition()
		#return (line + 1, ed.ev.lines())
		line = DialogManager.show_goto_line_dialog(line + 1, ed.ev.lines())
		if line is None:
			return
		ed.ev.goto_line(line)

	def focus_editor(self):
		page = self.currentWidget()
		if page:
			page.ev.setFocus()

	def close_current_page(self):
		self.removeTab(self.currentIndex())
		self.focus_editor()

	def focus_search_ctags(self):
		ed = self.currentWidget()
		if ed:
			ed.fcv.focus_search_ctags()

	def copy_edit_cb(self):
		ed = self.currentWidget()
		if ed:
			ed.ev.copy()

	def tab_change_cb(self, inx):
		# inx == -1 means the last tab was closed.
		if (inx == -1):
			fname = ''
		else:
			page = self.currentWidget()
			page.ev.setFocus()
			fname = page.get_filename()
		self.sig_tab_changed.emit(fname)

	def open_dir_cb(self):
		page = self.currentWidget()
		if page:
			fname = page.get_filename()
			self.sig_open_dir_view.emit(fname)

	def mousePressEvent(self, m_ev):
		QTabWidget.mousePressEvent(self, m_ev)
		if (m_ev.button() == Qt.RightButton):
			# setup popup menu
			self.pmenu = QMenu()
			self.pmenu.addAction("Open dir", self.open_dir_cb)
			self.pmenu.addSeparator()
			self.pmenu.addAction("Close All &Left", self.close_all_left_cb)
			self.pmenu.addAction("Close All &Right", self.close_all_right_cb)
			self.pmenu.addAction("Close &Others", self.close_all_other_cb)
			self.pmenu.addSeparator()
			self.pmenu.addAction("Close &All", self.close_all_cb)
			self.pmenu.exec_(QCursor.pos())

	def show_file_line(self, filename, line, hist=True):
		"""Show filename at a 1-based line (0 keeps the position).  When
		hist is true, record the departure and arrival points in history."""
		if line:
			(f, l) = self.get_current_file_line()
			if (f):
				if hist:
					self.sig_history_update.emit(f, l)
		filename = str(filename)
		if (not os.path.exists(filename)):
			return
		page = self.search_already_opened_files(filename)
		if page is None:
			page = self.addFile(filename)
		self.setCurrentWidget(page)
		if line:
			page.ev.goto_line(line)
			if hist:
				self.sig_history_update.emit(filename, line)
		page.ev.setFocus()
		# NOTE: sig_text_selected is connected once in addFile(); doing it
		# here (as before) piled up duplicate connections per visit.

	def editor_text_selected(self, text):
		# Re-broadcast a page-level selection at book level.
		self.sig_editor_text_selected.emit(text)

	def show_file(self, filename):
		self.show_file_line(filename, 0)

	def show_line(self, line):
		ed = self.currentWidget()
		if not ed:
			return
		ed.ev.goto_line(line)

	def refresh_file_cb(self):
		ed = self.currentWidget()
		if not ed:
			return
		ed.refresh_file()

	def find_cb(self):
		"""Prompt for a search and run it from the chosen starting point."""
		ed = self.currentWidget()
		if not ed:
			return
		res = DialogManager.show_find_dialog(self.get_current_word())
		if res is None:
			return
		(text, opt) = res
		if text is None:
			return
		self.f_text = text
		self.f_opt = opt
		self.find_text(opt['cursor'], opt['fw'])

	def find_text(self, from_cursor, is_fw):
		"""Search for the last find_cb() text; return True when found."""
		if self.f_text is None:
			return
		ed = self.currentWidget()
		if not ed:
			return
		text = self.f_text
		opt = self.f_opt
		if (from_cursor):
			if (is_fw):
				# (-1, -1) tells findFirst to start at the cursor.
				(line, inx) = (-1, -1)
			else:
				(line, inx) = ed.ev.getCursorPosition()
				if (ed.ev.hasSelectedText()):
					inx = inx - 1
		else:
			# BUGFIX: 'line'/'inx' were left unbound when from_cursor was
			# false, raising NameError.  Search the whole document instead.
			if (is_fw):
				(line, inx) = (0, 0)
			else:
				(line, inx) = (ed.ev.lines(), 0)
		if ed.ev.findFirst(text, opt['re'], opt['cs'], opt['wo'], False, is_fw, line, inx):
			return True
		if not DialogManager.show_yes_no('End of document reached. Continue from beginning?'):
			return False
		if (is_fw):
			(line, inx) = (0, 0)
		else:
			(line, inx) = (ed.ev.lines(), 0)
		if ed.ev.findFirst(text, opt['re'], opt['cs'], opt['wo'], False, is_fw, line, inx):
			# BUGFIX: return a consistent boolean (was a bare 'return').
			return True
		DialogManager.show_msg_dialog("Could not find " + "'" + text + "'")
		return False

	def find_next_cb(self):
		self.find_text(True, True)

	def find_prev_cb(self):
		self.find_text(True, False)

	def change_ev_font(self, font):
		"""Apply a new font spec to every open editor view."""
		if font == self.ev_font:
			return
		self.ev_font = font
		for inx in range(self.count()):
			ed = self.widget(inx)
			ed.ev.set_font(self.ev_font)

	def show_line_number_cb(self):
		# NOTE(review): self.m_show_line_num is a checkable action assigned
		# by the application after construction — confirm against callers.
		val = self.m_show_line_num.isChecked()
		self.is_show_line = val
		for inx in range(self.count()):
			ed = self.widget(inx)
			ed.ev.show_line_number_cb(val)

	def show_line_number_pref(self, val):
		if val == self.m_show_line_num.isChecked():
			return
		self.m_show_line_num.setChecked(val)
		self.show_line_number_cb()

	def show_folds_cb(self):
		# NOTE(review): self.m_show_folds is assigned externally, like
		# m_show_line_num above.
		val = self.m_show_folds.isChecked()
		self.is_show_folds = val
		for inx in range(self.count()):
			ed = self.widget(inx)
			ed.ev.show_folds_cb(val)

	def toggle_folds_cb(self):
		ed = self.currentWidget()
		if not ed:
			return
		if self.is_show_folds:
			ed.ev.toggle_folds_cb()

	def open_in_external_editor(self, cmd):
		"""Launch cmd with %F/%L replaced by the current file and line."""
		if not cmd:
			DialogManager.show_msg_dialog('Please configure external editor')
			return
		(f, l) = self.get_current_file_line()
		if not f:
			return
		cmd = cmd.replace('%F', f).replace('%L', str(l))
		if not QProcess.startDetached(cmd):
			DialogManager.show_msg_dialog('Failed to start: ' + cmd)

	def codemark_add(self, filename, line):
		ed = self.search_already_opened_files(filename)
		if ed:
			ed.ev.codemark_add(line)

	def codemark_del(self, filename, line):
		ed = self.search_already_opened_files(filename)
		if ed:
			ed.ev.codemark_del(line)

	def bookmark_prev_cb(self):
		ed = self.currentWidget()
		if ed:
			ed.ev.goto_marker(False)

	def bookmark_next_cb(self):
		ed = self.currentWidget()
		if ed:
			ed.ev.goto_marker(True)
# --------------------------------------------------------------------------
# Non-code residue from the hosting site, preserved as comments so the file
# remains importable:
#   Subsets and Splits
#   No community queries yet
#   The top public SQL queries from the community will appear here once
#   available.
# --------------------------------------------------------------------------