gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
import csv
import sys
import numpy as np
import itertools as it
from scipy import ndimage
############################################################################
# Helpers
############################################################################
def loop_2_1(items):
    """Cartesian square: every ordered pair (a, b) drawn from *items*."""
    return it.product(items, items)
def loop_2_2(first, second):
    """Cartesian product of two iterables, as (a, b) pairs.

    Kept as ``itertools.product`` (not a nested generator) so that
    one-shot iterator arguments are fully materialized up front.
    """
    return it.product(first, second)
def loop_3_3(outer, middle, inner):
    """Nested triples shaped (a, (b, c)) over three iterables."""
    return it.product(outer, it.product(middle, inner))
############################################################################
# Layers
############################################################################
class Layer:
    """Base class for all network layers.

    Stores the layer's input volume shape ([depth, width, width]) and the
    wiring to its owning network and neighbours.  The default forward and
    backward passes are identity; subclasses override them.
    """

    def __init__(self, values_shape):
        self.values_shape = values_shape
        # Volume is [depth, width, width]; only the first two dims are kept.
        self.depth, self.width = values_shape[0], values_shape[1]
        self.network = None

    def setup(self, network, prev_layer, next_layer):
        """Attach the layer to its network and its neighbouring layers."""
        self.network = network
        self.prev_layer = prev_layer
        self.next_layer = next_layer

    def forward(self, value, label):
        """Identity forward pass (overridden by subclasses)."""
        return value

    def backward(self, score, step):
        """Identity backward pass (overridden by subclasses)."""
        return score

    def get_desc(self):
        """Human-readable one-line description."""
        return "Layer " + str(self.values_shape)
class ConvLayer(Layer):
    """Convolutional layer: square kernels, zero padding, stride 1.

    Output depth equals the number of filters (taken from the next
    layer's depth at setup time).
    """

    class Filter:
        """One learnable kernel + bias with gradient accumulators."""

        def __init__(self, depth, size):
            self.kernel = 0.1 * np.random.randn(depth, size, size)
            self.kernel_grad = np.zeros(self.kernel.shape)
            self.bias = 0.1 * np.random.randn()
            self.bias_grad = 0.0
            self.size = size

        def clear_gradient(self):
            """Reset accumulated gradients before a new backward pass."""
            self.kernel_grad.fill(0)
            self.bias_grad = 0

    def __init__(self, values_shape, filter_size, padding):
        Layer.__init__(self, values_shape)
        self.filter_size = filter_size
        self.padding = padding
        self.filters = []
        self.temp = None  # input cached by forward(), reused by backward()

    def setup(self, network, previous_layer, next_layer):
        """Create one filter per output channel (next layer's depth)."""
        Layer.setup(self, network, previous_layer, next_layer)
        for _ in range(next_layer.depth):
            self.filters.append(ConvLayer.Filter(self.depth, self.filter_size))

    def _padded_input(self, value):
        """Zero-pad *value* on the spatial dims; returns (padded, width)."""
        padded_width = self.width + 2 * self.padding
        padded = np.zeros([self.depth, padded_width, padded_width])
        end = padded_width - self.padding
        padded[:, self.padding:end, self.padding:end] = value
        return padded, padded_width

    def forward(self, value, label):
        """Valid convolution of the zero-padded input with every filter.

        PERF FIX: the padded input does not depend on the filter, so it is
        built once here instead of once per filter as before.
        """
        self.temp = value
        padded, padded_width = self._padded_input(value)
        out_width = (padded_width - self.filter_size) + 1
        result = np.zeros([len(self.filters), out_width, out_width])
        for k, filt in enumerate(self.filters):
            for i in range(out_width):
                for j in range(out_width):
                    region = padded[:, i:i + filt.size, j:j + filt.size]
                    result[k, i, j] = np.sum(region * filt.kernel) + filt.bias
        return result

    def backward(self, score, step):
        """Accumulate kernel/bias gradients, update weights (SGD + L2),
        and return the gradient w.r.t. this layer's input.
        """
        padded, padded_width = self._padded_input(self.temp)
        end = padded_width - self.padding
        output = np.zeros(padded.shape)
        for k, filt in enumerate(self.filters):
            filt.clear_gradient()
            span = padded_width - filt.size + 1
            for i in range(span):
                for j in range(span):
                    (shift_i, shift_j) = (i + filt.size, j + filt.size)
                    region = padded[:, i:shift_i, j:shift_j]
                    output[:, i:shift_i, j:shift_j] += score[k, i, j] * filt.kernel
                    filt.kernel_grad += score[k, i, j] * region
                    filt.bias_grad += score[k, i, j]
        reg = self.network.lambda_reg
        for filt in self.filters:
            # BUG FIX: standard SGD weight decay scales the L2 term by the
            # learning rate too; previously only the data gradient was
            # scaled by `step`.  (The shipped driver uses lambda_reg = 0,
            # so this is backward-compatible there.)
            filt.kernel -= step * (filt.kernel_grad + reg * filt.kernel)
            filt.bias -= filt.bias_grad * step
        # Strip the padding from the input gradient.
        return output[:, self.padding:end, self.padding:end]

    def get_desc(self):
        return "ConvLayer " + str(self.values_shape) + " " + \
            str(self.filter_size) + " " + str(self.padding)
class PoolLayer(Layer):
    """2x2 max pooling with stride 2.

    Remembers the flat argmax (0..3) of every pooled window so the
    backward pass can route the gradient to the winning cell.
    """

    def __init__(self, values_shape):
        Layer.__init__(self, values_shape)
        self.pool_index = None  # per-window flat argmax from last forward

    def forward(self, value, label):
        half = self.width // 2
        output = np.zeros([self.depth, half, half])
        self.pool_index = np.zeros(output.shape)
        for d in range(self.depth):
            for i in range(0, self.width, 2):
                for j in range(0, self.width, 2):
                    window = value[d, i:i + 2, j:j + 2]
                    output[d, i // 2, j // 2] = np.max(window)
                    self.pool_index[d, i // 2, j // 2] = np.argmax(window)
        return output

    def backward(self, score, step):
        gradient = np.zeros(self.values_shape)
        channels, rows, cols = score.shape
        for k in range(channels):
            for i in range(rows):
                for j in range(cols):
                    # Decode the flat 0..3 window index into (row, col).
                    flat = int(self.pool_index[k, i, j])
                    gradient[k, 2 * i + flat // 2, 2 * j + flat % 2] = score[k, i, j]
        return gradient

    def get_desc(self):
        return "PoolLayer " + str(self.values_shape)
class ReLULayer(Layer):
    """Rectified linear unit: passes positive values, zeroes the rest."""

    def __init__(self, values_shape):
        Layer.__init__(self, values_shape)
        self.acts = None  # boolean mask of positive inputs from last forward

    def forward(self, value, label):
        mask = value > 0
        self.acts = mask
        return value * mask

    def backward(self, score, step):
        # Gradient flows only through the cells that were positive.
        return score * self.acts

    def get_desc(self):
        return "ReLuLayer " + str(self.values_shape)
class SoftLayer(Layer):
    """Output layer: scaled, max-shifted scores forward; softmax
    cross-entropy gradient backward.  The 0.05 factor is a fixed
    temperature applied symmetrically in both passes.
    """

    def __init__(self, values_shape):
        Layer.__init__(self, values_shape)
        self.label = -1  # target class recorded during forward

    def forward(self, value, label):
        self.label = label
        # Subtracting the max is the usual softmax numerical guard.
        return 0.05 * (value - np.max(value))

    def backward(self, score, step):
        exps = np.exp(score)
        gradient = exps / np.sum(exps)
        gradient[int(self.label)] -= 1.0
        return 0.05 * gradient

    def get_desc(self):
        return "SoftLayer " + str(self.values_shape)
############################################################################
# Network
############################################################################
class Network:
    """Sequential network: wires layers together, runs per-sample SGD,
    and produces argmax predictions.
    """

    def __init__(self, layers):
        self.layers = layers
        self.lambda_reg = 0.01   # L2 regularization strength
        self.step_init = 0.001   # initial learning rate
        self.step_update = 0     # 0: harmonic decay; otherwise geometric
        # Wire each layer to its neighbours (None at the two ends).
        layers[0].setup(self, None, layers[1])
        for i in range(1, len(layers) - 1):
            layers[i].setup(self, layers[i - 1], layers[i + 1])
        layers[-1].setup(self, layers[-2], None)

    def _step_size(self, k):
        """Learning rate for sample k: harmonic decay every 700 samples
        (step_update == 0) or geometric 0.9 decay every 90 samples.
        """
        if self.step_update == 0:
            return self.step_init / ((k // 700) + 1)
        return self.step_init * (0.9 ** (k // 90))

    def train(self, values, labels):
        """One pass of per-sample SGD over (values, labels)."""
        if len(values) == 0:
            return
        for k in range(len(values)):
            if k % 100 == 0:
                sys.stdout.write("\r> progress: %d/%d" % (k, len(values)))
            score = self.forward(values[k], labels[k])
            self.backward(score, self._step_size(k))
        sys.stdout.write("\r")

    def forward(self, value, label):
        """Run value through every layer in order; returns final scores."""
        for layer in self.layers:
            value = layer.forward(value, label)
        return value

    def backward(self, score, step):
        """Propagate the gradient through the layers in reverse order."""
        for layer in self.layers[::-1]:
            score = layer.backward(score, step)

    def predict(self, values):
        """Return the argmax class for each input; empty array for none."""
        if len(values) == 0:
            return np.array([])
        predictions = np.zeros(len(values))
        for i in range(len(values)):
            if i % 100 == 0:
                sys.stdout.write("\r> progress: %d/%d" % (i, len(values)))
            output = self.forward(values[i], -1)
            # BUG FIX: np.argmax(output, axis=0) yields a (1, 1) array for a
            # [classes, 1, 1] output; assigning that into a scalar slot is
            # rejected by modern NumPy.  The flat argmax is identical for
            # that shape and always scalar.
            predictions[i] = np.argmax(output)
        sys.stdout.write("\r")
        return predictions

    def test(self, values, labels):
        """Mean prediction accuracy over (values, labels)."""
        if len(values) == 0:
            return np.array([])
        predictions = self.predict(values)
        accuracy = 0
        for i in range(len(predictions)):
            accuracy += predictions[i] == labels[i]
        return accuracy / len(predictions)
############################################################################
# Read / write data
############################################################################
def read_values(filename):
    """Load CSV pixel data, standardize it, reshape to [n, 3, 32, 32].

    The last cell of every row is dropped (trailing field), all remaining
    cells are parsed as floats, then the whole tensor is normalized to
    zero mean / unit variance.
    """
    raw = []
    with open(filename) as f:
        for row in csv.reader(f):
            raw.extend(float(cell) for cell in row[:-1])
    data = np.asarray(raw)
    data = (data - np.mean(data)) / np.std(data)
    nb_inputs = int(len(data) / (3 * 32 * 32))
    return np.reshape(data, [nb_inputs, 3, 32, 32])
def read_labels(filename):
    """Read labels from a CSV with an ``Id,Prediction`` header row.

    Returns the second column of every data row as a float ndarray.
    """
    y = []
    with open(filename) as f:
        train_reader = csv.reader(f)
        # BUG FIX: reader.next() is Python 2 only; the rest of this script
        # is Python 3 (print() calls), so use the builtin next().
        next(train_reader)  # skip the header row
        for line in train_reader:
            y.append(float(line[1]))
    return np.array(y)
def write_labels(filename, labels):
    """Write predictions as a Kaggle-style ``Id,Prediction`` CSV,
    ids starting at 1.
    """
    with open(filename, 'w') as out:
        out.write("Id,Prediction\n")
        for idx, lab in enumerate(labels):
            out.write("%d,%d\n" % (idx + 1, int(lab)))
def print_labels(labels):
    """Echo predictions to stdout in the same ``id,label`` format as
    write_labels.  NOTE: the embedded "\\n" plus print()'s own newline
    leaves a blank line after each record (original behavior, preserved).
    """
    for idx, lab in enumerate(labels):
        print("%d,%d\n" % (idx + 1, int(lab)))
############################################################################
# Main
############################################################################
def run():
    """Build the CNN, train on Xtr/Ytr with a held-out split, then
    predict Xte and write Yte.csv.
    """
    print("Building CNN...")
    cnn = Network([
        ConvLayer([3, 32, 32], 5, 2),
        ReLULayer([32, 32, 32]),
        PoolLayer([32, 32, 32]),
        ConvLayer([32, 16, 16], 5, 2),
        ReLULayer([64, 16, 16]),
        PoolLayer([64, 16, 16]),
        ConvLayer([64, 8, 8], 8, 0),
        ReLULayer([1024, 1, 1]),
        ConvLayer([1024, 1, 1], 1, 0),
        SoftLayer([10, 1, 1]),
    ])
    # Hyper-parameters: geometric step decay, no L2 regularization.
    cnn.step_init = 0.1
    cnn.step_update = 1
    cnn.lambda_reg = 0.000
    print("- step init %f" % cnn.step_init)
    print("- step upd %d" % cnn.step_update)
    print("- step reg %f" % cnn.lambda_reg)
    print("")
    print("CNN layers:")
    for layer in cnn.layers:
        print("+ " + layer.get_desc())
    print("")
    print("Reading training data...")
    features = read_values('Xtr.csv')
    targets = read_labels('Ytr.csv')
    # Hold out the last 10% for validation.
    split = int(len(targets) * 0.90)
    print("- split %d" % split)
    (train_x, test_x) = (features[:split], features[split:])
    (train_y, test_y) = (targets[:split], targets[split:])
    print("")
    print("Training...")
    nb_epoch = 2
    for epoch in range(nb_epoch):
        # Fresh shuffle of the training set each epoch.
        shuffle = np.random.permutation(len(train_y))
        print("- training epoch: %d/%d" % (epoch, nb_epoch))
        cnn.train(train_x[shuffle], train_y[shuffle])
        print("- testing epoch: %d/%d" % (epoch, nb_epoch))
        accuracy = cnn.test(test_x, test_y)
        print("* accuracy: %f" % accuracy)
        print("")
    print("Reading predicting data...")
    unseen = read_values('Xte.csv')
    print("")
    print("Predicting...")
    write_labels("Yte.csv", cnn.predict(unseen))
    print("")


if __name__ == "__main__":
    run()
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import logging as orig_logging
import os
import re
import urlparse
import boto
from boto import ec2
from boto import exception
from boto import s3
import keystoneclient.exceptions
import six
import tempest.clients
from tempest.common.utils import file_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
import tempest.test
from tempest.thirdparty.boto.utils import wait
# Module-level handles: tempest configuration and a logger named after
# this module.
CONF = config.CONF
LOG = logging.getLogger(__name__)
def decision_maker():
    """Probe the deployment and decide which boto tests can run.

    Checks that the ami/aki/ari image materials are readable and that the
    EC2 and S3 endpoints accept our credentials.

    :returns: dict with ``A_I_IMAGES_READY`` (bool) and
        ``EC2_CAN_CONNECT_ERROR`` / ``S3_CAN_CONNECT_ERROR`` (None on
        success, otherwise a string explaining why the service is
        unusable).
    """
    A_I_IMAGES_READY = True  # ari,ami,aki
    S3_CAN_CONNECT_ERROR = None
    EC2_CAN_CONNECT_ERROR = None
    secret_matcher = re.compile("[A-Za-z0-9+/]{32,}")  # 40 in other system
    id_matcher = re.compile("[A-Za-z0-9]{20,}")

    def all_read(*args):
        return all(map(file_utils.have_effective_read_access, args))

    materials_path = CONF.boto.s3_materials_path
    ami_path = materials_path + os.sep + CONF.boto.ami_manifest
    aki_path = materials_path + os.sep + CONF.boto.aki_manifest
    ari_path = materials_path + os.sep + CONF.boto.ari_manifest

    A_I_IMAGES_READY = all_read(ami_path, aki_path, ari_path)

    boto_logger = logging.getLogger('boto')
    level = boto_logger.logger.level
    # suppress logging for boto
    boto_logger.logger.setLevel(orig_logging.CRITICAL)

    def _cred_sub_check(connection_data):
        # Called when the service rejected us: report whether the
        # credentials at least look well formed, otherwise give up.
        if not id_matcher.match(connection_data["aws_access_key_id"]):
            raise Exception("Invalid AWS access Key")
        if not secret_matcher.match(connection_data["aws_secret_access_key"]):
            raise Exception("Invalid AWS secret Key")
        raise Exception("Unknown (Authentication?) Error")

    openstack = tempest.clients.Manager()
    try:
        if urlparse.urlparse(CONF.boto.ec2_url).hostname is None:
            raise Exception("Failed to get hostname from the ec2_url")
        ec2client = openstack.ec2api_client
        try:
            ec2client.get_all_regions()
        except exception.BotoServerError as exc:
            if exc.error_code is None:
                raise Exception("EC2 target does not looks EC2 service")
            _cred_sub_check(ec2client.connection_data)
    except keystoneclient.exceptions.Unauthorized:
        EC2_CAN_CONNECT_ERROR = "AWS credentials not set," +\
            " failed to get them even by keystoneclient"
    except Exception as exc:
        EC2_CAN_CONNECT_ERROR = str(exc)

    try:
        if urlparse.urlparse(CONF.boto.s3_url).hostname is None:
            raise Exception("Failed to get hostname from the s3_url")
        s3client = openstack.s3_client
        try:
            s3client.get_bucket("^INVALID*#()@INVALID.")
        except exception.BotoServerError as exc:
            if exc.status == 403:
                _cred_sub_check(s3client.connection_data)
    # BUG FIX: the Unauthorized handler must precede the generic Exception
    # handler; Unauthorized subclasses Exception, so in the original order
    # this clause was unreachable.  Now mirrors the EC2 block above.
    except keystoneclient.exceptions.Unauthorized:
        S3_CAN_CONNECT_ERROR = "AWS credentials not set," +\
            " failed to get them even by keystoneclient"
    except Exception as exc:
        S3_CAN_CONNECT_ERROR = str(exc)

    # restore boto's previous log level
    boto_logger.logger.setLevel(level)
    return {'A_I_IMAGES_READY': A_I_IMAGES_READY,
            'S3_CAN_CONNECT_ERROR': S3_CAN_CONNECT_ERROR,
            'EC2_CAN_CONNECT_ERROR': EC2_CAN_CONNECT_ERROR}
class BotoExceptionMatcher(object):
    """Matches a BotoServerError against expected status / error-code
    regexp patterns.  Subclasses narrow STATUS_RE / CODE_RE.
    """
    STATUS_RE = r'[45]\d\d'
    CODE_RE = '.*'  # regexp makes sense in group match

    def match(self, exc):
        """:returns: Returns with an error string if it does not match,
        returns with None when it matches.
        """
        if not isinstance(exc, exception.BotoServerError):
            return "%r not an BotoServerError instance" % exc
        LOG.info("Status: %s , error_code: %s", exc.status, exc.error_code)
        if re.match(self.STATUS_RE, str(exc.status)) is None:
            # BUG FIX: adjacent literals used to concatenate to
            # "does not matchthe expected" -- space added.
            return ("Status code (%s) does not match "
                    "the expected re pattern \"%s\""
                    % (exc.status, self.STATUS_RE))
        if re.match(self.CODE_RE, str(exc.error_code)) is None:
            return ("Error code (%s) does not match " +
                    "the expected re pattern \"%s\"") %\
                (exc.error_code, self.CODE_RE)
        return None
class ClientError(BotoExceptionMatcher):
    # 4xx range: the request itself was at fault (client-side error).
    STATUS_RE = r'4\d\d'
class ServerError(BotoExceptionMatcher):
    # 5xx range: the service failed to process a valid request.
    STATUS_RE = r'5\d\d'
def _add_matcher_class(error_cls, error_data, base=BotoExceptionMatcher):
    """
    Usable for adding an ExceptionMatcher(s) into the exception tree.
    The not leaf elements does wildcard match

    error_data is either a plain error-code string ("A.B.C") or a
    (error_code, status_code) tuple; the dotted code is turned into a
    chain of attribute-nested matcher instances under error_cls.
    """
    # in error_code just literal and '.' characters expected
    if not isinstance(error_data, six.string_types):
        # Tuple form carries an explicit HTTP status pattern for the leaf.
        (error_code, status_code) = map(str, error_data)
    else:
        status_code = None
        error_code = error_data
    parts = error_code.split('.')
    basematch = ""
    num_parts = len(parts)
    max_index = num_parts - 1
    add_cls = error_cls
    for i_part in six.moves.xrange(num_parts):
        part = parts[i_part]
        leaf = i_part == max_index
        # Intermediate nodes match any code under their prefix ("A[.].*");
        # the leaf matches the full dotted code exactly.
        if not leaf:
            match = basematch + part + "[.].*"
        else:
            match = basematch + part
        basematch += part + "[.]"
        if not hasattr(add_cls, part):
            # Create a new matcher type for this path segment and hang an
            # instance of it off the parent node.
            cls_dict = {"CODE_RE": match}
            if leaf and status_code is not None:
                cls_dict["STATUS_RE"] = status_code
            cls = type(part, (base, ), cls_dict)
            setattr(add_cls, part, cls())
            add_cls = cls
        elif leaf:
            # A leaf already exists for this code: registering twice is a bug.
            raise LookupError("Tries to redefine an error code \"%s\"" % part)
        else:
            # Descend into the already-registered intermediate node.
            add_cls = getattr(add_cls, part)
def friendly_function_name_simple(call_able):
    """Return the callable's name, prefixed with its class name when the
    callable is a bound/unbound method (Python 2 ``im_class`` attribute).
    """
    prefix = ""
    if hasattr(call_able, "im_class"):
        prefix = call_able.im_class.__name__ + "."
    return prefix + call_able.__name__
def friendly_function_call_str(call_able, *args, **kwargs):
    """Render a readable ``name(arg1, arg2, kw=value)`` string for logs."""
    pieces = [str(arg) for arg in args]
    pieces += ["=".join((str(key), str(value)))
               for (key, value) in kwargs.items()]
    return friendly_function_name_simple(call_able) + \
        "(" + ", ".join(pieces) + ")"
class BotoTestCase(tempest.test.BaseTestCase):
    """Recommended to use as base class for boto related test."""

    @classmethod
    def skip_checks(cls):
        # Boto tests are only meaningful when the EC2 compatibility API
        # is enabled in the deployment under test.
        super(BotoTestCase, cls).skip_checks()
        if not CONF.compute_feature_enabled.ec2_api:
            raise cls.skipException("The EC2 API is not available")

    @classmethod
    def setup_credentials(cls):
        super(BotoTestCase, cls).setup_credentials()
        cls.os = cls.get_client_manager()

    @classmethod
    def resource_setup(cls):
        super(BotoTestCase, cls).resource_setup()
        # decision_maker() probes EC2/S3 connectivity once per class.
        cls.conclusion = decision_maker()
        # The trash contains cleanup functions and paramaters in tuples
        # (function, *args, **kwargs)
        cls._resource_trash_bin = {}
        cls._sequence = -1
        # Classes tag themselves with an EC2 / S3 attribute to declare
        # which service they depend on; skip when that probe failed.
        if (hasattr(cls, "EC2") and
                cls.conclusion['EC2_CAN_CONNECT_ERROR'] is not None):
            raise cls.skipException("EC2 " + cls.__name__ + ": " +
                                    cls.conclusion['EC2_CAN_CONNECT_ERROR'])
        if (hasattr(cls, "S3") and
                cls.conclusion['S3_CAN_CONNECT_ERROR'] is not None):
            raise cls.skipException("S3 " + cls.__name__ + ": " +
                                    cls.conclusion['S3_CAN_CONNECT_ERROR'])

    @classmethod
    def addResourceCleanUp(cls, function, *args, **kwargs):
        """Adds CleanUp callable, used by tearDownClass.
        Recommended to a use (deep)copy on the mutable args.

        :returns: the sequence key, usable with cancelResourceCleanUp.
        """
        cls._sequence = cls._sequence + 1
        cls._resource_trash_bin[cls._sequence] = (function, args, kwargs)
        return cls._sequence

    @classmethod
    def cancelResourceCleanUp(cls, key):
        """Cancel Clean up request."""
        del cls._resource_trash_bin[key]

    # TODO(afazekas): Add "with" context handling
    def assertBotoError(self, excMatcher, callableObj,
                        *args, **kwargs):
        """Example usage:
            self.assertBotoError(self.ec2_error_code.client.
                                 InvalidKeyPair.Duplicate,
                                 self.client.create_keypair,
                                 key_name)
        """
        try:
            callableObj(*args, **kwargs)
        except exception.BotoServerError as exc:
            # excMatcher.match returns None on match, error string otherwise.
            error_msg = excMatcher.match(exc)
            if error_msg is not None:
                # NOTE: Python 2 raise syntax below.
                raise self.failureException, error_msg
        else:
            raise self.failureException, "BotoServerError not raised"

    @classmethod
    def resource_cleanup(cls):
        """Calls the callables added by addResourceCleanUp,
        when you overwrite this function don't forget to call this too.
        """
        fail_count = 0
        # Reverse (LIFO) order: tear resources down opposite to creation.
        trash_keys = sorted(cls._resource_trash_bin, reverse=True)
        for key in trash_keys:
            (function, pos_args, kw_args) = cls._resource_trash_bin[key]
            try:
                func_name = friendly_function_call_str(function, *pos_args,
                                                       **kw_args)
                LOG.debug("Cleaning up: %s" % func_name)
                function(*pos_args, **kw_args)
            except BaseException:
                fail_count += 1
                LOG.exception("Cleanup failed %s" % func_name)
            finally:
                del cls._resource_trash_bin[key]
        cls.clear_isolated_creds()
        super(BotoTestCase, cls).resource_cleanup()
        # NOTE(afazekas): let the super called even on exceptions
        # The real exceptions already logged, if the super throws another,
        # does not causes hidden issues
        if fail_count:
            raise exceptions.TearDownException(num=fail_count)

    # Class-wide matcher trees; leaf matchers are attached to these by the
    # module-level _add_matcher_class registration loops.
    ec2_error_code = BotoExceptionMatcher()
    # InsufficientInstanceCapacity can be both server and client error
    ec2_error_code.server = ServerError()
    ec2_error_code.client = ClientError()
    s3_error_code = BotoExceptionMatcher()
    s3_error_code.server = ServerError()
    s3_error_code.client = ClientError()
    valid_image_state = set(('available', 'pending', 'failed'))
    # NOTE(afazekas): 'paused' is not valid status in EC2, but it does not have
    # a good mapping, because it uses memory, but not really a running machine
    valid_instance_state = set(('pending', 'running', 'shutting-down',
                                'terminated', 'stopping', 'stopped', 'paused'))
    valid_volume_status = set(('creating', 'available', 'in-use',
                               'deleting', 'deleted', 'error'))
    valid_snapshot_status = set(('pending', 'completed', 'error'))
    # Sentinel state meaning "the resource no longer exists".
    gone_set = set(('_GONE',))

    @classmethod
    def get_lfunction_gone(cls, obj):
        """If the object is instance of a well know type returns back with
        with the correspoding function otherwise it assumes the obj itself
        is the function.
        """
        ec = cls.ec2_error_code
        if isinstance(obj, ec2.instance.Instance):
            colusure_matcher = ec.client.InvalidInstanceID.NotFound
            status_attr = "state"
        elif isinstance(obj, ec2.image.Image):
            colusure_matcher = ec.client.InvalidAMIID.NotFound
            status_attr = "state"
        elif isinstance(obj, ec2.snapshot.Snapshot):
            colusure_matcher = ec.client.InvalidSnapshot.NotFound
            status_attr = "status"
        elif isinstance(obj, ec2.volume.Volume):
            colusure_matcher = ec.client.InvalidVolume.NotFound
            status_attr = "status"
        else:
            return obj

        def _status():
            try:
                obj.update(validate=True)
            except ValueError:
                return "_GONE"
            except exception.EC2ResponseError as exc:
                # match() returns None when the NotFound pattern matches,
                # i.e. the resource is gone; re-raise anything else.
                if colusure_matcher.match(exc) is None:
                    return "_GONE"
                else:
                    raise
            return getattr(obj, status_attr)

        return _status

    def state_wait_gone(self, lfunction, final_set, valid_set):
        # Wait until the resource reaches final_set or disappears ("_GONE").
        if not isinstance(final_set, set):
            final_set = set((final_set,))
        final_set |= self.gone_set
        lfunction = self.get_lfunction_gone(lfunction)
        state = wait.state_wait(lfunction, final_set, valid_set)
        self.assertIn(state, valid_set | self.gone_set)
        return state

    def waitImageState(self, lfunction, wait_for):
        return self.state_wait_gone(lfunction, wait_for,
                                    self.valid_image_state)

    def waitInstanceState(self, lfunction, wait_for):
        return self.state_wait_gone(lfunction, wait_for,
                                    self.valid_instance_state)

    def waitSnapshotStatus(self, lfunction, wait_for):
        return self.state_wait_gone(lfunction, wait_for,
                                    self.valid_snapshot_status)

    def waitVolumeStatus(self, lfunction, wait_for):
        return self.state_wait_gone(lfunction, wait_for,
                                    self.valid_volume_status)

    def assertImageStateWait(self, lfunction, wait_for):
        state = self.waitImageState(lfunction, wait_for)
        self.assertIn(state, wait_for)

    def assertInstanceStateWait(self, lfunction, wait_for):
        state = self.waitInstanceState(lfunction, wait_for)
        self.assertIn(state, wait_for)

    def assertVolumeStatusWait(self, lfunction, wait_for):
        state = self.waitVolumeStatus(lfunction, wait_for)
        self.assertIn(state, wait_for)

    def assertSnapshotStatusWait(self, lfunction, wait_for):
        state = self.waitSnapshotStatus(lfunction, wait_for)
        self.assertIn(state, wait_for)

    def assertAddressDissasociatedWait(self, address):
        # Wait until the floating address is no longer bound to an instance.
        def _disassociate():
            cli = self.ec2_client
            addresses = cli.get_all_addresses(addresses=(address.public_ip,))
            if len(addresses) != 1:
                return "INVALID"
            if addresses[0].instance_id:
                LOG.info("%s associated to %s",
                         address.public_ip,
                         addresses[0].instance_id)
                return "ASSOCIATED"
            return "DISASSOCIATED"

        state = wait.state_wait(_disassociate, "DISASSOCIATED",
                                set(("ASSOCIATED", "DISASSOCIATED")))
        self.assertEqual(state, "DISASSOCIATED")

    def assertAddressReleasedWait(self, address):
        def _address_delete():
            # NOTE(afazekas): the filter gives back IP
            # even if it is not associated to my tenant
            if (address.public_ip not in map(lambda a: a.public_ip,
                self.ec2_client.get_all_addresses())):
                return "DELETED"
            return "NOTDELETED"

        state = wait.state_wait(_address_delete, "DELETED")
        self.assertEqual(state, "DELETED")

    def assertReSearch(self, regexp, string):
        if re.search(regexp, string) is None:
            raise self.failureException("regexp: '%s' not found in '%s'" %
                                        (regexp, string))

    def assertNotReSearch(self, regexp, string):
        if re.search(regexp, string) is not None:
            raise self.failureException("regexp: '%s' found in '%s'" %
                                        (regexp, string))

    def assertReMatch(self, regexp, string):
        if re.match(regexp, string) is None:
            raise self.failureException("regexp: '%s' not matches on '%s'" %
                                        (regexp, string))

    def assertNotReMatch(self, regexp, string):
        if re.match(regexp, string) is not None:
            raise self.failureException("regexp: '%s' matches on '%s'" %
                                        (regexp, string))

    @classmethod
    def destroy_bucket(cls, connection_data, bucket):
        """Destroys the bucket and its content, just for teardown."""
        exc_num = 0
        try:
            with contextlib.closing(
                    boto.connect_s3(**connection_data)) as conn:
                # NOTE: basestring is Python 2 only.
                if isinstance(bucket, basestring):
                    bucket = conn.lookup(bucket)
                    assert isinstance(bucket, s3.bucket.Bucket)
                for obj in bucket.list():
                    try:
                        bucket.delete_key(obj.key)
                        obj.close()
                    except BaseException:
                        LOG.exception("Failed to delete key %s " % obj.key)
                        exc_num += 1
            conn.delete_bucket(bucket)
        except BaseException:
            LOG.exception("Failed to destroy bucket %s " % bucket)
            exc_num += 1
        if exc_num:
            raise exceptions.TearDownException(num=exc_num)

    @classmethod
    def destroy_reservation(cls, reservation):
        """Terminate instances in a reservation, just for teardown."""
        exc_num = 0

        # NOTE: closure reads `instance` from the for-loop below (late
        # binding); it always refers to the loop's current instance.
        def _instance_state():
            try:
                instance.update(validate=True)
            except ValueError:
                return "_GONE"
            except exception.EC2ResponseError as exc:
                if cls.ec2_error_code.\
                        client.InvalidInstanceID.NotFound.match(exc) is None:
                    return "_GONE"
                # NOTE(afazekas): incorrect code,
                # but the resource must be destoreyd
                if exc.error_code == "InstanceNotFound":
                    return "_GONE"
            return instance.state

        for instance in reservation.instances:
            try:
                instance.terminate()
                wait.re_search_wait(_instance_state, "_GONE")
            except BaseException:
                LOG.exception("Failed to terminate instance %s " % instance)
                exc_num += 1
        if exc_num:
            raise exceptions.TearDownException(num=exc_num)

    # NOTE(afazekas): The incorrect ErrorCodes makes very, very difficult
    # to write better teardown

    @classmethod
    def destroy_security_group_wait(cls, group):
        """Delete group.
        Use just for teardown!
        """
        # NOTE(afazekas): should wait/try until all related instance terminates
        group.delete()

    @classmethod
    def destroy_volume_wait(cls, volume):
        """Delete volume, tries to detach first.
        Use just for teardown!
        """
        exc_num = 0
        snaps = volume.snapshots()
        if len(snaps):
            LOG.critical("%s Volume has %s snapshot(s)", volume.id,
                         map(snaps.id, snaps))

        # NOTE(afazekas): detaching/attching not valid EC2 status
        def _volume_state():
            volume.update(validate=True)
            try:
                # NOTE(gmann): Make sure volume is attached.
                # Checking status as 'not "available"' is not enough to make
                # sure volume is attached as it can be in "error" state
                if volume.status == "in-use":
                    volume.detach(force=True)
            except BaseException:
                LOG.exception("Failed to detach volume %s" % volume)
                # exc_num += 1 "nonlocal" not in python2
            return volume.status

        try:
            wait.re_search_wait(_volume_state, "available")
            # not validates status
            LOG.info(_volume_state())
            volume.delete()
        except BaseException:
            LOG.exception("Failed to delete volume %s" % volume)
            exc_num += 1
        if exc_num:
            raise exceptions.TearDownException(num=exc_num)

    @classmethod
    def destroy_snapshot_wait(cls, snapshot):
        """delete snapshot, wait until it ceases to exist."""
        snapshot.delete()

        def _update():
            snapshot.update(validate=True)

        # wait_exception: waits until _update() raises (snapshot is gone).
        wait.wait_exception(_update)
# you can specify tuples if you want to specify the status pattern
# Register every known EC2 client-side (4xx) error code as a matcher
# under BotoTestCase.ec2_error_code.client.
for code in ('AddressLimitExceeded', 'AttachmentLimitExceeded', 'AuthFailure',
             'Blocked', 'CustomerGatewayLimitExceeded', 'DependencyViolation',
             'DiskImageSizeTooLarge', 'FilterLimitExceeded',
             'Gateway.NotAttached', 'IdempotentParameterMismatch',
             'IncorrectInstanceState', 'IncorrectState',
             'InstanceLimitExceeded', 'InsufficientInstanceCapacity',
             'InsufficientReservedInstancesCapacity',
             'InternetGatewayLimitExceeded', 'InvalidAMIAttributeItemValue',
             'InvalidAMIID.Malformed', 'InvalidAMIID.NotFound',
             'InvalidAMIID.Unavailable', 'InvalidAssociationID.NotFound',
             'InvalidAttachment.NotFound', 'InvalidConversionTaskId',
             'InvalidCustomerGateway.DuplicateIpAddress',
             'InvalidCustomerGatewayID.NotFound', 'InvalidDevice.InUse',
             'InvalidDhcpOptionsID.NotFound', 'InvalidFormat',
             'InvalidFilter', 'InvalidGatewayID.NotFound',
             'InvalidGroup.Duplicate', 'InvalidGroupId.Malformed',
             'InvalidGroup.InUse', 'InvalidGroup.NotFound',
             'InvalidGroup.Reserved', 'InvalidInstanceID.Malformed',
             'InvalidInstanceID.NotFound',
             'InvalidInternetGatewayID.NotFound', 'InvalidIPAddress.InUse',
             'InvalidKeyPair.Duplicate', 'InvalidKeyPair.Format',
             'InvalidKeyPair.NotFound', 'InvalidManifest',
             'InvalidNetworkAclEntry.NotFound',
             'InvalidNetworkAclID.NotFound', 'InvalidParameterCombination',
             'InvalidParameterValue', 'InvalidPermission.Duplicate',
             'InvalidPermission.Malformed', 'InvalidReservationID.Malformed',
             'InvalidReservationID.NotFound', 'InvalidRoute.NotFound',
             'InvalidRouteTableID.NotFound',
             'InvalidSecurity.RequestHasExpired',
             'InvalidSnapshotID.Malformed', 'InvalidSnapshot.NotFound',
             'InvalidUserID.Malformed', 'InvalidReservedInstancesId',
             'InvalidReservedInstancesOfferingId',
             'InvalidSubnetID.NotFound', 'InvalidVolumeID.Duplicate',
             'InvalidVolumeID.Malformed', 'InvalidVolumeID.ZoneMismatch',
             'InvalidVolume.NotFound', 'InvalidVpcID.NotFound',
             'InvalidVpnConnectionID.NotFound',
             'InvalidVpnGatewayID.NotFound',
             'InvalidZone.NotFound', 'LegacySecurityGroup',
             'MissingParameter', 'NetworkAclEntryAlreadyExists',
             'NetworkAclEntryLimitExceeded', 'NetworkAclLimitExceeded',
             'NonEBSInstance', 'PendingSnapshotLimitExceeded',
             'PendingVerification', 'OptInRequired', 'RequestLimitExceeded',
             'ReservedInstancesLimitExceeded', 'Resource.AlreadyAssociated',
             'ResourceLimitExceeded', 'RouteAlreadyExists',
             'RouteLimitExceeded', 'RouteTableLimitExceeded',
             'RulesPerSecurityGroupLimitExceeded',
             'SecurityGroupLimitExceeded',
             'SecurityGroupsPerInstanceLimitExceeded',
             'SnapshotLimitExceeded', 'SubnetLimitExceeded',
             'UnknownParameter', 'UnsupportedOperation',
             'VolumeLimitExceeded', 'VpcLimitExceeded',
             'VpnConnectionLimitExceeded',
             'VpnGatewayAttachmentLimitExceeded', 'VpnGatewayLimitExceeded'):
    _add_matcher_class(BotoTestCase.ec2_error_code.client,
                       code, base=ClientError)
# EC2 server-side (5xx) error codes.
for code in ('InsufficientAddressCapacity', 'InsufficientInstanceCapacity',
             'InsufficientReservedInstanceCapacity', 'InternalError',
             'Unavailable'):
    _add_matcher_class(BotoTestCase.ec2_error_code.server,
                       code, base=ServerError)
# S3 client-side error codes; tuple entries also pin the HTTP status.
for code in (('AccessDenied', 403),
             ('AccountProblem', 403),
             ('AmbiguousGrantByEmailAddress', 400),
             ('BadDigest', 400),
             ('BucketAlreadyExists', 409),
             ('BucketAlreadyOwnedByYou', 409),
             ('BucketNotEmpty', 409),
             ('CredentialsNotSupported', 400),
             ('CrossLocationLoggingProhibited', 403),
             ('EntityTooSmall', 400),
             ('EntityTooLarge', 400),
             ('ExpiredToken', 400),
             ('IllegalVersioningConfigurationException', 400),
             ('IncompleteBody', 400),
             ('IncorrectNumberOfFilesInPostRequest', 400),
             ('InlineDataTooLarge', 400),
             ('InvalidAccessKeyId', 403),
             # NOTE(review): bare string -> no status pattern registered
             # for this code; possibly intentional, verify upstream.
             'InvalidAddressingHeader',
             ('InvalidArgument', 400),
             ('InvalidBucketName', 400),
             ('InvalidBucketState', 409),
             ('InvalidDigest', 400),
             ('InvalidLocationConstraint', 400),
             ('InvalidPart', 400),
             ('InvalidPartOrder', 400),
             ('InvalidPayer', 403),
             ('InvalidPolicyDocument', 400),
             ('InvalidRange', 416),
             ('InvalidRequest', 400),
             ('InvalidSecurity', 403),
             ('InvalidSOAPRequest', 400),
             ('InvalidStorageClass', 400),
             ('InvalidTargetBucketForLogging', 400),
             ('InvalidToken', 400),
             ('InvalidURI', 400),
             ('KeyTooLong', 400),
             ('MalformedACLError', 400),
             ('MalformedPOSTRequest', 400),
             ('MalformedXML', 400),
             ('MaxMessageLengthExceeded', 400),
             ('MaxPostPreDataLengthExceededError', 400),
             ('MetadataTooLarge', 400),
             ('MethodNotAllowed', 405),
             # NOTE(review): parentheses without a comma make this a plain
             # string, so no status is registered -- looks like a missing
             # comma; confirm against upstream before changing.
             ('MissingAttachment'),
             ('MissingContentLength', 411),
             ('MissingRequestBodyError', 400),
             ('MissingSecurityElement', 400),
             ('MissingSecurityHeader', 400),
             ('NoLoggingStatusForKey', 400),
             ('NoSuchBucket', 404),
             ('NoSuchKey', 404),
             ('NoSuchLifecycleConfiguration', 404),
             ('NoSuchUpload', 404),
             ('NoSuchVersion', 404),
             ('NotSignedUp', 403),
             ('NotSuchBucketPolicy', 404),
             ('OperationAborted', 409),
             ('PermanentRedirect', 301),
             ('PreconditionFailed', 412),
             ('Redirect', 307),
             ('RequestIsNotMultiPartContent', 400),
             ('RequestTimeout', 400),
             ('RequestTimeTooSkewed', 403),
             ('RequestTorrentOfBucketError', 400),
             ('SignatureDoesNotMatch', 403),
             ('TemporaryRedirect', 307),
             ('TokenRefreshRequired', 400),
             ('TooManyBuckets', 400),
             ('UnexpectedContent', 400),
             ('UnresolvableGrantByEmailAddress', 400),
             ('UserKeyMustBeSpecified', 400)):
    _add_matcher_class(BotoTestCase.s3_error_code.client,
                       code, base=ClientError)
# S3 server-side error codes with their HTTP statuses.
for code in (('InternalError', 500),
             ('NotImplemented', 501),
             ('ServiceUnavailable', 503),
             ('SlowDown', 503)):
    _add_matcher_class(BotoTestCase.s3_error_code.server,
                       code, base=ServerError)
|
|
"""James-Stein"""
import numpy as np
import pandas as pd
import scipy
from scipy import optimize
from sklearn.base import BaseEstimator
from category_encoders.ordinal import OrdinalEncoder
import category_encoders.utils as util
from sklearn.utils.random import check_random_state
__author__ = 'Jan Motl'
class JamesSteinEncoder(BaseEstimator, util.TransformerWithTargetMixin):
    """James-Stein estimator.

    Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper.

    For feature value `i`, James-Stein estimator returns a weighted average of:

        1. The mean target value for the observed feature value `i`.
        2. The mean target value (regardless of the feature value).

    This can be written as::

        JS_i = (1-B)*mean(y_i) + B*mean(y)

    The question is, what should be the weight `B`?
    If we put too much weight on the conditional mean value, we will overfit.
    If we put too much weight on the global mean, we will underfit.
    The canonical solution in machine learning is to perform cross-validation.
    However, Charles Stein came with a closed-form solution to the problem.
    The intuition is: If the estimate of `mean(y_i)` is unreliable (`y_i` has high variance),
    we should put more weight on `mean(y)`. Stein put it into an equation as::

        B = var(y_i) / (var(y_i)+var(y))

    The only remaining issue is that we do not know `var(y)`, let alone `var(y_i)`.
    Hence, we have to estimate the variances. But how can we reliably estimate the
    variances, when we already struggle with the estimation of the mean values?!
    There are multiple solutions:

        1. If we have the same count of observations for each feature value `i` and all
           `y_i` are close to each other, we can pretend that all `var(y_i)` are identical.
           This is called a pooled model.
        2. If the observation counts are not equal, it makes sense to replace the variances
           with squared standard errors, which penalize small observation counts::

               SE^2 = var(y)/count(y)

           This is called an independent model.

    James-Stein estimator has, however, one practical limitation - it was defined
    only for normal distributions. If you want to apply it for binary classification,
    which allows only values {0, 1}, it is better to first convert the mean target value
    from the bound interval <0,1> into an unbounded interval by replacing mean(y)
    with log-odds ratio::

        log-odds_ratio_i = log(mean(y_i)/mean(y_not_i))

    This is called binary model. The estimation of parameters of this model is, however,
    tricky and sometimes it fails fatally. In these situations, it is better to use beta
    model, which generally delivers slightly worse accuracy than binary model but does
    not suffer from fatal failures.

    Parameters
    ----------
    verbose: int
        integer indicating verbosity of the output. 0 for none.
    cols: list
        a list of columns to encode, if None, all string columns will be encoded.
    drop_invariant: bool
        boolean for whether or not to drop encoded columns with 0 variance.
    return_df: bool
        boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array).
    handle_missing: str
        options are 'return_nan', 'error' and 'value', defaults to 'value', which returns the prior probability.
    handle_unknown: str
        options are 'return_nan', 'error' and 'value', defaults to 'value', which returns the prior probability.
    model: str
        options are 'pooled', 'beta', 'binary' and 'independent', defaults to 'independent'.
    randomized: bool,
        adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched).
    sigma: float
        standard deviation (spread or "width") of the normal distribution.

    Example
    -------
    >>> from category_encoders import *
    >>> import pandas as pd
    >>> from sklearn.datasets import load_boston
    >>> bunch = load_boston()
    >>> y = bunch.target
    >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)
    >>> enc = JamesSteinEncoder(cols=['CHAS', 'RAD']).fit(X, y)
    >>> numeric_dataset = enc.transform(X)
    >>> print(numeric_dataset.info())
    <class 'pandas.core.frame.DataFrame'>
    RangeIndex: 506 entries, 0 to 505
    Data columns (total 13 columns):
    CRIM       506 non-null float64
    ZN         506 non-null float64
    INDUS      506 non-null float64
    CHAS       506 non-null float64
    NOX        506 non-null float64
    RM         506 non-null float64
    AGE        506 non-null float64
    DIS        506 non-null float64
    RAD        506 non-null float64
    TAX        506 non-null float64
    PTRATIO    506 non-null float64
    B          506 non-null float64
    LSTAT      506 non-null float64
    dtypes: float64(13)
    memory usage: 51.5 KB
    None

    References
    ----------
    .. [1] Parametric empirical Bayes inference: Theory and applications, equations 1.19 & 1.20, from
           https://www.jstor.org/stable/2287098
    .. [2] Empirical Bayes for multiple sample sizes, from
           http://chris-said.io/2017/05/03/empirical-bayes-for-multiple-sample-sizes/
    .. [3] Shrinkage Estimation of Log-odds Ratios for Comparing Mobility Tables, from
           https://journals.sagepub.com/doi/abs/10.1177/0081175015570097
    .. [4] Stein's paradox and group rationality, from
           http://www.philos.rug.nl/~romeyn/presentation/2017_romeijn_-_Paris_Stein.pdf
    .. [5] Stein's Paradox in Statistics, from
           http://statweb.stanford.edu/~ckirby/brad/other/Article1977.pdf
    """

    def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True,
                 handle_unknown='value', handle_missing='value', model='independent',
                 random_state=None, randomized=False, sigma=0.05):
        self.verbose = verbose
        self.return_df = return_df
        self.drop_invariant = drop_invariant
        self.drop_cols = []
        self.cols = cols
        self.ordinal_encoder = None
        self._dim = None
        self.mapping = None
        self.handle_unknown = handle_unknown
        self.handle_missing = handle_missing
        self.random_state = random_state
        self.randomized = randomized
        self.sigma = sigma
        self.model = model
        self.feature_names = None

    # noinspection PyUnusedLocal
    def fit(self, X, y, **kwargs):
        """Fit encoder according to X and binary y.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape = [n_samples]
            Binary target values.

        Returns
        -------
        self : encoder
            Returns self.
        """
        # Unite parameters into pandas types
        X = util.convert_input(X)
        y = util.convert_input_vector(y, X.index).astype(float)

        # The lengths must be equal
        if X.shape[0] != y.shape[0]:
            raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")

        self._dim = X.shape[1]

        # If columns aren't passed, just use every string column
        if self.cols is None:
            self.cols = util.get_obj_cols(X)
        else:
            self.cols = util.convert_cols_to_list(self.cols)

        if self.handle_missing == 'error':
            if X[self.cols].isnull().any().any():
                raise ValueError('Columns to be encoded can not contain null')

        # Ordinal-encode the categories first; the trainers below map the
        # resulting integer codes to James-Stein estimates.
        self.ordinal_encoder = OrdinalEncoder(
            verbose=self.verbose,
            cols=self.cols,
            handle_unknown='value',
            handle_missing='value'
        )
        self.ordinal_encoder = self.ordinal_encoder.fit(X)
        X_ordinal = self.ordinal_encoder.transform(X)

        # Training
        if self.model == 'independent':
            self.mapping = self._train_independent(X_ordinal, y)
        elif self.model == 'pooled':
            self.mapping = self._train_pooled(X_ordinal, y)
        elif self.model == 'beta':
            self.mapping = self._train_beta(X_ordinal, y)
        elif self.model == 'binary':
            # The label must be binary with values {0,1}
            unique = y.unique()
            if len(unique) != 2:
                raise ValueError("The target column y must be binary. But the target contains " + str(len(unique)) + " unique value(s).")
            if y.isnull().any():
                raise ValueError("The target column y must not contain missing values.")
            if np.max(unique) < 1:
                raise ValueError("The target column y must be binary with values {0, 1}. Value 1 was not found in the target.")
            if np.min(unique) > 0:
                raise ValueError("The target column y must be binary with values {0, 1}. Value 0 was not found in the target.")
            # Perform the training
            self.mapping = self._train_log_odds_ratio(X_ordinal, y)
        else:
            raise ValueError("model='" + str(self.model) + "' is not a recognized option")

        X_temp = self.transform(X, override_return_df=True)
        self.feature_names = X_temp.columns.tolist()

        # Store column names with approximately constant variance on the training data
        if self.drop_invariant:
            self.drop_cols = []
            generated_cols = util.get_generated_cols(X, X_temp, self.cols)
            self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5]
            for x in self.drop_cols:
                try:
                    self.feature_names.remove(x)
                except ValueError as e:
                    # Bug fix: list.remove() raises ValueError, not KeyError,
                    # so the original `except KeyError` never fired and a
                    # missing column crashed fit(). Removal is best-effort.
                    if self.verbose > 0:
                        print("Could not remove column from feature names."
                              "Not found in generated cols.\n{}".format(e))

        return self

    def transform(self, X, y=None, override_return_df=False):
        """Perform the transformation to new categorical data. When the data are used for model training,
        it is important to also pass the target in order to apply leave one out.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        y : array-like, shape = [n_samples] when transform by leave one out
            None, when transform without target information (such as transform test set)

        Returns
        -------
        p : array, shape = [n_samples, n_numeric + N]
            Transformed values with encoding applied.
        """
        if self.handle_missing == 'error':
            if X[self.cols].isnull().any().any():
                raise ValueError('Columns to be encoded can not contain null')

        if self._dim is None:
            raise ValueError('Must train encoder before it can be used to transform data.')

        # Unite the input into pandas types
        X = util.convert_input(X)

        # Then make sure that it is the right size
        if X.shape[1] != self._dim:
            raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim,))

        # If we are encoding the training data, we have to check the target
        if y is not None:
            y = util.convert_input_vector(y, X.index).astype(float)
            if X.shape[0] != y.shape[0]:
                raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")

        if not list(self.cols):
            return X

        # Do not modify the input argument
        X = X.copy(deep=True)

        X = self.ordinal_encoder.transform(X)

        if self.handle_unknown == 'error':
            if X[self.cols].isin([-1]).any().any():
                raise ValueError('Unexpected categories found in dataframe')

        # Loop over columns and replace nominal values with WOE
        X = self._score(X, y)

        # Postprocessing
        # Note: We should not even convert these columns.
        if self.drop_invariant:
            for col in self.drop_cols:
                # Use the explicit `axis` keyword (positional axis is
                # deprecated in recent pandas).
                X.drop(col, axis=1, inplace=True)

        if self.return_df or override_return_df:
            return X
        else:
            return X.values

    def _train_pooled(self, X, y):
        # Implemented based on reference [1]

        # Initialize the output
        mapping = {}

        # Calculate global statistics
        prior = y.mean()
        target_var = y.var()
        global_count = len(y)

        for switch in self.ordinal_encoder.category_mapping:
            col = switch.get('col')
            values = switch.get('mapping')

            # Calculate sum and count of the target for each unique value in the feature col
            stats = y.groupby(X[col]).agg(['mean', 'count'])

            # See: Computer Age Statistical Inference: Algorithms, Evidence, and Data Science (Bradley Efron & Trevor Hastie, 2016)
            # Equations 7.19 and 7.20.
            # Note: The equations assume normal distribution of the label. But our label is p(y|x),
            # which is definitely not normally distributed as probabilities are bound to lie on interval 0..1.
            # We make this approximation because Efron does it as well.

            # Equation 7.19: squared estimate of the standard error of the mean:
            # https://en.wikipedia.org/wiki/Standard_error
            variance = target_var/(stats['count'].mean())

            # Equation 7.20
            SSE = ((stats['mean']-prior)**2).sum()  # Sum of Squared Errors
            if SSE > 0:  # We have to avoid division by zero
                B = ((len(stats['count'])-3)*variance) / SSE
                B = B.clip(0, 1)
                estimate = prior + (1 - B) * (stats['mean'] - prior)
            else:
                estimate = stats['mean']

            # Ignore unique values. This helps to prevent overfitting on id-like columns
            # This works better than: estimate[stats['count'] == 1] = prior
            if len(stats['mean']) == global_count:
                estimate[:] = prior

            if self.handle_unknown == 'return_nan':
                estimate.loc[-1] = np.nan
            elif self.handle_unknown == 'value':
                estimate.loc[-1] = prior

            if self.handle_missing == 'return_nan':
                estimate.loc[values.loc[np.nan]] = np.nan
            elif self.handle_missing == 'value':
                estimate.loc[-2] = prior

            # Store the estimate for transform() function
            mapping[col] = estimate

        return mapping

    def _train_independent(self, X, y):
        # Implemented based on reference [2]

        # Initialize the output
        mapping = {}

        # Calculate global statistics
        prior = y.mean()
        global_count = len(y)
        global_var = y.var()

        for switch in self.ordinal_encoder.category_mapping:
            col = switch.get('col')
            values = switch.get('mapping')

            # Calculate sum and count of the target for each unique value in the feature col
            stats = y.groupby(X[col]).agg(['mean', 'var'])

            i_var = stats['var'].fillna(0)  # When we do not have more than 1 sample, assume 0 variance
            unique_cnt = len(X[col].unique())

            # See: Parametric Empirical Bayes Inference: Theory and Applications (Morris, 1983)
            # Equations 1.19 and 1.20.
            # Note: The equations assume normal distribution of the label. But our label is p(y|x),
            # which is definitely not normally distributed as probabilities are bound to lie on interval 0..1.
            # Nevertheless, it seems to perform surprisingly well. This is in agreement with:
            #     Data Analysis with Stein's Estimator and Its Generalizations (Efron & Morris, 1975)
            # The difference is that the references have equal count of observations per estimated
            # variable, while we generally do not. Nice discussion about that is given at:
            #     http://chris-said.io/2017/05/03/empirical-bayes-for-multiple-sample-sizes/
            smoothing = i_var / (global_var + i_var) * (unique_cnt-3) / (unique_cnt-1)
            smoothing = 1 - smoothing
            smoothing = smoothing.clip(lower=0, upper=1)  # Smoothing should be in the interval <0,1>

            estimate = smoothing*(stats['mean']) + (1-smoothing)*prior

            # Ignore unique values. This helps to prevent overfitting on id-like columns
            if len(stats['mean']) == global_count:
                estimate[:] = prior

            if self.handle_unknown == 'return_nan':
                estimate.loc[-1] = np.nan
            elif self.handle_unknown == 'value':
                estimate.loc[-1] = prior

            if self.handle_missing == 'return_nan':
                estimate.loc[values.loc[np.nan]] = np.nan
            elif self.handle_missing == 'value':
                estimate.loc[-2] = prior

            # Store the estimate for transform() function
            mapping[col] = estimate

        return mapping

    def _train_log_odds_ratio(self, X, y):
        # Implemented based on reference [3]

        # Initialize the output
        mapping = {}

        # Calculate global statistics
        global_sum = y.sum()
        global_count = y.count()

        # Iterative estimation of mu and sigma as given on page 9.
        # This problem is traditionally solved with Newton-Raphson method:
        #     https://en.wikipedia.org/wiki/Newton%27s_method
        # But we just use scipy's minimizer.
        def get_best_sigma(sigma, mu_k, sigma_k, K):
            """Loss for the sigma estimate: the weighted sum of squared
            deviations should be close to K-1. Pure function — no globals."""
            w_k = 1. / (sigma ** 2 + sigma_k ** 2)  # Weights depend on sigma
            mu = sum(w_k * mu_k) / sum(w_k)         # Mu transitively depends on sigma
            total = sum(w_k * (mu_k - mu) ** 2)     # We want this to be close to K-1
            loss = abs(total - (K - 1))
            return loss

        for switch in self.ordinal_encoder.category_mapping:
            col = switch.get('col')
            values = switch.get('mapping')

            # Calculate sum and count of the target for each unique value in the feature col
            stats = y.groupby(X[col]).agg(['sum', 'count'])  # Count of x_{i,+} and x_i

            # Create 2x2 contingency table
            crosstable = pd.DataFrame()
            crosstable['E-A-'] = global_count - stats['count'] + stats['sum'] - global_sum
            crosstable['E-A+'] = stats['count'] - stats['sum']
            crosstable['E+A-'] = global_sum - stats['sum']
            crosstable['E+A+'] = stats['sum']
            index = crosstable.index.values
            crosstable = np.array(crosstable, dtype=np.float32)  # The argument unites the types into float

            # Count of contingency tables.
            K = len(crosstable)

            # Ignore id-like columns. This helps to prevent overfitting.
            if K == global_count:
                estimate = pd.Series(0, index=values)
            else:
                if K > 1:  # We want to avoid division by zero in y_k calculation
                    # Estimate log-odds ratios with Yates correction as listed on page 5.
                    mu_k = np.log((crosstable[:, 0] + 0.5) * (crosstable[:, 3] + 0.5) / ((crosstable[:, 1] + 0.5) * (crosstable[:, 2] + 0.5)))

                    # Standard deviation estimate for 2x2 contingency table as given in equation 2.
                    # The explanation of the equation is given in:
                    #     https://stats.stackexchange.com/questions/266098/how-do-i-calculate-the-standard-deviation-of-the-log-odds
                    sigma_k = np.sqrt(np.sum(1. / (crosstable + 0.5), axis=1))

                    # Estimate the sigma and mu. Sigma is non-negative.
                    result = scipy.optimize.minimize(get_best_sigma, x0=1e-4, args=(mu_k, sigma_k, K), bounds=[(0, np.inf)], method='TNC', tol=1e-12, options={'gtol': 1e-12, 'ftol': 1e-12, 'eps': 1e-12})
                    sigma = result.x[0]

                    # Recompute mu at the optimized sigma. (The original code
                    # smuggled mu out of the loss function through a `global`
                    # variable, which leaked module state and used the mu from
                    # the optimizer's last probe rather than the final sigma.)
                    w_k = 1. / (sigma ** 2 + sigma_k ** 2)
                    mu = np.sum(w_k * mu_k) / np.sum(w_k)

                    # Empirical Bayes follows equation 7.
                    # However, James-Stein estimator behaves perversely when K < 3. Hence, we clip the B into interval <0,1>.
                    # Literature reference for the clipping:
                    #     Estimates of Income for Small Places: An Application of James-Stein Procedures to Census Data (Fay & Harriout, 1979),
                    #     page 270.
                    B = (K - 3) * sigma_k ** 2 / ((K - 1) * (sigma ** 2 + sigma_k ** 2))
                    B = B.clip(0, 1)

                    y_k = mu + (1 - B) * (mu_k - mu)

                    # Convert Numpy vector back into Series
                    estimate = pd.Series(y_k, index=index)
                else:
                    estimate = pd.Series(0, index=values)

            if self.handle_unknown == 'return_nan':
                estimate.loc[-1] = np.nan
            elif self.handle_unknown == 'value':
                estimate.loc[-1] = 0

            if self.handle_missing == 'return_nan':
                estimate.loc[values.loc[np.nan]] = np.nan
            elif self.handle_missing == 'value':
                estimate.loc[-2] = 0

            # Store the estimate for transform() function
            mapping[col] = estimate

        return mapping

    def _train_beta(self, X, y):
        # Implemented based on reference [4]

        # Initialize the output
        mapping = {}

        # Calculate global statistics
        prior = y.mean()
        global_count = len(y)

        for switch in self.ordinal_encoder.category_mapping:
            col = switch.get('col')
            values = switch.get('mapping')

            # Calculate sum and count of the target for each unique value in the feature col
            stats = y.groupby(X[col]).agg(['mean', 'count'])

            # See: Stein's paradox and group rationality (Romeijn, 2017), page 14
            smoothing = stats['count'] / (stats['count'] + global_count)

            estimate = smoothing*(stats['mean']) + (1-smoothing)*prior

            # Ignore unique values. This helps to prevent overfitting on id-like columns
            if len(stats['mean']) == global_count:
                estimate[:] = prior

            if self.handle_unknown == 'return_nan':
                estimate.loc[-1] = np.nan
            elif self.handle_unknown == 'value':
                estimate.loc[-1] = prior

            if self.handle_missing == 'return_nan':
                estimate.loc[values.loc[np.nan]] = np.nan
            elif self.handle_missing == 'value':
                estimate.loc[-2] = prior

            # Store the estimate for transform() function
            mapping[col] = estimate

        return mapping

    def _score(self, X, y):
        """Replace ordinal codes with the trained estimates; optionally add
        multiplicative Gaussian noise during training (y is not None)."""
        for col in self.cols:
            # Score the column
            X[col] = X[col].map(self.mapping[col])

            # Randomization is meaningful only for training data -> we do it only if y is present
            if self.randomized and y is not None:
                random_state_generator = check_random_state(self.random_state)
                X[col] = (X[col] * random_state_generator.normal(1., self.sigma, X[col].shape[0]))

        return X

    def get_feature_names(self):
        """
        Returns the names of all transformed / added columns.

        Returns
        -------
        feature_names: list
            A list with all feature names transformed or added.
            Note: potentially dropped features are not included!
        """
        if not isinstance(self.feature_names, list):
            raise ValueError("Estimator has to be fitted to return feature names.")
        else:
            return self.feature_names
|
|
"""Support for the IBM Watson IoT Platform."""
import logging
import queue
import threading
import time
from ibmiotf import MissingMessageEncoderException
from ibmiotf.gateway import Client
import voluptuous as vol
from homeassistant.const import (
CONF_DOMAINS,
CONF_ENTITIES,
CONF_EXCLUDE,
CONF_ID,
CONF_INCLUDE,
CONF_TOKEN,
CONF_TYPE,
EVENT_HOMEASSISTANT_STOP,
EVENT_STATE_CHANGED,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
from homeassistant.helpers import state as state_helper
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Configuration key for the Watson IoT organization ID.
CONF_ORG = "organization"

DOMAIN = "watson_iot"

# Publish retry policy: retry up to MAX_TRIES extra times, waiting
# RETRY_DELAY seconds between attempts (see WatsonIOTThread.write_to_watson).
MAX_TRIES = 3
RETRY_DELAY = 20

# Component configuration: mandatory gateway credentials (org/type/id/token)
# plus optional include/exclude filters by entity id or by domain.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.All(
            vol.Schema(
                {
                    vol.Required(CONF_ORG): cv.string,
                    vol.Required(CONF_TYPE): cv.string,
                    vol.Required(CONF_ID): cv.string,
                    vol.Required(CONF_TOKEN): cv.string,
                    vol.Optional(CONF_EXCLUDE, default={}): vol.Schema(
                        {
                            vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
                            vol.Optional(CONF_DOMAINS, default=[]): vol.All(
                                cv.ensure_list, [cv.string]
                            ),
                        }
                    ),
                    vol.Optional(CONF_INCLUDE, default={}): vol.Schema(
                        {
                            vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
                            vol.Optional(CONF_DOMAINS, default=[]): vol.All(
                                cv.ensure_list, [cv.string]
                            ),
                        }
                    ),
                }
            )
        )
    },
    extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
    """Set up the Watson IoT Platform component.

    Connects a gateway client, starts the publisher thread, and registers
    a stop listener so the thread is drained on Home Assistant shutdown.
    Returns True on successful setup.
    """
    conf = config[DOMAIN]

    # Entity/domain filters: include lists win only if non-empty; exclude
    # lists always remove matching states.
    include = conf[CONF_INCLUDE]
    exclude = conf[CONF_EXCLUDE]
    include_e = set(include[CONF_ENTITIES])
    include_d = set(include[CONF_DOMAINS])
    exclude_e = set(exclude[CONF_ENTITIES])
    exclude_d = set(exclude[CONF_DOMAINS])

    # Credentials for the IBM Watson IoT gateway client (token auth).
    client_args = {
        "org": conf[CONF_ORG],
        "type": conf[CONF_TYPE],
        "id": conf[CONF_ID],
        "auth-method": "token",
        "auth-token": conf[CONF_TOKEN],
    }
    watson_gateway = Client(client_args)

    def event_to_json(event):
        """Convert a state_changed event into the outgoing dict, or None.

        Returns None (implicitly) for states that are unavailable/unknown
        or filtered out by the include/exclude configuration.
        """
        state = event.data.get("new_state")
        if (
            state is None
            or state.state in (STATE_UNKNOWN, "", STATE_UNAVAILABLE)
            or state.entity_id in exclude_e
            or state.domain in exclude_d
        ):
            return
        if (include_e and state.entity_id not in include_e) or (
            include_d and state.domain not in include_d
        ):
            return
        # First try a plain float cast of the raw state; fall back to Home
        # Assistant's state_as_number helper (handles on/off etc.).
        try:
            _state_as_value = float(state.state)
        except ValueError:
            _state_as_value = None
        if _state_as_value is None:
            try:
                _state_as_value = float(state_helper.state_as_number(state))
            except ValueError:
                _state_as_value = None
        out_event = {
            "tags": {"domain": state.domain, "entity_id": state.object_id},
            "time": event.time_fired.isoformat(),
            "fields": {"state": state.state},
        }
        if _state_as_value is not None:
            out_event["fields"]["state_value"] = _state_as_value
        for key, value in state.attributes.items():
            if key != "unit_of_measurement":
                # If the key is already in fields
                if key in out_event["fields"]:
                    key = f"{key}_"
                # For each value we try to cast it as float
                # But if we can not do it we store the value
                # as string
                try:
                    out_event["fields"][key] = float(value)
                except (ValueError, TypeError):
                    out_event["fields"][key] = str(value)
        return out_event

    # The thread connects the gateway and subscribes to state changes in
    # its constructor; store it for other parts of HA to reach.
    instance = hass.data[DOMAIN] = WatsonIOTThread(hass, watson_gateway, event_to_json)
    instance.start()

    def shutdown(event):
        """Shut down the thread."""
        # None is the sentinel the worker interprets as "stop".
        instance.queue.put(None)
        instance.join()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)
    return True
class WatsonIOTThread(threading.Thread):
    """A threaded event handler class.

    Consumes state-change events from an internal queue and publishes them
    to the IBM Watson IoT gateway, retrying transient failures.
    """

    def __init__(self, hass, gateway, event_to_json):
        """Initialize the listener."""
        threading.Thread.__init__(self, name="WatsonIOT")
        self.queue = queue.Queue()
        self.gateway = gateway
        # Connect up-front so publishing can start with the first event.
        self.gateway.connect()
        self.event_to_json = event_to_json
        self.write_errors = 0
        self.shutdown = False
        # Subscribe to every state change; filtering happens in event_to_json.
        hass.bus.listen(EVENT_STATE_CHANGED, self._event_listener)

    @callback
    def _event_listener(self, event):
        """Listen for new messages on the bus and queue them for Watson IoT."""
        # Timestamp is recorded alongside the event but currently unused
        # by the consumer (get_events_json reads item[1] only).
        item = (time.monotonic(), event)
        self.queue.put(item)

    def get_events_json(self):
        """Return an event formatted for writing.

        Blocks until a queue item arrives. A None item is the shutdown
        sentinel (see setup()'s shutdown handler) and flips self.shutdown.
        """
        events = []
        try:
            if (item := self.queue.get()) is None:
                self.shutdown = True
            else:
                event_json = self.event_to_json(item[1])
                if event_json:
                    events.append(event_json)
        except queue.Empty:
            # NOTE(review): a blocking get() with no timeout never raises
            # queue.Empty, so this handler looks like dead code — confirm
            # before removing.
            pass
        return events

    def write_to_watson(self, events):
        """Write preprocessed events to watson.

        Each field of each event is published as its own device event;
        OSError / encoder failures are retried up to MAX_TRIES times with
        RETRY_DELAY seconds between attempts.
        """
        for event in events:
            for retry in range(MAX_TRIES + 1):
                try:
                    for field in event["fields"]:
                        value = event["fields"][field]
                        device_success = self.gateway.publishDeviceEvent(
                            event["tags"]["domain"],
                            event["tags"]["entity_id"],
                            field,
                            "json",
                            value,
                        )
                        if not device_success:
                            _LOGGER.error("Failed to publish message to Watson IoT")
                            continue
                    # All fields attempted without raising: stop retrying.
                    break
                except (MissingMessageEncoderException, OSError):
                    if retry < MAX_TRIES:
                        time.sleep(RETRY_DELAY)
                    else:
                        _LOGGER.exception("Failed to publish message to Watson IoT")

    def run(self):
        """Process incoming events."""
        while not self.shutdown:
            event = self.get_events_json()
            if event:
                self.write_to_watson(event)
            # One task_done per queue.get() performed in get_events_json.
            self.queue.task_done()

    def block_till_done(self):
        """Block till all events processed."""
        self.queue.join()
|
|
from __future__ import unicode_literals
import uuid
from django.contrib.auth.models import User
from django.core.urlresolvers import NoReverseMatch
from django.db import models
from django.template.loader import Context, get_template
from django.utils import six
from django.utils.html import escape, format_html, format_html_join
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from reviewboard.attachments.models import FileAttachment
from reviewboard.diffviewer.diffutils import get_sorted_filediffs
from reviewboard.diffviewer.models import DiffSet
from reviewboard.reviews.fields import (BaseCommaEditableField,
BaseEditableField,
BaseReviewRequestField,
BaseReviewRequestFieldSet,
BaseTextAreaField)
from reviewboard.reviews.models import (Group, ReviewRequest,
ReviewRequestDraft,
Screenshot)
from reviewboard.scmtools.models import Repository
from reviewboard.site.urlresolvers import local_site_reverse
class BuiltinFieldMixin(object):
    """Mixin for built-in fields.

    This overrides some functions to work with native fields on a
    ReviewRequest or ReviewRequestDraft, rather than working with those
    stored in extra_data.
    """
    def __init__(self, *args, **kwargs):
        super(BuiltinFieldMixin, self).__init__(*args, **kwargs)

        details = self.review_request_details

        if (isinstance(details, ReviewRequestDraft) and
            not hasattr(details, self.field_id)):
            # The native attribute lives only on the ReviewRequest, not on
            # the draft, so operate on the backing review request instead.
            self.review_request_details = details.get_review_request()

    def load_value(self, review_request_details):
        """Return the native attribute value, materializing relations."""
        value = getattr(review_request_details, self.field_id)

        if isinstance(value, models.Manager):
            # ManyToMany relations come back as managers; turn them into
            # a concrete list of model instances.
            return list(value.all())

        return value

    def save_value(self, value):
        """Store the value directly on the review request or draft."""
        setattr(self.review_request_details, self.field_id, value)
class BuiltinTextAreaFieldMixin(BuiltinFieldMixin):
    """Mixin for built-in text area fields.

    This will ensure that the text is always rendered in Markdown,
    no matter whether the source text is plain or Markdown. It will
    still escape the text if it's not in Markdown format before
    rendering.
    """
    def get_data_attributes(self):
        """Return the data attributes, minus the redundant raw value."""
        data_attrs = super(BuiltinTextAreaFieldMixin,
                           self).get_data_attributes()

        # The raw value is already part of the review request state fed to
        # the page, so shipping it again as a data attribute is wasteful.
        data_attrs.pop('raw-value', None)

        return data_attrs
class BuiltinLocalsFieldMixin(BuiltinFieldMixin):
    """Mixin for internal fields needing access to local variables.

    These are used by fields that operate on state generated when
    creating the review request page. The view handling that page has
    a lot of cached variables, which the fields need access to for
    performance reasons.

    This should not be used by any classes outside this file.

    By default, this will not render or handle any value loading or change
    entry recording. Subclasses must implement those manually.
    """
    #: A list of variables needed from the review_detail view's locals().
    locals_vars = []

    def __init__(self, review_request_details, locals_vars=None,
                 *args, **kwargs):
        """Initialize the field, copying requested view locals onto self.

        ``locals_vars`` maps variable names to values; any name listed in
        the class's ``locals_vars`` attribute is set on the instance
        (defaulting to None when absent).
        """
        super(BuiltinLocalsFieldMixin, self).__init__(
            review_request_details, *args, **kwargs)

        # Bug-prone idiom fix: the default used to be a shared mutable
        # dict ({}); use None and create a fresh dict per call instead.
        if locals_vars is None:
            locals_vars = {}

        for var in self.locals_vars:
            setattr(self, var, locals_vars.get(var, None))

    def should_render(self, value):
        """Never render by default; subclasses opt in explicitly."""
        return False

    def load_value(self, review_request_details):
        """Load no value by default; subclasses opt in explicitly."""
        return None

    def record_change_entry(self, changedesc, old_value, new_value):
        """Record no change entry by default; subclasses opt in explicitly."""
        return None
class BaseCaptionsField(BuiltinLocalsFieldMixin, BaseReviewRequestField):
    """Base class for rendering captions for attachments.

    This serves as a base for FileAttachmentCaptionsField and
    ScreenshotCaptionsField. It provides the base rendering and
    for caption changes on file attachments or screenshots.
    """
    #: Name of the attribute on self holding the {id: object} lookup map.
    obj_map_attr = None
    #: Key under which the related object is exposed in serialized entries.
    caption_object_field = None

    change_entry_renders_inline = False

    def render_change_entry_html(self, info):
        """Render a table of caption changes.

        ``info`` maps object IDs (as strings) to the caption change data
        for that object.
        """
        render_item = super(BaseCaptionsField, self).render_change_entry_html
        obj_map = getattr(self, self.obj_map_attr)

        s = ['<table class="caption-changed">']

        for id_str, caption in six.iteritems(info):
            obj = obj_map[int(id_str)]

            # Bug fix: the template previously contained literal text where
            # the filename belonged, leaving the passed ``filename`` keyword
            # unused; reference it via the {filename} placeholder.
            s.append(format_html(
                '<tr>'
                ' <th><a href="{url}">{filename}</a>:</th>'
                ' <td>{caption}</td>'
                '</tr>',
                url=obj.get_absolute_url(),
                filename=obj.filename,
                caption=mark_safe(render_item(caption))))

        s.append('</table>')

        return ''.join(s)

    def serialize_change_entry(self, changedesc):
        """Serialize caption changes, pairing each with its model object."""
        data = changedesc.fields_changed[self.field_id]

        return [
            {
                'old': data[six.text_type(obj.pk)]['old'][0],
                'new': data[six.text_type(obj.pk)]['new'][0],
                self.caption_object_field: obj,
            }
            for obj in self.model.objects.filter(pk__in=six.iterkeys(data))
        ]
class BaseModelListEditableField(BaseCommaEditableField):
    """Base class for editable comma-separated list of model instances.

    This is used for built-in classes that work with ManyToManyFields.
    """
    #: Attribute name used when recording field changes for these models.
    model_name_attr = None

    def has_value_changed(self, old_value, new_value):
        """Return whether the sets of model primary keys differ."""
        old_values = set([obj.pk for obj in old_value])
        new_values = set([obj.pk for obj in new_value])

        return old_values.symmetric_difference(new_values)

    def record_change_entry(self, changedesc, old_value, new_value):
        """Record the list change on the change description."""
        changedesc.record_field_change(self.field_id, old_value, new_value,
                                       self.model_name_attr)

    def render_change_entry_item_html(self, info, item):
        """Render one (label, url, pk) change item, linking when possible."""
        label, url, pk = item

        if url:
            return '<a href="%s">%s</a>' % (escape(url), escape(label))
        else:
            return escape(label)

    def save_value(self, value):
        # Bug fix: the value must be stored on the review request (or
        # draft), not on the field instance. Setting it on ``self``
        # silently discarded the edit; this now matches
        # BuiltinFieldMixin.save_value and BugsField.save_value.
        setattr(self.review_request_details, self.field_id, value)
class StatusField(BuiltinFieldMixin, BaseReviewRequestField):
    """The Status field on a review request."""
    field_id = 'status'
    label = _('Status')
    is_required = True

    def should_render(self, status):
        """Never render within a fieldset.

        The status is surfaced through the publish and close banners
        instead of through a real field in the fieldsets.
        """
        return False

    def get_change_entry_sections_html(self, info):
        """Return no change entry sections.

        Status changes receive special handling elsewhere, so there is
        nothing to contribute here.
        """
        return []
class SummaryField(BuiltinFieldMixin, BaseEditableField):
    """The Summary field on a review request."""
    field_id = 'summary'
    label = _('Summary')
    is_required = True

    def should_render(self, summary):
        """Never render within the "main" fieldset.

        The summary is already rendered separately at the top of the page
        template, so including it among the other fields would duplicate it.
        """
        return False
class DescriptionField(BuiltinTextAreaFieldMixin, BaseTextAreaField):
    """The Description field on a review request."""
    field_id = 'description'
    label = _('Description')
    is_required = True

    def is_text_markdown(self, value):
        """Return whether the stored description text is Markdown."""
        details = self.review_request_details

        return details.description_rich_text
class TestingDoneField(BuiltinTextAreaFieldMixin, BaseTextAreaField):
    """The Testing Done field on a review request."""
    field_id = 'testing_done'
    label = _('Testing Done')

    def is_text_markdown(self, value):
        """Return whether the stored testing-done text is Markdown."""
        details = self.review_request_details

        return details.testing_done_rich_text
class SubmitterField(BuiltinFieldMixin, BaseReviewRequestField):
    """The Submitter field on a review request."""
    field_id = 'submitter'
    label = _('Submitter')
    model = User

    def render_value(self, user):
        """Render the submitter as a link to their user page."""
        user_url = local_site_reverse(
            'user',
            local_site=self.review_request_details.local_site,
            args=[user])

        # Prefer the full name, falling back to the username.
        display_name = user.get_full_name() or user.username

        return format_html('<a class="user" href="{0}">{1}</a>',
                           user_url, display_name)
class RepositoryField(BuiltinFieldMixin, BaseReviewRequestField):
    """The Repository field on a review request."""

    field_id = 'repository'
    label = _('Repository')
    model = Repository

    def should_render(self, value):
        """Render only when the review request is backed by a repository."""
        details = self.review_request_details

        return details.get_review_request().repository_id is not None
class BranchField(BuiltinFieldMixin, BaseEditableField):
    """The Branch field on a review request.

    A plain single-line editable field; all behavior comes from the base
    classes.
    """

    field_id = 'branch'
    label = _('Branch')
class BugsField(BuiltinFieldMixin, BaseCommaEditableField):
    """The Bugs field on a review request."""

    field_id = 'bugs_closed'
    label = _('Bugs')

    one_line_per_change_entry = False

    def load_value(self, review_request_details):
        """Load the list of bug IDs from the review request."""
        return review_request_details.get_bug_list()

    def save_value(self, value):
        """Store the bug IDs as a comma-separated string."""
        setattr(self.review_request_details, self.field_id, ', '.join(value))

    def render_item(self, bug_id):
        """Render one bug ID, linking to the bug tracker when possible."""
        url = self._get_bug_url(bug_id)

        if not url:
            return escape(bug_id)

        return format_html('<a class="bug" href="{url}">{id}</a>',
                           url=url, id=bug_id)

    def render_change_entry_item_html(self, info, item):
        """Render a bug entry within a change description."""
        return self.render_item(item[0])

    def _get_bug_url(self, bug_id):
        """Return the bug tracker URL for a bug ID, or None if unavailable."""
        review_request = self.review_request_details.get_review_request()
        repository = self.review_request_details.repository

        bug_url = None
        local_site_name = None

        if review_request.local_site:
            local_site_name = review_request.local_site.name

        try:
            # Only repositories configured with a '%s'-style bug tracker
            # template can produce per-bug URLs.
            if (repository and
                repository.bug_tracker and
                '%s' in repository.bug_tracker):
                bug_url = local_site_reverse(
                    'bug_url', local_site_name=local_site_name,
                    args=(review_request.display_id, bug_id))
        except NoReverseMatch:
            pass

        return bug_url
class DependsOnField(BuiltinFieldMixin, BaseModelListEditableField):
    """The Depends On field on a review request."""

    field_id = 'depends_on'
    label = _('Depends On')
    model = ReviewRequest
    model_name_attr = 'summary'

    def render_change_entry_item_html(self, info, item):
        """Render a dependency entry within a change description."""
        # The stored entry only carries the primary key; fetch the review
        # request so its current summary can be shown.
        depends_on = ReviewRequest.objects.get(pk=item[2])

        rendered = format_html(
            '<a href="{url}">{id} - {summary}</a>',
            url=depends_on.get_absolute_url(),
            id=depends_on.pk,
            summary=depends_on.summary)

        # Strike through dependencies that have already been submitted.
        if depends_on.status == ReviewRequest.SUBMITTED:
            rendered = '<s>%s</s>' % rendered

        return rendered

    def render_item(self, item):
        """Render a dependency as a link labeled with its display ID."""
        rendered = format_html(
            '<a href="{url}" title="{summary}">{id}</a>',
            url=item.get_absolute_url(),
            summary=item.summary,
            id=item.display_id)

        if item.status == ReviewRequest.SUBMITTED:
            rendered = '<s>%s</s>' % rendered

        return rendered
class BlocksField(BuiltinFieldMixin, BaseReviewRequestField):
    """The Blocks field on a review request."""

    field_id = 'blocks'
    label = _('Blocks')
    model = ReviewRequest

    def load_value(self, review_request_details):
        """Load the review requests blocked by this one."""
        return review_request_details.get_review_request().get_blocks()

    def should_render(self, blocks):
        """Render only when at least one review request is blocked."""
        return len(blocks) > 0

    def render_value(self, blocks):
        """Render a comma-separated list of links to blocked requests."""
        return format_html_join(
            ', ',
            '<a href="{0}">{1}</a>',
            ((blocked.get_absolute_url(), blocked.display_id)
             for blocked in blocks))
class ChangeField(BuiltinFieldMixin, BaseReviewRequestField):
    """The Change field on a review request.

    Shown for repositories supporting changesets. The change number
    resembles a commit ID, except that it is only ever stored on the
    ReviewRequest and never changes.

    When a review request has both ``changenum`` and ``commit_id``, only
    this field is shown, as both are likely to have values.
    """

    field_id = 'changenum'
    label = _('Change')

    def load_value(self, review_request_details):
        """Load the change number from the review request."""
        return review_request_details.get_review_request().changenum

    def should_render(self, changenum):
        """Render only when a change number is set."""
        return bool(changenum)

    def render_value(self, changenum):
        """Render the change number, flagging pending changesets."""
        review_request = self.review_request_details.get_review_request()

        is_pending, changenum = review_request.changeset_is_pending(changenum)

        if is_pending:
            return escape(_('%s (pending)') % changenum)

        return changenum
class CommitField(BuiltinFieldMixin, BaseReviewRequestField):
    """The Commit field on a review request.

    Displays the ID of the commit the review request represents.

    Since both ``commit_id`` and ``changenum`` may be populated,
    ChangeField takes precedence; it knows how to render information
    based on a changeset ID.
    """

    field_id = 'commit_id'
    label = _('Commit')
    can_record_change_entry = True

    def should_render(self, commit_id):
        """Render only when a commit ID is set and no changenum exists."""
        review_request = self.review_request_details.get_review_request()

        return bool(commit_id) and not review_request.changenum

    def render_value(self, commit_id):
        """Render the commit ID, abbreviating full 40-character SHA-1s."""
        if len(commit_id) != 40:
            return escape(commit_id)

        # Show the abbreviated hash, keeping the full one in the tooltip.
        short_id = commit_id[:7] + '...'

        return '<span title="%s">%s</span>' % (escape(commit_id),
                                               escape(short_id))
class DiffField(BuiltinLocalsFieldMixin, BaseReviewRequestField):
    """Represents a newly uploaded diff on a review request.

    This is not shown as an actual displayable field on the review request
    itself. Instead, it is used only during the ChangeDescription population
    and processing steps.
    """
    field_id = 'diff'
    label = _('Diff')

    locals_vars = ['diffsets_by_id']

    can_record_change_entry = True

    # Maximum number of files listed in a change entry; beyond this a
    # "<N> more" row is shown instead.
    MAX_FILES_PREVIEW = 8

    def render_change_entry_html(self, info):
        """Render the change entry for a newly published diff.

        Shows the diff revision with total insert/delete counts, an
        interdiff link when a prior revision exists, and a capped list of
        the files the diff modifies.
        """
        added_diff_info = info['added'][0]
        review_request = self.review_request_details.get_review_request()

        try:
            diffset = self.diffsets_by_id[added_diff_info[2]]
        except KeyError:
            # If a published revision of a diff has been deleted from the
            # database, this will explode. Just return a blank string for
            # this, so that it doesn't show a traceback.
            return ''

        diff_revision = diffset.revision
        past_revision = diff_revision - 1
        diff_url = added_diff_info[1]

        s = []

        # Fetch the total number of inserts/deletes. These will be shown
        # alongside the diff revision.
        counts = diffset.get_total_line_counts()
        raw_insert_count = counts.get('raw_insert_count', 0)
        raw_delete_count = counts.get('raw_delete_count', 0)

        line_counts = []

        if raw_insert_count > 0:
            line_counts.append('<span class="insert-count">+%d</span>'
                               % raw_insert_count)

        if raw_delete_count > 0:
            line_counts.append('<span class="delete-count">-%d</span>'
                               % raw_delete_count)

        # Display the label, URL, and line counts for the diff.
        s.append(format_html(
            '<p class="diff-changes">'
            ' <a href="{url}">{label}</a>'
            ' <span class="line-counts">({line_counts})</span>'
            '</p>',
            url=diff_url,
            label=_('Revision %s') % diff_revision,
            line_counts=mark_safe(' '.join(line_counts))))

        if past_revision > 0:
            # This is not the first diff revision. Include an interdiff link.
            interdiff_url = local_site_reverse(
                'view-interdiff',
                local_site=review_request.local_site,
                args=[
                    review_request.display_id,
                    past_revision,
                    diff_revision,
                ])

            s.append(format_html(
                '<p><a href="{url}">{text}</a>',
                url=interdiff_url,
                text=_('Show changes')))

        if diffset.file_count > 0:
            # Begin displaying the list of files modified in this diff.
            # It will be capped at a fixed number (MAX_FILES_PREVIEW).
            s += [
                '<div class="diff-index">',
                ' <table>',
            ]

            # We want a sorted list of filediffs, but tagged with the order
            # in which they come from the database, so that we can properly
            # link to the respective files in the diff viewer.
            files = get_sorted_filediffs(enumerate(diffset.files.all()),
                                         key=lambda i: i[1])

            for i, filediff in files[:self.MAX_FILES_PREVIEW]:
                counts = filediff.get_line_counts()

                data_attrs = [
                    'data-%s="%s"' % (attr.replace('_', '-'), counts[attr])
                    for attr in ('insert_count', 'delete_count',
                                 'replace_count', 'total_line_count')
                    if counts.get(attr) is not None
                ]

                # Bug fix: the template previously contained the literal
                # text "(unknown)" while the filename= keyword argument was
                # passed but never referenced, so the file name never
                # appeared in the listing.
                s.append(format_html(
                    '<tr {data_attrs}>'
                    ' <td class="diff-file-icon"></td>'
                    ' <td class="diff-file-info">'
                    ' <a href="{url}">{filename}</a>'
                    ' </td>'
                    '</tr>',
                    data_attrs=mark_safe(' '.join(data_attrs)),
                    url=diff_url + '#%d' % i,
                    filename=filediff.source_file))

            num_remaining = diffset.file_count - self.MAX_FILES_PREVIEW

            if num_remaining > 0:
                # There are more files remaining than we've shown, so show
                # the count.
                s.append(format_html(
                    '<tr>'
                    ' <td></td>'
                    ' <td class="diff-file-info">{text}</td>'
                    '</tr>',
                    text=_('%s more') % num_remaining))

            s += [
                ' </table>',
                '</div>',
            ]

        return ''.join(s)

    def has_value_changed(self, old_value, new_value):
        """Return whether a new diff has been attached.

        Only the presence of a new diffset (in new_value) matters; the old
        value is ignored.
        """
        return new_value is not None

    def load_value(self, review_request_details):
        """Return the attached diffset, if any.

        This will be None for a ReviewRequest, and may have a value for a
        ReviewRequestDraft if a new diff was attached.
        """
        return getattr(review_request_details, 'diffset', None)

    def record_change_entry(self, changedesc, unused, diffset):
        """Record the newly added diff in the change description."""
        review_request = self.review_request_details.get_review_request()

        url = local_site_reverse(
            'view-diff-revision',
            local_site=review_request.local_site,
            args=[review_request.display_id, diffset.revision])

        changedesc.fields_changed['diff'] = {
            'added': [(
                _('Diff r%s') % diffset.revision,
                url,
                diffset.pk
            )]
        }

    def serialize_change_entry(self, changedesc):
        """Return the added DiffSet for serialization in the API."""
        diffset_id = changedesc.fields_changed['diff']['added'][0][2]

        return {
            'added': DiffSet.objects.get(pk=diffset_id),
        }
class FileAttachmentCaptionsField(BaseCaptionsField):
    """Renders caption changes for file attachments.

    Used only while rendering a ChangeDescription; this never appears as a
    displayable field on the review request and never populates change
    description entries itself.
    """

    field_id = 'file_captions'
    label = _('File Captions')

    # Name of the locals variable mapping IDs to attachments
    # (presumably consumed by BaseCaptionsField -- confirm).
    obj_map_attr = 'file_attachment_id_map'
    locals_vars = [obj_map_attr]

    model = FileAttachment
    caption_object_field = 'file_attachment'
class FileAttachmentsField(BuiltinLocalsFieldMixin, BaseCommaEditableField):
    """Renders removed or added file attachments.

    Used only while rendering a ChangeDescription; this never appears as a
    displayable field on the review request and never populates change
    description entries itself.
    """

    field_id = 'files'
    label = _('Files')

    locals_vars = ['file_attachment_id_map']

    model = FileAttachment

    thumbnail_template = 'reviews/changedesc_file_attachment.html'

    def get_change_entry_sections_html(self, info):
        """Return one titled section per kind of change (removed/added)."""
        sections = []

        for key, title in (('removed', _('Removed Files')),
                           ('added', _('Added Files'))):
            if key in info:
                sections.append({
                    'title': title,
                    'rendered_html': mark_safe(
                        self.render_change_entry_html(info[key])),
                })

        return sections

    def render_change_entry_html(self, values):
        """Render thumbnails for every attachment entry in ``values``."""
        # Fetch the template ourselves only once and render it per item,
        # instead of calling render_to_string() in the loop, so we don't
        # have to locate and parse/fetch from cache for every item.
        template = get_template(self.thumbnail_template)

        review_request = self.review_request_details.get_review_request()

        local_site = review_request.local_site
        local_site_name = local_site.name if local_site else None

        rendered = []

        for caption, filename, pk in values:
            attachment = self.file_attachment_id_map.get(pk)

            if attachment is None:
                try:
                    attachment = FileAttachment.objects.get(pk=pk)
                except FileAttachment.DoesNotExist:
                    continue

            rendered.append(template.render(Context({
                'file': attachment,
                'review_request': review_request,
                'local_site_name': local_site_name,
                'uuid': uuid.uuid4(),
            })))

        return ''.join(rendered)
class ScreenshotCaptionsField(BaseCaptionsField):
    """Renders caption changes for screenshots.

    Used only while rendering a ChangeDescription; this never appears as a
    displayable field on the review request and never populates change
    description entries itself.
    """

    field_id = 'screenshot_captions'
    label = _('Screenshot Captions')

    # Name of the locals variable mapping IDs to screenshots
    # (presumably consumed by BaseCaptionsField -- confirm).
    obj_map_attr = 'screenshot_id_map'
    locals_vars = [obj_map_attr]

    model = Screenshot
    caption_object_field = 'screenshot'
class ScreenshotsField(BaseCommaEditableField):
    """Renders removed or added screenshots.

    Used only while rendering a ChangeDescription; this never appears as a
    displayable field on the review request and never populates change
    description entries itself.
    """

    field_id = 'screenshots'
    label = _('Screenshots')
    model = Screenshot
class TargetGroupsField(BuiltinFieldMixin, BaseModelListEditableField):
    """The Target Groups field on a review request."""

    field_id = 'target_groups'
    label = _('Groups')
    model = Group
    model_name_attr = 'name'

    def render_item(self, group):
        """Render a review group as a link to its page."""
        url = escape(group.get_absolute_url())
        name = escape(group.name)

        return '<a href="%s">%s</a>' % (url, name)
class TargetPeopleField(BuiltinFieldMixin, BaseModelListEditableField):
    """The Target People field on a review request."""

    field_id = 'target_people'
    label = _('People')
    model = User
    model_name_attr = 'username'

    def render_item(self, user):
        """Render a target reviewer as a link, flagging inactive users."""
        css_classes = ['user']

        if not user.is_active:
            # Inactive accounts get an extra CSS class for styling.
            css_classes.append('inactive')

        return format_html(
            '<a href="{0}" class="{1}">{2}</a>',
            local_site_reverse(
                'user',
                local_site=self.review_request_details.local_site,
                args=[user]),
            ' '.join(css_classes),
            user.username)
class MainFieldSet(BaseReviewRequestFieldSet):
    """The primary, unlabeled fieldset on a review request."""

    fieldset_id = 'main'
    field_classes = [
        SummaryField,
        DescriptionField,
        TestingDoneField,
    ]
class InformationFieldSet(BaseReviewRequestFieldSet):
    """The "Information" fieldset on a review request."""

    fieldset_id = 'info'
    label = _('Information')
    field_classes = [
        SubmitterField,
        RepositoryField,
        BranchField,
        BugsField,
        DependsOnField,
        BlocksField,
        ChangeField,
        CommitField,
    ]
class ReviewersFieldSet(BaseReviewRequestFieldSet):
    """The "Reviewers" fieldset, holding target groups and people."""

    fieldset_id = 'reviewers'
    label = _('Reviewers')
    show_required = True
    field_classes = [
        TargetGroupsField,
        TargetPeopleField,
    ]
class ChangeEntryOnlyFieldSet(BaseReviewRequestFieldSet):
    """Fields that only participate in change description entries.

    None of these fields render within the normal review request
    fieldsets (see the member classes' docstrings).
    """

    fieldset_id = '_change_entries_only'
    field_classes = [
        DiffField,
        FileAttachmentCaptionsField,
        ScreenshotCaptionsField,
        FileAttachmentsField,
        ScreenshotsField,
        StatusField,
    ]
# The fieldsets shipped with Review Board itself.
# NOTE(review): list order appears to drive registration/display order --
# confirm against the fieldset registration code.
builtin_fieldsets = [
    MainFieldSet,
    InformationFieldSet,
    ReviewersFieldSet,
    ChangeEntryOnlyFieldSet,
]
|
|
"""Support for remote Python debugging.
Some ASCII art to describe the structure:
IN PYTHON SUBPROCESS # IN IDLE PROCESS
#
# oid='gui_adapter'
+----------+ # +------------+ +-----+
| GUIProxy |--remote#call-->| GUIAdapter |--calls-->| GUI |
+-----+--calls-->+----------+ # +------------+ +-----+
| Idb | # /
+-----+<-calls--+------------+ # +----------+<--calls-/
| IdbAdapter |<--remote#call--| IdbProxy |
+------------+ # +----------+
oid='idb_adapter' #
The purpose of the Proxy and Adapter classes is to translate certain
arguments and return values that cannot be transported through the RPC
barrier, in particular frame and traceback objects.
"""
import types
from idlelib import Debugger
# Module-level debugging flag (unused within this visible code -- confirm).
debugging = 0
# Object IDs used to address the two adapters across the RPC connection.
idb_adap_oid = "idb_adapter"
gui_adap_oid = "gui_adapter"
#=======================================
#
# In the PYTHON subprocess:
# ID -> object tables. Frames, dicts, code objects and tracebacks cannot
# cross the RPC barrier, so they are kept here and referenced by id().
frametable = {}
dicttable = {}
codetable = {}
tracebacktable = {}
def wrap_frame(frame):
    """Stash *frame* in the global frametable; return the id used as key."""
    frame_id = id(frame)
    frametable[frame_id] = frame
    return frame_id
def wrap_info(info):
    """Replace info[2], a traceback instance, by its ID for RPC transport."""
    if info is None:
        return None

    traceback = info[2]
    assert isinstance(traceback, types.TracebackType)

    tb_id = id(traceback)
    tracebacktable[tb_id] = traceback
    return (info[0], info[1], tb_id)
class GUIProxy:
    """Subprocess-side stand-in for the debugger GUI in the IDLE process."""

    def __init__(self, conn, gui_adap_oid):
        self.conn = conn
        self.oid = gui_adap_oid

    def interaction(self, message, frame, info=None):
        """Forward an interaction event to the GUI adapter over RPC.

        Frames and tracebacks cannot cross the RPC barrier, so their IDs
        are sent instead (calls rpc.SocketIO.remotecall() via run.MyHandler).
        """
        args = (message, wrap_frame(frame), wrap_info(info))
        self.conn.remotecall(self.oid, "interaction", args, {})
class IdbAdapter:
    """Wraps an Idb instance so the IDLE-side IdbProxy can drive it via RPC.

    Frames, code objects and dicts are addressed by the IDs recorded in
    the module-level tables; this adapter translates IDs back to objects.
    """

    def __init__(self, idb):
        self.idb = idb

    #----------called by an IdbProxy----------

    def set_step(self):
        self.idb.set_step()

    def set_quit(self):
        self.idb.set_quit()

    def set_continue(self):
        self.idb.set_continue()

    def set_next(self, fid):
        self.idb.set_next(frametable[fid])

    def set_return(self, fid):
        self.idb.set_return(frametable[fid])

    def get_stack(self, fid, tbid):
        """Return the (wrapped) stack and active index for a frame ID."""
        frame = frametable[fid]
        tb = tracebacktable[tbid] if tbid is not None else None

        stack, index = self.idb.get_stack(frame, tb)
        # Frames cannot cross the RPC barrier; ship their IDs instead.
        stack = [(wrap_frame(entry), lineno) for entry, lineno in stack]
        return stack, index

    def run(self, cmd):
        import __main__
        self.idb.run(cmd, __main__.__dict__)

    def set_break(self, filename, lineno):
        return self.idb.set_break(filename, lineno)

    def clear_break(self, filename, lineno):
        return self.idb.clear_break(filename, lineno)

    def clear_all_file_breaks(self, filename):
        return self.idb.clear_all_file_breaks(filename)

    #----------called by a FrameProxy----------

    def _register_dict(self, mapping):
        # Record the mapping and hand back the ID used to reference it.
        did = id(mapping)
        dicttable[did] = mapping
        return did

    def frame_attr(self, fid, name):
        return getattr(frametable[fid], name)

    def frame_globals(self, fid):
        return self._register_dict(frametable[fid].f_globals)

    def frame_locals(self, fid):
        return self._register_dict(frametable[fid].f_locals)

    def frame_code(self, fid):
        code = frametable[fid].f_code
        cid = id(code)
        codetable[cid] = code
        return cid

    #----------called by a CodeProxy----------

    def code_name(self, cid):
        return codetable[cid].co_name

    def code_filename(self, cid):
        return codetable[cid].co_filename

    #----------called by a DictProxy----------

    def dict_keys(self, did):
        return dicttable[did].keys()

    def dict_item(self, did, key):
        # Only a repr crosses the RPC barrier, never the value itself.
        return repr(dicttable[did][key])

#----------end class IdbAdapter----------
def start_debugger(rpchandler, gui_adap_oid):
    """Start the debugger and its RPC link in the Python subprocess.

    Build the subprocess half of the split debugger: a GUIProxy talking
    to the IDLE-side GUI, an Idb debugger driving it, and an IdbAdapter
    registered with the RPCServer so the IDLE-side IdbProxy can issue
    requests.
    """
    gui_proxy = GUIProxy(rpchandler, gui_adap_oid)
    adapter = IdbAdapter(Debugger.Idb(gui_proxy))
    rpchandler.register(idb_adap_oid, adapter)
    return idb_adap_oid
#=======================================
#
# In the IDLE process:
class FrameProxy:
    """IDLE-process stand-in for a frame object in the Python subprocess.

    Attribute access is forwarded over the RPC connection to the
    IdbAdapter, addressed by the frame ID captured at construction.
    """

    def __init__(self, conn, fid):
        self._conn = conn
        self._fid = fid
        self._oid = "idb_adapter"
        self._dictcache = {}

    def __getattr__(self, name):
        """Fetch a frame attribute from the subprocess via RPC.

        Private names are rejected locally. (Fixed for Python 3: the
        original ``raise AttributeError, name`` is Python 2-only syntax.)
        """
        if name[:1] == "_":
            raise AttributeError(name)
        if name == "f_code":
            return self._get_f_code()
        if name == "f_globals":
            return self._get_f_globals()
        if name == "f_locals":
            return self._get_f_locals()
        return self._conn.remotecall(self._oid, "frame_attr",
                                     (self._fid, name), {})

    def _get_f_code(self):
        """Return a CodeProxy for this frame's code object."""
        cid = self._conn.remotecall(self._oid, "frame_code", (self._fid,), {})
        return CodeProxy(self._conn, self._oid, cid)

    def _get_f_globals(self):
        """Return a (cached) DictProxy for this frame's globals."""
        did = self._conn.remotecall(self._oid, "frame_globals",
                                    (self._fid,), {})
        return self._get_dict_proxy(did)

    def _get_f_locals(self):
        """Return a (cached) DictProxy for this frame's locals."""
        did = self._conn.remotecall(self._oid, "frame_locals",
                                    (self._fid,), {})
        return self._get_dict_proxy(did)

    def _get_dict_proxy(self, did):
        # Cache proxies so repeated f_globals/f_locals access reuses them.
        if did in self._dictcache:
            return self._dictcache[did]
        dp = DictProxy(self._conn, self._oid, did)
        self._dictcache[did] = dp
        return dp
class CodeProxy:
    """IDLE-process stand-in for a code object in the subprocess."""

    def __init__(self, conn, oid, cid):
        self._conn = conn
        self._oid = oid
        self._cid = cid

    def __getattr__(self, name):
        # Only co_name and co_filename are supported; as before, any other
        # missing attribute falls through and yields None.
        if name in ("co_name", "co_filename"):
            method = "code_name" if name == "co_name" else "code_filename"
            return self._conn.remotecall(self._oid, method,
                                         (self._cid,), {})
class DictProxy:
    """IDLE-process stand-in for a dict (globals/locals) in the subprocess.

    Only key listing and item access are supported; values arrive as
    repr() strings, never as the objects themselves.
    """

    def __init__(self, conn, oid, did):
        self._conn = conn
        self._oid = oid
        self._did = did

    def keys(self):
        """Return the keys of the remote dict."""
        return self._conn.remotecall(self._oid, "dict_keys", (self._did,), {})

    def __getitem__(self, key):
        """Return repr() of the remote dict's value for *key*."""
        return self._conn.remotecall(self._oid, "dict_item",
                                     (self._did, key), {})

    def __getattr__(self, name):
        ##print >>sys.__stderr__, "failed DictProxy.__getattr__:", name
        # Fixed for Python 3: ``raise AttributeError, name`` is Python
        # 2-only syntax and a SyntaxError on Python 3.
        raise AttributeError(name)
class GUIAdapter:
    """IDLE-side receiver for GUI requests coming from the subprocess."""

    def __init__(self, conn, gui):
        self.conn = conn
        self.gui = gui

    def interaction(self, message, fid, modified_info):
        """Hand an interaction event to the real GUI, wrapping the frame ID."""
        self.gui.interaction(message, FrameProxy(self.conn, fid),
                             modified_info)
class IdbProxy:
    """IDLE-side stand-in for the Idb debugger running in the subprocess."""

    def __init__(self, conn, shell, oid):
        self.oid = oid
        self.conn = conn
        self.shell = shell

    def call(self, methodname, *args, **kwargs):
        """Invoke *methodname* on the remote IdbAdapter; return its result."""
        return self.conn.remotecall(self.oid, methodname, args, kwargs)

    def run(self, cmd, locals):
        """Start executing *cmd* remotely.  *locals* is ignored on purpose."""
        seq = self.conn.asyncqueue(self.oid, "run", (cmd,), {})
        self.shell.interp.active_seq = seq

    def get_stack(self, frame, tbid):
        """Return the remote stack as FrameProxy pairs plus active index."""
        # Frame and traceback IDs cross the wire, never the objects.
        stack, index = self.call("get_stack", frame._fid, tbid)
        wrapped = [(FrameProxy(self.conn, fid), lineno)
                   for fid, lineno in stack]
        return wrapped, index

    def set_continue(self):
        self.call("set_continue")

    def set_step(self):
        self.call("set_step")

    def set_next(self, frame):
        self.call("set_next", frame._fid)

    def set_return(self, frame):
        self.call("set_return", frame._fid)

    def set_quit(self):
        self.call("set_quit")

    def set_break(self, filename, lineno):
        return self.call("set_break", filename, lineno)

    def clear_break(self, filename, lineno):
        return self.call("clear_break", filename, lineno)

    def clear_all_file_breaks(self, filename):
        return self.call("clear_all_file_breaks", filename)
def start_remote_debugger(rpcclt, pyshell):
    """Start the subprocess debugger, initialize the debugger GUI and RPC link.

    Ask the RPCServer to start the subprocess half of the debugger, then
    build the IDLE half: an IdbProxy for issuing execution/environment
    requests, the debugger GUI, and a GUIAdapter registered with the
    RPCClient so GUIProxy interaction requests coming from the subprocess
    reach the GUI.
    """
    global idb_adap_oid

    idb_adap_oid = rpcclt.remotecall("exec", "start_the_debugger",
                                     (gui_adap_oid,), {})
    idb_proxy = IdbProxy(rpcclt, pyshell, idb_adap_oid)
    gui = Debugger.Debugger(pyshell, idb_proxy)
    rpcclt.register(gui_adap_oid, GUIAdapter(rpcclt, gui))
    return gui
def close_remote_debugger(rpcclt):
    """Shut down the subprocess debugger and IDLE side of the RPC link.

    Stop the subprocess debugger, then unregister the GUIAdapter so the
    IDLE-process debugger and RPC link objects can be garbage collected.
    (The second reference to the debugger GUI is dropped in
    PyShell.close_remote_debugger().)
    """
    close_subprocess_debugger(rpcclt)
    rpcclt.unregister(gui_adap_oid)
def close_subprocess_debugger(rpcclt):
    """Ask the subprocess's RPCServer to stop its debugger."""
    rpcclt.remotecall("exec", "stop_the_debugger", (idb_adap_oid,), {})
def restart_subprocess_debugger(rpcclt):
    """Restart the subprocess half of the debugger.

    The adapter must come back under the same object ID so the existing
    IDLE-side proxies keep working.
    """
    new_oid = rpcclt.remotecall("exec", "start_the_debugger",
                                (gui_adap_oid,), {})
    assert new_oid == idb_adap_oid, 'Idb restarted with different oid'
|
|
# -*- coding: utf-8 -*-
"""
Newspaper treats urls for news articles as critical components.
Hence, we have an entire module dedicated to them.
"""
__title__ = 'newspaper'
__author__ = 'Lucas Ou-Yang'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014, Lucas Ou-Yang'
import logging
import re
from urllib.parse import parse_qs, urljoin, urlparse, urlsplit, urlunsplit
from tldextract import tldextract
log = logging.getLogger(__name__)
# Cap on the number of URLs memoized (presumably per source -- confirm).
MAX_FILE_MEMO = 20000
# Matches YYYY/MM/DD-style date fragments embedded in article URLs.
# Separators may be '.', '-', '/' or '_'; the month may also be spelled
# out (3-5 word characters) and the trailing day is optional.
DATE_REGEX = r'([\./\-_]{0,1}(19|20)\d{2})[\./\-_]{0,1}(([0-3]{0,1}[0-9][\./\-_])|(\w{3,5}[\./\-_]))([0-3]{0,1}[0-9][\./\-]{0,1})?'
# File extensions that may serve an article page (vs. static media).
ALLOWED_TYPES = ['html', 'htm', 'md', 'rst', 'aspx', 'jsp', 'rhtml', 'cgi',
                 'xhtml', 'jhtml', 'asp']
# Path/subdomain keywords suggesting a news article.
GOOD_PATHS = ['story', 'article', 'feature', 'featured', 'slides',
              'slideshow', 'gallery', 'news', 'video', 'media',
              'v', 'radio', 'press']
# Path/subdomain keywords suggesting a non-article (company) page.
BAD_CHUNKS = ['careers', 'contact', 'about', 'faq', 'terms', 'privacy',
              'advert', 'preferences', 'feedback', 'info', 'browse', 'howto',
              'account', 'subscribe', 'donate', 'shop', 'admin']
# Domains that never host the articles we want.
BAD_DOMAINS = ['amazon', 'doubleclick', 'twitter']
def remove_args(url, keep_params=(), frags=False):
    """
    Remove all param arguments from a url.

    Query items whose names start with one of ``keep_params`` survive;
    the fragment is kept only when ``frags`` is true.
    """
    split_result = urlsplit(url)

    kept_items = [item for item in split_result.query.split('&')
                  if item.startswith(keep_params)]
    new_query = '&'.join(kept_items)

    if frags:
        tail = split_result[4:]
    else:
        tail = ('',)

    return urlunsplit(split_result[:3] + (new_query,) + tail)
def redirect_back(url, source_domain):
    """
    Some sites like Pinterest have api's that cause news
    args to direct to their site with the real news url as a
    GET param. This method catches that and returns our param.
    """
    parsed = urlparse(url)
    domain = parsed.netloc

    # Same (or related) domain or sub domain: nothing to unwrap.
    if source_domain in domain or domain in source_domain:
        return url

    params = parse_qs(parsed.query)
    targets = params.get('url')
    if targets:
        # log.debug('caught redirect %s into %s' % (url, targets[0]))
        return targets[0]

    return url
def prepare_url(url, source_url=None):
    """
    Operations that purify a url, removes arguments,
    redirects, and merges relatives with absolutes.
    """
    try:
        if source_url is None:
            # proper_url = remove_args(url)
            return url

        source_domain = urlparse(source_url).netloc
        absolute = urljoin(source_url, url)
        # proper_url = remove_args(absolute)
        return redirect_back(absolute, source_domain)
    except ValueError as e:
        log.critical('url %s failed on err %s' % (url, str(e)))
        return ''
def valid_url(url, verbose=False, test=False):
    """
    Is this URL a valid news-article url?

    Perform a regex check on an absolute url.

    First, perform a few basic checks like making sure the format of the url
    is right, (scheme, domain, tld).

    Second, make sure that the url isn't some static resource, check the
    file type.

    Then, search of a YYYY/MM/DD pattern in the url. News sites
    love to use this pattern, this is a very safe bet.

    Separators can be ``.``, ``-``, ``/`` or ``_``. Years can be 2 or 4
    digits, must have proper digits 1900-2099. Months and days can be
    ambiguous 2 digit numbers, one is even optional, some sites are
    liberal with their formatting also matches snippets of GET
    queries with keywords inside them. ex: asdf.php?topic_id=blahlbah
    We permit alphanumeric, _ and -.

    Our next check makes sure that a keyword is within one of the
    separators in a url (subdomain or early path separator).
    cnn.com/story/blah-blah-blah would pass due to "story".

    We filter out articles in this stage by aggressively checking to
    see if any resemblance of the source& domain's name or tld is
    present within the article title. If it is, that's bad. It must
    be a company link, like 'cnn is hiring new interns'.

    We also filter out articles with a subdomain or first degree path
    on a registered bad keyword.
    """
    # If we are testing this method in the testing suite, we actually
    # need to preprocess the url like we do in the article's constructor!
    if test:
        url = prepare_url(url)
    # 11 chars is shortest valid url length, eg: http://x.co
    if url is None or len(url) < 11:
        if verbose: print('\t%s rejected because len of url is less than 11' % url)
        return False
    r1 = ('mailto:' in url) # TODO not sure if these rules are redundant
    r2 = ('http://' not in url) and ('https://' not in url)
    if r1 or r2:
        if verbose: print('\t%s rejected because len of url structure' % url)
        return False
    path = urlparse(url).path
    # input url is not in valid form (scheme, netloc, tld)
    if not path.startswith('/'):
        return False
    # the '/' which may exist at the end of the url provides us no information
    if path.endswith('/'):
        path = path[:-1]
    # '/story/cnn/blahblah/index.html' --> ['story', 'cnn', 'blahblah', 'index.html']
    path_chunks = [x for x in path.split('/') if len(x) > 0]
    # siphon out the file type. eg: .html, .htm, .md
    if len(path_chunks) > 0:
        file_type = url_to_filetype(url)
        # if the file type is a media type, reject instantly
        if file_type and file_type not in ALLOWED_TYPES:
            if verbose: print('\t%s rejected due to bad filetype' % url)
            return False
        last_chunk = path_chunks[-1].split('.')
        # the file type is not of use to use anymore, remove from url
        if len(last_chunk) > 1:
            path_chunks[-1] = last_chunk[-2]
    # Index gives us no information
    if 'index' in path_chunks:
        path_chunks.remove('index')
    # extract the tld (top level domain)
    tld_dat = tldextract.extract(url)
    subd = tld_dat.subdomain
    tld = tld_dat.domain.lower()
    # Last path chunk doubles as the candidate "slug" (article title part).
    url_slug = path_chunks[-1] if path_chunks else ''
    if tld in BAD_DOMAINS:
        if verbose: print('%s caught for a bad tld' % url)
        return False
    if len(path_chunks) == 0:
        dash_count, underscore_count = 0, 0
    else:
        dash_count = url_slug.count('-')
        underscore_count = url_slug.count('_')
    # If the url has a news slug title (many dash- or underscore-separated
    # words) and the slug doesn't merely repeat the site's own name.
    if url_slug and (dash_count > 4 or underscore_count > 4):
        if dash_count >= underscore_count:
            if tld not in [ x.lower() for x in url_slug.split('-') ]:
                if verbose: print('%s verified for being a slug' % url)
                return True
        if underscore_count > dash_count:
            if tld not in [ x.lower() for x in url_slug.split('_') ]:
                if verbose: print('%s verified for being a slug' % url)
                return True
    # There must be at least 2 subpaths
    if len(path_chunks) <= 1:
        if verbose: print('%s caught for path chunks too small' % url)
        return False
    # Check for subdomain & path red flags
    # Eg: http://cnn.com/careers.html or careers.cnn.com --> BAD
    for b in BAD_CHUNKS:
        if b in path_chunks or b == subd:
            if verbose: print('%s caught for bad chunks' % url)
            return False
    match_date = re.search(DATE_REGEX, url)
    # if we caught the verified date above, it's an article
    if match_date is not None:
        if verbose: print('%s verified for date' % url)
        return True
    for GOOD in GOOD_PATHS:
        if GOOD.lower() in [p.lower() for p in path_chunks]:
            if verbose: print('%s verified for good path' % url)
            return True
    if verbose: print('%s caught for default false' % url)
    return False
def url_to_filetype(abs_url):
    """
    Input a URL and output the filetype of the file
    specified by the url. Returns None for no filetype.

    'http://blahblah/images/car.jpg' -> 'jpg'
    'http://yahoo.com' -> None
    """
    path = urlparse(abs_url).path
    # Eliminate the trailing '/', we are extracting the file
    if path.endswith('/'):
        path = path[:-1]
    path_chunks = [x for x in path.split('/') if len(x) > 0]
    if not path_chunks:
        # Root URLs like 'http://yahoo.com' have no file component at all.
        # Previously this fell through to path_chunks[-1] and raised
        # IndexError instead of returning None as documented.
        return None
    last_chunk = path_chunks[-1].split('.')  # last chunk == file usually
    file_type = last_chunk[-1] if len(last_chunk) >= 2 else None
    return file_type or None
def get_domain(abs_url, **kwargs):
    """
    returns a url's domain, this method exists to
    encapsulate all url code into this file
    """
    return None if abs_url is None else urlparse(abs_url, **kwargs).netloc
def get_scheme(abs_url, **kwargs):
    """Return the scheme of *abs_url*, or None for a None input."""
    return None if abs_url is None else urlparse(abs_url, **kwargs).scheme
def get_path(abs_url, **kwargs):
    """Return the path component of *abs_url*, or None for a None input."""
    return None if abs_url is None else urlparse(abs_url, **kwargs).path
def is_abs_url(url):
    """
    Check whether *url* is an absolute http(s)/ftp(s) URL.

    this regex was brought to you by django!
    """
    regex = re.compile(
        r'^(?:http|ftp)s?://' # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
        r'localhost|' # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
        r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
        r'(?::\d+)?' # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)

    # The pattern is already compiled; the old code re-compiled it again
    # (a no-op) and compared the match result with ``!= None``.
    return regex.search(url) is not None
|
|
'''
Created on Jan 25, 2011
@author: Mark V Systems Limited
(c) Copyright 2011 Mark V Systems Limited, All rights reserved.
'''
from tkinter import *
try:
from tkinter.ttk import *
from tkinter.ttk import Combobox as _Combobox
except ImportError:
from ttk import *
_Combobox = Combobox
# Position codes used by gridBorder/gridSpacer to select which edge of a
# logical grid cell (or its center) a widget occupies.
TOPBORDER = 1
LEFTBORDER = 2
RIGHTBORDER = 3
BOTTOMBORDER = 4
CENTERCELL = 5
borderImage = None  # module-level placeholder; not assigned elsewhere in this view
class gridBorder(Separator):
    """A ttk Separator drawn as one edge of a logical grid cell.

    The surrounding widgets use a doubled coordinate system: logical cell
    (x, y) occupies even grid columns/rows, with the odd columns/rows in
    between reserved for these separator lines.
    """
    def __init__(self, master, x, y, border, columnspan=None, rowspan=None):
        """Create and grid the separator.

        master -- parent widget to grid into
        x, y -- logical (undoubled) cell coordinates
        border -- one of TOPBORDER/LEFTBORDER/RIGHTBORDER/BOTTOMBORDER
        columnspan, rowspan -- logical spans of the bordered cell, if any
        """
        Separator.__init__(self, master=master)
        # Horizontal separators stretch east-west and span the intervening
        # separator columns as well (hence *2 + 1).
        if border in (TOPBORDER, BOTTOMBORDER):
            x = x * 2 - 1
            if columnspan: columnspan = columnspan * 2 + 1
            else: columnspan = 3
            self.config(orient="horizontal")
            sticky = (E,W)
        # Vertical separators stretch north-south.
        if border in (LEFTBORDER, RIGHTBORDER):
            y = y * 2 - 1
            if rowspan: rowspan = rowspan * 2 + 1
            else: rowspan = 3
            self.config(orient="vertical")
            sticky = (N,S)
        # Per-edge adjustment: pick the odd row/column just before (top/left)
        # or just after (bottom/right, accounting for span) the cell, and
        # keep that row/column from stretching.
        if border == TOPBORDER:
            rowspan = None
            y = y * 2 - 1
            #master.columnconfigure(x, weight=1, uniform='stretchX')
            master.rowconfigure(y, weight=0, uniform='noStretch')
        elif border == BOTTOMBORDER:
            if rowspan:
                y = (y + rowspan - 1) * 2 + 1
                rowspan = None
            else:
                y = y * 2 + 1
            #master.columnconfigure(x, weight=1, uniform='stretchX')
            master.rowconfigure(y, weight=0, uniform='noStretch')
        elif border == LEFTBORDER:
            columnspan = None
            x = x * 2 - 1
            master.columnconfigure(x, weight=0, uniform='noStretch')
            #master.rowconfigure(y, weight=1, uniform='stretchY')
        elif border == RIGHTBORDER:
            if columnspan:
                x = (x + columnspan - 1) * 2 + 1
                columnspan = None
            else:
                x = x * 2 + 1
            master.columnconfigure(x, weight=0, uniform='noStretch')
            #master.rowconfigure(y, weight=1, uniform='stretchY')
        # Grid with whichever spans remain meaningful after the adjustment.
        if columnspan and columnspan > 1 and rowspan and rowspan > 1:
            self.grid(column=x, row=y, sticky=sticky, columnspan=columnspan, rowspan=rowspan)
        elif columnspan and columnspan > 1:
            self.grid(column=x, row=y, sticky=sticky, columnspan=columnspan)
        elif rowspan and rowspan > 1:
            self.grid(column=x, row=y, sticky=sticky, rowspan=rowspan)
        else:
            self.grid(column=x, row=y, sticky=sticky)
        # Remember the final (doubled) grid coordinates and spans.
        self.x = x
        self.y = y
        self.columnspan = columnspan
        self.rowspan = rowspan
        # copy bindings
        # Propagate the master's context-menu binding (if any) to this widget;
        # masters without a contextMenuClick attribute are simply skipped.
        try:
            contextMenuBinding = master.bind(master.contextMenuClick)
            if contextMenuBinding:
                self.bind(master.contextMenuClick, contextMenuBinding)
        except AttributeError:
            pass
        #if isinstance(master.master.master, scrolledHeaderedFrame):
        #    self.bind("<Configure>", master.master.master._configure_cell)
class gridSpacer(Frame):
    """An empty 2x2-pixel Frame filling a border slot (or the center) of a
    logical grid cell in the doubled coordinate system."""
    def __init__(self, master, x, y, where):
        Frame.__init__(self, master=master)
        # Map the position code onto the doubled grid: -1 selects the slot
        # before the cell, +1 the slot after, 0 the cell itself.
        shift = {CENTERCELL: 0, TOPBORDER: -1, LEFTBORDER: -1}.get(where, 1)
        x = x * 2 + shift
        y = y * 2 + shift
        self.grid(column=x, row=y) # same dimensions as separator in col/row headers
        self.x = x
        self.y = y
        self.config(width=2,height=2) # need same default as Spacer, which is 2 pixels (shadow pixel and highlight pixel)
        # Spacer rows/columns must not stretch with the table.
        if where in (TOPBORDER, BOTTOMBORDER):
            master.rowconfigure(y, weight=0, uniform='noStretch')
        elif where in (LEFTBORDER, RIGHTBORDER):
            master.columnconfigure(x, weight=0, uniform='noStretch')
        # Propagate the master's context-menu binding, when one exists.
        try:
            menuBinding = master.bind(master.contextMenuClick)
            if menuBinding:
                self.bind(master.contextMenuClick, menuBinding)
        except AttributeError:
            pass
class gridHdr(Label):
    """A column/row/table header cell rendered as a Label.

    When placed inside a scrolledHeaderedFrame, logical coordinates are
    doubled so separator columns/rows fit between header cells.
    """
    def __init__(self, master, x, y, text, columnspan=None, rowspan=None, anchor='center', padding=None,
                 wraplength=None, width=None, minwidth=None, stretchCols=True, stretchRows=True,
                 objectId=None, onClick=None):
        """Create and grid the header label at logical cell (x, y).

        objectId -- opaque identifier stored on the widget for callers
        onClick -- optional handler bound to mouse button 1
        """
        Label.__init__(self, master=master)
        # Inside a headered frame, convert to the doubled coordinate system.
        if isinstance(master.master.master, scrolledHeaderedFrame):
            x = x * 2
            y = y * 2
            if columnspan: columnspan = columnspan * 2 - 1
            if rowspan: rowspan = rowspan * 2 - 1
        # #master.columnconfigure(x, weight=1, uniform='stretchX')
        #master.rowconfigure(y, weight=1, uniform='stretch')
        self.config(text=text if text is not None else "",
                    #relief="solid", use border instead to effect row-col spanned cells properly
                    #bg="#ffffff000", fg="#000000fff",
                    #readonlybackground="#ddddddddd",
                    #background="#888000000",
                    width=width,
                    anchor=anchor)
        if padding:
            self.config(padding=padding)
        if wraplength:
            self.config(wraplength=wraplength)
        # Grid with whichever spans apply.
        if columnspan and columnspan > 1 and rowspan and rowspan > 1:
            self.grid(column=x, row=y, sticky=(E,W,N,S), columnspan=columnspan, rowspan=rowspan)
        elif columnspan and columnspan > 1:
            self.grid(column=x, row=y, sticky=(E,W,N,S), columnspan=columnspan)
        elif rowspan and rowspan > 1:
            self.grid(column=x, row=y, sticky=(E,W,N,S), rowspan=rowspan)
        else:
            self.grid(column=x, row=y, sticky=(E,W,N,S))
        # Remember final (possibly doubled) position and spans.
        self.x = x
        self.y = y
        self.columnspan = columnspan
        self.rowspan = rowspan
        self.objectId = objectId
        if minwidth:
            master.columnconfigure(x, minsize=minwidth)
        if stretchCols:
            master.columnconfigure(x, weight=1)
        else:
            master.columnconfigure(x, weight=0, uniform='noStretch')
        if stretchRows:
            master.rowconfigure(y, weight=1)
        else:
            master.rowconfigure(y, weight=0, uniform='noStretch')
        # copy bindings
        # Propagate the master's context-menu binding (if any).
        try:
            contextMenuBinding = master.bind(master.contextMenuClick)
            if contextMenuBinding:
                self.bind(master.contextMenuClick, contextMenuBinding)
        except AttributeError:
            pass
        # Headered frames track cell resizes to keep header/body aligned.
        if isinstance(master.master.master, scrolledHeaderedFrame):
            self.bind("<Configure>", master.master.master._configure_cell)
        if onClick:
            self.bind("<1>", onClick)
class gridCell(Entry):
    """An editable grid cell backed by a StringVar.

    isChanged becomes True on any change to the variable after
    construction (the initial value set in __init__ does not count,
    because isChanged is reset to False at the end of __init__).
    """
    def __init__(self, master, x, y, value="", width=None, justify=None, objectId=None, onClick=None):
        Entry.__init__(self, master=master)
        self.valueVar = StringVar()
        # Register the write-trace before any value is set; isChanged is
        # reset below so the initial assignment is not counted as an edit.
        self.valueVar.trace('w', self.valueChanged)
        self.config(textvariable=self.valueVar,
                    #relief="ridge",
                    #bg="#ff8ff8ff8", fg="#000000000",
                    justify=justify,
                    width=width,
                    )
        # Inside a headered frame, convert to the doubled coordinate system.
        if isinstance(master.master.master, scrolledHeaderedFrame):
            x = x * 2
            y = y * 2
        self.grid(column=x, row=y, sticky=(N,S,E,W))
        self.x = x
        self.y = y
        if value is not None:
            self.valueVar.set(value)
        self.objectId = objectId
        # copy bindings
        try:
            contextMenuBinding = master.bind(master.contextMenuClick)
            if contextMenuBinding:
                self.bind(master.contextMenuClick, contextMenuBinding)
        except AttributeError:
            pass
        if isinstance(master.master.master, scrolledHeaderedFrame):
            self.bind("<Configure>", master.master.master._configure_cell)
        if onClick:
            self.bind("<1>", onClick)
        self.isChanged = False
    @property
    def value(self):
        """Current text of the cell."""
        return self.valueVar.get()
    def setValue(self, value):
        """Set the cell's text (marks the cell as changed via the trace)."""
        return self.valueVar.set(value)
    def valueChanged(self, *args):
        """StringVar write-trace callback: record that the cell was edited."""
        self.isChanged = True
class gridCombobox(_Combobox):
    """A grid-placed Combobox backed by a StringVar.

    The initial value is taken from, in priority order: values[selectindex],
    the explicit value argument, or a saved option looked up by attr on
    master.master.options (when that attribute exists).
    """
    def __init__(self, master, x, y, value="", values=(), width=None, objectId=None, columnspan=None, selectindex=None, comboboxselected=None, state=None, padx=None, attr=None):
        _Combobox.__init__(self, master=master)
        self.attr = attr
        self.valueVar = StringVar()
        # Register the write-trace first; isChanged is reset to False at the
        # end so the initial selection does not count as a user edit.
        self.valueVar.trace('w', self.valueChanged)
        self.config(textvariable=self.valueVar,
                    background="#ff8ff8ff8", foreground="#000000000",
                    # justify='center'
                    width=width,
                    state=state
                    )
        self["values"] = values
        # Inside a headered frame, convert to the doubled coordinate system.
        if isinstance(master.master.master, scrolledHeaderedFrame):
            x = x * 2
            y = y * 2
            if columnspan: columnspan = columnspan * 2 - 1
        if columnspan and columnspan > 1:
            self.grid(column=x, row=y, sticky=(E,W), columnspan=columnspan, padx=padx)
        else:
            self.grid(column=x, row=y, sticky=(E,W), padx=padx)
        # Initial value: index into values, explicit value, or saved option.
        if selectindex is not None:
            self.valueVar.set(values[selectindex])
        elif value:
            self.valueVar.set(value)
        elif attr:
            try:
                options = master.master.options
                if attr in options:
                    self.valueVar.set( options[attr] or "" )
            except AttributeError:
                pass
        self.objectId = objectId
        # copy bindings
        try:
            contextMenuBinding = master.bind(master.contextMenuClick)
            if contextMenuBinding:
                self.bind(master.contextMenuClick, contextMenuBinding)
        except AttributeError:
            pass
        if comboboxselected:
            self.bind("<<ComboboxSelected>>", comboboxselected)
        self.isChanged = False
    @property
    def value(self):
        """Currently selected/typed text."""
        return self.valueVar.get()
    @property
    def valueIndex(self):
        """Index of the current text in the values list, or -1 if absent."""
        value = self.valueVar.get()
        values = self["values"]
        if value in values:
            return values.index(value)
        return -1
    def valueChanged(self, *args):
        """StringVar write-trace callback: record that a selection was made."""
        self.isChanged = True
class label(Label):
    """A plain left-aligned text label gridded at cell (x, y)."""
    def __init__(self, master, x, y, text):
        super().__init__(master=master, text=text)
        self.grid(column=x, row=y, sticky=W, padx=8)
class checkbox(Checkbutton):
    """A Checkbutton bound to a StringVar, with change tracking.

    If master.master exposes an `options` mapping containing `attr`, the
    checkbox is initialized from that saved value. isChanged is reset to
    False at the end of __init__, so only later edits count.
    """
    def __init__(self, master, x, y, text, attr=None, columnspan=None, onclick=None):
        self.attr = attr
        self.onclick = onclick
        self.valueVar = StringVar()
        # Trace must be registered before the variable is first set.
        self.valueVar.trace('w', self.valueChanged)
        Checkbutton.__init__(self, master=master, text=text, variable=self.valueVar)
        self.grid(column=x, row=y, sticky=W, padx=24)
        if columnspan:
            self.grid(columnspan=columnspan)
        try:
            saved = master.master.options
            if attr in saved:
                self.valueVar.set( saved[attr] )
        except AttributeError:
            pass
        self.isChanged = False
    @property
    def value(self):
        """True when checked (variable holds "1"), else False."""
        return self.valueVar.get() == "1"
    def valueChanged(self, *args):
        """Write-trace callback: mark changed and notify onclick, if any."""
        self.isChanged = True
        if self.onclick is not None:
            self.onclick(self)
class radiobutton(Radiobutton):
    """A Radiobutton; sibling buttons share state by passing the same
    valueVar. Initialized from master.master.options[attr] when present."""
    def __init__(self, master, x, y, text, value, attr=None, valueVar=None):
        self.attr = attr
        # Reuse the caller-supplied variable so a button group shares state.
        self.valueVar = valueVar if valueVar else StringVar()
        Radiobutton.__init__(self, master=master, text=text, variable=self.valueVar, value=value)
        self.grid(column=x, row=y, sticky=W, padx=24)
        try:
            saved = master.master.options
            if attr in saved:
                self.valueVar.set( saved[attr] )
        except AttributeError:
            pass
    @property
    def value(self):
        """Current value of the (possibly shared) variable."""
        return self.valueVar.get()
class scrolledFrame(Frame):
    """A Frame with both scrollbars wrapping a Canvas; widgets are gridded
    into self.interior, which scrolls inside the canvas."""
    def __init__(self, parent, *args, **kw):
        Frame.__init__(self, parent, *args, **kw)
        vscrollbar = Scrollbar(self, orient=VERTICAL)
        hscrollbar = Scrollbar(self, orient=HORIZONTAL)
        self.canvas = canvas = Canvas(self, bd=0, highlightthickness=0,
                                      yscrollcommand=vscrollbar.set,
                                      xscrollcommand=hscrollbar.set)
        self.grid(row=0, column=0, sticky=(N,S,E,W))
        canvas.grid(row=0, column=0, sticky=(N,S,E,W))
        vscrollbar.grid(row=0, column=1, sticky=(N,S))
        hscrollbar.grid(row=1, column=0, sticky=(E,W))
        vscrollbar.config(command=canvas.yview)
        hscrollbar.config(command=canvas.xview)
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        canvas.columnconfigure(0, weight=1)
        canvas.rowconfigure(0, weight=1)
        # reset the view
        canvas.xview_moveto(0)
        canvas.yview_moveto(0)
        # create a frame inside the canvas which will be scrolled with it
        self.interior = interior = Frame(canvas)
        self.interior_id = canvas.create_window(0, 0, window=interior, anchor=NW)
        interior.bind('<Configure>', self._configure_interior)
        canvas.bind('<Configure>', self._configure_canvas)
    def _configure_interior(self,event):
        """Resize the canvas scroll region to the inner frame's size."""
        # update the scrollbars to match the size of the inner frame
        interiorW = self.interior.winfo_reqwidth()
        interiorH = self.interior.winfo_reqheight()
        self.canvas.config(scrollregion=(0,0,interiorW,interiorH))
        ''' needed if scrolling only in 1 direction (for the axis that doesn't have scrollbar)
        if interiorW != self.canvas.winfo_width():
            # update the canvas's width to fit the inner frame
            self.canvas.config(width=interiorW)
        if interiorH != self.canvas.winfo_height():
            self.canvas.config(height=interiorH)
        '''
    def _configure_canvas(self, event):
        ''' needed if only scrolling in one direction
        canvasW = self.canvas.winfo_width()
        if self.interior.winfo_reqwidth() != canvasW:
            # update the inner frame's width to fill the canvas
            self.canvas.itemconfigure(self.interior_id, width=canvasW)
        canvasH = self.canvas.winfo_height()
        if self.interior.winfo_reqheight() != canvasH:
            self.canvas.itemconfigure(self.interior_id, height=canvasH)
        '''
    def clearGrid(self):
        """Destroy all children and reset grid min-sizes so the frame can
        be repopulated from scratch."""
        x,y = self.size()
        for widget in self.winfo_children():
            widget.destroy()
        if x > 1 and y > 1: # not gridTblHdr
            # reset the -minsize set by prior cell-configure passes
            for x in range(x): self.tk.call( ('grid', 'columnconfigure', self._w, x, '-minsize', 0 ) )
            for y in range(y): self.tk.call( ('grid', 'rowconfigure', self._w, y, '-minsize', 0 ) )
        self.config(width=1,height=1)
        self.update()
        self.colsConfigured = False
class scrolledHeaderedFrame(Frame):
    """A scrollable table with frozen column and row header panes.

    Four gridded regions: a table-header corner (tblHdrInterior), a column
    header canvas, a row header canvas, and the body canvas. The body
    scrollbars drive both the body and the matching header canvas so the
    headers stay aligned with the body while scrolling. Cell widgets use a
    doubled coordinate system (even columns/rows are cells, odd ones are
    separators).
    """
    def __init__(self, parent, *args, **kw):
        Frame.__init__(self, parent, *args, **kw)
        self.colsConfigured = False
        self.bodyCellsConfigured = False
        self.blockConfigureCell = False
        self.hdrVscrollbar = Scrollbar(self, orient=VERTICAL)
        self.hdrHscrollbar = Scrollbar(self, orient=HORIZONTAL)
        self.bodyVscrollbar = Scrollbar(self, orient=VERTICAL)
        self.bodyHscrollbar = Scrollbar(self, orient=HORIZONTAL)
        self.colHdrCanvas = Canvas(self, bd=0, highlightthickness=0,
                                   yscrollcommand=self.hdrVscrollbar.set)
        self.rowHdrCanvas = Canvas(self, bd=0, highlightthickness=0,
                                   xscrollcommand=self.hdrHscrollbar.set)
        self.bodyCanvas = Canvas(self, bd=0, highlightthickness=0,
                                 yscrollcommand=self.bodyVscrollbar.set,
                                 xscrollcommand=self.bodyHscrollbar.set)
        self.grid(row=0, column=0, sticky=(N,S,E,W))
        self.tblHdrInterior = Frame(self)
        self.tblHdrInterior.grid(row=1, column=0, sticky=(N,S,E,W))
        self.colHdrCanvas.grid(row=1, column=1, sticky=(N,W,E))
        self.rowHdrCanvas.grid(row=2, column=0, sticky=(N,W,S))
        self.bodyCanvas.grid(row=2, column=1, sticky=(N,S,E,W))
        self.hdrVscrollbar.grid(row=1, column=2, sticky=(N,S))
        self.hdrHscrollbar.grid(row=3, column=0, sticky=(E,W))
        self.bodyVscrollbar.grid(row=2, column=2, sticky=(N,S))
        self.bodyHscrollbar.grid(row=3, column=1, sticky=(E,W))
        self.hdrVscrollbar.config(command=self.colHdrCanvas.yview)
        self.hdrHscrollbar.config(command=self.rowHdrCanvas.xview)
        # body scrollbars move the headers in lockstep with the body
        self.bodyVscrollbar.config(command=self._vscroll_body)
        self.bodyHscrollbar.config(command=self._hscroll_body)
        self.columnconfigure(1, weight=1)
        self.rowconfigure(2, weight=1)
        '''
        self.rowHdrCanvas.columnconfigure(1, weight=1)
        self.colHdrCanvas.rowconfigure(2, weight=1)
        self.bodyCanvas.columnconfigure(1, weight=1)
        self.bodyCanvas.rowconfigure(2, weight=1)
        '''
        # reset the view
        self.colHdrCanvas.xview_moveto(0)
        self.colHdrCanvas.yview_moveto(0)
        self.rowHdrCanvas.xview_moveto(0)
        self.rowHdrCanvas.yview_moveto(0)
        self.bodyCanvas.xview_moveto(0)
        self.bodyCanvas.yview_moveto(0)
        # create a frame inside the canvas which will be scrolled with it
        self.colHdrInterior = Frame(self.colHdrCanvas)
        self.rowHdrInterior = Frame(self.rowHdrCanvas)
        self.bodyInterior = Frame(self.bodyCanvas)
        self.colHdrInterior_id = self.colHdrCanvas.create_window(0, 0, window=self.colHdrInterior, anchor=NW)
        self.rowHdrInterior_id = self.rowHdrCanvas.create_window(0, 0, window=self.rowHdrInterior, anchor=NW)
        self.bodyInterior_id = self.bodyCanvas.create_window(0, 0, window=self.bodyInterior, anchor=NW)
        self.colHdrInterior.bind('<Configure>', self._configure_colHdrInterior)
        self.rowHdrInterior.bind('<Configure>', self._configure_rowHdrInterior)
        self.bodyInterior.bind('<Configure>', self._configure_bodyInterior)
        self.colHdrCanvas.bind('<Configure>', self._configure_colHdrCanvas)
        self.rowHdrCanvas.bind('<Configure>', self._configure_rowHdrCanvas)
        self.bodyCanvas.bind('<Configure>', self._configure_bodyCanvas)
        '''
        self.colHdrInterior.bind('<Configure>', self._configure_interiors)
        self.rowHdrInterior.bind('<Configure>', self._configure_rowHdrInterior)
        self.bodyInterior.bind('<Configure>', self._configure_rowHdrInterior)
        self.colHdrCanvas.bind('<Configure>', self._configure_canvases)
        self.rowHdrCanvas.bind('<Configure>', self._configure_canvases)
        #self.bodyCanvas.bind('<Configure>', self._configure_canvases)
        '''
        # on linux Button-4, Button-5 events
        #self.rowHdrCanvas.bind("<MouseWheel>", self._mousewheel)
        #self.bodyCanvas.bind("<MouseWheel>", self._mousewheel)
    def _vscroll_body(self, *args):
        """Vertical scroll: move row headers and body together."""
        self.rowHdrCanvas.yview(*args)
        self.bodyCanvas.yview(*args)
    def _hscroll_body(self, *args):
        """Horizontal scroll: move column headers and body together."""
        self.colHdrCanvas.xview(*args)
        self.bodyCanvas.xview(*args)
    def _mousewheel(self, event):
        """Wheel handler (currently unbound): scroll row headers and body."""
        # on linux: if (event.num == 4): delta = -1 elif (event.num == 5): delta = 1 else: delta = event.delta
        self.rowHdrCanvas.yview("scroll", event.delta, "units")
        self.bodyCanvas.yview("scroll", event.delta, "units")
        return "break" #don't do default scrolling
    def clearGrid(self):
        """Reset scroll positions, destroy all cells in every pane, and
        clear the min-sizes accumulated by cell-configure passes."""
        self.colHdrCanvas.xview_moveto(0)
        self.colHdrCanvas.yview_moveto(0)
        self.rowHdrCanvas.xview_moveto(0)
        self.rowHdrCanvas.yview_moveto(0)
        self.bodyCanvas.xview_moveto(0)
        self.bodyCanvas.yview_moveto(0)
        for grid in (self.tblHdrInterior, self.colHdrInterior, self.rowHdrInterior, self.bodyInterior):
            x,y = grid.size()
            for widget in grid.winfo_children():
                widget.destroy()
            if x > 1 and y > 1: # not gridTblHdr
                for x in range(x): grid.tk.call( ('grid', 'columnconfigure', grid._w, x, '-minsize', 0 ) )
                for y in range(y): grid.tk.call( ('grid', 'rowconfigure', grid._w, y, '-minsize', 0 ) )
            grid.config(width=1,height=1)
            grid.master.config(width=1,height=1,scrollregion=(0,0,1,1))
        self.update()
        self.colsConfigured = False
    def _configure_colHdrInterior(self,event):
        """Column header frame resized: update its canvas scroll region."""
        #print("configure_colHdrInterior")
        # seems to not help:
        #if not self.colsConfigured:
        #    self.conformHdrsToBody()
        interiorW = self.colHdrInterior.winfo_reqwidth()
        interiorH = self.colHdrInterior.winfo_reqheight()
        raiseHeight = interiorH != self.colHdrCanvas.winfo_height()
        # tkinter bug, mac won't display col headers without setting height here and below
        # 1 pixel higher, not needed on PC/linux
        self.colHdrCanvas.config(height=interiorH, scrollregion=(0,0,interiorW,interiorH))
        if raiseHeight: # update the canvas's width to fit the inner frame
            self.colHdrCanvas.config(height=interiorH + 1)
        #if interiorH != self.tblHdrInterior.winfo_height():
        #    self.tblHdrInterior.tk.call( ('grid', 'rowconfigure', self.tblHdrInterior._w, 1, '-minsize', interiorH ) )
    def _configure_rowHdrInterior(self,event):
        """Row header frame resized: update its canvas width/scroll region."""
        #print("configure_rowHdrInterior")
        interiorW = self.rowHdrInterior.winfo_reqwidth()
        interiorH = self.rowHdrInterior.winfo_reqheight()
        # width doesn't set wide enough when first expanding, force by setting wider before scroll region
        widenWidth = interiorW != self.rowHdrCanvas.winfo_width() and interiorW != 1 # 1 means nothing set yet
        # tkinter bug? right side of row headers is clipped without setting it 1 pixel wider below
        # and then back on next configure event. Would like to remove first config of width.
        # also: mac won't display at all without this trick
        self.rowHdrCanvas.config(width=interiorW, scrollregion=(0,0,interiorW,interiorH))
        if widenWidth: # update the canvas's width to fit the inner frame
            self.rowHdrCanvas.config(width=interiorW + 1) # remove if tkinter issue gets solved
        #if interiorW != self.tblHdrInterior.winfo_width() or \
        #   interiorW != self.tblHdrInterior.tk.call( ('grid', 'columnconfigure', self.tblHdrInterior._w, 1, '-minsize' ) ):
        #    self.tblHdrInterior.tk.call( ('grid', 'columnconfigure', self.tblHdrInterior._w, 1, '-minsize', interiorW ) )
    def _configure_bodyInterior(self,event):
        """Body frame resized: update the body canvas scroll region."""
        #print("configure_bodyInterior")
        # seems to not help:
        #if not self.bodyCellsConfigured:
        #    self.conformBodyCellsToHeader()
        interiorW = self.bodyInterior.winfo_reqwidth()
        interiorH = self.bodyInterior.winfo_reqheight()
        self.bodyCanvas.config(scrollregion=(0,0,interiorW,interiorH))
    def _configure_colHdrCanvas(self, event):
        """Column header canvas resized: stretch the interior to match."""
        #print("configure_colHdrCanvas")
        canvasH = self.colHdrCanvas.winfo_height()
        if self.colHdrInterior.winfo_reqheight() != canvasH:
            self.colHdrCanvas.itemconfigure(self.colHdrInterior_id, height=canvasH)
    def _configure_rowHdrCanvas(self, event):
        """Row header canvas resized: stretch the interior and adjust the
        table-header label's wrap length to the new width."""
        canvasW = self.rowHdrCanvas.winfo_width()
        #print("configure_rowHdrCanvas width {0}".format(canvasW))
        if self.rowHdrInterior.winfo_reqwidth() != canvasW:
            self.rowHdrCanvas.itemconfigure(self.rowHdrInterior_id, width=canvasW)
        # set table header wrap length
        if hasattr(self.tblHdrInterior, "tblHdrLabel") and canvasW > self.tblHdrInterior.tblHdrWraplength:
            self.tblHdrInterior.tblHdrWraplength = canvasW - 4
            self.tblHdrInterior.tblHdrLabel.config(wraplength=canvasW - 4)
    def _configure_bodyCanvas(self, event):
        """Body canvas resized: currently a no-op (kept for symmetry)."""
        #print("configure_bodyCanvas")
        #canvasW = self.rowHdrCanvas.winfo_width()
        #if self.rowHdrInterior.winfo_reqwidth() != canvasW:
        #    self.rowHdrCanvas.itemconfigure(self.rowHdrInterior_id, width=canvasW)
        pass
    def _configure_interiors(self,event):
        """Alternative handler (unbound): size all three scroll regions
        from the max of body and header dimensions."""
        #print("configure_interiors")
        bodyW = self.bodyInterior.winfo_reqwidth()
        bodyH = self.bodyInterior.winfo_reqheight()
        colHdrW = self.colHdrInterior.winfo_reqwidth()
        colHdrH = self.colHdrInterior.winfo_reqheight()
        rowHdrW = self.rowHdrInterior.winfo_reqwidth()
        rowHdrH = self.rowHdrInterior.winfo_reqheight()
        bodyW = max(bodyW,colHdrW)
        bodyH = max(bodyH,rowHdrH)
        self.bodyCanvas.config(scrollregion=(0,0,bodyW,bodyH))
        self.colHdrCanvas.config(scrollregion=(0,0,bodyW,colHdrH))
        self.rowHdrCanvas.config(scrollregion=(0,0,rowHdrW,bodyH))
    def _configure_canvases(self, event):
        """Alternative handler (unbound): stretch header interiors to their
        canvases."""
        #print("configure_canvases")
        canvasH = self.colHdrCanvas.winfo_height()
        if self.colHdrInterior.winfo_reqheight() != canvasH:
            self.colHdrCanvas.itemconfigure(self.colHdrInterior_id, height=canvasH)
        canvasW = self.rowHdrCanvas.winfo_width()
        if self.rowHdrInterior.winfo_reqwidth() != canvasW:
            self.rowHdrCanvas.itemconfigure(self.rowHdrInterior_id, width=canvasW)
    def _configure_cell(self, event):
        """<Configure> handler for individual cells: propagate a cell's
        requested size into the matching header/body column and row
        min-sizes so headers and body stay aligned."""
        #if self.blockConfigureCell:
        #    return
        self.blockConfigureCell = True
        cell = event.widget
        x = cell.x
        y = cell.y
        cellW = cell.winfo_reqwidth()
        cellH = cell.winfo_reqheight()
        isColHdrCell = event.widget.master == self.colHdrInterior
        isRowHdrCell = event.widget.master == self.rowHdrInterior
        isBodyCell = event.widget.master == self.bodyInterior
        #print("configure_cell {4} x={0} y={1} w={2} h={3}".format(x,y,cellW,cellH, "colHdr" if isColHdrCell else "rowHdr" if isRowHdrCell else "body" if isBodyCell else "unknown"))
        if isColHdrCell:
            # Distribute a (possibly spanning) header cell's width across
            # the body columns it covers, widening only those still narrower.
            if hasattr(cell,'columnspan') and cell.columnspan:
                columnspan = cell.columnspan # this is the non borders columns spanned
            else:
                columnspan = 1
            cellspan = ((columnspan + 1)//2)
            w = int( ( cellW - ((columnspan - 1)/2) ) / cellspan )
            wWiderAlloced = 0
            wNumWider = 0
            for X in range(x, x + columnspan, 2): # spanned cols divided equally over their columns
                bodyColW = self.bodyInterior.tk.call( ('grid', 'columnconfigure', self.bodyInterior._w, X, '-minsize' ) )
                if bodyColW > w:
                    wWiderAlloced += bodyColW
                    wNumWider += 1
            if cellspan - wNumWider > 0 and cellW > wWiderAlloced:
                W = int((cellW - wWiderAlloced) / (cellspan - wNumWider))
                for X in range(x, x + columnspan, 2): # spanned cols divided equally over their columns
                    bodyColW = self.bodyInterior.tk.call( ('grid', 'columnconfigure', self.bodyInterior._w, X, '-minsize' ) )
                    if W > bodyColW: # even cells only
                        self.bodyInterior.tk.call( ('grid', 'columnconfigure', self.bodyInterior._w, X, '-minsize', W ) )
                        #self.bodyInterior.update()
            '''
            for X in range(x, x + columnspan*2, 2): # spanned cols divided equally over their columns
                w = int(cellW / columnspan)
                bodyColW = self.bodyInterior.tk.call( ('grid', 'columnconfigure', self.bodyInterior._w, X, '-minsize' ) )
                if cellW > bodyColW: # even (body) cells only
                    self.bodyInterior.tk.call( ('grid', 'columnconfigure', self.bodyInterior._w, X, '-minsize', w ) )
                    #self.bodyInterior.update()
            '''
        if isRowHdrCell:
            # Row header taller than the body rows it spans: raise those rows.
            rowspan = getattr(cell,'rowspan',None) or 1
            bodyRowH = self.bodyInterior.tk.call( ('grid', 'rowconfigure', self.bodyInterior._w, y, '-minsize' ) )
            cellHperRow = ( cellH - (rowspan // 2 * 3) ) / ((rowspan + 1) // 2) # rowspan includes spanned separators
            #print("body row span height {0} per row height {1}".format(bodyRowH, cellHperRow))
            if cellHperRow > bodyRowH:
                for ySpanned in range(y+rowspan-1, y-1, -2):
                    self.bodyInterior.tk.call( ('grid', 'rowconfigure', self.bodyInterior._w, ySpanned, '-minsize', cellHperRow ) )
                    #self.bodyInterior.update()
            #print("...bodyRowH before={} after={}".format(bodyRowH, self.bodyInterior.tk.call( ('grid', 'rowconfigure', self.bodyInterior._w, y, '-minsize' ) )))
        if isBodyCell:
            # Body cell bigger than its headers: grow the header row/column;
            # header column wider than the cell: grow the body column instead.
            rowHdrH = self.rowHdrInterior.tk.call( ('grid', 'rowconfigure', self.rowHdrInterior._w, y, '-minsize' ) )
            if cellH > rowHdrH:
                self.rowHdrInterior.tk.call( ('grid', 'rowconfigure', self.rowHdrInterior._w, y, '-minsize', cellH ) )
                #self.rowHdrInterior.update()
            colHdrW = self.colHdrInterior.tk.call( ('grid', 'columnconfigure', self.colHdrInterior._w, x, '-minsize' ) )
            if cellW > colHdrW:
                self.colHdrInterior.tk.call( ('grid', 'columnconfigure', self.colHdrInterior._w, x, '-minsize', cellW ) )
                #self.colHdrInterior.update()
            elif colHdrW > cellW:
                self.bodyInterior.tk.call( ('grid', 'columnconfigure', self.bodyInterior._w, x, '-minsize', colHdrW ) )
            #print("...rowHdrH={} colHdrW={}".format(rowHdrH, colHdrW))
            #self.bodyInterior.update()
        self.blockConfigureCell = False
    def conformHdrsToBody(self):
        """Widen body columns so spanning column-header cells fit, processing
        headers in increasing span order."""
        self.colsConfigured = True
        # non-spanned cells
        '''
        for hdrCell in self.rowHdrInterior.children.values():
            hdrCellH = hdrCell.winfo_reqheight()
            y = hdrCell.y
            bodyColH = self.bodyInterior.tk.call( ('grid', 'rowconfigure', self.bodyInterior._w, y, '-minsize' ) )
            if hdrCellH > bodyColH:
                self.bodyInterior.tk.call( ('grid', 'rowconfigure', self.bodyInterior._w, y, '-minsize', hdrCellH ) )
        # set min width to body cells
        for bodyCell in self.bodyInterior.children.values():
            bodyCellW = bodyCell.winfo_reqwidth()
            bodyCellH = bodyCell.winfo_reqheight()
            x = bodyCell.x
            hdrColW = self.colHdrInterior.tk.call( ('grid', 'columnconfigure', self.colHdrInterior._w, x, '-minsize' ) )
            if bodyCellW > hdrColW:
                self.bodyInterior.tk.call( ('grid', 'columnconfigure', self.bodyInterior._w, x, '-minsize', bodyCellW ) )
            y = bodyCell.y
            rowColH = self.colHdrInterior.tk.call( ('grid', 'rowconfigure', self.rowHdrInterior._w, y, '-minsize' ) )
            if bodyCellH > rowColH:
                self.bodyInterior.tk.call( ('grid', 'rowconfigure', self.bodyInterior._w, y, '-minsize', bodyCellH ) )
        '''
        hdrCells = self.colHdrInterior.children
        hdrCellSortKeys = [] # sort by col span, column row in header
        for hdrCellId, hdrCell in hdrCells.items():
            if not hdrCell.x & 1:
                colspan = hdrCell.columnspan if hasattr(hdrCell,'columnspan') and hdrCell.columnspan else 1
                hdrCellSortKeys.append( (colspan, hdrCell.x, -hdrCell.y, hdrCellId) )
        hdrCellSortKeys.sort()
        for columnspan, x, y, hdrCellId in hdrCellSortKeys:
            hdrCell = hdrCells[hdrCellId]
            hdrCellW = hdrCell.winfo_reqwidth()
            w = int(hdrCellW / columnspan)
            wWiderAlloced = 0
            wNumWider = 0
            for X in range(x, x + columnspan*2, 2): # spanned cols divided equally over their columns
                bodyColW = self.bodyInterior.tk.call( ('grid', 'columnconfigure', self.bodyInterior._w, X, '-minsize' ) )
                if bodyColW > w: # even cells only
                    wWiderAlloced += bodyColW
                    wNumWider += 1
            if columnspan - wNumWider > 0 and hdrCellW > wWiderAlloced:
                W = int((hdrCellW - wWiderAlloced) / (columnspan - wNumWider))
                for X in range(x, x + columnspan*2, 2): # spanned cols divided equally over their columns
                    bodyColW = self.bodyInterior.tk.call( ('grid', 'columnconfigure', self.bodyInterior._w, X, '-minsize' ) )
                    if W > bodyColW: # even (body) cells only
                        self.bodyInterior.tk.call( ('grid', 'columnconfigure', self.bodyInterior._w, X, '-minsize', W ) )
                        #self.bodyInterior.update()
    def conformBodyCellsToHeader(self):
        """Grow body columns/rows up to the min-sizes already set on the
        corresponding header columns/rows (spacers are skipped)."""
        #print("conformBodyCellsToHeader")
        self.bodyCellsConfigured = True
        for bodyCell in self.bodyInterior.children.values():
            if isinstance(bodyCell,gridSpacer):
                continue
            bodyCellW = bodyCell.winfo_reqwidth()
            bodyCellH = bodyCell.winfo_reqheight()
            x = bodyCell.x
            hdrColW = self.colHdrInterior.tk.call( ('grid', 'columnconfigure', self.colHdrInterior._w, x, '-minsize' ) )
            if bodyCellW < hdrColW:
                self.bodyInterior.tk.call( ('grid', 'columnconfigure', self.bodyInterior._w, x, '-minsize', hdrColW ) )
            y = bodyCell.y
            rowColH = self.colHdrInterior.tk.call( ('grid', 'rowconfigure', self.rowHdrInterior._w, y, '-minsize' ) )
            #print("conform row=" + str(y) + " rowH=" + str(rowColH) + " cellH=" + str(bodyCellH))
            if bodyCellH < rowColH:
                self.bodyInterior.tk.call( ('grid', 'rowconfigure', self.bodyInterior._w, y, '-minsize', rowColH ) )
        #self.colHdrInterior.update()
        #self.rowHdrInterior.update()
        #self.bodyInterior.update()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ftrl-proximal for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.FtrlOptimizer"])
class FtrlOptimizer(optimizer.Optimizer):
"""Optimizer that implements the FTRL algorithm.
This version has support for both online L2 (McMahan et al., 2013) and
shrinkage-type L2, which is the addition of an L2 penalty
to the loss function.
References:
Ad-click prediction:
[McMahan et al., 2013](https://dl.acm.org/citation.cfm?id=2488200)
([pdf](https://dl.acm.org/ft_gateway.cfm?id=2488200&ftid=1388399&dwn=1&CFID=32233078&CFTOKEN=d60fe57a294c056a-CB75C374-F915-E7A6-1573FBBC7BF7D526))
"""
  def __init__(self,
               learning_rate,
               learning_rate_power=-0.5,
               initial_accumulator_value=0.1,
               l1_regularization_strength=0.0,
               l2_regularization_strength=0.0,
               use_locking=False,
               name="Ftrl",
               accum_name=None,
               linear_name=None,
               l2_shrinkage_regularization_strength=0.0):
    r"""Construct a new FTRL optimizer.
    Args:
      learning_rate: A float value or a constant float `Tensor`.
      learning_rate_power: A float value, must be less or equal to zero.
        Controls how the learning rate decreases during training. Use zero for
        a fixed learning rate. See section 3.1 in (McMahan et al., 2013).
      initial_accumulator_value: The starting value for accumulators.
        Only zero or positive values are allowed.
      l1_regularization_strength: A float value, must be greater than or
        equal to zero.
      l2_regularization_strength: A float value, must be greater than or
        equal to zero.
      use_locking: If `True` use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "Ftrl".
      accum_name: The suffix for the variable that keeps the gradient squared
        accumulator. If not present, defaults to name.
      linear_name: The suffix for the variable that keeps the linear gradient
        accumulator. If not present, defaults to name + "_1".
      l2_shrinkage_regularization_strength: A float value, must be greater than
        or equal to zero. This differs from L2 above in that the L2 above is a
        stabilization penalty, whereas this L2 shrinkage is a magnitude penalty.
        The FTRL formulation can be written as:
        w_{t+1} = argmin_w(\hat{g}_{1:t}w + L1*||w||_1 + L2*||w||_2^2), where
        \hat{g} = g + (2*L2_shrinkage*w), and g is the gradient of the loss
        function w.r.t. the weights w.
        Specifically, in the absence of L1 regularization, it is equivalent to
        the following update rule:
        w_{t+1} = w_t - lr_t / (1 + 2*L2*lr_t) * g_t -
                  2*L2_shrinkage*lr_t / (1 + 2*L2*lr_t) * w_t
        where lr_t is the learning rate at t.
        When input is sparse shrinkage will only happen on the active weights.
    Raises:
      ValueError: If one of the arguments is invalid.
    References:
      Ad-click prediction:
        [McMahan et al., 2013](https://dl.acm.org/citation.cfm?id=2488200)
        ([pdf](https://dl.acm.org/ft_gateway.cfm?id=2488200&ftid=1388399&dwn=1&CFID=32233078&CFTOKEN=d60fe57a294c056a-CB75C374-F915-E7A6-1573FBBC7BF7D526))
    """
    super(FtrlOptimizer, self).__init__(use_locking, name)
    # Validate hyperparameters eagerly so misconfiguration fails at
    # construction time rather than during graph execution.
    if initial_accumulator_value < 0.0:
      raise ValueError(
          "initial_accumulator_value %f needs to be positive or zero" %
          initial_accumulator_value)
    if learning_rate_power > 0.0:
      raise ValueError("learning_rate_power %f needs to be negative or zero" %
                       learning_rate_power)
    if l1_regularization_strength < 0.0:
      raise ValueError(
          "l1_regularization_strength %f needs to be positive or zero" %
          l1_regularization_strength)
    if l2_regularization_strength < 0.0:
      raise ValueError(
          "l2_regularization_strength %f needs to be positive or zero" %
          l2_regularization_strength)
    if l2_shrinkage_regularization_strength < 0.0:
      raise ValueError(
          "l2_shrinkage_regularization_strength %f needs to be positive"
          " or zero" % l2_shrinkage_regularization_strength)
    self._learning_rate = learning_rate
    self._learning_rate_power = learning_rate_power
    self._initial_accumulator_value = initial_accumulator_value
    self._l1_regularization_strength = l1_regularization_strength
    self._l2_regularization_strength = l2_regularization_strength
    self._l2_shrinkage_regularization_strength = (
        l2_shrinkage_regularization_strength)
    # Tensor versions of the hyperparameters; populated in _prepare().
    self._learning_rate_tensor = None
    self._learning_rate_power_tensor = None
    self._l1_regularization_strength_tensor = None
    self._l2_regularization_strength_tensor = None
    self._l2_shrinkage_regularization_strength_tensor = None
    self._accum_name = accum_name
    self._linear_name = linear_name
def _create_slots(self, var_list):
    """Create the "accum" and "linear" FTRL slots for each variable.

    Args:
      var_list: list of variables that will be optimized.
    """
    for v in var_list:
        # "accum" starts at the configured initial accumulator value so the
        # per-coordinate adaptive learning rate is defined from step one.
        val = constant_op.constant(
            self._initial_accumulator_value, dtype=v.dtype, shape=v.get_shape())
        self._get_or_make_slot(v, val, "accum", self._accum_name or self._name)
        # "linear" starts at zero.
        self._zeros_slot(v, "linear", self._linear_name or self._name)
def _prepare(self):
    """Convert all Python-number hyperparameters to named tensors.

    Called once before apply; the resulting tensors are cast to each
    variable's dtype inside the _apply_* methods.
    """
    self._learning_rate_tensor = ops.convert_to_tensor(
        self._learning_rate, name="learning_rate")
    self._l1_regularization_strength_tensor = ops.convert_to_tensor(
        self._l1_regularization_strength, name="l1_regularization_strength")
    self._l2_regularization_strength_tensor = ops.convert_to_tensor(
        self._l2_regularization_strength, name="l2_regularization_strength")
    self._l2_shrinkage_regularization_strength_tensor = ops.convert_to_tensor(
        self._l2_shrinkage_regularization_strength,
        name="l2_shrinkage_regularization_strength")
    self._learning_rate_power_tensor = ops.convert_to_tensor(
        self._learning_rate_power, name="learning_rate_power")
def _apply_dense(self, grad, var):
    """Dense FTRL update for one variable.

    Dispatches to apply_ftrl_v2 (which supports L2 shrinkage) only when a
    positive shrinkage strength was configured; otherwise the plain kernel.
    All hyperparameter tensors are cast to the variable's base dtype.
    """
    accum = self.get_slot(var, "accum")
    linear = self.get_slot(var, "linear")
    if self._l2_shrinkage_regularization_strength <= 0.0:
        return training_ops.apply_ftrl(
            var,
            accum,
            linear,
            grad,
            math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
            math_ops.cast(self._l1_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._l2_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
            use_locking=self._use_locking)
    else:
        return training_ops.apply_ftrl_v2(
            var,
            accum,
            linear,
            grad,
            math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
            math_ops.cast(self._l1_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._l2_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
            use_locking=self._use_locking)
def _resource_apply_dense(self, grad, var):
    """Dense FTRL update for one resource variable (passes handles, not vars).

    Same dispatch as _apply_dense: the _v2 kernel is used only when L2
    shrinkage is enabled.
    """
    accum = self.get_slot(var, "accum")
    linear = self.get_slot(var, "linear")
    if self._l2_shrinkage_regularization_strength <= 0.0:
        return training_ops.resource_apply_ftrl(
            var.handle,
            accum.handle,
            linear.handle,
            grad,
            math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
            math_ops.cast(self._l1_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._l2_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
            use_locking=self._use_locking)
    else:
        return training_ops.resource_apply_ftrl_v2(
            var.handle,
            accum.handle,
            linear.handle,
            grad,
            math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
            math_ops.cast(self._l1_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._l2_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
            use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
    """Sparse FTRL update: `grad` is an IndexedSlices (values + indices).

    Dispatches to sparse_apply_ftrl_v2 only when L2 shrinkage is enabled.
    Every hyperparameter tensor is cast to the variable's base dtype so the
    kernel sees consistent dtypes. (Fixed: the original cast the shrinkage
    tensor to grad.dtype.base_dtype, inconsistent with var.dtype used for
    all other arguments and in every other _apply_* path of this class.)
    """
    accum = self.get_slot(var, "accum")
    linear = self.get_slot(var, "linear")
    if self._l2_shrinkage_regularization_strength <= 0.0:
        return training_ops.sparse_apply_ftrl(
            var,
            accum,
            linear,
            grad.values,
            grad.indices,
            math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
            math_ops.cast(self._l1_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._l2_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
            use_locking=self._use_locking)
    else:
        return training_ops.sparse_apply_ftrl_v2(
            var,
            accum,
            linear,
            grad.values,
            grad.indices,
            math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
            math_ops.cast(self._l1_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._l2_regularization_strength_tensor,
                          var.dtype.base_dtype),
            # was: grad.dtype.base_dtype — wrong when var/grad dtypes differ
            math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
            use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
    """Sparse FTRL update for a resource variable.

    Unlike _apply_sparse, `grad` here is a plain tensor of gathered values
    and `indices` arrives separately; casts use grad.dtype (both dtypes are
    the same base dtype for resource variables).
    """
    accum = self.get_slot(var, "accum")
    linear = self.get_slot(var, "linear")
    if self._l2_shrinkage_regularization_strength <= 0.0:
        return training_ops.resource_sparse_apply_ftrl(
            var.handle,
            accum.handle,
            linear.handle,
            grad,
            indices,
            math_ops.cast(self._learning_rate_tensor, grad.dtype),
            math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
            math_ops.cast(self._l2_regularization_strength_tensor, grad.dtype),
            math_ops.cast(self._learning_rate_power_tensor, grad.dtype),
            use_locking=self._use_locking)
    else:
        return training_ops.resource_sparse_apply_ftrl_v2(
            var.handle,
            accum.handle,
            linear.handle,
            grad,
            indices,
            math_ops.cast(self._learning_rate_tensor, grad.dtype),
            math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
            math_ops.cast(self._l2_regularization_strength_tensor, grad.dtype),
            math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
                          grad.dtype),
            math_ops.cast(self._learning_rate_power_tensor, grad.dtype),
            use_locking=self._use_locking)
|
|
# coding: utf-8
import random
from ._cffi import C, ffi, zmq_version, new_uint64_pointer, \
new_int64_pointer, \
new_int_pointer, \
new_binary_data, \
value_uint64_pointer, \
value_int64_pointer, \
value_int_pointer, \
value_binary_data
from .constants import *
from .error import *
from .utils import jsonapi
from .utils.strtypes import bytes, unicode
__all__ = ['Context', 'select', 'Socket', 'zmq_version', 'Poller']
class Context(object):
    """Borg-style wrapper around a 0MQ context.

    All instances share a single state dict (`_state`), so constructing a
    second Context rebinds the shared state to a fresh zmq context.
    """
    _state = {}

    def __init__(self, iothreads=1):
        if not iothreads > 0:
            raise ZMQError(EINVAL)
        # Borg pattern: every instance shares the same __dict__.
        self.__dict__ = self._state
        self.zmq_ctx = C.zmq_init(iothreads)
        self.iothreads = iothreads
        self._closed = False
        self.n_sockets = 0
        self.max_sockets = 32
        self._sockets = {}
        self.sockopts = {LINGER: 1}
        self.linger = 1

    def term(self):
        """Close every tracked socket and terminate the 0MQ context."""
        if self.closed:
            return
        # Iterate over a snapshot: the original iterated self._sockets.items()
        # while deleting entries, which raises RuntimeError on Python 3.
        for k, s in list(self._sockets.items()):
            if not s.closed:
                s.close()
            del self._sockets[k]
        C.zmq_term(self.zmq_ctx)
        self.zmq_ctx = None
        self._closed = True
        self.n_sockets = 0

    @property
    def closed(self):
        return self._closed

    def _add_socket(self, socket):
        """Track a socket; returns a monotonically increasing id."""
        self._sockets[self.n_sockets] = socket
        self.n_sockets += 1
        return self.n_sockets

    def _rm_socket(self, n):
        del self._sockets[n]

    def socket(self, sock_type):
        """Create a Socket of the given type, applying default sockopts."""
        if self._closed:
            raise ZMQError(ENOTSUP)
        socket = Socket(self, sock_type)
        for option, option_value in self.sockopts.items():
            socket.setsockopt(option, option_value)
        return socket

    def set_linger(self, value):
        """Set the default LINGER applied to sockets created afterwards."""
        self.sockopts[LINGER] = value
        self.linger = value
def new_pointer_from_opt(option, length=0):
    """Allocate a fresh C pointer/buffer suited to the given socket option."""
    if option in uint64_opts:
        return new_uint64_pointer()
    if option in int64_opts:
        return new_int64_pointer()
    if option in int_opts:
        return new_int_pointer()
    if option in binary_opts:
        return new_binary_data(length)
    raise ValueError('Invalid option')
def value_from_opt_pointer(option, opt_pointer, length=0):
    """Convert a filled option pointer back into a Python value."""
    # All three integer option families dereference identically.
    if option in uint64_opts or option in int64_opts or option in int_opts:
        return int(opt_pointer[0])
    if option in binary_opts:
        return ffi.string(opt_pointer)
    raise ValueError('Invalid option')
def initialize_opt_pointer(option, value, length=0):
    """Build an initialized C pointer/buffer pair for setting an option."""
    if option in uint64_opts:
        return value_uint64_pointer(value)
    if option in int64_opts:
        return value_int64_pointer(value)
    if option in int_opts:
        return value_int_pointer(value)
    if option in binary_opts:
        return value_binary_data(value, length)
    raise ValueError('Invalid option')
class Socket(object):
    """Thin cffi wrapper around a single 0MQ socket."""

    def __init__(self, context, sock_type):
        self.context = context
        self.sock_type = sock_type
        self.zmq_socket = C.zmq_socket(context.zmq_ctx, sock_type)
        if not self.zmq_socket:
            raise ZMQError()
        self._closed = False
        self._attrs = {}
        self.n = self.context._add_socket(self)
        # errno of the last failing low-level call, for callers that check it.
        self.last_errno = None

    @property
    def closed(self):
        return self._closed

    def close(self):
        """Close the underlying 0MQ socket (idempotent)."""
        if not self._closed:
            C.zmq_close(self.zmq_socket)
            self._closed = True

    def bind(self, address):
        """Bind to *address*; returns the raw zmq_bind return code."""
        ret = C.zmq_bind(self.zmq_socket, address)
        return ret

    def connect(self, address):
        """Connect to *address*; returns the raw zmq_connect return code."""
        ret = C.zmq_connect(self.zmq_socket, address)
        return ret

    def setsockopt(self, option, value):
        """Set a socket option; returns the raw zmq_setsockopt return code."""
        length = None
        if isinstance(value, str):
            length = len(value)
        low_level_data = initialize_opt_pointer(option, value, length)
        low_level_value_pointer = low_level_data[0]
        low_level_sizet = low_level_data[1]
        ret = C.zmq_setsockopt(self.zmq_socket,
                               option,
                               ffi.cast('void*', low_level_value_pointer),
                               low_level_sizet)
        return ret

    def setsockopt_string(self, option, value, encoding='utf-8'):
        """Set a socket option from a unicode string (encoded first)."""
        if not isinstance(value, unicode):
            raise TypeError("unicode strings only")
        return self.setsockopt(option, value.encode(encoding))
    setsockopt_unicode = setsockopt_string

    def getsockopt(self, option, length=0):
        """Read a socket option; returns -1 and sets last_errno on failure."""
        low_level_data = new_pointer_from_opt(option, length=length)
        low_level_value_pointer = low_level_data[0]
        low_level_sizet_pointer = low_level_data[1]
        ret = C.zmq_getsockopt(self.zmq_socket,
                               option,
                               low_level_value_pointer,
                               low_level_sizet_pointer)
        if ret < 0:
            self.last_errno = C.zmq_errno()
            return -1
        return value_from_opt_pointer(option, low_level_value_pointer)

    def send(self, message, flags=0, copy=False, track=False):
        """Send *message* (bytes); returns the raw send return code."""
        zmq_msg = ffi.new('zmq_msg_t*')
        c_message = ffi.new('char[]', message)
        C.zmq_msg_init_size(zmq_msg, len(message))
        C.memcpy(C.zmq_msg_data(zmq_msg), c_message, len(message))
        if zmq_version == 2:
            ret = C.zmq_send(self.zmq_socket, zmq_msg, flags)
        else:
            ret = C.zmq_sendmsg(self.zmq_socket, zmq_msg, flags)
        C.zmq_msg_close(zmq_msg)
        if ret < 0:
            self.last_errno = C.zmq_errno()
        return ret

    def send_json(self, obj, flags=0, copy=False, track=False):
        """Serialize *obj* with the available json module and send it."""
        if jsonapi.jsonmod is None:
            raise ImportError('jsonlib{1,2}, json or simplejson library is required.')
        else:
            msg = jsonapi.dumps(obj)
            return self.send(msg, flags, copy, track)

    def recv(self, flags=0, copy=False, track=False):
        """Receive one message frame; returns its payload as bytes."""
        zmq_msg = ffi.new('zmq_msg_t*')
        C.zmq_msg_init(zmq_msg)
        if zmq_version == 2:
            ret = C.zmq_recv(self.zmq_socket, zmq_msg, flags)
        else:
            ret = C.zmq_recvmsg(self.zmq_socket, zmq_msg, flags)
        if ret < 0:
            C.zmq_msg_close(zmq_msg)
            raise ZMQError(errno=C.zmq_errno())
        value = ffi.buffer(C.zmq_msg_data(zmq_msg), int(C.zmq_msg_size(zmq_msg)))[:]
        C.zmq_msg_close(zmq_msg)
        return value

    def recv_json(self, flags=0, copy=False, track=False):
        """Receive one frame and deserialize it as JSON."""
        if jsonapi.jsonmod is None:
            raise ImportError('jsonlib{1,2}, json or simplejson library is required.')
        else:
            msg = self.recv(flags, copy, track)
            return jsonapi.loads(msg)

    # Following methods from pyzmq.pysocket
    def bind_to_random_port(self, addr, min_port=49152, max_port=65536, max_tries=100):
        """s.bind_to_random_port(addr, min_port=49152, max_port=65536, max_tries=100)
        Bind this socket to a random port in a range.
        Parameters
        ----------
        addr : str
            The address string without the port to pass to ``Socket.bind()``.
        min_port : int, optional
            The minimum port in the range of ports to try (inclusive).
        max_port : int, optional
            The maximum port in the range of ports to try (exclusive).
        max_tries : int, optional
            The maximum number of bind attempts to make.
        Returns
        -------
        port : int
            The port the socket was bound to.
        Raises
        ------
        ZMQBindError
            if `max_tries` reached before successful bind
        """
        for i in range(max_tries):
            try:
                port = random.randrange(min_port, max_port)
                self.bind('%s:%s' % (addr, port))
            except ZMQError as exception:
                # Fixed: the original referenced zmq.EADDRINUSE, but no `zmq`
                # module is imported here — the constant is star-imported.
                if not exception.errno == EADDRINUSE:
                    raise
            else:
                return port
        raise ZMQBindError("Could not bind socket to random port.")

    def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
        """Send a sequence of buffers as a multipart message.

        SNDMORE is applied automatically to every part before the last.
        Returns whatever the final send() returns.
        """
        for msg in msg_parts[:-1]:
            self.send(msg, SNDMORE | flags, copy=copy, track=track)
        # Send the last part without the extra SNDMORE flag.
        return self.send(msg_parts[-1], flags, copy=copy, track=track)

    def recv_multipart(self, flags=0, copy=True, track=False):
        """Receive a multipart message as a list of frames.

        Blocks per recv() semantics; keeps receiving while RCVMORE is set.
        """
        parts = [self.recv(flags, copy=copy, track=track)]
        # have first part already, only loop while more to receive
        while self.getsockopt(RCVMORE):
            part = self.recv(flags, copy=copy, track=track)
            parts.append(part)
        return parts

    def send_string(self, u, flags=0, copy=False, track=False, encoding='utf-8'):
        """send a Python unicode string as a message with an encoding
        0MQ communicates with raw bytes, so you must encode/decode
        text (unicode on py2, str on py3) around 0MQ.
        Parameters
        ----------
        u : Python unicode string (unicode on py2, str on py3)
            The unicode string to send.
        flags : int, optional
            Any valid send flag.
        encoding : str [default: 'utf-8']
            The encoding to be used
        """
        # Fixed: `basestring` is a NameError on Python 3; (bytes, unicode)
        # from .utils.strtypes is the portable equivalent.
        if not isinstance(u, (bytes, unicode)):
            raise TypeError("unicode/str objects only")
        return self.send(u.encode(encoding), flags=flags, copy=copy, track=track)
    send_unicode = send_string

    def recv_string(self, flags=0, track=False, encoding='utf-8'):
        """receive a unicode string, as sent by send_string
        Parameters
        ----------
        flags : int
            Any valid recv flag.
        encoding : str [default: 'utf-8']
            The encoding to be used
        Returns
        -------
        s : unicode string (unicode on py2, str on py3)
            The Python unicode string that arrives as encoded bytes.
        """
        msg = self.recv(flags=flags, copy=False, track=track)
        # Fixed: recv() returns plain bytes (no .bytes attribute) and the
        # original called codecs.decode without importing codecs.
        return msg.decode(encoding)
    recv_unicode = recv_string
def _make_zmq_pollitem(socket, flags):
    """Build a zmq_pollitem_t value for a Socket wrapper."""
    pollitem = ffi.new('zmq_pollitem_t*')
    pollitem.socket = socket.zmq_socket
    pollitem.fd = 0
    pollitem.events = flags
    pollitem.revents = 0
    # Dereference: callers collect these by value into a C array.
    return pollitem[0]
def _make_zmq_pollitem_fromfd(socket_fd, flags):
    """Build a zmq_pollitem_t value for a raw file descriptor."""
    pollitem = ffi.new('zmq_pollitem_t*')
    pollitem.socket = ffi.NULL
    pollitem.fd = socket_fd
    pollitem.events = flags
    pollitem.revents = 0
    return pollitem[0]
def _cffi_poll(zmq_pollitem_list, poller, timeout=-1):
    """Run zmq_poll over prepared pollitems and map results back.

    Args:
        zmq_pollitem_list: sequence of zmq_pollitem_t values.
        poller: Poller whose _sockets maps low-level handles to wrappers.
        timeout: milliseconds (zmq 3.x convention); -1 blocks.

    Returns:
        list of (Socket-or-fd, revents) pairs for ready items.
    """
    if zmq_version == 2:
        # zmq 2.x expects the timeout in microseconds, not milliseconds.
        timeout = timeout * 1000
    items = ffi.new('zmq_pollitem_t[]', zmq_pollitem_list)
    list_length = ffi.cast('int', len(zmq_pollitem_list))
    c_timeout = ffi.cast('long', timeout)
    C.zmq_poll(items, list_length, c_timeout)
    result = []
    for index in range(len(items)):
        if items[index].revents > 0:
            # NULL socket means this pollitem wraps a plain fd.
            if not items[index].socket == ffi.NULL:
                result.append((poller._sockets[items[index].socket],
                               items[index].revents))
            else:
                result.append((items[index].fd, items[index].revents))
    return result
def _poll(sockets, timeout):
    """Poll (socket, flags) pairs; return [(socket, revents), ...] for ready ones."""
    pollitems = []
    by_handle = {}
    for pair in sockets:
        by_handle[pair[0].zmq_socket] = pair
        pollitems.append(_make_zmq_pollitem(pair[0], pair[1]))
    items = ffi.new('zmq_pollitem_t[]', pollitems)
    C.zmq_poll(items,
               ffi.cast('int', len(pollitems)),
               ffi.cast('long', timeout))
    ready = []
    for index in range(len(items)):
        if items[index].revents > 0:
            # Map the low-level handle back to the wrapper object.
            ready.append((by_handle[items[index].socket][0],
                          items[index].revents))
    return ready
class Poller(object):
    """Minimal poller over 0MQ sockets and raw file descriptors."""

    def __init__(self):
        # socket-or-fd -> registered event flags
        self.sockets_flags = {}
        # low-level zmq socket handle -> Socket wrapper (for result mapping)
        self._sockets = {}
        # socket-or-fd -> prepared zmq_pollitem_t value
        self.c_sockets = {}

    @property
    def sockets(self):
        return self.sockets_flags

    def register(self, socket, flags=POLLIN | POLLOUT):
        """Register a socket/fd; flags=0 unregisters an existing entry."""
        if flags:
            self.sockets_flags[socket] = flags
            if isinstance(socket, int):
                self.c_sockets[socket] = _make_zmq_pollitem_fromfd(socket, flags)
            else:
                self.c_sockets[socket] = _make_zmq_pollitem(socket, flags)
                self._sockets[socket.zmq_socket] = socket
        elif socket in self.sockets_flags:
            # unregister sockets registered with no events
            self.unregister(socket)
        else:
            # ignore new sockets with no events
            pass

    def modify(self, socket, flags=POLLIN | POLLOUT):
        """Change the event flags of a registered socket (re-registers)."""
        self.register(socket, flags)

    def unregister(self, socket):
        """Forget a socket/fd; raises KeyError if it was never registered."""
        del self.sockets_flags[socket]
        del self.c_sockets[socket]
        if not isinstance(socket, int):
            del self._sockets[socket.zmq_socket]

    def poll(self, timeout=None):
        """Poll all registered sockets; timeout in ms, None/negative blocks."""
        if timeout is None:
            timeout = -1
        timeout = int(timeout)
        if timeout < 0:
            timeout = -1
        # list(): on Python 3 dict.values() is a view, and cffi needs a real
        # sequence to initialize the zmq_pollitem_t[] array in _cffi_poll.
        items = _cffi_poll(list(self.c_sockets.values()),
                           self,
                           timeout=timeout)
        return items
def select(rlist, wlist, xlist, timeout=None):
    """select()-style interface over zmq_poll for 0MQ sockets.

    timeout is in seconds (converted to ms for zmq_poll); None blocks.
    Returns three lists: readable, writable, errored.
    """
    if timeout is None:
        timeout = -1
    # Convert from sec -> us for zmq_poll.
    # zmq_poll accepts 3.x style timeout in ms
    timeout = int(timeout * 1000.0)
    if timeout < 0:
        timeout = -1
    flag_sources = ((rlist, POLLIN), (wlist, POLLOUT), (xlist, POLLERR))
    sockets = []
    for s in set(rlist + wlist + xlist):
        flags = 0
        for members, flag in flag_sources:
            if s in members:
                flags |= flag
        sockets.append((s, flags))
    return_sockets = _poll(sockets, timeout)
    readable, writable, errored = [], [], []
    for s, flags in return_sockets:
        if flags & POLLIN:
            readable.append(s)
        if flags & POLLOUT:
            writable.append(s)
        if flags & POLLERR:
            errored.append(s)
    return readable, writable, errored
|
|
import inspect
import sys
import os
import onyxexceptions as ex
import re
from functools import wraps
import time
import random
import string
import unicodedata
def exit_with_error(error="Non Specified Error"):
    """Print *error* to stdout and terminate the process with status 1."""
    print(error)
    sys.exit(1)
def search_class(base_klass, entities_list, return_class=False):
    """Return the first strict subclass of base_klass in entities_list.

    Returns None when no strict subclass is present. `return_class` is kept
    for backward compatibility; it is not used.
    """
    candidates = (klass for klass in entities_list
                  if issubclass(klass, base_klass) and klass is not base_klass)
    return next(candidates, None)
def check_for_file(settings, file_name):
    """Return True when file_name exists under settings.PROJECT_ROOT."""
    full_path = os.path.join(settings.PROJECT_ROOT, file_name)
    return os.path.exists(full_path)
def fix_file_extension(file_name, extension):
    """Return file_name, appending ".extension" unless it already ends with it."""
    suffix = ".%s" % extension
    return file_name if file_name.endswith(suffix) else file_name + suffix
def add_to_path(path):
    """Prepend *path* to sys.path unless it is already present."""
    if path not in sys.path:
        sys.path.insert(0, path)
def onyx_wrap_exception():
    """Re-raise the current exception wrapped as an OnyxException.

    wrap_exception() instantiates the exception itself (it evaluates
    ``exceptionName(message)``), so it must be given the exception *class*.
    The original passed an already-constructed instance, which made
    wrap_exception try to call that instance and fail.
    """
    wrap_exception(ex.OnyxException, "Onyx Spider General Exception")
def log_exception():
    """Log the active exception's traceback via the Onyx logger.

    Intended to be called from inside an ``except`` block: formats the
    current traceback and routes it at 'error' level through
    log_if_possible.
    """
    import traceback
    from config.AppContext import getOnyxLogger
    log = getOnyxLogger()
    log_if_possible(traceback.format_exc(), log, 'error')
def wrap_exception(exceptionName, message):
    """Re-raise the active exception as *exceptionName*, keeping its traceback.

    Args:
        exceptionName: exception *class* to raise (it is called with message).
        message: message for the replacement exception.

    When the app config flag SHOW_DEBUG_INFO equals 1, the original
    traceback is also logged before re-raising.
    """
    from config.AppContext import appContext
    # Traceback of the exception currently being handled.
    trace = sys.exc_info()[2]
    try:
        value = appContext.getInstance().getValue("SHOW_DEBUG_INFO")
    except ex.ConfigError:
        # Missing config entry: treat as "debug info disabled".
        value = 0
    if value == 1:
        log_exception()
    # Attach the original traceback to the new exception.
    raise exceptionName(message).with_traceback(trace)
def benchmark(logger=None):
    """Decorator factory that reports a function's wall-clock run time.

    After each call the function name and elapsed seconds are sent to
    ``logger.info`` when a logger is given, otherwise printed. The wrapped
    function's return value is passed through unchanged.
    """
    def decoBenchmark(func):
        # @wraps added for consistency with report_run/retry in this module,
        # so __name__/__doc__ survive decoration.
        @wraps(func)
        def wrapper(*args, **kwargs):
            start = time.time()
            res = func(*args, **kwargs)
            if logger:
                logger.info(func.__name__, time.time() - start, 'secs wall time')
            else:
                print(func.__name__, time.time() - start, 'secs wall time')
            return res
        return wrapper
    return decoBenchmark
# Type checking
def isObjOfType(obj, _type):
    """Return True if type(obj) is _type or any transitive subclass of it.

    The original used _type.__subclasses__(), which only lists *direct*
    subclasses, so instances of grandchildren were wrongly rejected; the
    subclass tree is now walked recursively.
    """
    def _all_subclasses(klass):
        direct = klass.__subclasses__()
        return direct + [deeper for sub in direct
                         for deeper in _all_subclasses(sub)]
    return type(obj) in ([_type] + _all_subclasses(_type))
def buffer_optimal_size(IOSize):
    """Pick an IO buffer size: 16 KiB for small/unknown IO, 32 KiB for >= 1 MiB."""
    one_mib = 1 * 1024 * 1024
    if IOSize == 0 or IOSize < one_mib:
        return 16384
    return 32 * 1024
def report_run(func):
    """Decorator: time the wrapped method and report the elapsed seconds
    through self.reportRuntime()."""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        start = time.time()
        result = func(self, *args, **kwargs)
        self.reportRuntime(time.time() - start)
        return result
    return wrapper
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):
    """Retry calling the decorated function using an exponential backoff.
    original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
    :param ExceptionToCheck: the exception to check. may be a tuple of
        exceptions to check
    :type ExceptionToCheck: Exception or tuple
    :param tries: number of times to try (not retry) before giving up
    :type tries: int
    :param delay: initial delay between retries in seconds
    :type delay: int
    :param backoff: backoff multiplier e.g. value of 2 will double the delay
        each retry
    :type backoff: int
    :param logger: logger to use. If None, print
    :type logger: logging.Logger instance
    """
    def deco_retry(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            remaining, wait = tries, delay
            while remaining > 1:
                try:
                    return f(*args, **kwargs)
                except ExceptionToCheck as e:
                    msg = "%s, Retrying in %d seconds..." % (str(e), wait)
                    if logger:
                        logger.warning(msg)
                    else:
                        print(msg)
                    time.sleep(wait)
                    remaining -= 1
                    wait *= backoff
            # Final attempt: exceptions propagate to the caller.
            return f(*args, **kwargs)
        return f_retry  # true decorator
    return deco_retry
def delay(delayFactor=1, Mindelay=0, Indeviation=0):
    """Decorator factory that sleeps before each call of the wrapped function.

    Sleep time is ``(Mindelay + jitter) * delayFactor`` seconds, where jitter
    is uniform in [-Indeviation, +Indeviation] with millisecond resolution.

    Fixes vs. the original: random.randint was given float bounds
    (``Indeviation * 1000.0``), which raises on Python 3 — the deviation is
    now truncated to an int; the inner wrapper no longer shadows the name
    ``delay``; @wraps preserves the wrapped function's metadata.
    """
    def decoDelay(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            FACTOR = 1000.0
            deviation = int(Indeviation * FACTOR)
            randomize = random.randint(-deviation, deviation) / FACTOR
            time.sleep((Mindelay + randomize) * delayFactor)
            return f(*args, **kwargs)
        return wrapper
    return decoDelay
def toKB(bytes):
    """Convert a byte count to kibibytes."""
    return bytes / 1024

def toMB(bytes):
    """Convert a byte count to mebibytes."""
    return toKB(bytes) / 1024

def fixZeroResult(result):
    """Return a small positive stand-in (0.01) when result is exactly zero."""
    return 0.01 if result == 0 else result
# http://code.activestate.com/recipes/145672-multi-line-string-block-formatting/
def format_block(block):
    """Format the given block of text, trimming leading/trailing
    empty lines and any leading whitespace that is common to all lines.
    The purpose is to let us list a code block as a multiline,
    triple-quoted Python string, taking care of indentation concerns.

    Fixed: empty or blank-only input no longer raises IndexError (the
    original indexed lines[0] after stripping all lines away).
    """
    def _strip_blank_edges(seq):
        # remove leading/trailing empty lines in place
        while seq and not seq[0]:
            del seq[0]
        while seq and not seq[-1]:
            del seq[-1]

    # separate block into lines
    lines = str(block).split('\n')
    _strip_blank_edges(lines)
    if lines:
        # look at first line to see how much indentation to trim
        ws = re.match(r'\s*', lines[0]).group(0)
        if ws:
            lines = [x.replace(ws, '', 1) for x in lines]
    # remove leading/trailing blank lines (after leading ws removal)
    # we do this again in case there were pure-whitespace lines
    _strip_blank_edges(lines)
    return '\n'.join(lines) + '\n'
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*."""
    picks = (random.choice(chars) for _ in range(size))
    return ''.join(picks)
def remove_accents(data):
    """Strip accents and every non-ASCII-letter character from a string.

    Each space-separated word is NFD-normalized so accented characters
    decompose into a base letter plus combining marks; anything that is not
    a plain ASCII letter (combining marks, digits, punctuation) is dropped,
    and the words are re-joined with single spaces.
    """
    # NOTE(review): isObjOfType(data, str) also matches str subclasses;
    # non-str input is passed through to .split() unconverted — presumably
    # callers only pass strings. TODO confirm.
    dataToProcess = str(data) if isObjOfType(data, str) else data
    return str(' '.join(''.join(x for x in unicodedata.normalize('NFD', d) if x in string.ascii_letters) for d in
               dataToProcess.split(' ')))
def isFileNull(file):
    """Return True when the file at path *file* has zero size."""
    # index 6 of os.stat() is st_size
    return os.stat(file)[6] == 0

def goodStatusCode(code):
    """Return True for HTTP 2xx (success) status codes."""
    return 200 <= code < 300
#Logging
def log_if_possible(msg, logger=None, level='info'):
    """Send msg to logger at the given level, or print it when no logger."""
    if not logger:
        print(msg)
        return
    try:
        emit = getattr(logger, level)
        emit(msg)
    except AttributeError:
        # Logger lacks the requested level method.
        wrap_exception(ex.LoggerException, 'Invalid logger method')
def logBeforeAfter(before, after, logger=None, level='info'):
    """Decorator factory: log *before* on entry and *after* on exit.

    Fixed: the original logged ``mbefore`` on both sides, so the *after*
    message was never emitted.
    """
    def deco_logBeforeAfter(f):
        @wraps(f)
        def f_logbefaft(*args, **kwargs):
            mbefore, mafter = before, after
            log_if_possible(mbefore, logger, level)
            ret = f(*args, **kwargs)
            # was: log_if_possible(mbefore, ...) — the after-message was lost
            log_if_possible(mafter, logger, level)
            return ret
        return f_logbefaft
    return deco_logBeforeAfter
def time_now():
    """Return the current local datetime."""
    import datetime
    return datetime.datetime.now()
#Introspection
def get_current_method_name():
    """Return the name of the calling function (one frame up the stack)."""
    caller_record = inspect.stack()[1]
    return caller_record[3]
|
|
# Copyright 2010 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import os
import re
from oslo.config import cfg
from oslo import messaging
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute import ips
from nova.api.openstack.compute.views import servers as views_servers
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import block_device
from nova import compute
from nova.compute import flavors
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova import objects
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import policy
from nova import utils
# Config options owned by this API module.
server_opts = [
    cfg.BoolOpt('enable_instance_password',
                default=True,
                help='Enables returning of the instance password by the'
                     ' relevant server API calls such as create, rebuild'
                     ' or rescue, If the hypervisor does not support'
                     ' password injection then the password returned will'
                     ' not be correct'),
]
CONF = cfg.CONF
CONF.register_opts(server_opts)
# Options defined in other modules that this module reads.
CONF.import_opt('network_api_class', 'nova.network')
CONF.import_opt('reclaim_instance_interval', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
# Tracks whether the one-time XML deprecation warning has been emitted
# (mutated by make_server()).
XML_WARNING = False
def make_fault(elem):
    """Attach a <fault> subelement (code/created/message/details) to elem."""
    fault = xmlutil.SubTemplateElement(elem, 'fault', selector='fault')
    fault.set('code')
    fault.set('created')
    message_elem = xmlutil.SubTemplateElement(fault, 'message')
    message_elem.text = 'message'
    details_elem = xmlutil.SubTemplateElement(fault, 'details')
    details_elem.text = 'details'
def make_server(elem, detailed=False):
    """Populate an XML template element with server attributes.

    Args:
        elem: the xmlutil template element to decorate.
        detailed: when True, also add the full attribute set (user/tenant,
            timestamps, image, flavor, fault, metadata, addresses);
            otherwise only name/id and links.
    """
    elem.set('name')
    elem.set('id')
    # Emit the XML-deprecation warning only once per process.
    global XML_WARNING
    if not XML_WARNING:
        LOG.warn(_LW('XML support has been deprecated and may be removed '
                     'as early as the Juno release.'))
        XML_WARNING = True
    if detailed:
        elem.set('userId', 'user_id')
        elem.set('tenantId', 'tenant_id')
        elem.set('updated')
        elem.set('created')
        elem.set('hostId')
        elem.set('accessIPv4')
        elem.set('accessIPv6')
        elem.set('status')
        elem.set('progress')
        elem.set('reservation_id')
        # Attach image node
        image = xmlutil.SubTemplateElement(elem, 'image', selector='image')
        image.set('id')
        xmlutil.make_links(image, 'links')
        # Attach flavor node
        flavor = xmlutil.SubTemplateElement(elem, 'flavor', selector='flavor')
        flavor.set('id')
        xmlutil.make_links(flavor, 'links')
        # Attach fault node
        make_fault(elem)
        # Attach metadata node
        elem.append(common.MetadataTemplate())
        # Attach addresses node
        elem.append(ips.AddressesTemplate())
    xmlutil.make_links(elem, 'links')
# Default XML namespace map for server templates (compute v1.1 + Atom links).
server_nsmap = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM}
class ServerTemplate(xmlutil.TemplateBuilder):
    """XML template for a single detailed server representation."""

    def construct(self):
        server_elem = xmlutil.TemplateElement('server', selector='server')
        make_server(server_elem, detailed=True)
        return xmlutil.MasterTemplate(server_elem, 1, nsmap=server_nsmap)
class MinimalServersTemplate(xmlutil.TemplateBuilder):
    """XML template for a server list with only minimal per-server fields."""

    def construct(self):
        servers_root = xmlutil.TemplateElement('servers')
        server_elem = xmlutil.SubTemplateElement(servers_root, 'server',
                                                 selector='servers')
        make_server(server_elem)
        xmlutil.make_links(servers_root, 'servers_links')
        return xmlutil.MasterTemplate(servers_root, 1, nsmap=server_nsmap)
class ServersTemplate(xmlutil.TemplateBuilder):
    """XML template for a server list with full per-server detail."""

    def construct(self):
        servers_root = xmlutil.TemplateElement('servers')
        server_elem = xmlutil.SubTemplateElement(servers_root, 'server',
                                                 selector='servers')
        make_server(server_elem, detailed=True)
        return xmlutil.MasterTemplate(servers_root, 1, nsmap=server_nsmap)
class ServerAdminPassTemplate(xmlutil.TemplateBuilder):
    """Slave template adding the adminPass attribute to a server element."""

    def construct(self):
        server_elem = xmlutil.TemplateElement('server')
        server_elem.set('adminPass')
        return xmlutil.SlaveTemplate(server_elem, 1, nsmap=server_nsmap)
class ServerMultipleCreateTemplate(xmlutil.TemplateBuilder):
    """XML template for multi-create responses (reservation id only)."""

    def construct(self):
        server_elem = xmlutil.TemplateElement('server')
        server_elem.set('reservation_id')
        return xmlutil.MasterTemplate(server_elem, 1, nsmap=server_nsmap)
def FullServerTemplate():
    """Server template with the adminPass slave template attached."""
    template = ServerTemplate()
    template.attach(ServerAdminPassTemplate())
    return template
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
"""Common deserializer to handle xml-formatted server create requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
metadata_deserializer = common.MetadataXMLDeserializer()
def _extract_personality(self, server_node):
    """Marshal the personality attribute of a parsed request.

    Returns a list of {"path": ..., "contents": ...} dicts, one per <file>
    child of the <personality> node, or None when the node is absent.
    """
    node = self.find_first_child_named(server_node, "personality")
    if node is not None:
        personality = []
        for file_node in self.find_children_named(node, "file"):
            item = {}
            # "path" is an optional attribute; "contents" is the text body.
            if file_node.hasAttribute("path"):
                item["path"] = file_node.getAttribute("path")
            item["contents"] = self.extract_text(file_node)
            personality.append(item)
        return personality
    else:
        return None
def _extract_server(self, node):
    """Marshal the server attribute of a parsed request.

    Builds a dict mirroring the JSON request body: simple string
    attributes are copied from the <server> element's attributes, while
    structured sections (metadata, personality, networks, security groups,
    block device mappings, scheduler hints) are delegated to the
    _extract_* helpers and included only when present.
    """
    server = {}
    server_node = self.find_first_child_named(node, 'server')
    attributes = ["name", "imageRef", "flavorRef", "adminPass",
                  "accessIPv4", "accessIPv6", "key_name",
                  "availability_zone", "min_count", "max_count"]
    for attr in attributes:
        if server_node.getAttribute(attr):
            server[attr] = server_node.getAttribute(attr)
    # Boolean-like attribute: parsed through bool_from_string.
    res_id = server_node.getAttribute('return_reservation_id')
    if res_id:
        server['return_reservation_id'] = \
            strutils.bool_from_string(res_id)
    scheduler_hints = self._extract_scheduler_hints(server_node)
    if scheduler_hints:
        server['OS-SCH-HNT:scheduler_hints'] = scheduler_hints
    metadata_node = self.find_first_child_named(server_node, "metadata")
    if metadata_node is not None:
        server["metadata"] = self.extract_metadata(metadata_node)
    user_data_node = self.find_first_child_named(server_node, "user_data")
    if user_data_node is not None:
        server["user_data"] = self.extract_text(user_data_node)
    personality = self._extract_personality(server_node)
    if personality is not None:
        server["personality"] = personality
    networks = self._extract_networks(server_node)
    if networks is not None:
        server["networks"] = networks
    security_groups = self._extract_security_groups(server_node)
    if security_groups is not None:
        server["security_groups"] = security_groups
    # NOTE(vish): this is not namespaced in json, so leave it without a
    # namespace for now
    block_device_mapping = self._extract_block_device_mapping(server_node)
    if block_device_mapping is not None:
        server["block_device_mapping"] = block_device_mapping
    block_device_mapping_v2 = self._extract_block_device_mapping_v2(
        server_node)
    if block_device_mapping_v2 is not None:
        server["block_device_mapping_v2"] = block_device_mapping_v2
    # NOTE(vish): Support this incorrect version because it was in the code
    # base for a while and we don't want to accidentally break
    # anyone that might be using it.
    auto_disk_config = server_node.getAttribute('auto_disk_config')
    if auto_disk_config:
        server['OS-DCF:diskConfig'] = auto_disk_config
    # The canonical spelling wins when both attributes are present.
    auto_disk_config = server_node.getAttribute('OS-DCF:diskConfig')
    if auto_disk_config:
        server['OS-DCF:diskConfig'] = auto_disk_config
    config_drive = server_node.getAttribute('config_drive')
    if config_drive:
        server['config_drive'] = config_drive
    return server
def _extract_block_device_mapping(self, server_node):
"""Marshal the block_device_mapping node of a parsed request."""
node = self.find_first_child_named(server_node, "block_device_mapping")
if node:
block_device_mapping = []
for child in self.extract_elements(node):
if child.nodeName != "mapping":
continue
mapping = {}
attributes = ["volume_id", "snapshot_id", "device_name",
"virtual_name", "volume_size"]
for attr in attributes:
value = child.getAttribute(attr)
if value:
mapping[attr] = value
attributes = ["delete_on_termination", "no_device"]
for attr in attributes:
value = child.getAttribute(attr)
if value:
mapping[attr] = strutils.bool_from_string(value)
block_device_mapping.append(mapping)
return block_device_mapping
else:
return None
def _extract_block_device_mapping_v2(self, server_node):
"""Marshal the new block_device_mappings."""
node = self.find_first_child_named(server_node,
"block_device_mapping_v2")
if node:
block_device_mapping = []
for child in self.extract_elements(node):
if child.nodeName != "mapping":
continue
block_device_mapping.append(
dict((attr, child.getAttribute(attr))
for attr in block_device.bdm_new_api_fields
if child.getAttribute(attr)))
return block_device_mapping
def _extract_scheduler_hints(self, server_node):
"""Marshal the scheduler hints attribute of a parsed request."""
node = self.find_first_child_named_in_namespace(server_node,
"http://docs.openstack.org/compute/ext/scheduler-hints/api/v2",
"scheduler_hints")
if node:
scheduler_hints = {}
for child in self.extract_elements(node):
scheduler_hints.setdefault(child.nodeName, [])
value = self.extract_text(child).strip()
scheduler_hints[child.nodeName].append(value)
return scheduler_hints
else:
return None
def _extract_networks(self, server_node):
"""Marshal the networks attribute of a parsed request."""
node = self.find_first_child_named(server_node, "networks")
if node is not None:
networks = []
for network_node in self.find_children_named(node,
"network"):
item = {}
if network_node.hasAttribute("uuid"):
item["uuid"] = network_node.getAttribute("uuid")
if network_node.hasAttribute("fixed_ip"):
item["fixed_ip"] = network_node.getAttribute("fixed_ip")
if network_node.hasAttribute("port"):
item["port"] = network_node.getAttribute("port")
networks.append(item)
return networks
else:
return None
def _extract_security_groups(self, server_node):
"""Marshal the security_groups attribute of a parsed request."""
node = self.find_first_child_named(server_node, "security_groups")
if node is not None:
security_groups = []
for sg_node in self.find_children_named(node, "security_group"):
item = {}
name = self.find_attribute_or_element(sg_node, 'name')
if name:
item["name"] = name
security_groups.append(item)
return security_groups
else:
return None
class ActionDeserializer(CommonDeserializer):
    """Deserializer to handle xml-formatted server action requests.

    Handles standard server attributes as well as optional metadata
    and personality attributes.  The top-level element name of the XML
    body selects which action-specific parser runs.
    """
    def default(self, string):
        """Parse an XML action request body.

        :param string: raw XML request body.
        :returns: {'body': {action_name: parsed_action_data}}
        """
        dom = xmlutil.safe_minidom_parse_string(string)
        action_node = dom.childNodes[0]
        action_name = action_node.tagName
        # Dispatch on the root tag; unknown actions fall back to the
        # parent class's default() handler.
        action_deserializer = {
            'createImage': self._action_create_image,
            'changePassword': self._action_change_password,
            'reboot': self._action_reboot,
            'rebuild': self._action_rebuild,
            'resize': self._action_resize,
            'confirmResize': self._action_confirm_resize,
            'revertResize': self._action_revert_resize,
        }.get(action_name, super(ActionDeserializer, self).default)
        action_data = action_deserializer(action_node)
        return {'body': {action_name: action_data}}
    def _action_create_image(self, node):
        """Parse createImage: only the 'name' attribute (plus metadata)."""
        return self._deserialize_image_action(node, ('name',))
    def _action_change_password(self, node):
        """Parse changePassword; adminPass attribute is mandatory."""
        if not node.hasAttribute("adminPass"):
            raise AttributeError("No adminPass was specified in request")
        return {"adminPass": node.getAttribute("adminPass")}
    def _action_reboot(self, node):
        """Parse reboot; the 'type' attribute is mandatory."""
        if not node.hasAttribute("type"):
            raise AttributeError("No reboot type was specified in request")
        return {"type": node.getAttribute("type")}
    def _action_rebuild(self, node):
        """Parse rebuild: optional name/diskConfig/metadata/personality
        and access IPs; the imageRef attribute is mandatory.
        """
        rebuild = {}
        if node.hasAttribute("name"):
            name = node.getAttribute("name")
            if not name:
                raise AttributeError("Name cannot be blank")
            rebuild['name'] = name
        # Both spellings of the disk-config attribute are accepted; the
        # namespaced one (checked second) wins if both are present.
        if node.hasAttribute("auto_disk_config"):
            rebuild['OS-DCF:diskConfig'] = node.getAttribute(
                "auto_disk_config")
        if node.hasAttribute("OS-DCF:diskConfig"):
            rebuild['OS-DCF:diskConfig'] = node.getAttribute(
                "OS-DCF:diskConfig")
        metadata_node = self.find_first_child_named(node, "metadata")
        if metadata_node is not None:
            rebuild["metadata"] = self.extract_metadata(metadata_node)
        personality = self._extract_personality(node)
        if personality is not None:
            rebuild["personality"] = personality
        if not node.hasAttribute("imageRef"):
            raise AttributeError("No imageRef was specified in request")
        rebuild["imageRef"] = node.getAttribute("imageRef")
        if node.hasAttribute("adminPass"):
            rebuild["adminPass"] = node.getAttribute("adminPass")
        if node.hasAttribute("accessIPv4"):
            rebuild["accessIPv4"] = node.getAttribute("accessIPv4")
        if node.hasAttribute("accessIPv6"):
            rebuild["accessIPv6"] = node.getAttribute("accessIPv6")
        if node.hasAttribute("preserve_ephemeral"):
            # strict=True: anything but a recognized boolean string raises.
            rebuild["preserve_ephemeral"] = strutils.bool_from_string(
                node.getAttribute("preserve_ephemeral"), strict=True)
        return rebuild
    def _action_resize(self, node):
        """Parse resize; flavorRef is mandatory, diskConfig optional."""
        resize = {}
        if node.hasAttribute("flavorRef"):
            resize["flavorRef"] = node.getAttribute("flavorRef")
        else:
            raise AttributeError("No flavorRef was specified in request")
        # Same dual-spelling handling as in _action_rebuild above.
        if node.hasAttribute("auto_disk_config"):
            resize['OS-DCF:diskConfig'] = node.getAttribute("auto_disk_config")
        if node.hasAttribute("OS-DCF:diskConfig"):
            resize['OS-DCF:diskConfig'] = node.getAttribute(
                "OS-DCF:diskConfig")
        return resize
    def _action_confirm_resize(self, node):
        # confirmResize carries no body data.
        return None
    def _action_revert_resize(self, node):
        # revertResize carries no body data.
        return None
    def _deserialize_image_action(self, node, allowed_attributes):
        """Collect the allowed attributes plus optional metadata from an
        image-action node into a dict.
        """
        data = {}
        for attribute in allowed_attributes:
            value = node.getAttribute(attribute)
            if value:
                data[attribute] = value
        metadata_node = self.find_first_child_named(node, 'metadata')
        if metadata_node is not None:
            metadata = self.metadata_deserializer.extract_metadata(
                metadata_node)
            data['metadata'] = metadata
        return data
class CreateDeserializer(CommonDeserializer):
    """Deserializer to handle xml-formatted server create requests.

    Handles standard server attributes as well as optional metadata
    and personality attributes
    """
    def default(self, string):
        """Deserialize an xml-formatted server create request."""
        dom = xmlutil.safe_minidom_parse_string(string)
        return {'body': {'server': self._extract_server(dom)}}
class Controller(wsgi.Controller):
    """The Server API base controller class for the OpenStack API."""
    # View builder class used to render instance objects/dicts into API
    # responses (index/detail/show/create below call self._view_builder).
    _view_builder_class = views_servers.ViewBuilder
@staticmethod
def _add_location(robj):
# Just in case...
if 'server' not in robj.obj:
return robj
link = filter(lambda l: l['rel'] == 'self',
robj.obj['server']['links'])
if link:
robj['Location'] = utils.utf8(link[0]['href'])
# Convenience return
return robj
def __init__(self, ext_mgr=None, **kwargs):
super(Controller, self).__init__(**kwargs)
self.compute_api = compute.API()
self.ext_mgr = ext_mgr
@wsgi.serializers(xml=MinimalServersTemplate)
def index(self, req):
"""Returns a list of server names and ids for a given user."""
try:
servers = self._get_servers(req, is_detail=False)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
@wsgi.serializers(xml=ServersTemplate)
def detail(self, req):
"""Returns a list of server details for a given user."""
try:
servers = self._get_servers(req, is_detail=True)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
    def _get_servers(self, req, is_detail):
        """Returns a list of servers, based on any search options specified.

        :param req: wsgi request; query string supplies the search options.
        :param is_detail: when True, faults are filled in and the detail
                          view is built; otherwise the index view.
        :raises: HTTPBadRequest for bad changes-since or unknown marker;
                 HTTPForbidden when a non-admin filters on deleted state.
        """
        search_opts = {}
        search_opts.update(req.GET)
        context = req.environ['nova.context']
        # Drop any query parameters this API/context is not allowed to
        # filter on before they reach compute_api.
        remove_invalid_options(context, search_opts,
                self._get_server_search_options())
        # Verify search by 'status' contains a valid status.
        # Convert it to filter by vm_state or task_state for compute_api.
        search_opts.pop('status', None)
        if 'status' in req.GET.keys():
            statuses = req.GET.getall('status')
            states = common.task_and_vm_state_from_status(statuses)
            vm_state, task_state = states
            if not vm_state and not task_state:
                # No states map to the requested status: nothing can match.
                return {'servers': []}
            search_opts['vm_state'] = vm_state
            # When we search by vm state, task state will return 'default'.
            # So we don't need task_state search_opt.
            if 'default' not in task_state:
                search_opts['task_state'] = task_state
        if 'changes-since' in search_opts:
            try:
                parsed = timeutils.parse_isotime(search_opts['changes-since'])
            except ValueError:
                msg = _('Invalid changes-since value')
                raise exc.HTTPBadRequest(explanation=msg)
            search_opts['changes-since'] = parsed
        # By default, compute's get_all() will return deleted instances.
        # If an admin hasn't specified a 'deleted' search option, we need
        # to filter out deleted instances by setting the filter ourselves.
        # ... Unless 'changes-since' is specified, because 'changes-since'
        # should return recently deleted images according to the API spec.
        if 'deleted' not in search_opts:
            if 'changes-since' not in search_opts:
                # No 'changes-since', so we only want non-deleted servers
                search_opts['deleted'] = False
        # Filtering on status=DELETED (vm_state ['deleted']) is admin-only.
        if search_opts.get("vm_state") == ['deleted']:
            if context.is_admin:
                search_opts['deleted'] = True
            else:
                msg = _("Only administrators may list deleted instances")
                raise exc.HTTPForbidden(explanation=msg)
        # If all tenants is passed with 0 or false as the value
        # then remove it from the search options. Nothing passed as
        # the value for all_tenants is considered to enable the feature
        all_tenants = search_opts.get('all_tenants')
        if all_tenants:
            try:
                if not strutils.bool_from_string(all_tenants, True):
                    del search_opts['all_tenants']
            except ValueError as err:
                raise exception.InvalidInput(six.text_type(err))
        if 'all_tenants' in search_opts:
            # Cross-tenant listing is policy-gated; the option itself is
            # consumed here and never passed to compute_api.
            policy.enforce(context, 'compute:get_all_tenants',
                           {'project_id': context.project_id,
                            'user_id': context.user_id})
            del search_opts['all_tenants']
        else:
            # Scope the query to the caller's project (or user when there
            # is no project on the context).
            if context.project_id:
                search_opts['project_id'] = context.project_id
            else:
                search_opts['user_id'] = context.user_id
        limit, marker = common.get_limit_and_marker(req)
        try:
            instance_list = self.compute_api.get_all(context,
                                                     search_opts=search_opts,
                                                     limit=limit,
                                                     marker=marker,
                                                     want_objects=True)
        except exception.MarkerNotFound:
            msg = _('marker [%s] not found') % marker
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.FlavorNotFound:
            # An unknown flavor filter simply yields an empty result set.
            LOG.debug("Flavor '%s' could not be found", search_opts['flavor'])
            # TODO(mriedem): Move to ObjectListBase.__init__ for empty lists.
            instance_list = objects.InstanceList(objects=[])
        if is_detail:
            instance_list.fill_faults()
            response = self._view_builder.detail(req, instance_list)
        else:
            response = self._view_builder.index(req, instance_list)
        req.cache_db_instances(instance_list)
        return response
def _get_server(self, context, req, instance_uuid):
"""Utility function for looking up an instance by uuid."""
try:
instance = self.compute_api.get(context, instance_uuid,
want_objects=True)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
req.cache_db_instance(instance)
return instance
def _check_string_length(self, value, name, max_length=None):
try:
if isinstance(value, six.string_types):
value = value.strip()
utils.check_string_length(value, name, min_length=1,
max_length=max_length)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
def _validate_server_name(self, value):
self._check_string_length(value, 'Server name', max_length=255)
def _get_injected_files(self, personality):
"""Create a list of injected files from the personality attribute.
At this time, injected_files must be formatted as a list of
(file_path, file_content) pairs for compatibility with the
underlying compute service.
"""
injected_files = []
for item in personality:
try:
path = item['path']
contents = item['contents']
except KeyError as key:
expl = _('Bad personality format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad personality format')
raise exc.HTTPBadRequest(explanation=expl)
if self._decode_base64(contents) is None:
expl = _('Personality content for %s cannot be decoded') % path
raise exc.HTTPBadRequest(explanation=expl)
injected_files.append((path, contents))
return injected_files
    def _get_requested_networks(self, requested_networks):
        """Create a list of requested networks from the networks attribute.

        :param requested_networks: list of dicts with optional
            'port'/'uuid'/'fixed_ip' entries from the request body.
        :returns: objects.NetworkRequestList
        :raises: HTTPBadRequest for malformed ports, uuids, fixed IPs,
                 duplicate networks (nova-network only), or a missing
                 'uuid' when no port is given.
        """
        networks = []
        network_uuids = []
        for network in requested_networks:
            request = objects.NetworkRequest()
            try:
                try:
                    # ValueError here comes from the NetworkRequest field
                    # setter rejecting a malformed port uuid.
                    request.port_id = network.get('port', None)
                except ValueError:
                    msg = _("Bad port format: port uuid is "
                            "not in proper format "
                            "(%s)") % network.get('port')
                    raise exc.HTTPBadRequest(explanation=msg)
                if request.port_id:
                    request.network_id = None
                    if not utils.is_neutron():
                        # port parameter is only for neutron v2.0
                        msg = _("Unknown argument : port")
                        raise exc.HTTPBadRequest(explanation=msg)
                else:
                    # Without a port, 'uuid' is mandatory (KeyError below
                    # is translated into a 400 by the outer handler).
                    request.network_id = network['uuid']
                if (not request.port_id and not
                        uuidutils.is_uuid_like(request.network_id)):
                    # Accept a "br-<uuid>"-style prefix and validate the
                    # trailing uuid portion.
                    br_uuid = request.network_id.split('-', 1)[-1]
                    if not uuidutils.is_uuid_like(br_uuid):
                        msg = _("Bad networks format: network uuid is "
                                "not in proper format "
                                "(%s)") % request.network_id
                        raise exc.HTTPBadRequest(explanation=msg)
                # fixed IP address is optional
                # if the fixed IP address is not provided then
                # it will use one of the available IP address from the network
                try:
                    request.address = network.get('fixed_ip', None)
                except ValueError:
                    msg = _("Invalid fixed IP address (%s)") % request.address
                    raise exc.HTTPBadRequest(explanation=msg)
                # duplicate networks are allowed only for neutron v2.0
                if (not utils.is_neutron() and request.network_id and
                        request.network_id in network_uuids):
                    expl = (_("Duplicate networks"
                              " (%s) are not allowed") %
                            request.network_id)
                    raise exc.HTTPBadRequest(explanation=expl)
                network_uuids.append(request.network_id)
                networks.append(request)
            except KeyError as key:
                expl = _('Bad network format: missing %s') % key
                raise exc.HTTPBadRequest(explanation=expl)
            except TypeError:
                expl = _('Bad networks format')
                raise exc.HTTPBadRequest(explanation=expl)
        return objects.NetworkRequestList(objects=networks)
# NOTE(vish): Without this regex, b64decode will happily
# ignore illegal bytes in the base64 encoded
# data.
B64_REGEX = re.compile('^(?:[A-Za-z0-9+\/]{4})*'
'(?:[A-Za-z0-9+\/]{2}=='
'|[A-Za-z0-9+\/]{3}=)?$')
def _decode_base64(self, data):
data = re.sub(r'\s', '', data)
if not self.B64_REGEX.match(data):
return None
try:
return base64.b64decode(data)
except TypeError:
return None
def _validate_user_data(self, user_data):
"""Check if the user_data is encoded properly."""
if not user_data:
return
if self._decode_base64(user_data) is None:
expl = _('Userdata content cannot be decoded')
raise exc.HTTPBadRequest(explanation=expl)
def _validate_access_ipv4(self, address):
if not utils.is_valid_ipv4(address):
expl = _('accessIPv4 is not proper IPv4 format')
raise exc.HTTPBadRequest(explanation=expl)
def _validate_access_ipv6(self, address):
if not utils.is_valid_ipv6(address):
expl = _('accessIPv6 is not proper IPv6 format')
raise exc.HTTPBadRequest(explanation=expl)
@wsgi.serializers(xml=ServerTemplate)
def show(self, req, id):
"""Returns server details by server id."""
try:
context = req.environ['nova.context']
instance = self.compute_api.get(context, id,
want_objects=True)
req.cache_db_instance(instance)
return self._view_builder.show(req, instance)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
    @wsgi.response(202)
    @wsgi.serializers(xml=FullServerTemplate)
    @wsgi.deserializers(xml=CreateDeserializer)
    def create(self, req, body):
        """Creates a new server for a given user.

        Validates the request body, gathers all extension-gated options
        (config drive, security groups, networks, key pairs, user data,
        availability zone, block device mappings, multiple-create,
        disk config, scheduler hints) and hands everything to
        compute_api.create, translating its exceptions into HTTP errors.
        """
        if not self.is_valid_body(body, 'server'):
            raise exc.HTTPUnprocessableEntity()
        context = req.environ['nova.context']
        server_dict = body['server']
        password = self._get_server_admin_password(server_dict)
        if 'name' not in server_dict:
            msg = _("Server name is not defined")
            raise exc.HTTPBadRequest(explanation=msg)
        name = server_dict['name']
        self._validate_server_name(name)
        name = name.strip()
        # May legitimately be '' when booting from a block device mapping.
        image_uuid = self._image_from_req_data(body)
        personality = server_dict.get('personality')
        config_drive = None
        if self.ext_mgr.is_loaded('os-config-drive'):
            config_drive = server_dict.get('config_drive')
        injected_files = []
        if personality:
            injected_files = self._get_injected_files(personality)
        sg_names = []
        if self.ext_mgr.is_loaded('os-security-groups'):
            security_groups = server_dict.get('security_groups')
            if security_groups is not None:
                sg_names = [sg['name'] for sg in security_groups
                            if sg.get('name')]
            # Fall back to the 'default' group and de-duplicate names.
            if not sg_names:
                sg_names.append('default')
            sg_names = list(set(sg_names))
        requested_networks = None
        if (self.ext_mgr.is_loaded('os-networks')
                or utils.is_neutron()):
            requested_networks = server_dict.get('networks')
        if requested_networks is not None:
            if not isinstance(requested_networks, list):
                expl = _('Bad networks format')
                raise exc.HTTPBadRequest(explanation=expl)
            requested_networks = self._get_requested_networks(
                requested_networks)
        # NOTE(review): the trailing comma builds a 1-tuple that is then
        # unpacked -- equivalent to a plain assignment; left as-is.
        (access_ip_v4, ) = server_dict.get('accessIPv4'),
        if access_ip_v4 is not None:
            self._validate_access_ipv4(access_ip_v4)
        (access_ip_v6, ) = server_dict.get('accessIPv6'),
        if access_ip_v6 is not None:
            self._validate_access_ipv6(access_ip_v6)
        try:
            flavor_id = self._flavor_id_from_req_data(body)
        except ValueError as error:
            msg = _("Invalid flavorRef provided.")
            raise exc.HTTPBadRequest(explanation=msg)
        # optional openstack extensions:
        key_name = None
        if self.ext_mgr.is_loaded('os-keypairs'):
            key_name = server_dict.get('key_name')
        user_data = None
        if self.ext_mgr.is_loaded('os-user-data'):
            user_data = server_dict.get('user_data')
            self._validate_user_data(user_data)
        availability_zone = None
        if self.ext_mgr.is_loaded('os-availability-zone'):
            availability_zone = server_dict.get('availability_zone')
        block_device_mapping = None
        block_device_mapping_v2 = None
        legacy_bdm = True
        if self.ext_mgr.is_loaded('os-volumes'):
            block_device_mapping = server_dict.get('block_device_mapping', [])
            for bdm in block_device_mapping:
                try:
                    block_device.validate_device_name(bdm.get("device_name"))
                    block_device.validate_and_default_volume_size(bdm)
                except exception.InvalidBDMFormat as e:
                    raise exc.HTTPBadRequest(explanation=e.format_message())
                if 'delete_on_termination' in bdm:
                    bdm['delete_on_termination'] = strutils.bool_from_string(
                        bdm['delete_on_termination'])
            if self.ext_mgr.is_loaded('os-block-device-mapping-v2-boot'):
                # Consider the new data format for block device mapping
                block_device_mapping_v2 = server_dict.get(
                    'block_device_mapping_v2', [])
                # NOTE (ndipanov): Disable usage of both legacy and new
                # block device format in the same request
                if block_device_mapping and block_device_mapping_v2:
                    expl = _('Using different block_device_mapping syntaxes '
                             'is not allowed in the same request.')
                    raise exc.HTTPBadRequest(explanation=expl)
                # Assume legacy format
                legacy_bdm = not bool(block_device_mapping_v2)
                try:
                    block_device_mapping_v2 = [
                        block_device.BlockDeviceDict.from_api(bdm_dict)
                        for bdm_dict in block_device_mapping_v2]
                except exception.InvalidBDMFormat as e:
                    raise exc.HTTPBadRequest(explanation=e.format_message())
        # Only one of the two lists can be non-empty at this point.
        block_device_mapping = (block_device_mapping or
                                block_device_mapping_v2)
        ret_resv_id = False
        # min_count and max_count are optional. If they exist, they may come
        # in as strings. Verify that they are valid integers and > 0.
        # Also, we want to default 'min_count' to 1, and default
        # 'max_count' to be 'min_count'.
        min_count = 1
        max_count = 1
        if self.ext_mgr.is_loaded('os-multiple-create'):
            ret_resv_id = server_dict.get('return_reservation_id', False)
            min_count = server_dict.get('min_count', 1)
            max_count = server_dict.get('max_count', min_count)
            # NOTE(review): these count validations only run when the
            # os-multiple-create extension is loaded (as written).
            try:
                min_count = utils.validate_integer(
                    min_count, "min_count", min_value=1)
                max_count = utils.validate_integer(
                    max_count, "max_count", min_value=1)
            except exception.InvalidInput as e:
                raise exc.HTTPBadRequest(explanation=e.format_message())
            if min_count > max_count:
                msg = _('min_count must be <= max_count')
                raise exc.HTTPBadRequest(explanation=msg)
        auto_disk_config = False
        if self.ext_mgr.is_loaded('OS-DCF'):
            auto_disk_config = server_dict.get('auto_disk_config')
        scheduler_hints = {}
        if self.ext_mgr.is_loaded('OS-SCH-HNT'):
            scheduler_hints = server_dict.get('scheduler_hints', {})
        check_server_group_quota = \
            self.ext_mgr.is_loaded('os-server-group-quotas')
        try:
            _get_inst_type = flavors.get_flavor_by_flavor_id
            inst_type = _get_inst_type(flavor_id, ctxt=context,
                                       read_deleted="no")
            (instances, resv_id) = self.compute_api.create(context,
                            inst_type,
                            image_uuid,
                            display_name=name,
                            display_description=name,
                            key_name=key_name,
                            metadata=server_dict.get('metadata', {}),
                            access_ip_v4=access_ip_v4,
                            access_ip_v6=access_ip_v6,
                            injected_files=injected_files,
                            admin_password=password,
                            min_count=min_count,
                            max_count=max_count,
                            requested_networks=requested_networks,
                            security_group=sg_names,
                            user_data=user_data,
                            availability_zone=availability_zone,
                            config_drive=config_drive,
                            block_device_mapping=block_device_mapping,
                            auto_disk_config=auto_disk_config,
                            scheduler_hints=scheduler_hints,
                            legacy_bdm=legacy_bdm,
                            check_server_group_quota=check_server_group_quota)
        except (exception.QuotaError,
                exception.PortLimitExceeded) as error:
            raise exc.HTTPForbidden(
                explanation=error.format_message(),
                headers={'Retry-After': 0})
        except exception.InvalidMetadataSize as error:
            raise exc.HTTPRequestEntityTooLarge(
                explanation=error.format_message())
        except exception.ImageNotFound as error:
            msg = _("Can not find requested image")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.FlavorNotFound as error:
            msg = _("Invalid flavorRef provided.")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.KeypairNotFound as error:
            msg = _("Invalid key_name provided.")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.ConfigDriveInvalidValue:
            msg = _("Invalid config_drive provided.")
            raise exc.HTTPBadRequest(explanation=msg)
        except messaging.RemoteError as err:
            msg = "%(err_type)s: %(err_msg)s" % {'err_type': err.exc_type,
                                                 'err_msg': err.value}
            raise exc.HTTPBadRequest(explanation=msg)
        except UnicodeDecodeError as error:
            msg = "UnicodeError: %s" % unicode(error)
            raise exc.HTTPBadRequest(explanation=msg)
        except (exception.ImageNotActive,
                exception.FlavorDiskTooSmall,
                exception.FlavorMemoryTooSmall,
                exception.NetworkNotFound,
                exception.PortNotFound,
                exception.FixedIpAlreadyInUse,
                exception.SecurityGroupNotFound,
                exception.InstanceUserDataTooLarge,
                exception.InstanceUserDataMalformed) as error:
            raise exc.HTTPBadRequest(explanation=error.format_message())
        except (exception.ImageNUMATopologyIncomplete,
                exception.ImageNUMATopologyForbidden,
                exception.ImageNUMATopologyAsymmetric,
                exception.ImageNUMATopologyCPUOutOfRange,
                exception.ImageNUMATopologyCPUDuplicates,
                exception.ImageNUMATopologyCPUsUnassigned,
                exception.ImageNUMATopologyMemoryOutOfRange) as error:
            raise exc.HTTPBadRequest(explanation=error.format_message())
        except (exception.PortInUse,
                exception.NoUniqueMatch) as error:
            raise exc.HTTPConflict(explanation=error.format_message())
        except exception.Invalid as error:
            # Catch-all for remaining validation failures; must stay last.
            raise exc.HTTPBadRequest(explanation=error.format_message())
        # If the caller wanted a reservation_id, return it
        if ret_resv_id:
            return wsgi.ResponseObject({'reservation_id': resv_id},
                xml=ServerMultipleCreateTemplate)
        req.cache_db_instances(instances)
        server = self._view_builder.create(req, instances[0])
        if CONF.enable_instance_password:
            server['server']['adminPass'] = password
        robj = wsgi.ResponseObject(server)
        return self._add_location(robj)
def _delete(self, context, req, instance_uuid):
instance = self._get_server(context, req, instance_uuid)
if CONF.reclaim_instance_interval:
try:
self.compute_api.soft_delete(context, instance)
except exception.InstanceInvalidState:
# Note(yufang521247): instance which has never been active
# is not allowed to be soft_deleted. Thus we have to call
# delete() to clean up the instance.
self.compute_api.delete(context, instance)
else:
self.compute_api.delete(context, instance)
    @wsgi.serializers(xml=ServerTemplate)
    def update(self, req, id, body):
        """Update server then pass on to version-specific controller.

        Accepts name, accessIPv4/6 and auto_disk_config updates; hostId
        and personality are explicitly rejected with HTTP 400.
        """
        if not self.is_valid_body(body, 'server'):
            raise exc.HTTPUnprocessableEntity()
        ctxt = req.environ['nova.context']
        update_dict = {}
        if 'name' in body['server']:
            name = body['server']['name']
            self._validate_server_name(name)
            update_dict['display_name'] = name.strip()
        if 'accessIPv4' in body['server']:
            access_ipv4 = body['server']['accessIPv4']
            if access_ipv4:
                self._validate_access_ipv4(access_ipv4)
            # Empty/blank values clear the stored address (None).
            update_dict['access_ip_v4'] = (
                access_ipv4 and access_ipv4.strip() or None)
        if 'accessIPv6' in body['server']:
            access_ipv6 = body['server']['accessIPv6']
            if access_ipv6:
                self._validate_access_ipv6(access_ipv6)
            update_dict['access_ip_v6'] = (
                access_ipv6 and access_ipv6.strip() or None)
        if 'auto_disk_config' in body['server']:
            auto_disk_config = strutils.bool_from_string(
                body['server']['auto_disk_config'])
            update_dict['auto_disk_config'] = auto_disk_config
        if 'hostId' in body['server']:
            msg = _("HostId cannot be updated.")
            raise exc.HTTPBadRequest(explanation=msg)
        if 'personality' in body['server']:
            msg = _("Personality cannot be updated.")
            raise exc.HTTPBadRequest(explanation=msg)
        try:
            instance = self.compute_api.get(ctxt, id,
                                            want_objects=True)
            req.cache_db_instance(instance)
            # Policy check happens after the lookup so a missing instance
            # still surfaces as 404.
            policy.enforce(ctxt, 'compute:update', instance)
            instance.update(update_dict)
            instance.save()
        except exception.NotFound:
            msg = _("Instance could not be found")
            raise exc.HTTPNotFound(explanation=msg)
        return self._view_builder.show(req, instance)
@wsgi.response(204)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('confirmResize')
def _action_confirm_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.confirm_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'confirmResize')
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('revertResize')
def _action_revert_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.revert_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
msg = _("Flavor used by the instance could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'revertResize')
return webob.Response(status_int=202)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('reboot')
def _action_reboot(self, req, id, body):
if 'reboot' in body and 'type' in body['reboot']:
if not isinstance(body['reboot']['type'], six.string_types):
msg = _("Argument 'type' for reboot must be a string")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
valid_reboot_types = ['HARD', 'SOFT']
reboot_type = body['reboot']['type'].upper()
if not valid_reboot_types.count(reboot_type):
msg = _("Argument 'type' for reboot is not HARD or SOFT")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
else:
msg = _("Missing argument 'type' for reboot")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.reboot(context, instance, reboot_type)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'reboot')
return webob.Response(status_int=202)
    def _resize(self, req, instance_id, flavor_id, **kwargs):
        """Begin the resize process with given instance/flavor.

        Translates compute_api.resize failures into HTTP errors.
        NOTE: the except clauses are order-sensitive -- exception.Invalid
        is a broad catch-all and must stay after its more specific
        subclasses.
        """
        context = req.environ["nova.context"]
        instance = self._get_server(context, req, instance_id)
        try:
            self.compute_api.resize(context, instance, flavor_id, **kwargs)
        except exception.QuotaError as error:
            raise exc.HTTPForbidden(
                explanation=error.format_message(),
                headers={'Retry-After': 0})
        except exception.FlavorNotFound:
            msg = _("Unable to locate requested flavor.")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.CannotResizeToSameFlavor:
            msg = _("Resize requires a flavor change.")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.CannotResizeDisk as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                'resize')
        except exception.ImageNotAuthorized:
            msg = _("You are not authorized to access the image "
                    "the instance was started with.")
            raise exc.HTTPUnauthorized(explanation=msg)
        except exception.ImageNotFound:
            msg = _("Image that the instance was started "
                    "with could not be found.")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.Invalid:
            msg = _("Invalid instance image.")
            raise exc.HTTPBadRequest(explanation=msg)
        except (exception.NoValidHost,
                exception.AutoDiskConfigDisabledByImage) as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        return webob.Response(status_int=202)
@wsgi.response(204)
def delete(self, req, id):
"""Destroys a server."""
try:
self._delete(req.environ['nova.context'], req, id)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'delete')
def _image_ref_from_req_data(self, data):
try:
return unicode(data['server']['imageRef'])
except (TypeError, KeyError):
msg = _("Missing imageRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
def _image_uuid_from_href(self, image_href):
if not image_href:
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
# If the image href was generated by nova api, strip image_href
# down to an id and use the default glance connection params
image_uuid = image_href.split('/').pop()
if not uuidutils.is_uuid_like(image_uuid):
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
return image_uuid
def _image_from_req_data(self, data):
"""Get image data from the request or raise appropriate
exceptions
If no image is supplied - checks to see if there is
block devices set and proper extesions loaded.
"""
image_ref = data['server'].get('imageRef')
bdm = data['server'].get('block_device_mapping')
bdm_v2 = data['server'].get('block_device_mapping_v2')
if (not image_ref and (
(bdm and self.ext_mgr.is_loaded('os-volumes')) or
(bdm_v2 and
self.ext_mgr.is_loaded('os-block-device-mapping-v2-boot')))):
return ''
else:
image_href = self._image_ref_from_req_data(data)
image_uuid = self._image_uuid_from_href(image_href)
return image_uuid
def _flavor_id_from_req_data(self, data):
try:
flavor_ref = data['server']['flavorRef']
except (TypeError, KeyError):
msg = _("Missing flavorRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
return common.get_id_from_href(flavor_ref)
    @wsgi.response(202)
    @wsgi.serializers(xml=FullServerTemplate)
    @wsgi.deserializers(xml=ActionDeserializer)
    @wsgi.action('changePassword')
    def _action_change_password(self, req, id, body):
        """Change the admin password on a server (202 on success).

        Expects body['changePassword']['adminPass']; responds 400 when
        it is missing and 501 when the driver cannot set passwords.
        """
        context = req.environ['nova.context']
        if ('changePassword' not in body
                or 'adminPass' not in body['changePassword']):
            msg = _("No adminPass was specified")
            raise exc.HTTPBadRequest(explanation=msg)
        # Validates the supplied password (400 on a non-string value).
        password = self._get_server_admin_password(body['changePassword'])
        server = self._get_server(context, req, id)
        try:
            self.compute_api.set_admin_password(context, server, password)
        except NotImplementedError:
            msg = _("Unable to set password on instance")
            raise exc.HTTPNotImplemented(explanation=msg)
        return webob.Response(status_int=202)
    def _validate_metadata(self, metadata):
        """Ensure that we can work with the metadata given.

        Only requires that *metadata* is dict-like (exposes iteritems,
        Python 2); raises HTTPBadRequest (400) otherwise.
        """
        try:
            # EAFP probe: attribute access fails on non-mappings.
            metadata.iteritems()
        except AttributeError:
            msg = _("Unable to parse metadata key/value pairs.")
            LOG.debug(msg)
            raise exc.HTTPBadRequest(explanation=msg)
    @wsgi.response(202)
    @wsgi.serializers(xml=FullServerTemplate)
    @wsgi.deserializers(xml=ActionDeserializer)
    @wsgi.action('resize')
    def _action_resize(self, req, id, body):
        """Resizes a given instance to the flavor size requested.

        Requires body['resize']['flavorRef']; responds 400 when it is
        missing or empty.
        """
        try:
            flavor_ref = str(body["resize"]["flavorRef"])
            if not flavor_ref:
                msg = _("Resize request has invalid 'flavorRef' attribute.")
                raise exc.HTTPBadRequest(explanation=msg)
        except (KeyError, TypeError):
            msg = _("Resize requests require 'flavorRef' attribute.")
            raise exc.HTTPBadRequest(explanation=msg)
        kwargs = {}
        # Forward the optional auto_disk_config flag unchanged.
        if 'auto_disk_config' in body['resize']:
            kwargs['auto_disk_config'] = body['resize']['auto_disk_config']
        return self._resize(req, id, flavor_ref, **kwargs)
    @wsgi.response(202)
    @wsgi.serializers(xml=FullServerTemplate)
    @wsgi.deserializers(xml=ActionDeserializer)
    @wsgi.action('rebuild')
    def _action_rebuild(self, req, id, body):
        """Rebuild an instance with the given attributes.

        Requires body['rebuild']['imageRef'].  Optional attributes
        (personality, name, access IPs, metadata, auto_disk_config,
        preserve_ephemeral) are validated and forwarded to the compute
        API.  Compute-layer failures are translated to 400/403/404/409
        or 413 responses.
        """
        body = body['rebuild']
        try:
            image_href = body["imageRef"]
        except (KeyError, TypeError):
            msg = _("Could not parse imageRef from request.")
            raise exc.HTTPBadRequest(explanation=msg)
        image_href = self._image_uuid_from_href(image_href)
        # Supplied password is validated; one is generated when absent.
        password = self._get_server_admin_password(body)
        context = req.environ['nova.context']
        instance = self._get_server(context, req, id)
        # Request attribute name -> compute API keyword argument name.
        attr_map = {
            'personality': 'files_to_inject',
            'name': 'display_name',
            'accessIPv4': 'access_ip_v4',
            'accessIPv6': 'access_ip_v6',
            'metadata': 'metadata',
            'auto_disk_config': 'auto_disk_config',
        }
        kwargs = {}
        # take the preserve_ephemeral value into account only when the
        # corresponding extension is active
        if (self.ext_mgr.is_loaded('os-preserve-ephemeral-rebuild')
                and 'preserve_ephemeral' in body):
            kwargs['preserve_ephemeral'] = strutils.bool_from_string(
                body['preserve_ephemeral'], strict=True)
        if 'accessIPv4' in body:
            self._validate_access_ipv4(body['accessIPv4'])
        if 'accessIPv6' in body:
            self._validate_access_ipv6(body['accessIPv6'])
        if 'name' in body:
            self._validate_server_name(body['name'])
        # Copy over only the attributes actually present in the request.
        for request_attribute, instance_attribute in attr_map.items():
            try:
                kwargs[instance_attribute] = body[request_attribute]
            except (KeyError, TypeError):
                pass
        self._validate_metadata(kwargs.get('metadata', {}))
        if 'files_to_inject' in kwargs:
            personality = kwargs.pop('files_to_inject')
            files_to_inject = self._get_injected_files(personality)
        else:
            files_to_inject = None
        try:
            self.compute_api.rebuild(context,
                                     instance,
                                     image_href,
                                     password,
                                     files_to_inject=files_to_inject,
                                     **kwargs)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                                  'rebuild')
        except exception.InstanceNotFound:
            msg = _("Instance could not be found")
            raise exc.HTTPNotFound(explanation=msg)
        except exception.InvalidMetadataSize as error:
            raise exc.HTTPRequestEntityTooLarge(
                explanation=error.format_message())
        except exception.ImageNotFound:
            msg = _("Cannot find image for rebuild")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.QuotaError as error:
            raise exc.HTTPForbidden(explanation=error.format_message())
        except (exception.ImageNotActive,
                exception.FlavorDiskTooSmall,
                exception.FlavorMemoryTooSmall,
                exception.InvalidMetadata,
                exception.AutoDiskConfigDisabledByImage) as error:
            raise exc.HTTPBadRequest(explanation=error.format_message())
        # Re-fetch so the view reflects post-rebuild instance state.
        instance = self._get_server(context, req, id)
        view = self._view_builder.show(req, instance)
        # Add on the adminPass attribute since the view doesn't do it
        # unless instance passwords are disabled
        if CONF.enable_instance_password:
            view['server']['adminPass'] = password
        robj = wsgi.ResponseObject(view)
        return self._add_location(robj)
    @wsgi.response(202)
    @wsgi.serializers(xml=FullServerTemplate)
    @wsgi.deserializers(xml=ActionDeserializer)
    @wsgi.action('createImage')
    @common.check_snapshots_enabled
    def _action_create_image(self, req, id, body):
        """Snapshot a server instance.

        Requires body['createImage']['name']; optional metadata is
        checked against image-metadata quota.  Volume-backed instances
        take a snapshot of their volumes; others take a regular image
        snapshot.  Responds 202 with a Location header pointing at the
        new image.
        """
        context = req.environ['nova.context']
        entity = body.get("createImage", {})
        image_name = entity.get("name")
        if not image_name:
            msg = _("createImage entity requires name attribute")
            raise exc.HTTPBadRequest(explanation=msg)
        props = {}
        metadata = entity.get('metadata', {})
        common.check_img_metadata_properties_quota(context, metadata)
        try:
            props.update(metadata)
        except ValueError:
            msg = _("Invalid metadata")
            raise exc.HTTPBadRequest(explanation=msg)
        instance = self._get_server(context, req, id)
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        try:
            if self.compute_api.is_volume_backed_instance(context, instance,
                                                          bdms):
                img = instance['image_ref']
                if not img:
                    # No backing image: derive properties from the root
                    # volume's block device mapping instead.
                    properties = bdms.root_metadata(
                        context, self.compute_api.image_api,
                        self.compute_api.volume_api)
                    image_meta = {'properties': properties}
                else:
                    image_meta = self.compute_api.image_api.get(context, img)
                image = self.compute_api.snapshot_volume_backed(
                    context,
                    instance,
                    image_meta,
                    image_name,
                    extra_properties=props)
            else:
                image = self.compute_api.snapshot(context,
                                                  instance,
                                                  image_name,
                                                  extra_properties=props)
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                                  'createImage')
        except exception.Invalid as err:
            raise exc.HTTPBadRequest(explanation=err.format_message())
        # build location of newly-created image entity
        image_id = str(image['id'])
        url_prefix = self._view_builder._update_glance_link_prefix(
            req.application_url)
        image_ref = os.path.join(url_prefix,
                                 context.project_id,
                                 'images',
                                 image_id)
        resp = webob.Response(status_int=202)
        resp.headers['Location'] = image_ref
        return resp
def _get_server_admin_password(self, server):
"""Determine the admin password for a server on creation."""
try:
password = server['adminPass']
self._validate_admin_password(password)
except KeyError:
password = utils.generate_password()
except ValueError:
raise exc.HTTPBadRequest(explanation=_("Invalid adminPass"))
return password
def _validate_admin_password(self, password):
if not isinstance(password, six.string_types):
raise ValueError()
def _get_server_search_options(self):
"""Return server search options allowed by non-admin."""
return ('reservation_id', 'name', 'status', 'image', 'flavor',
'ip', 'changes-since', 'all_tenants')
def create_resource(ext_mgr):
    """Build the WSGI resource wrapping the servers Controller."""
    controller = Controller(ext_mgr)
    return wsgi.Resource(controller)
def remove_invalid_options(context, search_options, allowed_search_options):
    """Remove search options that are not valid for non-admin API/context.

    Admin contexts keep every option; otherwise options not present in
    *allowed_search_options* are removed from *search_options* in place.
    """
    if context.is_admin:
        # Admins may filter on any option.
        return
    unrecognized = [option for option in search_options
                    if option not in allowed_search_options]
    LOG.debug("Removing options '%s' from query",
              ", ".join(unrecognized))
    for option in unrecognized:
        search_options.pop(option, None)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow collective Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import gen_collective_ops
def all_reduce(t,
               group_size,
               group_key,
               instance_key,
               merge_op='Add',
               final_op='Id',
               subdiv_offsets=(0,),
               communication_hint='auto',
               timeout=0):
  """Collectively reduces `t` with matching tensors on other devices.

  Args:
    t: the tensor contributed to the reduction.
    group_size: total number of participating tensors, one per device;
      must be a positive integer.
    group_key: integer identifying the group of devices.
    instance_key: integer identifying the participating group of Ops.
    merge_op: name of the binary Op combining partial reductions.
    final_op: name of the unary Op applied to the fully reduced value;
      'Id' means no operation.
    subdiv_offsets: integer offsets into the tensor where independent
      subdivisions begin; use [0] for no subdivision.
    communication_hint: preferred collective implementation (`auto`,
      `ring`, or `nccl`); the runtime may fall back to another.
    timeout: float seconds; a non-zero value arms a completion timeout
      that raises DeadlineExceededError on expiry (experimental).

  Returns:
    An Op implementing the distributed reduction.

  Raises:
    ValueError: if `group_size` is less than 1.
  """
  if group_size < 1:
    raise ValueError('Parameter group_size to all_reduce must be at least 1.')
  hint = communication_hint.lower()
  return gen_collective_ops.collective_reduce(
      t,
      group_size=group_size,
      group_key=group_key,
      instance_key=instance_key,
      merge_op=merge_op,
      final_op=final_op,
      subdiv_offsets=subdiv_offsets,
      communication_hint=hint,
      timeout_seconds=timeout)
def all_reduce_v2(t,
                  group_size,
                  group_key,
                  instance_key,
                  merge_op='Add',
                  final_op='Id',
                  communication_hint='auto',
                  timeout=0,
                  ordering_token=None):
  """Collectively reduces `t` with matching tensors on other devices (v2).

  Args:
    t: the tensor contributed to the reduction.
    group_size: int32 tensor; total number of participating tensors, one
      per device; should be positive.
    group_key: int32 tensor identifying the group of devices.
    instance_key: int32 tensor identifying the participating group of Ops.
    merge_op: name of the binary Op combining partial reductions.
    final_op: name of the unary Op applied to the fully reduced value;
      'Id' means no operation.
    communication_hint: preferred collective implementation (`auto`,
      `ring`, or `nccl`); the runtime may fall back to another.
    timeout: float seconds; a non-zero value arms a completion timeout
      that raises DeadlineExceededError on expiry (experimental).
    ordering_token: optional resource tensor passed as an extra input;
      unused by the kernel but lets AutoControlDependency order
      collectives via control dependencies.

  Returns:
    An Op implementing the distributed reduction.
  """
  tokens = [] if ordering_token is None else [ordering_token]
  return gen_collective_ops.collective_reduce_v2(
      t,
      group_size=group_size,
      group_key=group_key,
      instance_key=instance_key,
      merge_op=merge_op,
      final_op=final_op,
      communication_hint=communication_hint.lower(),
      timeout_seconds=timeout,
      ordering_token=tokens)
def all_gather(t,
               group_size,
               group_key,
               instance_key,
               communication_hint='auto',
               timeout=0):
  """Collectively concatenates tensors across devices along axis 0.

  Args:
    t: the tensor contributed to the accumulation.
    group_size: total number of participating tensors, one per device;
      must be a positive integer.
    group_key: integer identifying the group of devices.
    instance_key: integer identifying the participating group of Ops.
    communication_hint: preferred collective implementation (`auto`,
      `ring`, or `nccl`); the runtime may fall back to another.
    timeout: float seconds; a non-zero value arms a completion timeout
      that raises DeadlineExceededError on expiry (experimental).

  Returns:
    An Op implementing the distributed operation.

  Raises:
    ValueError: if `group_size` is less than 1.
  """
  if group_size < 1:
    raise ValueError('Parameter group_size to all_gather must be at least 1.')
  hint = communication_hint.lower()
  return gen_collective_ops.collective_gather(
      t,
      shape=[0],
      group_size=group_size,
      group_key=group_key,
      instance_key=instance_key,
      communication_hint=hint,
      timeout_seconds=timeout)
def all_gather_v2(t,
                  group_size,
                  group_key,
                  instance_key,
                  communication_hint='auto',
                  timeout=0,
                  ordering_token=None):
  """Collectively concatenates tensors across devices along axis 0 (v2).

  Args:
    t: the tensor contributed to the accumulation.
    group_size: int32 tensor; total number of participating tensors, one
      per device; should be positive.
    group_key: int32 tensor identifying the group of devices.
    instance_key: int32 tensor identifying the participating group of Ops.
    communication_hint: preferred collective implementation (`auto`,
      `ring`, or `nccl`); the runtime may fall back to another.
    timeout: float seconds; a non-zero value arms a completion timeout
      that raises DeadlineExceededError on expiry (experimental).
    ordering_token: optional resource tensor passed as an extra input;
      unused by the kernel but lets AutoControlDependency order
      collectives via control dependencies.

  Returns:
    An Op implementing the distributed operation.
  """
  tokens = [] if ordering_token is None else [ordering_token]
  return gen_collective_ops.collective_gather_v2(
      t,
      group_size=group_size,
      group_key=group_key,
      instance_key=instance_key,
      communication_hint=communication_hint.lower(),
      timeout_seconds=timeout,
      ordering_token=tokens)
def broadcast_send(t,
                   shape,
                   dtype,
                   group_size,
                   group_key,
                   instance_key,
                   communication_hint='auto',
                   timeout=0):
  """Broadcasts one tensor to a group of others, across devices.

  The `shape` and `dtype` arguments are deliberately redundant with `t`:
  the receive side must know the most specific shape/type ahead of time
  to allocate memory and run shape inference, and declaring them here
  makes that commitment explicit while keeping send/receive call syntax
  nearly identical for tool-driven generation.

  Args:
    t: the tensor to be sent.
    shape: declared shape of `t`; must match `t.shape`.
    dtype: declared dtype of `t`; must match `t.dtype`.
    group_size: one plus the number of receivers, i.e. the total device
      count; each tensor resides on a different device.
    group_key: integer identifying the group of devices.
    instance_key: integer identifying the participating group of Ops.
    communication_hint: preferred collective implementation (`auto`,
      `ring`, or `nccl`); the runtime may fall back to another.
    timeout: float seconds; a non-zero value arms a completion timeout
      that raises DeadlineExceededError on expiry (experimental).

  Returns:
    An Op implementing the distributed broadcast send.

  Raises:
    ValueError: if any of the input parameter constraints are not met.
  """
  if group_size <= 1:
    raise ValueError(
        'Parameter group_size to broadcast_send must be at least 2.')
  if t.shape != shape:
    raise ValueError(
        'Shape of broadcast_send tensor not equal to declared shape')
  if t.dtype != dtype:
    raise ValueError(
        'Type of broadcast_send tensor not equal to declared type')
  hint = communication_hint.lower()
  return gen_collective_ops.collective_bcast_send(
      t,
      shape=shape,
      group_size=group_size,
      group_key=group_key,
      instance_key=instance_key,
      communication_hint=hint,
      timeout_seconds=timeout)
def broadcast_send_v2(t,
                      group_size,
                      group_key,
                      instance_key,
                      communication_hint='auto',
                      timeout=0):
  """Broadcasts one tensor to a group of others, across devices (v2).

  Args:
    t: the tensor to be sent.
    group_size: int32 tensor; one plus the number of receivers, i.e. the
      total device count; each tensor resides on a different device.
    group_key: int32 tensor identifying the group of devices.
    instance_key: int32 tensor identifying the participating group of Ops.
    communication_hint: preferred collective implementation (`auto`,
      `ring`, or `nccl`); the runtime may fall back to another.
    timeout: a non-zero value arms a completion timeout, in seconds,
      that raises DeadlineExceededError on expiry (experimental).

  Returns:
    An Op implementing the distributed broadcast send.
  """
  hint = communication_hint.lower()
  return gen_collective_ops.collective_bcast_send_v2(
      t,
      group_size=group_size,
      group_key=group_key,
      instance_key=instance_key,
      communication_hint=hint,
      timeout_seconds=timeout)
def broadcast_recv(shape,
                   dtype,
                   group_size,
                   group_key,
                   instance_key,
                   communication_hint='auto',
                   timeout=0):
  """Receives a broadcasts tensor, across devices.

  Args:
    shape: Shape of the tensor to be received.
    dtype: Type of the tensor to be received.
    group_size: one plus the number of receiving tensors, i.e. the total
      number of devices participating. Each tensor must reside on a
      different device.
    group_key: an integer identifying the group of devices.
    instance_key: an integer identifying the participating group of Ops.
    communication_hint: preferred collective communication. The
      implementation may fall back to another mechanism. Options include
      `auto`, `ring`, and `nccl`.
    timeout: If set to a non zero, set a completion timeout to detect
      staleness. If the timer goes off, a DeadlineExceededError is raised.
      The timeout value in seconds. This feature is experimental.

  Returns:
    An Op implementing the broadcast receive.

  Raises:
    ValueError: if any of the input parameter constraints are not met.
  """
  if group_size <= 1:
    # Bug fix: the message previously named broadcast_send (copy-paste).
    raise ValueError(
        'Parameter group_size to broadcast_recv must be at least 2.')
  return gen_collective_ops.collective_bcast_recv(
      shape=shape,
      T=dtype,
      group_size=group_size,
      group_key=group_key,
      instance_key=instance_key,
      communication_hint=communication_hint.lower(),
      timeout_seconds=timeout)
def broadcast_recv_v2(shape,
                      dtype,
                      group_size,
                      group_key,
                      instance_key,
                      communication_hint='auto',
                      timeout=0):
  """Receives a broadcast tensor, across devices (v2).

  Args:
    shape: int tensor; shape of the tensor to be received.
    dtype: type of the tensor to be received.
    group_size: int32 tensor; one plus the number of receivers, i.e. the
      total device count; each tensor resides on a different device.
    group_key: int32 tensor identifying the group of devices.
    instance_key: int32 tensor identifying the participating group of Ops.
    communication_hint: preferred collective implementation (`auto`,
      `ring`, or `nccl`); the runtime may fall back to another.
    timeout: a non-zero value arms a completion timeout, in seconds,
      that raises DeadlineExceededError on expiry (experimental).

  Returns:
    An Op implementing the broadcast receive.
  """
  hint = communication_hint.lower()
  return gen_collective_ops.collective_bcast_recv_v2(
      T=dtype,
      group_size=group_size,
      group_key=group_key,
      instance_key=instance_key,
      shape=shape,
      communication_hint=hint,
      timeout_seconds=timeout)
|
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2017, SLAC National Laboratory / Kisensum Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor SLAC / Kisensum,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# SLAC / Kisensum. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# }}}
import bisect
from datetime import timedelta
import fnmatch
import gevent
import logging
import sys
from zmq.utils import jsonapi
from volttron.platform.vip.agent import Agent, RPC
from volttron.platform.agent import utils
from driver import DriverAgent
from driver_locks import configure_socket_lock, configure_publish_lock
from interfaces import DriverInterfaceError
utils.setup_logging()
_log = logging.getLogger(__name__)
__version__ = '1.0'
class OverrideError(DriverInterfaceError):
    """Raised when a set/revert operation is blocked by an active override."""
    pass
def simulation_driver_agent(config_path, **kwargs):
    """Construct a SimulationDriverAgent from a config file.

    Explicit keyword arguments take precedence over values in the
    config file at *config_path*.
    """
    def get_config(name, default=None):
        # Prefer an explicit kwarg (popping it so it is not also passed
        # through to the constructor), falling back to the config file,
        # then to the supplied default.
        try:
            return kwargs.pop(name)
        except KeyError:
            return config.get(name, default)
    # 'config' is referenced by the closure above; it is bound here,
    # before get_config is ever called.
    config = utils.load_config(config_path)
    return SimulationDriverAgent(get_config('driver_config_list'),
                                 get_config('driver_scrape_interval', 0.02),
                                 heartbeat_autostart=True,
                                 **kwargs)
class SimulationDriverAgent(Agent):
"""
Driver Agent for simulation interfaces.
SimulationDriverAgent is a simplified copy of MasterDriverAgent.
Its strategy for scheduling device-driver scrapes attempts to match that of the Master Driver.
Please see services.core.MasterDriverAgent.master_driver.agent.py for additional commentary
about this agent's implementation.
"""
    def __init__(self, driver_config_list, driver_scrape_interval=0.02, **kwargs):
        """Set up driver bookkeeping and subscribe to config-store events.

        Args:
            driver_config_list: initial driver configuration list (not
                referenced in this constructor; drivers arrive via the
                config store subscriptions below).
            driver_scrape_interval: seconds between successive device
                scrape time slots.
        """
        super(SimulationDriverAgent, self).__init__(**kwargs)
        # Device topic -> running DriverAgent instance.
        self.instances = {}
        try:
            self.driver_scrape_interval = float(driver_scrape_interval)
        except ValueError:
            # Fall back to the documented default on a malformed value.
            self.driver_scrape_interval = 0.02
        # Scrape time slots returned by stopped drivers, kept sorted.
        self.freed_time_slots = []
        # Lower-cased topic -> original topic spelling.
        self._name_map = {}
        # Lower-cased device topics currently under override.
        self._override_devices = set()
        # None until loaded from the config store on the first NEW config.
        self._override_patterns = None
        # pattern -> None (indefinite) or (scheduled event, end time).
        self._override_interval_events = {}
        self.default_config = {"driver_scrape_interval": driver_scrape_interval}
        self.vip.config.set_default("config", self.default_config)
        self.vip.config.subscribe(self.configure_main, actions=["NEW", "UPDATE"], pattern="config")
        self.vip.config.subscribe(self.update_driver, actions=["NEW", "UPDATE"], pattern="devices/*")
        self.vip.config.subscribe(self.remove_driver, actions="DELETE", pattern="devices/*")
    def configure_main(self, config_name, action, contents):
        """Config-store callback for the main 'config' entry.

        On the first (NEW) configuration the process-wide socket and
        publish locks are initialized and persisted overrides reloaded;
        a fatal configuration error aborts the agent.  Every call may
        update the scrape schedule.
        """
        config = self.default_config.copy()
        config.update(contents)
        if action == "NEW":
            try:
                configure_socket_lock()
                configure_publish_lock(10000)
            except ValueError as e:
                _log.error("ERROR PROCESSING STARTUP CRITICAL CONFIGURATION SETTINGS: {}".format(e))
                _log.error("SIMULATION DRIVER SHUTTING DOWN")
                sys.exit(1)
            self.update_override_patterns()
        self.update_scrape_schedule(config)
    def update_override_patterns(self):
        """Load persisted override patterns from the config store, once.

        Runs only while the cached pattern set is still None.  Re-arms
        overrides whose end time lies in the future and re-creates
        indefinite ones (stored as "0.0"); expired entries are dropped.
        """
        if self._override_patterns is None:
            try:
                values = self.vip.config.get("override_patterns")
                # Stored value is a JSON-encoded {pattern: end_time} map.
                values = jsonapi.loads(values)
                if isinstance(values, dict):
                    self._override_patterns = set()
                    for pattern, end_time in values.items():
                        # check the end_time
                        now = utils.get_aware_utc_now()
                        # If end time is indefinite, set override with indefinite duration
                        if end_time == "0.0":
                            self._set_override_on(pattern, 0.0, from_config_store=True)
                        else:
                            end_time = utils.parse_timestamp_string(end_time)
                            # If end time > current time, set override with new duration
                            if end_time > now:
                                delta = end_time - now
                                self._set_override_on(pattern, delta.total_seconds(), from_config_store=True)
                else:
                    self._override_patterns = set()
            except KeyError:
                # Nothing persisted yet.
                self._override_patterns = set()
            except ValueError:
                _log.error("Override patterns is not set correctly in config store")
                self._override_patterns = set()
def update_scrape_schedule(self, config):
try:
driver_scrape_interval = float(config["driver_scrape_interval"])
except ValueError as e:
_log.error("ERROR PROCESSING CONFIGURATION: {}".format(e))
_log.error("Master driver scrape interval settings unchanged")
driver_scrape_interval = None
if self.driver_scrape_interval != driver_scrape_interval:
self.driver_scrape_interval = driver_scrape_interval
_log.info("Setting time delta between driver device scrapes to " + str(driver_scrape_interval))
# Reset all scrape schedules
self.freed_time_slots = []
time_slot = 0
for driver in self.instances.itervalues():
driver.update_scrape_schedule(time_slot, self.driver_scrape_interval)
time_slot += 1
    def stop_driver(self, device_topic):
        """Stop the driver for *device_topic* and reclaim its time slot."""
        # _name_map keys are lower-cased; fall back to the given topic.
        real_name = self._name_map.pop(device_topic.lower(), device_topic)
        driver = self.instances.pop(real_name, None)
        if driver:
            _log.info("Stopping driver: {}".format(real_name))
            try:
                driver.core.stop(timeout=5.0)
            except StandardError as e:
                # StandardError is Python 2 only; broad on purpose so one
                # misbehaving driver cannot block shutdown of the rest.
                _log.error("Failure during {} driver shutdown: {}".format(real_name, e))
            # Keep freed slots sorted so the lowest is reused first.
            bisect.insort(self.freed_time_slots, driver.time_slot)
    def update_driver(self, config_name, action, contents):
        """Config-store NEW/UPDATE callback: (re)start a device driver.

        Any existing driver for the topic is stopped first; the new one
        takes the lowest freed scrape time slot, or the next fresh slot.
        """
        topic = self.derive_device_topic(config_name)
        self.stop_driver(topic)
        slot = self.freed_time_slots.pop(0) if self.freed_time_slots else len(self.instances)
        _log.info("Starting driver: {}".format(topic))
        driver = DriverAgent(self, contents, slot, self.driver_scrape_interval, topic)
        gevent.spawn(driver.core.run)
        self.instances[topic] = driver
        self._name_map[topic.lower()] = topic
        self._update_override_state(topic, 'add')
def remove_driver(self, config_name, action, contents):
topic = self.derive_device_topic(config_name)
self.stop_driver(topic)
self._update_override_state(topic, 'remove')
@staticmethod
def derive_device_topic(config_name):
_, topic = config_name.split('/', 1)
return topic
@RPC.export
def get_point(self, path, point_name, **kwargs):
return self.instances[path].get_point(point_name, **kwargs)
@RPC.export
def set_point(self, path, point_name, value, **kwargs):
if path in self._override_devices:
raise OverrideError(
"Cannot set point on device {} since global override is set".format(path))
else:
return self.instances[path].set_point(point_name, value, **kwargs)
@RPC.export
def scrape_all(self, path):
return self.instances[path].scrape_all()
@RPC.export
def get_multiple_points(self, path, point_names, **kwargs):
return self.instances[path].get_multiple_points(point_names, **kwargs)
@RPC.export
def set_multiple_points(self, path, point_names_values, **kwargs):
if path in self._override_devices:
raise OverrideError(
"Cannot set point on device {} since global override is set".format(path))
else:
return self.instances[path].set_multiple_points(point_names_values, **kwargs)
@RPC.export
def heart_beat(self):
_log.debug("sending heartbeat")
for device in self.instances.values():
device.heart_beat()
@RPC.export
def revert_point(self, path, point_name, **kwargs):
if path in self._override_devices:
raise OverrideError(
"Cannot revert point on device {} since global override is set".format(path))
else:
self.instances[path].revert_point(point_name, **kwargs)
@RPC.export
def revert_device(self, path, **kwargs):
if path in self._override_devices:
raise OverrideError(
"Cannot revert device {} since global override is set".format(path))
else:
self.instances[path].revert_all(**kwargs)
@RPC.export
def set_override_on(self, pattern, duration=0.0, failsafe_revert=True, staggered_revert=False):
self._set_override_on(pattern, duration, failsafe_revert, staggered_revert)
def _set_override_on(self,
pattern,
duration=0.0,
failsafe_revert=True,
staggered_revert=False,
from_config_store=False):
stagger_interval = 0.05 # sec
pattern = pattern.lower()
# Add to override patterns set
self._override_patterns.add(pattern)
device_topic_actual = self.instances.keys()
i = 0
for name in device_topic_actual:
name = name.lower()
i += 1
if fnmatch.fnmatch(name, pattern):
# If revert to default state is needed
if failsafe_revert:
if staggered_revert:
self.core.spawn_later(i*stagger_interval, self.instances[name].revert_all())
else:
self.core.spawn(self.instances[name].revert_all())
# Set override
self._override_devices.add(name)
# Set timer for interval of override condition
config_update = self._update_override_interval(duration, pattern)
if config_update and not from_config_store:
# Update config store
patterns = dict()
for pat in self._override_patterns:
if self._override_interval_events[pat] is None:
patterns[pat] = str(0.0)
else:
evt, end_time = self._override_interval_events[pat]
patterns[pat] = utils.format_timestamp(end_time)
self.vip.config.set("override_patterns", jsonapi.dumps(patterns))
    @RPC.export
    def set_override_off(self, pattern):
        """RPC: remove override pattern *pattern*.

        Raises OverrideError when the pattern is not currently set.
        """
        return self._set_override_off(pattern)
    @RPC.export
    def get_override_devices(self):
        """RPC: list the device topics currently under override."""
        return list(self._override_devices)
@RPC.export
def clear_overrides(self):
for pattern, evt in self._override_interval_events.items():
if evt is not None:
evt[0].cancel()
self._override_interval_events.clear()
self._override_devices.clear()
self._override_patterns.clear()
self.vip.config.set("override_patterns", {})
    @RPC.export
    def get_override_patterns(self):
        """RPC: list the currently active override patterns."""
        return list(self._override_patterns)
def _set_override_off(self, pattern):
    """Remove override *pattern*, rebuild the device set, persist the map.

    :raises OverrideError: if *pattern* is not an active override pattern.
    """
    pattern = pattern.lower()
    # If pattern exactly matches
    if pattern in self._override_patterns:
        self._override_patterns.discard(pattern)
        # Cancel any pending override events
        self._cancel_override_events(pattern)
        self._override_devices.clear()
        patterns = dict()
        # Build override devices list again
        for pat in self._override_patterns:
            for device in self.instances:
                device = device.lower()
                if fnmatch.fnmatch(device, pat):
                    self._override_devices.add(device)
            if self._override_interval_events[pat] is None:
                # Indefinite override: persisted as duration "0.0"
                patterns[pat] = str(0.0)
            else:
                evt, end_time = self._override_interval_events[pat]
                patterns[pat] = utils.format_timestamp(end_time)
        # NOTE(review): this path stores a JSON string while clear_overrides()
        # stores a plain dict -- confirm which form the config store expects.
        self.vip.config.set("override_patterns", jsonapi.dumps(patterns))
    else:
        _log.error("Override Pattern did not match!")
        raise OverrideError(
            "Pattern {} does not exist in list of override patterns".format(pattern))
def _update_override_interval(self, interval, pattern):
    """(Re)schedule the timed expiry event for override *pattern*.

    A stored value of None marks an indefinite override; otherwise the
    entry is an (event, end_time) tuple.

    :returns: True when stored state changed (caller then persists the
        pattern map), False when nothing needed updating.
    """
    if interval <= 0.0:  # indicative of indefinite duration
        if pattern in self._override_interval_events:
            # If override duration is indefinite, do nothing
            if self._override_interval_events[pattern] is None:
                return False
            else:
                # Cancel the old event
                evt = self._override_interval_events.pop(pattern)
                evt[0].cancel()
        self._override_interval_events[pattern] = None
        return True
    else:
        override_start = utils.get_aware_utc_now()
        override_end = override_start + timedelta(seconds=interval)
        if pattern in self._override_interval_events:
            evt = self._override_interval_events[pattern]
            # If event is indefinite or greater than new end time, do nothing
            if evt is None or override_end < evt[1]:
                return False
            else:
                evt = self._override_interval_events.pop(pattern)
                evt[0].cancel()
        # Schedule new override event; _cancel_override fires at expiry.
        event = self.core.schedule(override_end, self._cancel_override, pattern)
        self._override_interval_events[pattern] = (event, override_end)
        return True
def _cancel_override_events(self, pattern):
if pattern in self._override_interval_events:
# Cancel the override cancellation timer event
evt = self._override_interval_events.pop(pattern, None)
if evt is not None:
evt[0].cancel()
def _cancel_override(self, pattern):
    # Timer callback: expire the override by routing through the normal
    # turn-off path so device lists and the config store stay consistent.
    self._set_override_off(pattern)
def _update_override_state(self, device, state):
device = device.lower()
if state == 'add':
# If device falls under the existing overriden patterns, then add it to list of overriden devices.
for pattern in self._override_patterns:
if fnmatch.fnmatch(device, pattern):
self._override_devices.add(device)
return
else:
# If device is in list of overriden devices, remove it.
if device in self._override_devices:
self._override_devices.remove(device)
def main(argv=sys.argv):
    """Main method called to start the agent."""
    # Hands control to the VOLTTRON runner with this module's agent factory
    # (simulation_driver_agent is defined elsewhere in this file).
    utils.vip_main(simulation_driver_agent, identity='simulation.driver', version=__version__)
if __name__ == '__main__':
    # Entry point for script
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        # Swallow Ctrl-C so the process exits quietly without a traceback.
        pass
|
|
# coding: utf-8
from __future__ import (absolute_import,
division, print_function)
import six
# TODO : make a s object for strings with split(regex|iterable),
# replace(regex|iterable)
# TODO : flags can be passed as strings. Ex: s.search('regex', flags='ig')
# TODO : make s.search(regex) return a wrapper with __bool__ evaluating to
# false if no match instead of None and allow default value for group(x)
# also allow match[1] to return group(1) and match['foo'] to return
# groupdict['foo']
# TODO .groups would be a g() object
# TODO : add encoding detection, fuzzy_decode() to make the best of shitty
# decoding, unidecode, slug, etc,
# f() for format, if no args are passed, it uses local. Also allow f >> ""
# t() or t >> for a jinja2 template (optional dependency ?)
# something for translation ?
# TODO: match.__repr__ should show match, groups, groupsdict in summary
import re
import inspect
from textwrap import dedent
import chardet
try:
from formatizer import LiteralFormatter
FORMATTER = LiteralFormatter()
except ImportError:
FORMATTER = str
from six import with_metaclass
from past.builtins import basestring
from .g import g
from .utils import ensure_tuple
# TODO: make sure we copy all methods from str but return s()
try:
    # Python 2/3 compatibility: Python 3 has no ``unicode`` builtin, so
    # alias it to ``str`` there; on Python 2 this is a no-op.
    unicode = unicode
except NameError:
    unicode = str
REGEX_FLAGS = {
    # Single-letter aliases for the stdlib ``re`` flags; lets callers pass
    # flags as a compact string (e.g. 'iu') to StringWrapper._parse_flags.
    'm': re.MULTILINE,
    'x': re.VERBOSE,
    'v': re.VERBOSE,
    's': re.DOTALL,
    '.': re.DOTALL,
    'd': re.DEBUG,
    'i': re.IGNORECASE,
    'u': re.UNICODE,
    'l': re.LOCALE,
}
try:
    # Python2 doesn't support re.ASCII flag
    REGEX_FLAGS['a'] = re.ASCII
except AttributeError:
    pass
# BUG FIX: removed the unconditional "FORMATTER = LiteralFormatter()" that
# followed this block. It defeated the ImportError fallback near the top of
# the module (FORMATTER = str) and raised NameError at import time whenever
# the optional 'formatizer' package is not installed.
class MetaS(type):
    """Metaclass that makes ``s >> 'text'`` dedent the literal and wrap it."""

    def __rshift__(cls, other):
        dedented = dedent(other)
        return s(dedented)
class MetaF(type):
    """ Allow f >> 'text' as a shortcut to dedent f-string """
    def __rshift__(self, other):
        # Grab the *caller's* frame (f_back) so the template can reference
        # the caller's own globals/locals. Do not add intermediate helper
        # calls here: an extra stack frame would break the f_back lookup.
        caller_frame = inspect.currentframe().f_back
        caller_globals = caller_frame.f_globals
        caller_locals = caller_frame.f_locals
        return s(dedent(
            FORMATTER.format(other, caller_globals, caller_locals)
        ))
class StringWrapper(with_metaclass(MetaS, unicode)):
    """Unicode subclass with regex-aware split/replace and other helpers.

    ``s >> "text"`` (via the MetaS metaclass) dedents a literal and wraps
    it. All methods return ``s`` instances so calls can be chained.
    """
    # TODO: check for bytes in __new__. Say we don't accept it and recommand
    # to either use u'' in front of the string, from __future__ or
    # s.from_bytes(bytes, encoding)
    def _parse_flags(self, flags):
        """Turn a string of flag letters (e.g. 'iu') into an ``re`` bitmask.

        Non-string values are assumed to already be bitmasks and are
        returned unchanged.
        """
        bflags = 0
        if isinstance(flags, basestring):
            for flag in flags:
                bflags |= REGEX_FLAGS[flag]
            return bflags
        return flags
    def split(self, *separators, **kwargs):
        """Split on one or several regex separators; returns a ``g`` iterable.

        Keyword args: ``maxsplit`` (applied to the first separator only)
        and ``flags`` (an ``re`` bitmask or a string of REGEX_FLAGS letters).
        """
        for sep in separators:
            if not isinstance(sep, basestring):
                msg = s >> """
                Separators must be string, not "{sep}" ({sep_type}).
                A common cause of this error is to call split([a, b, c])
                instead of split(a, b, c).
                """.format(sep=sep, sep_type=type(sep))
                raise TypeError(msg)
        return g(self._split(separators, kwargs.get('maxsplit', 0),
                 self._parse_flags(kwargs.get('flags', 0))))
    def _split(self, separators, maxsplit=0, flags=0):
        """Recursively split on each separator in turn, yielding chunks."""
        try:
            sep = separators[0]
            # TODO: find a better error message
            # BUG FIX: pass maxsplit/flags as keywords -- positional use is
            # deprecated for re.split and rejected by newer Pythons.
            # They intentionally apply to the first separator only.
            for chunk in re.split(sep, self, maxsplit=maxsplit, flags=flags):
                for item in s(chunk)._split(separators[1:],
                                            maxsplit=0, flags=0):
                    yield item
        except IndexError:
            # No separators left: yield the (sub)string itself.
            yield self
    def replace(self, patterns, substitutions, maxreplace=0, flags=0):
        """Regex-substitute each pattern with its matching substitution.

        ``patterns``/``substitutions`` may be single values or tuples; a
        single substitution is reused for every pattern.
        """
        patterns = ensure_tuple(patterns)
        substitutions = ensure_tuple(substitutions)
        num_of_subs = len(substitutions)
        num_of_patterns = len(patterns)
        if num_of_subs == 1:
            substitutions *= num_of_patterns
        else:
            if len(patterns) != num_of_subs:
                raise ValueError("You must have exactly one substitution "
                                 "for each pattern or only one substitution")
        flags = self._parse_flags(flags)
        res = self
        for pattern, sub in zip(patterns, substitutions):
            res = re.sub(pattern, sub, res, count=maxreplace, flags=flags)
        return s(res)
    def dedent(self):
        """Return a dedented (textwrap.dedent) copy wrapped in s()."""
        return s(dedent(self))
    def join(self, iterable, formatter=lambda s, t: t.format(s),
             template="{}"):
        """Join *iterable*, formatting each item through *template* first."""
        return s(unicode.join(self,
                              (formatter(st, template) for st in iterable)))
    @staticmethod
    def from_bytes(byte_string, encoding=None, errors='strict'):
        """Decode *byte_string* to s(); an explicit *encoding* is mandatory.

        When *encoding* is omitted, a likely one is detected with chardet
        purely to include it in the error message -- decoding is never
        silently guessed.
        """
        if encoding is None:
            encoding = chardet.detect(byte_string)['encoding']
            raise ValueError(f >> """
                from_bytes() expects a second argument:
                'encoding'. If you don't know which encoding,
                try '{encoding}' or 'utf8'. If it fails and you
                can't find out what has been used, you can get
                a partial decoding with encoding="ascii" and
                errors='replace' or 'ignore'.
            """)
        return s(byte_string.decode(encoding, errors=errors))
    def format(self, *args, **kwargs):
        """Like unicode.format; with no args, formats from caller's locals."""
        if not args and not kwargs:
            pframe = inspect.currentframe().f_back
            # NOTE(review): only the caller's f_locals feed the template
            # here (globals are not available) -- confirm this is intended.
            return s(unicode.format(self, **pframe.f_locals))
        return s(unicode.format(self, *args, **kwargs))
    def to_bool(self, val, default=None):
        """Map a common truthy/falsy word to bool, else *default* or raise.

        Accepted (case-insensitive): 1/0, true/false, on/off, yes/no, ''.
        """
        try:
            return {
                '1': True,
                '0': False,
                'true': True,
                'false': False,
                'on': True,
                'off': False,
                'yes': True,
                'no': False,
                '': False
            }[val.casefold()]
        except KeyError:
            if default is not None:
                return default
            # BUG FIX: the message referenced undefined '{vals!r}'; the
            # parameter is named 'val', so formatting itself would fail.
            raise ValueError(f >> """
                '{val!r}' cannot be converted to a boolean. Clean
                your input or set the 'default' parameter to True
                or False.
            """)
    if six.PY3:  # we want unified representation between versions
        def __repr__(self):
            return 'u{}'.format(super(StringWrapper, self).__repr__())
# ``s``: public shorthand alias for StringWrapper (enables ``s(...)``
# construction and the ``s >> "text"`` dedent shortcut).
s = StringWrapper
# TODO: make sure each class call self._class instead of s(), g(), etc
class f(with_metaclass(MetaF)):
    """Format *string* like an f-string, using the caller's own namespace."""

    def __new__(cls, string):
        # f_back is the frame of whoever called f(...); its globals and
        # locals feed the template. Keep this inline -- adding a helper
        # call would shift the frame depth and break the lookup.
        caller = inspect.currentframe().f_back
        return s(FORMATTER.format(string, caller.f_globals, caller.f_locals))
|
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
gpfdist -- file distribution web server
Usage: gpfdist [-?v] [-p port] [-d dir] [-q qchar] [-x xchar] [-h] [-l logfile]
-? : print this help screen
-v : verbose mode
-p port : which port to serve HTTP. Default to 8080
-d dir : serve files under the specified directory.
Default to '.'
-q qchar : set quote character. If not specified, the
program will not parse for CSV, and will
assume each record is separated by a newline
character that never occurs inside a record.
-x xchar : set escape character (default to qchar)
-h : first data row in data is a header row. skip
it (only allowed in CSV format).
-l : fully qualified path and name of log file
'''
import SocketServer, BaseHTTPServer, os, sys, getopt, threading, time, socket
# Cap on reader threads that may share one file session at a time.
MAX_CONCURRENT_SESSION = 64
# Command-line option defaults (overwritten by parseCommandLine).
opt = {}
opt['-p'] = 8080   # HTTP port to serve on
opt['-v'] = False  # verbose
opt['-V'] = False  # extra verbosity (also enables -v; see parseCommandLine)
opt['-h'] = False  # first CSV data row is a header row: skip it
opt['-d'] = '.'    # directory to serve files from
# CSV quote / escape characters; qc is None when CSV parsing is disabled.
qc = None
xc = None
def usage(exitarg):
    """Print the module usage text (__doc__) and exit with *exitarg*."""
    print __doc__
    sys.exit(exitarg)
def parseInt(val):
    """Parse *val* as a base-10 integer, returning 0 when it is not one."""
    result = 0
    try:
        result = int(val)
    except ValueError:
        pass
    return result
def parseCommandLine():
global opt, qc, xc
try:
(options, args) = getopt.getopt(sys.argv[1:], '?Vvhp:d:q:x:')
except Exception, e:
usage('Error: ' + str(e))
for (switch, val) in options:
if (switch == '-?'): usage(0)
elif (switch[1] in 'Vvh'): opt[switch] = True
elif (switch[1] in 'dqx'): opt[switch] = val
elif (switch[1] in 'p'): opt[switch] = parseInt(val)
elif (switch == '-q'): qc = val; opt['-f'] = False
elif (switch == '-x'): xc = val; opt['-f'] = False
if not opt['-p'] > 0:
usage('Error: please specify port number for -p switch')
if not os.path.isdir(opt['-d']):
usage('Error: please specify a directory for -d switch')
opt['-d'] = os.path.abspath(opt['-d'])
if (opt['-d'] == '/'):
usage('Security Error: cannot run under root (/) directory')
if '-q' in opt: qc = opt['-q']; opt['-f'] = False
if '-x' in opt: xc = opt['-x']; opt['-f'] = False
if qc and len(qc) != 1:
usage('Error: please specify a character for -q switch')
if xc and len(xc) != 1:
usage('Error: please specify a character for -x switch')
if not qc and xc:
usage('Error: you must specify -q qchar with -x xchar')
if not qc and opt['-h']:
usage('Error: header may only be used in CSV format. please specify -q switch')
if len(args) != 0:
usage(1)
if opt['-V']: opt['-v'] = true
# a File Session - shared among all GET request threads
class Session:
    """A shared reader over one open file, used by many GET threads at once.

    All mutation of the buffer (m_residue/m_off/m_max) happens under m_sem,
    so concurrent readers each consume distinct lines/chunks of the file.
    """
    def __init__(self, fd, fname):
        self.m_fname = fname       # path being served (for logging)
        self.m_fd = fd             # open file; set to None at EOF
        self.m_sem = threading.Semaphore(1)  # guards the shared buffer
        self.m_residue = None      # current 64 KB read buffer (or None)
        self.m_off = 0             # cursor into m_residue
        self.m_max = 0             # length of m_residue
        self.m_linecnt = 0         # lines handed out (progress reporting)
        self.m_threadcnt = 0       # readers currently attached
    def readLine(self):
        """Return the next record, honoring the module-level qc/xc CSV
        quote/escape characters so embedded newlines inside quotes do not
        terminate a record. Returns '' at EOF."""
        line = ''
        inQuote = False
        lastWasEsc = False
        self.m_sem.acquire()
        start = self.m_off
        try:
            while True:
                if not self.m_residue:
                    # Refill the 64 KB buffer; EOF closes and drops the fd.
                    if not self.m_fd: break
                    self.m_residue = self.m_fd.read(1024*64)
                    if not self.m_residue:
                        self.m_fd.close()
                        self.m_fd = None
                        break
                    start = 0
                    self.m_off = 0
                    self.m_max = len(self.m_residue)
                elif (self.m_off >= self.m_max):
                    # Buffer exhausted mid-record: keep the tail, refill.
                    line = line + self.m_residue[start:]
                    self.m_residue = None
                    continue
                c = self.m_residue[self.m_off]
                self.m_off = self.m_off + 1
                if c == '\n' and not inQuote:
                    # Unquoted newline ends the record (newline included).
                    line = line + self.m_residue[start:self.m_off]
                    break
                if inQuote and c == xc:
                    lastWasEsc = not lastWasEsc
                if c == qc and not lastWasEsc:
                    inQuote = not inQuote
                if c != xc:
                    lastWasEsc = False
        finally:
            self.m_sem.release()
        self.m_linecnt = self.m_linecnt + 1
        if self.m_linecnt % 10000 == 0:
            print self.m_linecnt, 'lines'
        return line
    def readFile(self):
        """Return the next chunk(s) of the file split on whole lines; used
        for plain-text (non-CSV) serving. Returns [] at EOF."""
        lines = []
        self.m_sem.acquire()
        try:
            if self.m_fd:
                if self.m_residue:
                    lines.append(self.m_residue);
                    self.m_residue = None
                # Read until a chunk contains a newline (so records are not
                # split across concurrent readers) or EOF is hit.
                while not self.m_residue and self.m_fd:
                    chunk = self.m_fd.read(1024*64)
                    if not chunk:
                        self.m_fd.close()
                        self.m_fd = None
                    else:
                        c = chunk.split('\n', 1)
                        if (len(c) == 1):
                            lines.append(c[0])
                        else:
                            lines.append(c[0])
                            lines.append('\n')
                            self.m_residue = c[1]
        finally:
            self.m_sem.release()
        return lines
#
# Session dictionary
# sess[TID][fname] is a Session object
#
sess = {}
sessSem = threading.Semaphore(1)
def findSession(TID, fname):
    """Return the shared Session for (TID, fname), creating it on first use.

    Guarded by sessSem so concurrent GET threads for the same transaction
    join a single session instead of each reopening the file.
    """
    global sess, sessSem
    key = (TID, fname)
    sessSem.acquire()
    try:
        if key not in sess:
            fd = open(fname, 'r', 1024*1024)  # 1 MB buffered reader
            sess[key] = Session(fd, fname)
            if opt['-v']: print '[INFO] initiated session', key
        else:
            if opt['-v']: print '[INFO] joined session', key
    finally:
        sessSem.release();
    return sess[key]
#
# Class to handle individual request
#
class GPFDistRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
counter = 0
def log_request(self, code):
print "%s - %s" % (self.client_address[0], code)
def send400(self, msg):
print 'ERROR: %s' % msg
self.send_response(400)
return None
def send200empty(self):
if opt['-v']: print ' [ignore] thread %s' % self.client_address[0]
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.send_header("Content-length", "0")
self.end_headers()
return None
def do_GET(self):
try:
GPFDistRequestHandler.counter = GPFDistRequestHandler.counter + 1
TID = self.headers.getheader('X-GP-TID')
if not TID:
# start an non-transaction session
TID = 'auto-tid.' + str(GPFDistRequestHandler.counter)
fname = self.path
if fname.find('/') == 0: fname = fname[1:]
fname = os.path.join(opt['-d'], fname)
fname = os.path.normpath(fname)
if fname.find(opt['-d']) != 0:
msg = 'bad path specified (%s)' % (self.path)
return self.send400(msg)
try:
s = findSession(TID, fname)
if not s:
msg = 'unable to serve TID %s' % TID
return self.send400(msg)
if s.m_threadcnt >= MAX_CONCURRENT_SESSION:
return send200empty(self)
s.m_threadcnt = s.m_threadcnt + 1
except IOError, e:
msg = str(e)
return self.send400(msg)
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
# parse in CSV format if quote char was specified
if qc:
# skip the first line if 'header' option was specified
if opt['-h']:
line = s.readLine()
while 1:
line = s.readLine()
if not line: break
self.wfile.write(line)
# parse in text format otherwise
else:
while 1:
chunks = s.readFile()
if not chunks: break
for c in chunks:
self.wfile.write(c)
s.m_threadcnt = s.m_threadcnt - 1
if opt['-V']: print ' %s done' % fname
if TID[0] == 'a' and TID.find('auto-tid.') == 0:
del sess[(TID, fname)]
except socket.error, e:
print 'socket error: ', str(e), 'while serving', self.path
class GPFDistServer(SocketServer.ThreadingTCPServer):
    """Threaded TCP server with a deep accept queue for many parallel readers."""
    allow_reuse_address = 1
    request_queue_size = 256
    def server_bind(self):
        """Bind the socket and record the resolved server name/port."""
        SocketServer.ThreadingTCPServer.server_bind(self)
        host, port = self.socket.getsockname()[:2]
        self.server_name = socket.getfqdn(host)
        # BUG FIX: was "self.server_post" (typo). BaseHTTPServer's request
        # handlers expect the attribute to be named server_port.
        self.server_port = port
try:
    # Script entry: parse the command line, then serve until Ctrl-C.
    parseCommandLine()
    serverAddress = ('', opt['-p'])
    GPFDistRequestHandler.protocol_version = "HTTP/1.0"
    httpd = GPFDistServer(serverAddress, GPFDistRequestHandler)
    sa = httpd.socket.getsockname()
    print "Serving HTTP on %s:%d, directory %s ..." % (sa[0], sa[1], os.path.abspath(opt['-d']))
    httpd.serve_forever()
except KeyboardInterrupt:
    sys.exit('[Interrupted ...]')
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import unittest
from datetime import datetime
from unittest import mock
import numpy
from airflow.models import Connection
from airflow.providers.oracle.hooks.oracle import OracleHook
# pylint: disable=c-extension-no-member
try:
import cx_Oracle
except ImportError:
cx_Oracle = None
@unittest.skipIf(cx_Oracle is None, 'cx_Oracle package not present')
class TestOracleHookConn(unittest.TestCase):
    """Verify that OracleHook.get_conn translates Connection fields and the
    JSON 'extra' blob into the right cx_Oracle.connect keyword arguments."""
    def setUp(self):
        super().setUp()
        # A canned Connection returned by the mocked get_connection.
        self.connection = Connection(login='login', password='password', host='host', port=1521)
        self.db_hook = OracleHook()
        self.db_hook.get_connection = mock.Mock()
        self.db_hook.get_connection.return_value = self.connection
    @mock.patch('airflow.providers.oracle.hooks.oracle.cx_Oracle.connect')
    def test_get_conn_host(self, mock_connect):
        """With no extras, host is passed straight through as the DSN."""
        self.db_hook.get_conn()
        assert mock_connect.call_count == 1
        args, kwargs = mock_connect.call_args
        self.assertEqual(args, ())
        self.assertEqual(kwargs['user'], 'login')
        self.assertEqual(kwargs['password'], 'password')
        self.assertEqual(kwargs['dsn'], 'host')
    @mock.patch('airflow.providers.oracle.hooks.oracle.cx_Oracle.connect')
    def test_get_conn_sid(self, mock_connect):
        """'sid' extra builds the DSN via cx_Oracle.makedsn."""
        dsn_sid = {'dsn': 'dsn', 'sid': 'sid'}
        self.connection.extra = json.dumps(dsn_sid)
        self.db_hook.get_conn()
        assert mock_connect.call_count == 1
        args, kwargs = mock_connect.call_args
        self.assertEqual(args, ())
        self.assertEqual(
            kwargs['dsn'], cx_Oracle.makedsn(dsn_sid['dsn'], self.connection.port, dsn_sid['sid'])
        )
    @mock.patch('airflow.providers.oracle.hooks.oracle.cx_Oracle.connect')
    def test_get_conn_service_name(self, mock_connect):
        """'service_name' extra builds the DSN via makedsn(service_name=...)."""
        dsn_service_name = {'dsn': 'dsn', 'service_name': 'service_name'}
        self.connection.extra = json.dumps(dsn_service_name)
        self.db_hook.get_conn()
        assert mock_connect.call_count == 1
        args, kwargs = mock_connect.call_args
        self.assertEqual(args, ())
        self.assertEqual(
            kwargs['dsn'],
            cx_Oracle.makedsn(
                dsn_service_name['dsn'], self.connection.port, service_name=dsn_service_name['service_name']
            ),
        )
    @mock.patch('airflow.providers.oracle.hooks.oracle.cx_Oracle.connect')
    def test_get_conn_encoding_without_nencoding(self, mock_connect):
        """'encoding' alone is mirrored into nencoding."""
        self.connection.extra = json.dumps({'encoding': 'UTF-8'})
        self.db_hook.get_conn()
        assert mock_connect.call_count == 1
        args, kwargs = mock_connect.call_args
        self.assertEqual(args, ())
        self.assertEqual(kwargs['encoding'], 'UTF-8')
        self.assertEqual(kwargs['nencoding'], 'UTF-8')
    @mock.patch('airflow.providers.oracle.hooks.oracle.cx_Oracle.connect')
    def test_get_conn_encoding_with_nencoding(self, mock_connect):
        """Explicit 'nencoding' is not overwritten by 'encoding'."""
        self.connection.extra = json.dumps({'encoding': 'UTF-8', 'nencoding': 'gb2312'})
        self.db_hook.get_conn()
        assert mock_connect.call_count == 1
        args, kwargs = mock_connect.call_args
        self.assertEqual(args, ())
        self.assertEqual(kwargs['encoding'], 'UTF-8')
        self.assertEqual(kwargs['nencoding'], 'gb2312')
    @mock.patch('airflow.providers.oracle.hooks.oracle.cx_Oracle.connect')
    def test_get_conn_nencoding(self, mock_connect):
        """'nencoding' alone does not imply 'encoding'."""
        self.connection.extra = json.dumps({'nencoding': 'UTF-8'})
        self.db_hook.get_conn()
        assert mock_connect.call_count == 1
        args, kwargs = mock_connect.call_args
        self.assertEqual(args, ())
        self.assertNotIn('encoding', kwargs)
        self.assertEqual(kwargs['nencoding'], 'UTF-8')
    @mock.patch('airflow.providers.oracle.hooks.oracle.cx_Oracle.connect')
    def test_get_conn_mode(self, mock_connect):
        """Each symbolic 'mode' maps to the matching cx_Oracle constant."""
        mode = {
            'sysdba': cx_Oracle.SYSDBA,
            'sysasm': cx_Oracle.SYSASM,
            'sysoper': cx_Oracle.SYSOPER,
            'sysbkp': cx_Oracle.SYSBKP,
            'sysdgd': cx_Oracle.SYSDGD,
            'syskmt': cx_Oracle.SYSKMT,
        }
        first = True
        for mod in mode:
            self.connection.extra = json.dumps({'mode': mod})
            self.db_hook.get_conn()
            # call_count only holds on the first pass (the mock accumulates).
            if first:
                assert mock_connect.call_count == 1
                first = False
            args, kwargs = mock_connect.call_args
            self.assertEqual(args, ())
            self.assertEqual(kwargs['mode'], mode.get(mod))
    @mock.patch('airflow.providers.oracle.hooks.oracle.cx_Oracle.connect')
    def test_get_conn_threaded(self, mock_connect):
        """'threaded' extra is forwarded verbatim."""
        self.connection.extra = json.dumps({'threaded': True})
        self.db_hook.get_conn()
        assert mock_connect.call_count == 1
        args, kwargs = mock_connect.call_args
        self.assertEqual(args, ())
        self.assertEqual(kwargs['threaded'], True)
    @mock.patch('airflow.providers.oracle.hooks.oracle.cx_Oracle.connect')
    def test_get_conn_events(self, mock_connect):
        """'events' extra is forwarded verbatim."""
        self.connection.extra = json.dumps({'events': True})
        self.db_hook.get_conn()
        assert mock_connect.call_count == 1
        args, kwargs = mock_connect.call_args
        self.assertEqual(args, ())
        self.assertEqual(kwargs['events'], True)
    @mock.patch('airflow.providers.oracle.hooks.oracle.cx_Oracle.connect')
    def test_get_conn_purity(self, mock_connect):
        """Each symbolic 'purity' maps to the matching cx_Oracle constant."""
        purity = {
            'new': cx_Oracle.ATTR_PURITY_NEW,
            'self': cx_Oracle.ATTR_PURITY_SELF,
            'default': cx_Oracle.ATTR_PURITY_DEFAULT,
        }
        first = True
        for pur in purity:
            self.connection.extra = json.dumps({'purity': pur})
            self.db_hook.get_conn()
            if first:
                assert mock_connect.call_count == 1
                first = False
            args, kwargs = mock_connect.call_args
            self.assertEqual(args, ())
            self.assertEqual(kwargs['purity'], purity.get(pur))
@unittest.skipIf(cx_Oracle is None, 'cx_Oracle package not present')
class TestOracleHook(unittest.TestCase):
    """Exercise OracleHook.run / insert_rows / bulk_insert_rows against a
    fully mocked connection and cursor."""
    def setUp(self):
        super().setUp()
        self.cur = mock.MagicMock()
        self.conn = mock.MagicMock()
        self.conn.cursor.return_value = self.cur
        conn = self.conn
        class UnitTestOracleHook(OracleHook):
            # Subclass that bypasses connection lookup entirely.
            conn_name_attr = 'test_conn_id'
            def get_conn(self):
                return conn
        self.db_hook = UnitTestOracleHook()
    def test_run_without_parameters(self):
        sql = 'SQL'
        self.db_hook.run(sql)
        self.cur.execute.assert_called_once_with(sql)
        assert self.conn.commit.called
    def test_run_with_parameters(self):
        sql = 'SQL'
        param = ('p1', 'p2')
        self.db_hook.run(sql, parameters=param)
        self.cur.execute.assert_called_once_with(sql, param)
        assert self.conn.commit.called
    def test_insert_rows_with_fields(self):
        # BUG FIX: use numpy.nan -- the numpy.NAN alias was removed in
        # NumPy 2.0; numpy.nan works on every NumPy version.
        rows = [
            (
                "'basestr_with_quote",
                None,
                numpy.nan,
                numpy.datetime64('2019-01-24T01:02:03'),
                datetime(2019, 1, 24),
                1,
                10.24,
                'str',
            )
        ]
        target_fields = [
            'basestring',
            'none',
            'numpy_nan',
            'numpy_datetime64',
            'datetime',
            'int',
            'float',
            'str',
        ]
        self.db_hook.insert_rows('table', rows, target_fields)
        self.cur.execute.assert_called_once_with(
            "INSERT /*+ APPEND */ INTO table "
            "(basestring, none, numpy_nan, numpy_datetime64, datetime, int, float, str) "
            "VALUES ('''basestr_with_quote',NULL,NULL,'2019-01-24T01:02:03',"
            "to_date('2019-01-24 00:00:00','YYYY-MM-DD HH24:MI:SS'),1,10.24,'str')"
        )
    def test_insert_rows_without_fields(self):
        # BUG FIX: numpy.nan instead of removed numpy.NAN (see above).
        rows = [
            (
                "'basestr_with_quote",
                None,
                numpy.nan,
                numpy.datetime64('2019-01-24T01:02:03'),
                datetime(2019, 1, 24),
                1,
                10.24,
                'str',
            )
        ]
        self.db_hook.insert_rows('table', rows)
        self.cur.execute.assert_called_once_with(
            "INSERT /*+ APPEND */ INTO table "
            " VALUES ('''basestr_with_quote',NULL,NULL,'2019-01-24T01:02:03',"
            "to_date('2019-01-24 00:00:00','YYYY-MM-DD HH24:MI:SS'),1,10.24,'str')"
        )
    def test_bulk_insert_rows_with_fields(self):
        rows = [(1, 2, 3), (4, 5, 6), (7, 8, 9)]
        target_fields = ['col1', 'col2', 'col3']
        self.db_hook.bulk_insert_rows('table', rows, target_fields)
        self.cur.prepare.assert_called_once_with("insert into table (col1, col2, col3) values (:1, :2, :3)")
        self.cur.executemany.assert_called_once_with(None, rows)
    def test_bulk_insert_rows_with_commit_every(self):
        rows = [(1, 2, 3), (4, 5, 6), (7, 8, 9)]
        target_fields = ['col1', 'col2', 'col3']
        self.db_hook.bulk_insert_rows('table', rows, target_fields, commit_every=2)
        # Two batches of at most 2 rows -> two prepare/executemany rounds.
        calls = [
            mock.call("insert into table (col1, col2, col3) values (:1, :2, :3)"),
            mock.call("insert into table (col1, col2, col3) values (:1, :2, :3)"),
        ]
        self.cur.prepare.assert_has_calls(calls)
        calls = [
            mock.call(None, rows[:2]),
            mock.call(None, rows[2:]),
        ]
        self.cur.executemany.assert_has_calls(calls, any_order=True)
    def test_bulk_insert_rows_without_fields(self):
        rows = [(1, 2, 3), (4, 5, 6), (7, 8, 9)]
        self.db_hook.bulk_insert_rows('table', rows)
        self.cur.prepare.assert_called_once_with("insert into table values (:1, :2, :3)")
        self.cur.executemany.assert_called_once_with(None, rows)
    def test_bulk_insert_rows_no_rows(self):
        rows = []
        self.assertRaises(ValueError, self.db_hook.bulk_insert_rows, 'table', rows)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class PublicIPAddressesOperations(object):
"""PublicIPAddressesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2016-12-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
    """Store the service client, serializers, and pinned API version."""
    self._client = client            # request pipeline (get/put/delete/send)
    self._serialize = serializer     # request serializer
    self._deserialize = deserializer  # response deserializer
    # Client API version (constant for this generated operations group).
    self.api_version = "2016-12-01"
    self.config = config
def _delete_initial(
        self, resource_group_name, public_ip_address_name, custom_headers=None, raw=False, **operation_config):
    """Issue the raw DELETE request (first leg of the long-running delete).

    Returns a ClientRawResponse when raw=True; otherwise returns None and
    the caller (delete) polls completion via AzureOperationPoller.
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Correlation id for request tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request
    request = self._client.delete(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)
    # 200/202/204 are all valid outcomes for an async delete.
    if response.status_code not in [200, 202, 204]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
def delete(
        self, resource_group_name, public_ip_address_name, custom_headers=None, raw=False, **operation_config):
    """Deletes the specified public IP address.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param public_ip_address_name: The name of the subnet.
    :type public_ip_address_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An instance of AzureOperationPoller that returns None or
     ClientRawResponse if raw=true
    :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Always fetch the raw response: the poller closures below need it.
    raw_result = self._delete_initial(
        resource_group_name=resource_group_name,
        public_ip_address_name=public_ip_address_name,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )
    if raw:
        return raw_result
    # Construct and send request
    def long_running_send():
        # The initial request was already sent; hand its response back.
        return raw_result.response
    def get_long_running_status(status_link, headers=None):
        # Poll the operation status URL, propagating the correlation id.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        header_parameters = {}
        header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
        return self._client.send(
            request, header_parameters, stream=False, **operation_config)
    def get_long_running_output(response):
        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def get(
        self, resource_group_name, public_ip_address_name, expand=None, custom_headers=None, raw=False, **operation_config):
    """Gets the specified public IP address in a specified resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param public_ip_address_name: The name of the subnet.
    :type public_ip_address_name: str
    :param expand: Expands referenced resources.
    :type expand: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: PublicIPAddress or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.network.v2016_12_01.models.PublicIPAddress or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Correlation id for request tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)
    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('PublicIPAddress', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
def _create_or_update_initial(
self, resource_group_name, public_ip_address_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'PublicIPAddress')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PublicIPAddress', response)
if response.status_code == 201:
deserialized = self._deserialize('PublicIPAddress', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, public_ip_address_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a static or dynamic public IP address.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param parameters: Parameters supplied to the create or update public
IP address operation.
:type parameters:
~azure.mgmt.network.v2016_12_01.models.PublicIPAddress
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
PublicIPAddress or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2016_12_01.models.PublicIPAddress]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
public_ip_address_name=public_ip_address_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('PublicIPAddress', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all the public IP addresses in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PublicIPAddress
:rtype:
~azure.mgmt.network.v2016_12_01.models.PublicIPAddressPaged[~azure.mgmt.network.v2016_12_01.models.PublicIPAddress]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PublicIPAddressPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PublicIPAddressPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all public IP addresses in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PublicIPAddress
:rtype:
~azure.mgmt.network.v2016_12_01.models.PublicIPAddressPaged[~azure.mgmt.network.v2016_12_01.models.PublicIPAddress]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PublicIPAddressPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PublicIPAddressPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
|
|
"""Support for Ambient Weather Station binary sensors."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Literal
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_NAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import AmbientWeatherEntity
from .const import ATTR_LAST_DATA, DOMAIN
# Keys under which the Ambient Weather station reports sensor values in
# its last-data payload (looked up via ATTR_LAST_DATA below).
#
# Battery sensors: paired with on_state=0 in BINARY_SENSOR_DESCRIPTIONS,
# i.e. the binary sensor is "on" when the API reports 0.
TYPE_BATT1 = "batt1"
TYPE_BATT10 = "batt10"
TYPE_BATT2 = "batt2"
TYPE_BATT3 = "batt3"
TYPE_BATT4 = "batt4"
TYPE_BATT5 = "batt5"
TYPE_BATT6 = "batt6"
TYPE_BATT7 = "batt7"
TYPE_BATT8 = "batt8"
TYPE_BATT9 = "batt9"
TYPE_BATT_CO2 = "batt_co2"
TYPE_BATTOUT = "battout"
# NOTE(review): the PM2.5 battery keys intentionally differ from the
# TYPE_* constant names ("batt_25"/"batt_25in") — presumably matching the
# upstream API field names; verify against the Ambient Weather API.
TYPE_PM25_BATT = "batt_25"
TYPE_PM25IN_BATT = "batt_25in"
# Relay sensors: paired with on_state=1 in BINARY_SENSOR_DESCRIPTIONS.
TYPE_RELAY1 = "relay1"
TYPE_RELAY10 = "relay10"
TYPE_RELAY2 = "relay2"
TYPE_RELAY3 = "relay3"
TYPE_RELAY4 = "relay4"
TYPE_RELAY5 = "relay5"
TYPE_RELAY6 = "relay6"
TYPE_RELAY7 = "relay7"
TYPE_RELAY8 = "relay8"
TYPE_RELAY9 = "relay9"
@dataclass
class AmbientBinarySensorDescriptionMixin:
    """Define an entity description mixin for binary sensors."""

    # Raw API value that maps to this sensor's "on" state (0 for the
    # battery descriptions below, 1 for the relay descriptions).
    on_state: Literal[0, 1]
@dataclass
class AmbientBinarySensorDescription(
    BinarySensorEntityDescription, AmbientBinarySensorDescriptionMixin
):
    """Describe an Ambient PWS binary sensor."""

    # The mixin is listed last so its required ``on_state`` field sorts
    # after the base description's defaulted fields in the dataclass
    # __init__ signature.
# (key, name) pairs for the battery sensors. The API reports 0 when a
# battery needs attention, so those descriptions use on_state=0.
_BATTERY_SENSORS = (
    (TYPE_BATTOUT, "Battery"),
    (TYPE_BATT1, "Battery 1"),
    (TYPE_BATT2, "Battery 2"),
    (TYPE_BATT3, "Battery 3"),
    (TYPE_BATT4, "Battery 4"),
    (TYPE_BATT5, "Battery 5"),
    (TYPE_BATT6, "Battery 6"),
    (TYPE_BATT7, "Battery 7"),
    (TYPE_BATT8, "Battery 8"),
    (TYPE_BATT9, "Battery 9"),
    (TYPE_BATT10, "Battery 10"),
    (TYPE_BATT_CO2, "CO2 Battery"),
    (TYPE_PM25IN_BATT, "PM25 Indoor Battery"),
    (TYPE_PM25_BATT, "PM25 Battery"),
)

# (key, name) pairs for the relay connectivity sensors (on_state=1).
_RELAY_SENSORS = (
    (TYPE_RELAY1, "Relay 1"),
    (TYPE_RELAY2, "Relay 2"),
    (TYPE_RELAY3, "Relay 3"),
    (TYPE_RELAY4, "Relay 4"),
    (TYPE_RELAY5, "Relay 5"),
    (TYPE_RELAY6, "Relay 6"),
    (TYPE_RELAY7, "Relay 7"),
    (TYPE_RELAY8, "Relay 8"),
    (TYPE_RELAY9, "Relay 9"),
    (TYPE_RELAY10, "Relay 10"),
)

# All supported binary sensor descriptions: batteries first, then relays,
# every one a diagnostic entity.
BINARY_SENSOR_DESCRIPTIONS = tuple(
    AmbientBinarySensorDescription(
        key=key,
        name=name,
        device_class=BinarySensorDeviceClass.BATTERY,
        entity_category=EntityCategory.DIAGNOSTIC,
        on_state=0,
    )
    for key, name in _BATTERY_SENSORS
) + tuple(
    AmbientBinarySensorDescription(
        key=key,
        name=name,
        device_class=BinarySensorDeviceClass.CONNECTIVITY,
        entity_category=EntityCategory.DIAGNOSTIC,
        on_state=1,
    )
    for key, name in _RELAY_SENSORS
)
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up Ambient PWS binary sensors based on a config entry."""
    ambient = hass.data[DOMAIN][entry.entry_id]

    # Create one entity per station per description, but only for data
    # keys the station actually reported in its last payload.
    entities = []
    for mac_address, station in ambient.stations.items():
        last_data = station[ATTR_LAST_DATA]
        for description in BINARY_SENSOR_DESCRIPTIONS:
            if description.key in last_data:
                entities.append(
                    AmbientWeatherBinarySensor(
                        ambient, mac_address, station[ATTR_NAME], description
                    )
                )
    async_add_entities(entities)
class AmbientWeatherBinarySensor(AmbientWeatherEntity, BinarySensorEntity):
    """Define an Ambient binary sensor."""

    entity_description: AmbientBinarySensorDescription

    @callback
    def update_from_latest_data(self) -> None:
        """Fetch new state data for the entity."""
        description = self.entity_description
        raw_value = self._ambient.stations[self._mac_address][ATTR_LAST_DATA][
            description.key
        ]
        # "on" when the raw API value matches the configured on_state
        # (0 for battery sensors, 1 for relay sensors).
        self._attr_is_on = raw_value == description.on_state
|
|
"""`BaseModel`, `Model`, `NotBuiltError`, `Percept`, `SpatialModel`,
`TemporalModel`"""
import sys
from abc import ABCMeta, abstractmethod
from copy import deepcopy
import numpy as np
from ..implants import ProsthesisSystem
from ..stimuli import Stimulus
from ..percepts import Percept
from ..utils import (Curcio1990Map, PrettyPrint, Frozen, FreezeError,
Grid2D, bisect)
from ..utils.constants import ZORDER
class NotBuiltError(ValueError, AttributeError):
    """Raised when a model is used before ``build`` has been called.

    Inherits from both ``ValueError`` and ``AttributeError`` so that
    existing exception handlers and backward-compatible code keep
    working.
    """
class BaseModel(Frozen, PrettyPrint, metaclass=ABCMeta):
    """Abstract base class for all models

    Provides the following functionality:

    *  Pretty-print class attributes (via ``_pprint_params`` and
       ``PrettyPrint``)
    *  Build a model (via ``build``) and flip the ``is_built`` switch
    *  User-settable parameters must be listed in ``get_default_params``
    *  New class attributes can only be added in the constructor
       (enforced via ``Frozen`` and ``FreezeError``).
    """

    def __init__(self, **params):
        """BaseModel constructor

        Parameters
        ----------
        **params : optional keyword arguments
            All keyword arguments must be listed in ``get_default_params``
        """
        # Set all default arguments:
        defaults = self.get_default_params()
        for key, val in defaults.items():
            setattr(self, key, val)
        # Then overwrite any arguments also given in `params`:
        for key, val in params.items():
            if key in defaults:
                setattr(self, key, val)
            else:
                # Reject unknown parameters early with a helpful message
                # listing the valid choices:
                err_str = ("'%s' is not a valid model parameter. Choose "
                           "from: %s." % (key, ', '.join(defaults.keys())))
                raise AttributeError(err_str)
        # This flag will be flipped once the ``build`` method was called
        self._is_built = False

    @abstractmethod
    def get_default_params(self):
        """Return a dict of user-settable model parameters"""
        raise NotImplementedError

    def set_params(self, **params):
        """Set the parameters of this model"""
        for key, value in params.items():
            setattr(self, key, value)

    def _pprint_params(self):
        """Return a dict of class attributes to display when pretty-printing"""
        return {key: getattr(self, key)
                for key, _ in self.get_default_params().items()}

    def _build(self):
        """Customize the building process by implementing this method"""
        pass

    def build(self, **build_params):
        """Build the model

        Every model must have a ```build`` method, which is meant to perform
        all expensive one-time calculations. You must call ``build`` before
        calling ``predict_percept``.

        .. important::
            Don't override this method if you are building your own model.
            Customize ``_build`` instead.

        Parameters
        ----------
        build_params : additional parameters to set
            You can overwrite parameters that are listed in
            ``get_default_params``. Trying to add new class attributes outside
            of that will cause a ``FreezeError``.
            Example: ``model.build(param1=val)``
        """
        for key, val in build_params.items():
            setattr(self, key, val)
        self._build()
        self.is_built = True
        return self

    @property
    def is_built(self):
        """A flag indicating whether the model has been built"""
        return self._is_built

    @is_built.setter
    def is_built(self, val):
        """This flag can only be set in the constructor or ``build``"""
        # getframe(0) is '_is_built', getframe(1) is 'set_attr'.
        # getframe(2) is the one we are looking for, and has to be either the
        # construct or ``build``:
        # NOTE: this frame-depth inspection is deliberately fragile — it
        # guards against user code flipping the flag directly. Frame 3 is
        # also checked to cover one extra level of indirection (e.g. a
        # subclass's ``build`` delegating to ``super().build``).
        f_caller_2 = sys._getframe(2).f_code.co_name
        f_caller_3 = sys._getframe(3).f_code.co_name
        if f_caller_2 in ["__init__", "build"] or \
           f_caller_3 in ["__init__", "build"]:
            self._is_built = val
        else:
            err_s = ("The attribute `is_built` can only be set in the "
                     "constructor or in ``build``, not in ``%s``." % f_caller_2)
            raise AttributeError(err_s)
class SpatialModel(BaseModel, metaclass=ABCMeta):
    """Abstract base class for all spatial models

    Provides basic functionality for all spatial models:

    *  ``build``: builds the spatial grid used to calculate the percept.
       You can add your own ``_build`` method (note the underscore) that
       performs additional expensive one-time calculations.
    *  ``predict_percept``: predicts the percepts based on an
       implant/stimulus. Don't customize this method - implement your own
       ``_predict_spatial`` instead (see below).

    A user must call ``build`` before calling ``predict_percept``.

    To create your own spatial model, you must subclass ``SpatialModel`` and
    provide an implementation for:

    *  ``_predict_spatial``: This method should accept an ElectrodeArray as
       well as a Stimulus, and compute the brightness at all spatial
       coordinates of ``self.grid``, returned as a 2D NumPy array
       (space x time).

    .. note ::
        The ``_`` in the method name indicates that this is a private
        method, meaning that it should not be called by the user. Instead,
        the user should call ``predict_percept``, which in turn will call
        ``_predict_spatial``.
        The same logic applies to ``build`` (called by the user; don't
        touch) and ``_build`` (called by ``build``; customize this instead).

    In addition, you can customize the following:

    *  ``__init__``: the constructor can be used to define additional
       parameters (note that you cannot add parameters on-the-fly)
    *  ``get_default_params``: all settable model parameters must be listed
       by this method
    *  ``_build`` (optional): a way to add one-time computations to the
       build process

    .. versionadded:: 0.6

    .. note ::
        You will not be able to add more parameters outside the constructor;
        e.g., ``model.newparam = 1`` will lead to a ``FreezeError``.

    .. seealso ::
        *  `Basic Concepts > Computational Models > Building your own model
           <topics-models-building-your-own>`
    """

    def __init__(self, **params):
        super().__init__(**params)
        # The spatial grid is created in ``build``; until then the model
        # cannot predict percepts:
        self.grid = None

    def get_default_params(self):
        """Return a dictionary of default values for all model parameters"""
        params = {
            # We will be simulating a patch of the visual field (xrange/yrange
            # in degrees of visual angle), at a given spatial resolution (step
            # size):
            'xrange': (-15, 15),  # dva
            'yrange': (-15, 15),  # dva
            'xystep': 0.25,  # dva
            'grid_type': 'rectangular',
            # Below threshold, percept has brightness zero:
            'thresh_percept': 0,
            # Retinotopic map to be used:
            'retinotopy': Curcio1990Map(),
            # JobLib or Dask can be used to parallelize computations:
            'engine': 'serial',
            'scheduler': 'threading',
            'n_jobs': 1,
            # True: print status messages, False: silent
            'verbose': True
        }
        return params

    def build(self, **build_params):
        """Build the model

        Performs expensive one-time calculations, such as building the spatial
        grid used to predict a percept. You must call ``build`` before
        calling ``predict_percept``.

        .. important::
            Don't override this method if you are building your own model.
            Customize ``_build`` instead.

        Parameters
        ----------
        build_params: additional parameters to set
            You can overwrite parameters that are listed in
            ``get_default_params``. Trying to add new class attributes outside
            of that will cause a ``FreezeError``.
            Example: ``model.build(param1=val)``
        """
        for key, val in build_params.items():
            setattr(self, key, val)
        # Build the spatial grid:
        self.grid = Grid2D(self.xrange, self.yrange, step=self.xystep,
                           grid_type=self.grid_type)
        # Precompute the retinal coordinates of every grid point:
        self.grid.xret, self.grid.yret = self.retinotopy.dva2ret(self.grid.x,
                                                                 self.grid.y)
        self._build()
        self.is_built = True
        return self

    @abstractmethod
    def _predict_spatial(self, earray, stim):
        """Customized spatial response

        Called by the user from ``predict_percept`` after error checking.

        Parameters
        ----------
        earray: :py:class:`~pulse2percept.implants.ElectrodeArray`
            A valid electrode array.
        stim : :py:meth:`~pulse2percept.stimuli.Stimulus`
            A valid stimulus with a 2D data container (n_electrodes, n_time).

        Returns
        -------
        percept: np.ndarray
            A 2D NumPy array that has the same dimensions as the input
            stimulus (n_electrodes, n_time).
        """
        raise NotImplementedError

    def predict_percept(self, implant, t_percept=None):
        """Predict the spatial response

        .. important::
            Don't override this method if you are creating your own model.
            Customize ``_predict_spatial`` instead.

        Parameters
        ----------
        implant: :py:class:`~pulse2percept.implants.ProsthesisSystem`
            A valid prosthesis system. A stimulus can be passed via
            :py:meth:`~pulse2percept.implants.ProsthesisSystem.stim`.
        t_percept: float or list of floats, optional
            The time points at which to output a percept (ms).
            If None, ``implant.stim.time`` is used.

        Returns
        -------
        percept: :py:class:`~pulse2percept.models.Percept`
            A Percept object whose ``data`` container has dimensions Y x X x T.
            Will return None if ``implant.stim`` is None.

        Raises
        ------
        NotBuiltError
            If the model has not been built yet.
        TypeError
            If ``implant`` is not a ProsthesisSystem.
        ValueError
            If ``t_percept`` is given but the stimulus has no time component.
        """
        if not self.is_built:
            # BUG FIX: error message previously read "Yout must call":
            raise NotBuiltError("You must call ``build`` first.")
        if not isinstance(implant, ProsthesisSystem):
            raise TypeError(("'implant' must be a ProsthesisSystem object, "
                             "not %s.") % type(implant))
        if implant.stim is None:
            # Nothing to see here:
            return None
        if implant.stim.time is None and t_percept is not None:
            raise ValueError("Cannot calculate spatial response at times "
                             "t_percept=%s, because stimulus does not "
                             "have a time component." % t_percept)
        # Make sure we don't change the user's Stimulus object:
        stim = deepcopy(implant.stim)
        # Make sure to operate on the compressed stim:
        if not stim.is_compressed:
            stim.compress()
        if t_percept is None:
            t_percept = stim.time
        n_time = 1 if t_percept is None else np.array([t_percept]).size
        if stim.data.size == 0:
            # Stimulus was compressed to zero:
            resp = np.zeros((self.grid.x.size, n_time), dtype=np.float32)
        else:
            # Calculate the Stimulus at requested time points:
            if t_percept is not None:
                # Save electrode parameters
                stim = Stimulus(stim)  # make sure stimulus is in proper format
                stim = Stimulus(stim[:, t_percept].reshape((-1, n_time)),
                                electrodes=stim.electrodes, time=t_percept,
                                metadata=stim.metadata)
            resp = self._predict_spatial(implant.earray, stim)
        # Reshape the flat spatial response back onto the 2D grid:
        return Percept(resp.reshape(list(self.grid.x.shape) + [-1]),
                       space=self.grid, time=t_percept,
                       metadata={'stim': stim})

    def find_threshold(self, implant, bright_th, amp_range=(0, 999), amp_tol=1,
                       bright_tol=0.1, max_iter=100):
        """Find the threshold current for a certain stimulus

        Estimates ``amp_th`` such that the output of
        ``model.predict_percept(stim(amp_th))`` is approximately
        ``bright_th``.

        Parameters
        ----------
        implant : :py:class:`~pulse2percept.implants.ProsthesisSystem`
            The implant and its stimulus to use. Stimulus amplitude will be
            up and down regulated until ``amp_th`` is found.
        bright_th : float
            Model output (brightness) that's considered "at threshold".
        amp_range : (amp_lo, amp_hi), optional
            Range of amplitudes to search (uA).
        amp_tol : float, optional
            Search will stop if candidate range of amplitudes is within
            ``amp_tol``
        bright_tol : float, optional
            Search will stop if model brightness is within ``bright_tol`` of
            ``bright_th``
        max_iter : int, optional
            Search will stop after ``max_iter`` iterations

        Returns
        -------
        amp_th : float
            Threshold current (uA), estimated so that the output of
            ``model.predict_percept(stim(amp_th))`` is within ``bright_tol``
            of ``bright_th``.
        """
        if not isinstance(implant, ProsthesisSystem):
            raise TypeError("'implant' must be a ProsthesisSystem, not "
                            "%s." % type(implant))

        def inner_predict(amp, fnc_predict, implant):
            # Rescale the stimulus so its peak equals ``amp`` and return
            # the maximum predicted brightness:
            _implant = deepcopy(implant)
            scale = amp / implant.stim.data.max()
            _implant.stim = Stimulus(scale * implant.stim.data,
                                     electrodes=implant.stim.electrodes,
                                     time=implant.stim.time)
            return fnc_predict(_implant).data.max()

        # Bisection search over the amplitude range:
        return bisect(bright_th, inner_predict,
                      args=[self.predict_percept, implant],
                      x_lo=amp_range[0], x_hi=amp_range[1], x_tol=amp_tol,
                      y_tol=bright_tol, max_iter=max_iter)

    def plot(self, use_dva=False, style='hull', autoscale=True, ax=None,
             figsize=None):
        """Plot the model

        Parameters
        ----------
        use_dva : bool, optional
            Uses degrees of visual angle (dva) if True, else retinal
            coordinates (microns)
        style : {'hull', 'scatter', 'cell'}, optional
            Grid plotting style:
            * 'hull': Show the convex hull of the grid (that is, the outline
              of the smallest convex set that contains all grid points).
            * 'scatter': Scatter plot all grid points
            * 'cell': Show the outline of each grid cell as a polygon. Note
              that this can be costly for a high-resolution grid.
        autoscale : bool, optional
            Whether to adjust the x,y limits of the plot to fit the implant
        ax : matplotlib.axes._subplots.AxesSubplot, optional
            A Matplotlib axes object. If None, will either use the current
            axes (if exists) or create a new Axes object.
        figsize : (float, float), optional
            Desired (width, height) of the figure in inches

        Returns
        -------
        ax : ``matplotlib.axes.Axes``
            Returns the axis object of the plot
        """
        if not self.is_built:
            # Building is needed to create the grid that gets plotted:
            self.build()
        if use_dva:
            ax = self.grid.plot(autoscale=autoscale, ax=ax, style=style,
                                zorder=ZORDER['background'], figsize=figsize)
            ax.set_xlabel('x (dva)')
            ax.set_ylabel('y (dva)')
        else:
            ax = self.grid.plot(transform=self.retinotopy.dva2ret, ax=ax,
                                zorder=ZORDER['background'] + 1, style=style,
                                figsize=figsize, autoscale=autoscale)
            ax.set_xlabel('x (microns)')
            ax.set_ylabel('y (microns)')
        return ax
class TemporalModel(BaseModel, metaclass=ABCMeta):
"""Abstract base class for all temporal models
Provides basic functionality for all temporal models:
* ``build``: builds the model in order to calculate the percept.
You can add your own ``_build`` method (note the underscore) that
performs additional expensive one-time calculations.
* ``predict_percept``: predicts the percepts based on an implant/stimulus.
You can add your own ``_predict_temporal`` method to customize this
step. A user must call ``build`` before calling ``predict_percept``.
    To create your own temporal model, you must subclass ``TemporalModel`` and
provide an implementation for:
* ``_predict_temporal``: a method that accepts either a
:py:class:`~pulse2percept.stimuli.Stimulus` or a
:py:class:`~pulse2percept.percepts.Percept` object and a list of time
points at which to calculate the resulting percept, returned as a 2D
NumPy array (space x time).
In addition, you can customize the following:
* ``__init__``: the constructor can be used to define additional
parameters (note that you cannot add parameters on-the-fly)
* ``get_default_params``: all settable model parameters must be listed by
this method
* ``_build`` (optional): a way to add one-time computations to the build
process
.. versionadded:: 0.6
.. note ::
You will not be able to add more parameters outside the constructor;
e.g., ``model.newparam = 1`` will lead to a ``FreezeError``.
.. seealso ::
* `Basic Concepts > Computational Models > Building your own model
<topics-models-building-your-own>`
"""
def get_default_params(self):
"""Return a dictionary of default values for all model parameters"""
params = {
# Simulation time step (ms):
'dt': 0.005,
# Below threshold, percept has brightness zero:
'thresh_percept': 0,
# True: print status messages, False: silent
'verbose': True
}
return params
    @abstractmethod
    def _predict_temporal(self, stim, t_percept):
        """Customized temporal response

        Called by the user from ``predict_percept`` after error checking.
        Subclasses must override this method; the base implementation only
        raises ``NotImplementedError``.

        Parameters
        ----------
        stim : :py:meth:`~pulse2percept.stimuli.Stimulus`
            A valid stimulus with a 2D data container (n_electrodes, n_time).
        t_percept : list of floats
            The time points at which to output a percept (ms).

        Returns
        -------
        percept: np.ndarray
            A 2D NumPy array (space x time) that specifies the percept at each
            spatial location and time step.
        """
        raise NotImplementedError
def predict_percept(self, stim, t_percept=None):
    """Predict the temporal response

    .. important ::

        Don't override this method if you are creating your own model.
        Customize ``_predict_temporal`` instead.

    Parameters
    ----------
    stim : :py:class:`~pulse2percept.stimuli.Stimulus` or
           :py:class:`~pulse2percept.percepts.Percept`
        Either a Stimulus or a Percept object. The temporal model will be
        applied to each spatial location in the stimulus/percept.
    t_percept : float or list of floats, optional
        The time points at which to output a percept (ms).
        If None, the percept will be output once every 20 ms (50 Hz frame
        rate).

        .. note ::

            If your stimulus is shorter than 20 ms, you should specify
            the desired time points manually.

    Returns
    -------
    percept : :py:class:`~pulse2percept.models.Percept`
        A Percept object whose ``data`` container has dimensions Y x X x T.
        Will return None if ``stim`` is None.

    Notes
    -----
    *  If a list of time points is provided for ``t_percept``, the values
       will automatically be sorted.
    """
    if not self.is_built:
        # Fixed typo in the error message ("Yout" -> "You"):
        raise NotBuiltError("You must call ``build`` first.")
    if stim is None:
        # Nothing to see here:
        return None
    if not isinstance(stim, (Stimulus, Percept)):
        raise TypeError(("'stim' must be a Stimulus or Percept object, "
                         "not %s.") % type(stim))
    if stim.time is None:
        raise ValueError("Cannot calculate temporal response, because "
                         "stimulus/percept does not have a time "
                         "component.")
    # Make sure we don't change the user's Stimulus/Percept object:
    _stim = deepcopy(stim)
    if isinstance(stim, Stimulus):
        # Make sure to operate on the compressed stim:
        if not _stim.is_compressed:
            _stim.compress()
        _space = [len(stim.electrodes), 1]
    elif isinstance(stim, Percept):
        _space = [len(stim.ydva), len(stim.xdva)]
    _time = stim.time
    if t_percept is None:
        # If no time vector is given, output at 50 Hz frame rate. We always
        # start at zero and include the last time point:
        t_percept = np.arange(0, np.maximum(20, _time[-1]) + 1, 20)
    # We need to make sure the requested `t_percept` are sorted and
    # multiples of `dt`:
    t_percept = np.sort([t_percept]).flatten()
    remainder = np.mod(t_percept, self.dt) / self.dt
    # Accept values just below or just above a multiple of dt:
    atol = 1e-3
    within_atol = (remainder < atol) | (np.abs(1 - remainder) < atol)
    if not np.all(within_atol):
        raise ValueError("t=%s are not multiples of dt=%.2e." %
                         (t_percept[np.logical_not(within_atol)],
                          self.dt))
    if _stim.data.size == 0:
        # Stimulus was compressed to zero:
        resp = np.zeros(_space + [t_percept.size], dtype=np.float32)
    else:
        # Calculate the Stimulus at requested time points:
        resp = self._predict_temporal(_stim, t_percept)
    return Percept(resp.reshape(_space + [t_percept.size]),
                   space=None, time=t_percept,
                   metadata={'stim': stim})
def find_threshold(self, stim, bright_th, amp_range=(0, 999), amp_tol=1,
                   bright_tol=0.1, max_iter=100, t_percept=None):
    """Find the threshold current for a certain stimulus

    Estimates ``amp_th`` such that the output of
    ``model.predict_percept(stim(amp_th))`` is approximately ``bright_th``.

    Parameters
    ----------
    stim : :py:class:`~pulse2percept.stimuli.Stimulus`
        The stimulus to use. Stimulus amplitude will be up and down
        regulated until ``amp_th`` is found.
    bright_th : float
        Model output (brightness) that's considered "at threshold".
    amp_range : (amp_lo, amp_hi), optional
        Range of amplitudes to search (uA).
    amp_tol : float, optional
        Search will stop if candidate range of amplitudes is within
        ``amp_tol``
    bright_tol : float, optional
        Search will stop if model brightness is within ``bright_tol`` of
        ``bright_th``
    max_iter : int, optional
        Search will stop after ``max_iter`` iterations
    t_percept: float or list of floats, optional
        The time points at which to output a percept (ms).
        If None, ``implant.stim.time`` is used.

    Returns
    -------
    amp_th : float
        Threshold current (uA), estimated so that the output of
        ``model.predict_percept(stim(amp_th))`` is within ``bright_tol`` of
        ``bright_th``.
    """
    if not isinstance(stim, Stimulus):
        raise TypeError("'stim' must be a Stimulus, not %s." % type(stim))

    def inner_predict(amp, fnc_predict, stim, **kwargs):
        # Rescale the stimulus so its peak equals `amp`, then report the
        # brightest point of the resulting percept:
        scaled = Stimulus(amp * stim.data / stim.data.max(),
                          electrodes=stim.electrodes, time=stim.time)
        return fnc_predict(scaled, **kwargs).data.max()

    # Bisect on amplitude until either tolerance or the iteration cap hits:
    return bisect(bright_th, inner_predict,
                  args=[self.predict_percept, stim],
                  kwargs={'t_percept': t_percept},
                  x_lo=amp_range[0], x_hi=amp_range[1], x_tol=amp_tol,
                  y_tol=bright_tol, max_iter=max_iter)
class Model(PrettyPrint):
    """Computational model

    To build your own model, you can mix and match spatial and temporal models
    at will.

    For example, to create a model that combines the scoreboard model described
    in [Beyeler2019]_ with the temporal model cascade described in
    [Nanduri2012]_, use the following:

    .. code-block :: python

        model = Model(spatial=ScoreboardSpatial(),
                      temporal=Nanduri2012Temporal())

    .. seealso ::

        *  `Basic Concepts > Computational Models > Building your own model
           <topics-models-building-your-own>`

    .. versionadded:: 0.6

    Parameters
    ----------
    spatial: :py:class:`~pulse2percept.models.SpatialModel` or None
        The spatial component of the model, responsible for calculating the
        percept at every spatial location. Pass None for a purely temporal
        model.
    temporal: :py:class:`~pulse2percept.models.TemporalModel` or None
        The temporal component of the model, responsible for calculating the
        percept over time. Pass None for a purely spatial model.
    **params:
        Additional keyword arguments(e.g., ``verbose=True``) to be passed to
        either the spatial model, the temporal model, or both.
    """

    def __init__(self, spatial=None, temporal=None, **params):
        # Set the spatial model:
        if spatial is not None and not isinstance(spatial, SpatialModel):
            # Guard with isinstance(..., type) so that passing a non-class
            # (e.g., a string) raises the friendly TypeError below instead of
            # issubclass's own "arg 1 must be a class" error:
            if isinstance(spatial, type) and issubclass(spatial, SpatialModel):
                # User should have passed an instance, not a class:
                spatial = spatial()
            else:
                raise TypeError("'spatial' must be a SpatialModel instance, "
                                "not %s." % type(spatial))
        self.spatial = spatial
        # Set the temporal model:
        if temporal is not None and not isinstance(temporal, TemporalModel):
            if isinstance(temporal, type) and issubclass(temporal,
                                                         TemporalModel):
                # User should have passed an instance, not a class:
                temporal = temporal()
            else:
                raise TypeError("'temporal' must be a TemporalModel instance, "
                                "not %s." % type(temporal))
        self.temporal = temporal
        # Use user-specified parameter values instead of defaults:
        self.set_params(params)

    def __getattr__(self, attr):
        """Called when the default attr access fails with an AttributeError

        This method is called when the user tries to access an attribute(e.g.,
        ``model.a``), but ``a`` could not be found(either because it is part
        of the spatial / temporal model or because it doesn't exist).

        Returns
        -------
        attr: any
            Checks both spatial and temporal models and:

            *  returns the attribute if found.
            *  if the attribute exists in both spatial / temporal model,
               returns a dictionary ``{'spatial': attr, 'temporal': attr}``.
            *  if the attribute is not found, raises an AttributeError.
        """
        # Check the spatial/temporal model:
        try:
            spatial = getattr(self.spatial, attr)
            spatial_valid = True
        except AttributeError:
            spatial_valid = False
        try:
            temporal = getattr(self.temporal, attr)
            temporal_valid = True
        except AttributeError:
            temporal_valid = False
        if not spatial_valid and not temporal_valid:
            # If we are in the constructor, this will be caught later and
            # a new variable will be constructed
            raise AttributeError("%s has no attribute "
                                 "'%s'." % (self.__class__.__name__,
                                            attr))
        if not spatial_valid:
            return temporal
        if not temporal_valid:
            return spatial
        return {'spatial': spatial, 'temporal': temporal}

    def __setattr__(self, name, value):
        """Called when an attribute is set

        This method is called when a new attribute is set(e.g.,
        ``model.a=2``). This is allowed in the constructor, but will raise a
        ``FreezeError`` elsewhere.

        ``model.a = X`` can be used as a shorthand to set ``model.spatial.a``
        and / or ``model.temporal.a``.
        """
        # NOTE: relies on frame inspection to detect whether we are inside
        # this class's own __init__ (only place new attributes may be added):
        if sys._getframe(1).f_code.co_name == '__init__':
            # Allow setting new attributes in the constructor:
            if isinstance(sys._getframe(1).f_locals['self'], self.__class__):
                super().__setattr__(name, value)
                return
        # Outside the constructor, we cannot add new attributes (FreezeError).
        # But, we have to check whether the attribute is part of the spatial
        # model, the temporal model, or both:
        found = False
        try:
            self.spatial.__setattr__(name, value)
            found = True
        except (AttributeError, FreezeError):
            pass
        try:
            self.temporal.__setattr__(name, value)
            found = True
        except (AttributeError, FreezeError):
            pass
        if not found:
            err_str = ("'%s' not found. You cannot add attributes to %s "
                       "outside the constructor." % (name,
                                                     self.__class__.__name__))
            raise FreezeError(err_str)

    def _pprint_params(self):
        """Return a dictionary of parameters to pretty-print"""
        params = {'spatial': self.spatial, 'temporal': self.temporal}
        # Also display the parameters from the spatial/temporal model:
        if self.has_space:
            params.update(self.spatial._pprint_params())
        if self.has_time:
            params.update(self.temporal._pprint_params())
        return params

    def set_params(self, params):
        """Set model parameters

        This is a convenience function to set parameters that might be part of
        the spatial model, the temporal model, or both.

        Alternatively, you can set the parameter directly, e.g.
        ``model.spatial.verbose = True``.

        .. note::

            If a parameter exists in both spatial and temporal models(e.g.,
            ``verbose``), both models will be updated.

        Parameters
        ----------
        params: dict
            A dictionary of parameters to set.
        """
        for key, val in params.items():
            # __setattr__ dispatches to the spatial/temporal model(s):
            setattr(self, key, val)

    def build(self, **build_params):
        """Build the model

        Performs expensive one-time calculations, such as building the spatial
        grid used to predict a percept.

        Parameters
        ----------
        build_params: additional parameters to set
            You can overwrite parameters that are listed in
            ``get_default_params``. Trying to add new class attributes outside
            of that will cause a ``FreezeError``.
            Example: ``model.build(param1=val)``

        Returns
        -------
        self
        """
        self.set_params(build_params)
        if self.has_space:
            self.spatial.build()
        if self.has_time:
            self.temporal.build()
        return self

    def predict_percept(self, implant, t_percept=None):
        """Predict a percept

        .. important ::

            You must call ``build`` before calling ``predict_percept``.

        Parameters
        ----------
        implant: :py:class:`~pulse2percept.implants.ProsthesisSystem`
            A valid prosthesis system. A stimulus can be passed via
            :py:meth:`~pulse2percept.implants.ProsthesisSystem.stim`.
        t_percept: float or list of floats, optional
            The time points at which to output a percept (ms).
            If None, ``implant.stim.time`` is used.

        Returns
        -------
        percept: :py:class:`~pulse2percept.models.Percept`
            A Percept object whose ``data`` container has dimensions Y x X x T.
            Will return None if ``implant.stim`` is None.
        """
        if not self.is_built:
            # Fixed typo in the error message ("Yout" -> "You"):
            raise NotBuiltError("You must call ``build`` first.")
        if not isinstance(implant, ProsthesisSystem):
            raise TypeError("'implant' must be a ProsthesisSystem object, not "
                            "%s." % type(implant))
        if implant.stim is None or (not self.has_space and not self.has_time):
            # Nothing to see here:
            return None
        if implant.stim.time is None and t_percept is not None:
            raise ValueError("Cannot calculate temporal response at times "
                             "t_percept=%s, because stimulus/percept does not "
                             "have a time component." % t_percept)
        if self.has_space and self.has_time:
            # Need to calculate the spatial response at all stimulus points
            # (i.e., whenever the stimulus changes):
            resp = self.spatial.predict_percept(implant, t_percept=None)
            if implant.stim.time is not None:
                # Then pass that to the temporal model, which will output at
                # all `t_percept` time steps:
                resp = self.temporal.predict_percept(resp, t_percept=t_percept)
        elif self.has_space:
            resp = self.spatial.predict_percept(implant, t_percept=t_percept)
        elif self.has_time:
            resp = self.temporal.predict_percept(implant.stim,
                                                 t_percept=t_percept)
        return resp

    def find_threshold(self, implant, bright_th, amp_range=(0, 999), amp_tol=1,
                       bright_tol=0.1, max_iter=100, t_percept=None):
        """Find the threshold current for a certain stimulus

        Estimates ``amp_th`` such that the output of
        ``model.predict_percept(stim(amp_th))`` is approximately ``bright_th``.

        Parameters
        ----------
        implant : :py:class:`~pulse2percept.implants.ProsthesisSystem`
            The implant and its stimulus to use. Stimulus amplitude will be
            up and down regulated until ``amp_th`` is found.
        bright_th : float
            Model output (brightness) that's considered "at threshold".
        amp_range : (amp_lo, amp_hi), optional
            Range of amplitudes to search (uA).
        amp_tol : float, optional
            Search will stop if candidate range of amplitudes is within
            ``amp_tol``
        bright_tol : float, optional
            Search will stop if model brightness is within ``bright_tol`` of
            ``bright_th``
        max_iter : int, optional
            Search will stop after ``max_iter`` iterations
        t_percept: float or list of floats, optional
            The time points at which to output a percept (ms).
            If None, ``implant.stim.time`` is used.

        Returns
        -------
        amp_th : float
            Threshold current (uA), estimated so that the output of
            ``model.predict_percept(stim(amp_th))`` is within ``bright_tol`` of
            ``bright_th``.
        """
        if not isinstance(implant, ProsthesisSystem):
            raise TypeError("'implant' must be a ProsthesisSystem, not "
                            "%s." % type(implant))

        def inner_predict(amp, fnc_predict, implant, **kwargs):
            # Work on a copy so the user's implant is never modified:
            _implant = deepcopy(implant)
            scale = amp / implant.stim.data.max()
            _implant.stim = Stimulus(scale * implant.stim.data,
                                     electrodes=implant.stim.electrodes,
                                     time=implant.stim.time)
            return fnc_predict(_implant, **kwargs).data.max()

        return bisect(bright_th, inner_predict,
                      args=[self.predict_percept, implant],
                      kwargs={'t_percept': t_percept},
                      x_lo=amp_range[0], x_hi=amp_range[1], x_tol=amp_tol,
                      y_tol=bright_tol, max_iter=max_iter)

    @property
    def has_space(self):
        """Returns True if the model has a spatial component"""
        return self.spatial is not None

    @property
    def has_time(self):
        """Returns True if the model has a temporal component"""
        return self.temporal is not None

    @property
    def is_built(self):
        """Returns True if the ``build`` model has been called"""
        _is_built = True
        if self.has_space:
            _is_built &= self.spatial.is_built
        if self.has_time:
            _is_built &= self.temporal.is_built
        return _is_built
|
|
"""
Utils called from project_root/docs/conf.py when Sphinx
documentation is generated.
"""
from __future__ import division, print_function, unicode_literals
from future.builtins import map, open, str
from datetime import datetime
import os.path
from shutil import copyfile, move
from string import letters
from socket import gethostname
from warnings import warn
from django.template.defaultfilters import urlize
from django.utils.datastructures import SortedDict
try:
from django.utils.encoding import force_text
except ImportError:
# Backward compatibility for Py2 and Django < 1.5
from django.utils.encoding import force_unicode as force_text
from django.utils.functional import Promise
from PIL import Image
from mezzanine import __version__
from mezzanine.conf import registry
from mezzanine.utils.importing import import_dotted_path, path_for_import
def deep_force_unicode(value):
    """
    Recursively call force_text on value.
    """
    # Containers are rebuilt with the same type, their members converted:
    if isinstance(value, (list, tuple, set)):
        return type(value)(map(deep_force_unicode, value))
    if isinstance(value, dict):
        # Each (key, value) pair is itself converted recursively:
        return type(value)(map(deep_force_unicode, value.items()))
    if isinstance(value, Promise):
        # Lazy translation strings are resolved to text here:
        return force_text(value)
    return value
def build_settings_docs(docs_path, prefix=None):
    """
    Converts names, descriptions and defaults for settings in
    ``mezzanine.conf.registry`` into RST format for use in docs,
    optionally filtered by setting names with the given prefix.
    """
    # String to use instead of setting value for dynamic defaults
    dynamic = "[dynamic]"
    lines = [".. THIS DOCUMENT IS AUTO GENERATED VIA conf.py"]
    for name in sorted(registry.keys()):
        if prefix and not name.startswith(prefix):
            continue
        setting = registry[name]
        settings_name = "``%s``" % name
        setting_default = setting["default"]
        if isinstance(setting_default, str):
            # Hostname- or path-dependent defaults vary per machine, so
            # they are documented as "[dynamic]" rather than a literal:
            if gethostname() in setting_default or (
                    setting_default.startswith("/") and
                    os.path.exists(setting_default)):
                setting_default = dynamic
        if setting_default != dynamic:
            setting_default = repr(deep_force_unicode(setting_default))
        # Convert any URLs in the description into RST links:
        description = urlize(setting["description"])
        description = description.replace("<a href=\"", "`")
        description = description.replace("\" rel=\"nofollow\">", " <")
        description = description.replace("</a>", ">`_")
        lines.extend(["", settings_name, "-" * len(settings_name)])
        lines.extend(["", description])
        if setting["choices"]:
            choices = ", ".join("%s: ``%s``" % (str(v), force_text(k))
                                for k, v in setting["choices"])
            lines.extend(["", "Choices: %s" % choices, ""])
        lines.extend(["", "Default: ``%s``" % setting_default])
    with open(os.path.join(docs_path, "settings.rst"), "w") as f:
        f.write("\n".join(lines).replace("u'", "'").replace("yo'", "you'"))
def build_deploy_docs(docs_path):
    """
    Writes fabfile.rst, a bullet list of every fabric command defined by
    the project template's fabfile (one line per command, with the first
    line of its docstring as the description).
    """
    try:
        from fabric.main import load_fabfile
    except ImportError:
        warn("Couldn't build fabfile.rst, fabric not installed")
        return
    template_path = path_for_import("mezzanine.project_template")
    commands = load_fabfile(os.path.join(template_path, "fabfile"))[1]
    lines = []
    for name in sorted(commands.keys()):
        # Only the first docstring line is used as the summary:
        summary = commands[name].__doc__.strip().split("\n")[0]
        lines.append(" * ``fab %s`` - %s" % (name, summary))
    with open(os.path.join(docs_path, "fabfile.rst"), "w") as f:
        f.write("\n".join(lines))
# Python complains if this is inside build_changelog which uses exec.
def _changeset_date(cs):
    """Return the commit time of a Mercurial changeset as a datetime."""
    return datetime.fromtimestamp(cs.date()[0])
def build_changelog(docs_path, package_name="mezzanine"):
    """
    Converts Mercurial commits into a changelog in RST format.

    Walks the repo's changesets newest-first, groups commit descriptions
    under the package version that was current at each commit, tags new
    versions, and writes the result to the project's CHANGELOG file.
    """
    project_path = os.path.join(docs_path, "..")
    version_file = os.path.join(package_name, "__init__.py")
    version_var = "__version__"
    changelog_filename = "CHANGELOG"
    changelog_file = os.path.join(project_path, changelog_filename)
    versions = SortedDict()
    repo = None
    # Commits whose message contains any of these words are left out:
    ignore = ("AUTHORS", "formatting", "typo", "pep8", "pep 8",
              "whitespace", "README", "trans", "print debug",
              "debugging", "tabs", "style", "sites", "ignore",
              "tweak", "cleanup", "minor", "for changeset",
              ".com``", "oops", "syntax")
    # Changesets (short hex) that were hotfix releases without a version bump:
    hotfixes = {
        "40cbc47b8d8a": "1.0.9",
        "a25749986abc": "1.0.10",
    }

    # Load the repo.
    try:
        from mercurial import ui, hg, error
        from mercurial.commands import tag
    except ImportError:
        pass
    else:
        try:
            ui = ui.ui()
            repo = hg.repository(ui, project_path)
        except error.RepoError:
            return
    if repo is None:
        return

    # Go through each changeset and assign it to the versions dict.
    changesets = [repo.changectx(changeset) for changeset in repo.changelog]
    for cs in sorted(changesets, reverse=True, key=_changeset_date):
        # Check if the file with the version number is in this changeset
        # and if it is, pull it out and assign it as a variable.
        files = cs.files()
        new_version = False
        # Commit message cleanup hacks.
        description = cs.description().decode("utf-8")
        description = description.rstrip(".").replace("\n", ". ")
        # BUGFIX: these literals must be a double space; with a single
        # space the replace() is a no-op and the while loop never ends
        # for any multi-word description.
        while "  " in description:
            description = description.replace("  ", " ")
        description = description.replace(". . ", ". ").replace("...", ",")
        while ".." in description:
            description = description.replace("..", ".")
        description = description.replace(":.", ":").replace("n'. t", "n't")
        words = description.split()
        # Format var names in commit.
        # NOTE(review): `letters` comes from the module-level
        # ``from string import letters``, which is Python 2 only
        # (``string.ascii_letters`` on Python 3) — verify before porting.
        for i, word in enumerate(words):
            if (set("._") & set(word[:-1]) and set(letters) & set(word)
                    and "`" not in word and not word[0].isdigit()):
                last = ""
                if word[-1] in ",.":
                    last, word = word[-1], word[:-1]
                words[i] = "``%s``%s" % (word, last)
        description = " ".join(words)
        if version_file in files:
            # HACK: exec-ing the version assignment line injects
            # ``__version__`` into this function's locals(); this relies on
            # CPython 2's unoptimized frames created when exec is present.
            for line in cs[version_file].data().split("\n"):
                if line.startswith(version_var):
                    exec(line)
                    if locals()[version_var] == "0.1.0":
                        locals()[version_var] = "1.0.0"
                    break
            versions[locals()[version_var]] = {
                "changes": [],
                "date": _changeset_date(cs).strftime("%b %d, %Y")
            }
            new_version = len(files) == 1
        # Tag new versions.
        hotfix = hotfixes.get(cs.hex()[:12])
        if hotfix or new_version:
            if hotfix:
                version_tag = hotfix
            else:
                try:
                    version_tag = locals()[version_var]
                except KeyError:
                    version_tag = None
            if version_tag and version_tag not in cs.tags():
                try:
                    tag(ui, repo, version_tag, rev=cs.hex())
                    print("Tagging version %s" % version_tag)
                except:
                    # Best-effort: tagging can fail (e.g., read-only repo);
                    # the changelog is still generated.
                    pass
        # Ignore changesets that are merges, bumped the version, closed
        # a branch, regenerated the changelog itself, contain an ignore
        # word, or are one word long.
        merge = len(cs.parents()) > 1
        branch_closed = len(files) == 0
        changelog_update = changelog_filename in files
        ignored = [w for w in ignore if w.lower() in description.lower()]
        one_word = len(description.split()) == 1
        if (merge or new_version or branch_closed or changelog_update or
                ignored or one_word):
            continue
        # Ensure we have a current version and if so, add this changeset's
        # description to it.
        version = None
        try:
            version = locals()[version_var]
        except KeyError:
            if not hotfix:
                continue
        user = cs.user().decode("utf-8").split("<")[0].strip()
        entry = "%s - %s" % (description, user)
        if hotfix or entry not in versions[version]["changes"]:
            if hotfix:
                versions[hotfix] = {
                    "changes": [entry],
                    "date": _changeset_date(cs).strftime("%b %d, %Y"),
                }
            else:
                versions[version]["changes"].insert(0, entry)

    # Write out the changelog.
    with open(changelog_file, "w") as f:
        for version, version_info in versions.items():
            header = "Version %s (%s)" % (version, version_info["date"])
            f.write("%s\n" % header)
            f.write("%s\n" % ("-" * len(header)))
            f.write("\n")
            if version_info["changes"]:
                for change in version_info["changes"]:
                    f.write(" * %s\n" % change)
            else:
                f.write(" * No changes listed.\n")
            f.write("\n")
def build_modelgraph(docs_path, package_name="mezzanine"):
    """
    Creates a diagram of all the models for mezzanine and the given
    package name, generates a smaller version and add it to the
    docs directory for use in model-graph.rst
    """
    to_path = os.path.join(docs_path, "img", "graph.png")
    build_path = os.path.join(docs_path, "build", "_images")
    resized_path = os.path.join(os.path.dirname(to_path), "graph-small.png")
    settings = import_dotted_path(package_name + ".project_template.settings")
    # Only graph apps belonging to mezzanine or the given package:
    apps = []
    for app in settings.INSTALLED_APPS:
        if app.startswith("mezzanine.") or app.startswith(package_name + "."):
            apps.append(app.rsplit(".")[1])
    try:
        from django_extensions.management.commands import graph_models
    except ImportError:
        warn("Couldn't build model_graph, django_extensions not installed")
    else:
        options = {"inheritance": True, "outputfile": "graph.png",
                   "layout": "dot"}
        try:
            graph_models.Command().execute(*apps, **options)
        except Exception as e:
            warn("Couldn't build model_graph, graph_models failed on: %s" % e)
        else:
            try:
                move("graph.png", to_path)
            except OSError as e:
                warn("Couldn't build model_graph, move failed on: %s" % e)
    # Even if regeneration failed above, docs/img/graph.png should exist in
    # the repo - copy it to the build path.
    try:
        if not os.path.exists(build_path):
            os.makedirs(build_path)
        copyfile(to_path, os.path.join(build_path, "graph.png"))
    except OSError as e:
        warn("Couldn't build model_graph, copy to build failed on: %s" % e)
    # Produce a thumbnail version alongside the full-size graph:
    try:
        image = Image.open(to_path)
        image.width = 800
        image.height = image.size[1] * 800 // image.size[0]
        image.save(resized_path, "PNG", quality=100)
    except Exception as e:
        warn("Couldn't build model_graph, resize failed on: %s" % e)
    return
def build_requirements(docs_path, package_name="mezzanine"):
    """
    Updates the requirements file with Mezzanine's version number.
    """
    mezz_string = "Mezzanine=="
    project_path = os.path.join(docs_path, "..")
    requirements_file = os.path.join(project_path, package_name,
                                     "project_template", "requirements.txt")
    with open(requirements_file, "r") as f:
        existing = f.readlines()
    # Keep every non-blank line that isn't the pinned Mezzanine entry:
    kept = [line for line in existing
            if line.strip() and not line.startswith(mezz_string)]
    with open(requirements_file, "w") as f:
        # The pinned Mezzanine version always comes first:
        f.write("Mezzanine==%s\n" % __version__)
        f.writelines(kept)
|
|
from __future__ import unicode_literals
from .. import Provider as SsnProvider
import datetime
class Provider(SsnProvider):
# Extracted from
# http://www.stats.gov.cn/tjsj/tjbz/xzqhdm/201504/t20150415_712722.html
area_codes = [
"110000", "110100", "110101", "110102", "110105", "110106", "110107",
"110108", "110109", "110111", "110112", "110113", "110114", "110115",
"110116", "110117", "110200", "110228", "110229", "120000", "120100",
"120101", "120102", "120103", "120104", "120105", "120106", "120110",
"120111", "120112", "120113", "120114", "120115", "120116", "120200",
"120221", "120223", "120225", "130000", "130100", "130101", "130102",
"130104", "130105", "130107", "130108", "130109", "130110", "130111",
"130121", "130123", "130125", "130126", "130127", "130128", "130129",
"130130", "130131", "130132", "130133", "130181", "130183", "130184",
"130200", "130201", "130202", "130203", "130204", "130205", "130207",
"130208", "130209", "130223", "130224", "130225", "130227", "130229",
"130281", "130283", "130300", "130301", "130302", "130303", "130304",
"130321", "130322", "130323", "130324", "130400", "130401", "130402",
"130403", "130404", "130406", "130421", "130423", "130424", "130425",
"130426", "130427", "130428", "130429", "130430", "130431", "130432",
"130433", "130434", "130435", "130481", "130500", "130501", "130502",
"130503", "130521", "130522", "130523", "130524", "130525", "130526",
"130527", "130528", "130529", "130530", "130531", "130532", "130533",
"130534", "130535", "130581", "130582", "130600", "130601", "130602",
"130603", "130604", "130621", "130622", "130623", "130624", "130625",
"130626", "130627", "130628", "130629", "130630", "130631", "130632",
"130633", "130634", "130635", "130636", "130637", "130638", "130681",
"130682", "130683", "130684", "130700", "130701", "130702", "130703",
"130705", "130706", "130721", "130722", "130723", "130724", "130725",
"130726", "130727", "130728", "130729", "130730", "130731", "130732",
"130733", "130800", "130801", "130802", "130803", "130804", "130821",
"130822", "130823", "130824", "130825", "130826", "130827", "130828",
"130900", "130901", "130902", "130903", "130921", "130922", "130923",
"130924", "130925", "130926", "130927", "130928", "130929", "130930",
"130981", "130982", "130983", "130984", "131000", "131001", "131002",
"131003", "131022", "131023", "131024", "131025", "131026", "131028",
"131081", "131082", "131100", "131101", "131102", "131121", "131122",
"131123", "131124", "131125", "131126", "131127", "131128", "131181",
"131182", "140000", "140100", "140101", "140105", "140106", "140107",
"140108", "140109", "140110", "140121", "140122", "140123", "140181",
"140200", "140201", "140202", "140203", "140211", "140212", "140221",
"140222", "140223", "140224", "140225", "140226", "140227", "140300",
"140301", "140302", "140303", "140311", "140321", "140322", "140400",
"140401", "140402", "140411", "140421", "140423", "140424", "140425",
"140426", "140427", "140428", "140429", "140430", "140431", "140481",
"140500", "140501", "140502", "140521", "140522", "140524", "140525",
"140581", "140600", "140601", "140602", "140603", "140621", "140622",
"140623", "140624", "140700", "140701", "140702", "140721", "140722",
"140723", "140724", "140725", "140726", "140727", "140728", "140729",
"140781", "140800", "140801", "140802", "140821", "140822", "140823",
"140824", "140825", "140826", "140827", "140828", "140829", "140830",
"140881", "140882", "140900", "140901", "140902", "140921", "140922",
"140923", "140924", "140925", "140926", "140927", "140928", "140929",
"140930", "140931", "140932", "140981", "141000", "141001", "141002",
"141021", "141022", "141023", "141024", "141025", "141026", "141027",
"141028", "141029", "141030", "141031", "141032", "141033", "141034",
"141081", "141082", "141100", "141101", "141102", "141121", "141122",
"141123", "141124", "141125", "141126", "141127", "141128", "141129",
"141130", "141181", "141182", "150000", "150100", "150101", "150102",
"150103", "150104", "150105", "150121", "150122", "150123", "150124",
"150125", "150200", "150201", "150202", "150203", "150204", "150205",
"150206", "150207", "150221", "150222", "150223", "150300", "150301",
"150302", "150303", "150304", "150400", "150401", "150402", "150403",
"150404", "150421", "150422", "150423", "150424", "150425", "150426",
"150428", "150429", "150430", "150500", "150501", "150502", "150521",
"150522", "150523", "150524", "150525", "150526", "150581", "150600",
"150601", "150602", "150621", "150622", "150623", "150624", "150625",
"150626", "150627", "150700", "150701", "150702", "150703", "150721",
"150722", "150723", "150724", "150725", "150726", "150727", "150781",
"150782", "150783", "150784", "150785", "150800", "150801", "150802",
"150821", "150822", "150823", "150824", "150825", "150826", "150900",
"150901", "150902", "150921", "150922", "150923", "150924", "150925",
"150926", "150927", "150928", "150929", "150981", "152200", "152201",
"152202", "152221", "152222", "152223", "152224", "152500", "152501",
"152502", "152522", "152523", "152524", "152525", "152526", "152527",
"152528", "152529", "152530", "152531", "152900", "152921", "152922",
"152923", "210000", "210100", "210101", "210102", "210103", "210104",
"210105", "210106", "210111", "210112", "210113", "210114", "210122",
"210123", "210124", "210181", "210200", "210201", "210202", "210203",
"210204", "210211", "210212", "210213", "210224", "210281", "210282",
"210283", "210300", "210301", "210302", "210303", "210304", "210311",
"210321", "210323", "210381", "210400", "210401", "210402", "210403",
"210404", "210411", "210421", "210422", "210423", "210500", "210501",
"210502", "210503", "210504", "210505", "210521", "210522", "210600",
"210601", "210602", "210603", "210604", "210624", "210681", "210682",
"210700", "210701", "210702", "210703", "210711", "210726", "210727",
"210781", "210782", "210800", "210801", "210802", "210803", "210804",
"210811", "210881", "210882", "210900", "210901", "210902", "210903",
"210904", "210905", "210911", "210921", "210922", "211000", "211001",
"211002", "211003", "211004", "211005", "211011", "211021", "211081",
"211100", "211101", "211102", "211103", "211121", "211122", "211200",
"211201", "211202", "211204", "211221", "211223", "211224", "211281",
"211282", "211300", "211301", "211302", "211303", "211321", "211322",
"211324", "211381", "211382", "211400", "211401", "211402", "211403",
"211404", "211421", "211422", "211481", "220000", "220100", "220101",
"220102", "220103", "220104", "220105", "220106", "220112", "220113",
"220122", "220182", "220183", "220200", "220201", "220202", "220203",
"220204", "220211", "220221", "220281", "220282", "220283", "220284",
"220300", "220301", "220302", "220303", "220322", "220323", "220381",
"220382", "220400", "220401", "220402", "220403", "220421", "220422",
"220500", "220501", "220502", "220503", "220521", "220523", "220524",
"220581", "220582", "220600", "220601", "220602", "220605", "220621",
"220622", "220623", "220681", "220700", "220701", "220702", "220721",
"220722", "220723", "220781", "220800", "220801", "220802", "220821",
"220822", "220881", "220882", "222400", "222401", "222402", "222403",
"222404", "222405", "222406", "222424", "222426", "230000", "230100",
"230101", "230102", "230103", "230104", "230108", "230109", "230110",
"230111", "230112", "230123", "230124", "230125", "230126", "230127",
"230128", "230129", "230182", "230183", "230184", "230200", "230201",
"230202", "230203", "230204", "230205", "230206", "230207", "230208",
"230221", "230223", "230224", "230225", "230227", "230229", "230230",
"230231", "230281", "230300", "230301", "230302", "230303", "230304",
"230305", "230306", "230307", "230321", "230381", "230382", "230400",
"230401", "230402", "230403", "230404", "230405", "230406", "230407",
"230421", "230422", "230500", "230501", "230502", "230503", "230505",
"230506", "230521", "230522", "230523", "230524", "230600", "230601",
"230602", "230603", "230604", "230605", "230606", "230621", "230622",
"230623", "230624", "230700", "230701", "230702", "230703", "230704",
"230705", "230706", "230707", "230708", "230709", "230710", "230711",
"230712", "230713", "230714", "230715", "230716", "230722", "230781",
"230800", "230801", "230803", "230804", "230805", "230811", "230822",
"230826", "230828", "230833", "230881", "230882", "230900", "230901",
"230902", "230903", "230904", "230921", "231000", "231001", "231002",
"231003", "231004", "231005", "231024", "231025", "231081", "231083",
"231084", "231085", "231100", "231101", "231102", "231121", "231123",
"231124", "231181", "231182", "231200", "231201", "231202", "231221",
"231222", "231223", "231224", "231225", "231226", "231281", "231282",
"231283", "232700", "232721", "232722", "232723", "310000", "310100",
"310101", "310104", "310105", "310106", "310107", "310108", "310109",
"310110", "310112", "310113", "310114", "310115", "310116", "310117",
"310118", "310120", "310200", "310230", "320000", "320100", "320101",
"320102", "320104", "320105", "320106", "320111", "320113", "320114",
"320115", "320116", "320117", "320118", "320200", "320201", "320202",
"320203", "320204", "320205", "320206", "320211", "320281", "320282",
"320300", "320301", "320302", "320303", "320305", "320311", "320312",
"320321", "320322", "320324", "320381", "320382", "320400", "320401",
"320402", "320404", "320405", "320411", "320412", "320481", "320482",
"320500", "320501", "320505", "320506", "320507", "320508", "320509",
"320581", "320582", "320583", "320585", "320600", "320601", "320602",
"320611", "320612", "320621", "320623", "320681", "320682", "320684",
"320700", "320701", "320703", "320706", "320707", "320722", "320723",
"320724", "320800", "320801", "320802", "320803", "320804", "320811",
"320826", "320829", "320830", "320831", "320900", "320901", "320902",
"320903", "320921", "320922", "320923", "320924", "320925", "320981",
"320982", "321000", "321001", "321002", "321003", "321012", "321023",
"321081", "321084", "321100", "321101", "321102", "321111", "321112",
"321181", "321182", "321183", "321200", "321201", "321202", "321203",
"321204", "321281", "321282", "321283", "321300", "321301", "321302",
"321311", "321322", "321323", "321324", "330000", "330100", "330101",
"330102", "330103", "330104", "330105", "330106", "330108", "330109",
"330110", "330122", "330127", "330182", "330183", "330185", "330200",
"330201", "330203", "330204", "330205", "330206", "330211", "330212",
"330225", "330226", "330281", "330282", "330283", "330300", "330301",
"330302", "330303", "330304", "330322", "330324", "330326", "330327",
"330328", "330329", "330381", "330382", "330400", "330401", "330402",
"330411", "330421", "330424", "330481", "330482", "330483", "330500",
"330501", "330502", "330503", "330521", "330522", "330523", "330600",
"330601", "330602", "330603", "330604", "330624", "330681", "330683",
"330700", "330701", "330702", "330703", "330723", "330726", "330727",
"330781", "330782", "330783", "330784", "330800", "330801", "330802",
"330803", "330822", "330824", "330825", "330881", "330900", "330901",
"330902", "330903", "330921", "330922", "331000", "331001", "331002",
"331003", "331004", "331021", "331022", "331023", "331024", "331081",
"331082", "331100", "331101", "331102", "331121", "331122", "331123",
"331124", "331125", "331126", "331127", "331181", "340000", "340100",
"340101", "340102", "340103", "340104", "340111", "340121", "340122",
"340123", "340124", "340181", "340200", "340201", "340202", "340203",
"340207", "340208", "340221", "340222", "340223", "340225", "340300",
"340301", "340302", "340303", "340304", "340311", "340321", "340322",
"340323", "340400", "340401", "340402", "340403", "340404", "340405",
"340406", "340421", "340500", "340501", "340503", "340504", "340506",
"340521", "340522", "340523", "340600", "340601", "340602", "340603",
"340604", "340621", "340700", "340701", "340702", "340703", "340711",
"340721", "340800", "340801", "340802", "340803", "340811", "340822",
"340823", "340824", "340825", "340826", "340827", "340828", "340881",
"341000", "341001", "341002", "341003", "341004", "341021", "341022",
"341023", "341024", "341100", "341101", "341102", "341103", "341122",
"341124", "341125", "341126", "341181", "341182", "341200", "341201",
"341202", "341203", "341204", "341221", "341222", "341225", "341226",
"341282", "341300", "341301", "341302", "341321", "341322", "341323",
"341324", "341500", "341501", "341502", "341503", "341521", "341522",
"341523", "341524", "341525", "341600", "341601", "341602", "341621",
"341622", "341623", "341700", "341701", "341702", "341721", "341722",
"341723", "341800", "341801", "341802", "341821", "341822", "341823",
"341824", "341825", "341881", "350000", "350100", "350101", "350102",
"350103", "350104", "350105", "350111", "350121", "350122", "350123",
"350124", "350125", "350128", "350181", "350182", "350200", "350201",
"350203", "350205", "350206", "350211", "350212", "350213", "350300",
"350301", "350302", "350303", "350304", "350305", "350322", "350400",
"350401", "350402", "350403", "350421", "350423", "350424", "350425",
"350426", "350427", "350428", "350429", "350430", "350481", "350500",
"350501", "350502", "350503", "350504", "350505", "350521", "350524",
"350525", "350526", "350527", "350581", "350582", "350583", "350600",
"350601", "350602", "350603", "350622", "350623", "350624", "350625",
"350626", "350627", "350628", "350629", "350681", "350700", "350701",
"350702", "350721", "350722", "350723", "350724", "350725", "350781",
"350782", "350783", "350784", "350800", "350801", "350802", "350821",
"350822", "350823", "350824", "350825", "350881", "350900", "350901",
"350902", "350921", "350922", "350923", "350924", "350925", "350926",
"350981", "350982", "360000", "360100", "360101", "360102", "360103",
"360104", "360105", "360111", "360121", "360122", "360123", "360124",
"360200", "360201", "360202", "360203", "360222", "360281", "360300",
"360301", "360302", "360313", "360321", "360322", "360323", "360400",
"360401", "360402", "360403", "360421", "360423", "360424", "360425",
"360426", "360427", "360428", "360429", "360430", "360481", "360482",
"360500", "360501", "360502", "360521", "360600", "360601", "360602",
"360622", "360681", "360700", "360701", "360702", "360703", "360721",
"360722", "360723", "360724", "360725", "360726", "360727", "360728",
"360729", "360730", "360731", "360732", "360733", "360734", "360735",
"360781", "360800", "360801", "360802", "360803", "360821", "360822",
"360823", "360824", "360825", "360826", "360827", "360828", "360829",
"360830", "360881", "360900", "360901", "360902", "360921", "360922",
"360923", "360924", "360925", "360926", "360981", "360982", "360983",
"361000", "361001", "361002", "361021", "361022", "361023", "361024",
"361025", "361026", "361027", "361028", "361029", "361030", "361100",
"361101", "361102", "361121", "361122", "361123", "361124", "361125",
"361126", "361127", "361128", "361129", "361130", "361181", "370000",
"370100", "370101", "370102", "370103", "370104", "370105", "370112",
"370113", "370124", "370125", "370126", "370181", "370200", "370201",
"370202", "370203", "370211", "370212", "370213", "370214", "370281",
"370282", "370283", "370285", "370300", "370301", "370302", "370303",
"370304", "370305", "370306", "370321", "370322", "370323", "370400",
"370401", "370402", "370403", "370404", "370405", "370406", "370481",
"370500", "370501", "370502", "370503", "370521", "370522", "370523",
"370600", "370601", "370602", "370611", "370612", "370613", "370634",
"370681", "370682", "370683", "370684", "370685", "370686", "370687",
"370700", "370701", "370702", "370703", "370704", "370705", "370724",
"370725", "370781", "370782", "370783", "370784", "370785", "370786",
"370800", "370801", "370811", "370812", "370826", "370827", "370828",
"370829", "370830", "370831", "370832", "370881", "370883", "370900",
"370901", "370902", "370911", "370921", "370923", "370982", "370983",
"371000", "371001", "371002", "371003", "371082", "371083", "371100",
"371101", "371102", "371103", "371121", "371122", "371200", "371201",
"371202", "371203", "371300", "371301", "371302", "371311", "371312",
"371321", "371322", "371323", "371324", "371325", "371326", "371327",
"371328", "371329", "371400", "371401", "371402", "371403", "371422",
"371423", "371424", "371425", "371426", "371427", "371428", "371481",
"371482", "371500", "371501", "371502", "371521", "371522", "371523",
"371524", "371525", "371526", "371581", "371600", "371601", "371602",
"371603", "371621", "371622", "371623", "371625", "371626", "371700",
"371701", "371702", "371721", "371722", "371723", "371724", "371725",
"371726", "371727", "371728", "410000", "410100", "410101", "410102",
"410103", "410104", "410105", "410106", "410108", "410122", "410181",
"410182", "410183", "410184", "410185", "410200", "410201", "410202",
"410203", "410204", "410205", "410211", "410221", "410222", "410223",
"410224", "410225", "410300", "410301", "410302", "410303", "410304",
"410305", "410306", "410311", "410322", "410323", "410324", "410325",
"410326", "410327", "410328", "410329", "410381", "410400", "410401",
"410402", "410403", "410404", "410411", "410421", "410422", "410423",
"410425", "410481", "410482", "410500", "410501", "410502", "410503",
"410505", "410506", "410522", "410523", "410526", "410527", "410581",
"410600", "410601", "410602", "410603", "410611", "410621", "410622",
"410700", "410701", "410702", "410703", "410704", "410711", "410721",
"410724", "410725", "410726", "410727", "410728", "410781", "410782",
"410800", "410801", "410802", "410803", "410804", "410811", "410821",
"410822", "410823", "410825", "410882", "410883", "410900", "410901",
"410902", "410922", "410923", "410926", "410927", "410928", "411000",
"411001", "411002", "411023", "411024", "411025", "411081", "411082",
"411100", "411101", "411102", "411103", "411104", "411121", "411122",
"411200", "411201", "411202", "411221", "411222", "411224", "411281",
"411282", "411300", "411301", "411302", "411303", "411321", "411322",
"411323", "411324", "411325", "411326", "411327", "411328", "411329",
"411330", "411381", "411400", "411401", "411402", "411403", "411421",
"411422", "411423", "411424", "411425", "411426", "411481", "411500",
"411501", "411502", "411503", "411521", "411522", "411523", "411524",
"411525", "411526", "411527", "411528", "411600", "411601", "411602",
"411621", "411622", "411623", "411624", "411625", "411626", "411627",
"411628", "411681", "411700", "411701", "411702", "411721", "411722",
"411723", "411724", "411725", "411726", "411727", "411728", "411729",
"419000", "419001", "420000", "420100", "420101", "420102", "420103",
"420104", "420105", "420106", "420107", "420111", "420112", "420113",
"420114", "420115", "420116", "420117", "420200", "420201", "420202",
"420203", "420204", "420205", "420222", "420281", "420300", "420301",
"420302", "420303", "420304", "420322", "420323", "420324", "420325",
"420381", "420500", "420501", "420502", "420503", "420504", "420505",
"420506", "420525", "420526", "420527", "420528", "420529", "420581",
"420582", "420583", "420600", "420601", "420602", "420606", "420607",
"420624", "420625", "420626", "420682", "420683", "420684", "420700",
"420701", "420702", "420703", "420704", "420800", "420801", "420802",
"420804", "420821", "420822", "420881", "420900", "420901", "420902",
"420921", "420922", "420923", "420981", "420982", "420984", "421000",
"421001", "421002", "421003", "421022", "421023", "421024", "421081",
"421083", "421087", "421100", "421101", "421102", "421121", "421122",
"421123", "421124", "421125", "421126", "421127", "421181", "421182",
"421200", "421201", "421202", "421221", "421222", "421223", "421224",
"421281", "421300", "421301", "421303", "421321", "421381", "422800",
"422801", "422802", "422822", "422823", "422825", "422826", "422827",
"422828", "429000", "429004", "429005", "429006", "429021", "430000",
"430100", "430101", "430102", "430103", "430104", "430105", "430111",
"430112", "430121", "430124", "430181", "430200", "430201", "430202",
"430203", "430204", "430211", "430221", "430223", "430224", "430225",
"430281", "430300", "430301", "430302", "430304", "430321", "430381",
"430382", "430400", "430401", "430405", "430406", "430407", "430408",
"430412", "430421", "430422", "430423", "430424", "430426", "430481",
"430482", "430500", "430501", "430502", "430503", "430511", "430521",
"430522", "430523", "430524", "430525", "430527", "430528", "430529",
"430581", "430600", "430601", "430602", "430603", "430611", "430621",
"430623", "430624", "430626", "430681", "430682", "430700", "430701",
"430702", "430703", "430721", "430722", "430723", "430724", "430725",
"430726", "430781", "430800", "430801", "430802", "430811", "430821",
"430822", "430900", "430901", "430902", "430903", "430921", "430922",
"430923", "430981", "431000", "431001", "431002", "431003", "431021",
"431022", "431023", "431024", "431025", "431026", "431027", "431028",
"431081", "431100", "431101", "431102", "431103", "431121", "431122",
"431123", "431124", "431125", "431126", "431127", "431128", "431129",
"431200", "431201", "431202", "431221", "431222", "431223", "431224",
"431225", "431226", "431227", "431228", "431229", "431230", "431281",
"431300", "431301", "431302", "431321", "431322", "431381", "431382",
"433100", "433101", "433122", "433123", "433124", "433125", "433126",
"433127", "433130", "440000", "440100", "440101", "440103", "440104",
"440105", "440106", "440111", "440112", "440113", "440114", "440115",
"440116", "440117", "440118", "440200", "440201", "440203", "440204",
"440205", "440222", "440224", "440229", "440232", "440233", "440281",
"440282", "440300", "440301", "440303", "440304", "440305", "440306",
"440307", "440308", "440400", "440401", "440402", "440403", "440404",
"440500", "440501", "440507", "440511", "440512", "440513", "440514",
"440515", "440523", "440600", "440601", "440604", "440605", "440606",
"440607", "440608", "440700", "440701", "440703", "440704", "440705",
"440781", "440783", "440784", "440785", "440800", "440801", "440802",
"440803", "440804", "440811", "440823", "440825", "440881", "440882",
"440883", "440900", "440901", "440902", "440904", "440981", "440982",
"440983", "441200", "441201", "441202", "441203", "441223", "441224",
"441225", "441226", "441283", "441284", "441300", "441301", "441302",
"441303", "441322", "441323", "441324", "441400", "441401", "441402",
"441403", "441422", "441423", "441424", "441426", "441427", "441481",
"441500", "441501", "441502", "441521", "441523", "441581", "441600",
"441601", "441602", "441621", "441622", "441623", "441624", "441625",
"441700", "441701", "441702", "441721", "441723", "441781", "441800",
"441801", "441802", "441803", "441821", "441823", "441825", "441826",
"441881", "441882", "441900", "442000", "445100", "445101", "445102",
"445103", "445122", "445200", "445201", "445202", "445203", "445222",
"445224", "445281", "445300", "445301", "445302", "445303", "445321",
"445322", "445381", "450000", "450100", "450101", "450102", "450103",
"450105", "450107", "450108", "450109", "450122", "450123", "450124",
"450125", "450126", "450127", "450200", "450201", "450202", "450203",
"450204", "450205", "450221", "450222", "450223", "450224", "450225",
"450226", "450300", "450301", "450302", "450303", "450304", "450305",
"450311", "450312", "450321", "450323", "450324", "450325", "450326",
"450327", "450328", "450329", "450330", "450331", "450332", "450400",
"450401", "450403", "450405", "450406", "450421", "450422", "450423",
"450481", "450500", "450501", "450502", "450503", "450512", "450521",
"450600", "450601", "450602", "450603", "450621", "450681", "450700",
"450701", "450702", "450703", "450721", "450722", "450800", "450801",
"450802", "450803", "450804", "450821", "450881", "450900", "450901",
"450902", "450903", "450921", "450922", "450923", "450924", "450981",
"451000", "451001", "451002", "451021", "451022", "451023", "451024",
"451025", "451026", "451027", "451028", "451029", "451030", "451031",
"451100", "451101", "451102", "451121", "451122", "451123", "451200",
"451201", "451202", "451221", "451222", "451223", "451224", "451225",
"451226", "451227", "451228", "451229", "451281", "451300", "451301",
"451302", "451321", "451322", "451323", "451324", "451381", "451400",
"451401", "451402", "451421", "451422", "451423", "451424", "451425",
"451481", "460000", "460100", "460101", "460105", "460106", "460107",
"460108", "460200", "460201", "460202", "460203", "460204", "460205",
"460300", "469000", "469001", "469002", "469003", "469005", "469006",
"469007", "469021", "469022", "469023", "469024", "469025", "469026",
"469027", "469028", "469029", "469030", "500000", "500100", "500101",
"500102", "500103", "500104", "500105", "500106", "500107", "500108",
"500109", "500110", "500111", "500112", "500113", "500114", "500115",
"500116", "500117", "500118", "500119", "500120", "500151", "500200",
"500223", "500226", "500228", "500229", "500230", "500231", "500232",
"500233", "500234", "500235", "500236", "500237", "500238", "500240",
"500241", "500242", "500243", "510000", "510100", "510101", "510104",
"510105", "510106", "510107", "510108", "510112", "510113", "510114",
"510115", "510121", "510122", "510124", "510129", "510131", "510132",
"510181", "510182", "510183", "510184", "510300", "510301", "510302",
"510303", "510304", "510311", "510321", "510322", "510400", "510401",
"510402", "510403", "510411", "510421", "510422", "510500", "510501",
"510502", "510503", "510504", "510521", "510522", "510524", "510525",
"510600", "510601", "510603", "510623", "510626", "510681", "510682",
"510683", "510700", "510701", "510703", "510704", "510722", "510723",
"510724", "510725", "510726", "510727", "510781", "510800", "510801",
"510802", "510811", "510812", "510821", "510822", "510823", "510824",
"510900", "510901", "510903", "510904", "510921", "510922", "510923",
"511000", "511001", "511002", "511011", "511024", "511025", "511028",
"511100", "511101", "511102", "511111", "511112", "511113", "511123",
"511124", "511126", "511129", "511132", "511133", "511181", "511300",
"511301", "511302", "511303", "511304", "511321", "511322", "511323",
"511324", "511325", "511381", "511400", "511401", "511402", "511421",
"511422", "511423", "511424", "511425", "511500", "511501", "511502",
"511503", "511521", "511523", "511524", "511525", "511526", "511527",
"511528", "511529", "511600", "511601", "511602", "511603", "511621",
"511622", "511623", "511681", "511700", "511701", "511702", "511703",
"511722", "511723", "511724", "511725", "511781", "511800", "511801",
"511802", "511803", "511822", "511823", "511824", "511825", "511826",
"511827", "511900", "511901", "511902", "511903", "511921", "511922",
"511923", "512000", "512001", "512002", "512021", "512022", "512081",
"513200", "513221", "513222", "513223", "513224", "513225", "513226",
"513227", "513228", "513229", "513230", "513231", "513232", "513233",
"513300", "513321", "513322", "513323", "513324", "513325", "513326",
"513327", "513328", "513329", "513330", "513331", "513332", "513333",
"513334", "513335", "513336", "513337", "513338", "513400", "513401",
"513422", "513423", "513424", "513425", "513426", "513427", "513428",
"513429", "513430", "513431", "513432", "513433", "513434", "513435",
"513436", "513437", "520000", "520100", "520101", "520102", "520103",
"520111", "520112", "520113", "520115", "520121", "520122", "520123",
"520181", "520200", "520201", "520203", "520221", "520222", "520300",
"520301", "520302", "520303", "520321", "520322", "520323", "520324",
"520325", "520326", "520327", "520328", "520329", "520330", "520381",
"520382", "520400", "520401", "520402", "520421", "520422", "520423",
"520424", "520425", "520500", "520501", "520502", "520521", "520522",
"520523", "520524", "520525", "520526", "520527", "520600", "520601",
"520602", "520603", "520621", "520622", "520623", "520624", "520625",
"520626", "520627", "520628", "522300", "522301", "522322", "522323",
"522324", "522325", "522326", "522327", "522328", "522600", "522601",
"522622", "522623", "522624", "522625", "522626", "522627", "522628",
"522629", "522630", "522631", "522632", "522633", "522634", "522635",
"522636", "522700", "522701", "522702", "522722", "522723", "522725",
"522726", "522727", "522728", "522729", "522730", "522731", "522732",
"530000", "530100", "530101", "530102", "530103", "530111", "530112",
"530113", "530114", "530122", "530124", "530125", "530126", "530127",
"530128", "530129", "530181", "530300", "530301", "530302", "530321",
"530322", "530323", "530324", "530325", "530326", "530328", "530381",
"530400", "530401", "530402", "530421", "530422", "530423", "530424",
"530425", "530426", "530427", "530428", "530500", "530501", "530502",
"530521", "530522", "530523", "530524", "530600", "530601", "530602",
"530621", "530622", "530623", "530624", "530625", "530626", "530627",
"530628", "530629", "530630", "530700", "530701", "530702", "530721",
"530722", "530723", "530724", "530800", "530801", "530802", "530821",
"530822", "530823", "530824", "530825", "530826", "530827", "530828",
"530829", "530900", "530901", "530902", "530921", "530922", "530923",
"530924", "530925", "530926", "530927", "532300", "532301", "532322",
"532323", "532324", "532325", "532326", "532327", "532328", "532329",
"532331", "532500", "532501", "532502", "532503", "532504", "532523",
"532524", "532525", "532527", "532528", "532529", "532530", "532531",
"532532", "532600", "532601", "532622", "532623", "532624", "532625",
"532626", "532627", "532628", "532800", "532801", "532822", "532823",
"532900", "532901", "532922", "532923", "532924", "532925", "532926",
"532927", "532928", "532929", "532930", "532931", "532932", "533100",
"533102", "533103", "533122", "533123", "533124", "533300", "533321",
"533323", "533324", "533325", "533400", "533421", "533422", "533423",
"540000", "540100", "540101", "540102", "540121", "540122", "540123",
"540124", "540125", "540126", "540127", "540200", "540202", "540221",
"540222", "540223", "540224", "540225", "540226", "540227", "540228",
"540229", "540230", "540231", "540232", "540233", "540234", "540235",
"540236", "540237", "542100", "542121", "542122", "542123", "542124",
"542125", "542126", "542127", "542128", "542129", "542132", "542133",
"542200", "542221", "542222", "542223", "542224", "542225", "542226",
"542227", "542228", "542229", "542231", "542232", "542233", "542400",
"542421", "542422", "542423", "542424", "542425", "542426", "542427",
"542428", "542429", "542430", "542431", "542500", "542521", "542522",
"542523", "542524", "542525", "542526", "542527", "542600", "542621",
"542622", "542623", "542624", "542625", "542626", "542627", "610000",
"610100", "610101", "610102", "610103", "610104", "610111", "610112",
"610113", "610114", "610115", "610116", "610122", "610124", "610125",
"610126", "610200", "610201", "610202", "610203", "610204", "610222",
"610300", "610301", "610302", "610303", "610304", "610322", "610323",
"610324", "610326", "610327", "610328", "610329", "610330", "610331",
"610400", "610401", "610402", "610403", "610404", "610422", "610423",
"610424", "610425", "610426", "610427", "610428", "610429", "610430",
"610431", "610481", "610500", "610501", "610502", "610521", "610522",
"610523", "610524", "610525", "610526", "610527", "610528", "610581",
"610582", "610600", "610601", "610602", "610621", "610622", "610623",
"610624", "610625", "610626", "610627", "610628", "610629", "610630",
"610631", "610632", "610700", "610701", "610702", "610721", "610722",
"610723", "610724", "610725", "610726", "610727", "610728", "610729",
"610730", "610800", "610801", "610802", "610821", "610822", "610823",
"610824", "610825", "610826", "610827", "610828", "610829", "610830",
"610831", "610900", "610901", "610902", "610921", "610922", "610923",
"610924", "610925", "610926", "610927", "610928", "610929", "611000",
"611001", "611002", "611021", "611022", "611023", "611024", "611025",
"611026", "620000", "620100", "620101", "620102", "620103", "620104",
"620105", "620111", "620121", "620122", "620123", "620200", "620201",
"620300", "620301", "620302", "620321", "620400", "620401", "620402",
"620403", "620421", "620422", "620423", "620500", "620501", "620502",
"620503", "620521", "620522", "620523", "620524", "620525", "620600",
"620601", "620602", "620621", "620622", "620623", "620700", "620701",
"620702", "620721", "620722", "620723", "620724", "620725", "620800",
"620801", "620802", "620821", "620822", "620823", "620824", "620825",
"620826", "620900", "620901", "620902", "620921", "620922", "620923",
"620924", "620981", "620982", "621000", "621001", "621002", "621021",
"621022", "621023", "621024", "621025", "621026", "621027", "621100",
"621101", "621102", "621121", "621122", "621123", "621124", "621125",
"621126", "621200", "621201", "621202", "621221", "621222", "621223",
"621224", "621225", "621226", "621227", "621228", "622900", "622901",
"622921", "622922", "622923", "622924", "622925", "622926", "622927",
"623000", "623001", "623021", "623022", "623023", "623024", "623025",
"623026", "623027", "630000", "630100", "630101", "630102", "630103",
"630104", "630105", "630121", "630122", "630123", "630200", "630202",
"630221", "630222", "630223", "630224", "630225", "632200", "632221",
"632222", "632223", "632224", "632300", "632321", "632322", "632323",
"632324", "632500", "632521", "632522", "632523", "632524", "632525",
"632600", "632621", "632622", "632623", "632624", "632625", "632626",
"632700", "632701", "632722", "632723", "632724", "632725", "632726",
"632800", "632801", "632802", "632821", "632822", "632823", "640000",
"640100", "640101", "640104", "640105", "640106", "640121", "640122",
"640181", "640200", "640201", "640202", "640205", "640221", "640300",
"640301", "640302", "640303", "640323", "640324", "640381", "640400",
"640401", "640402", "640422", "640423", "640424", "640425", "640500",
"640501", "640502", "640521", "640522", "650000", "650100", "650101",
"650102", "650103", "650104", "650105", "650106", "650107", "650109",
"650121", "650200", "650201", "650202", "650203", "650204", "650205",
"652100", "652101", "652122", "652123", "652200", "652201", "652222",
"652223", "652300", "652301", "652302", "652323", "652324", "652325",
"652327", "652328", "652700", "652701", "652702", "652722", "652723",
"652800", "652801", "652822", "652823", "652824", "652825", "652826",
"652827", "652828", "652829", "652900", "652901", "652922", "652923",
"652924", "652925", "652926", "652927", "652928", "652929", "653000",
"653001", "653022", "653023", "653024", "653100", "653101", "653121",
"653122", "653123", "653124", "653125", "653126", "653127", "653128",
"653129", "653130", "653131", "653200", "653201", "653221", "653222",
"653223", "653224", "653225", "653226", "653227", "654000", "654002",
"654003", "654021", "654022", "654023", "654024", "654025", "654026",
"654027", "654028", "654200", "654201", "654202", "654221", "654223",
"654224", "654225", "654226", "654300", "654301", "654321", "654322",
"654323", "654324", "654325", "654326", "659000", "659001", "659002",
"659003", "659004", "710000", "810000", "820000",
]
def ssn(self, min_age=18, max_age=90):
def checksum(s):
return str((1 - 2 * int(s, 13)) % 11).replace('10', 'X')
age = datetime.timedelta(days=self.random_int(
min_age * 365, max_age * 365))
birthday = datetime.date.today() - age
birthday_str = birthday.strftime('%Y%m%d')
ssn_without_checksum = self.numerify(
self.random_element(self.area_codes) + birthday_str + "###")
return ssn_without_checksum + checksum(ssn_without_checksum)
|
|
from os import path
from warnings import warn
try:
import ujson as json
except ImportError:
import json
from .tokenizer import Tokenizer
from .vocab import Vocab
from .syntax.parser import Parser
from .tagger import Tagger
from .matcher import Matcher
from .serialize.packer import Packer
from ._ml import Model
from . import attrs
from . import orth
from .syntax.ner import BiluoPushDown
from .syntax.arc_eager import ArcEager
from .attrs import TAG, DEP, ENT_IOB, ENT_TYPE, HEAD
class Language(object):
    """A text-processing pipeline: tokenizer, tagger, matcher, parser and
    entity recognizer sharing a single Vocab.

    The static methods below compute lexical attributes for strings not in
    the vocabulary; language-specific subclasses override them as needed.
    Calling an instance on a unicode string runs the whole pipeline and
    returns the processed tokens.
    """
    # --- per-string lexical attribute getters ------------------------------
    @staticmethod
    def lower(string):
        return string.lower()
    @staticmethod
    def norm(string):
        return string
    @staticmethod
    def shape(string):
        return orth.word_shape(string)
    @staticmethod
    def prefix(string):
        return string[0]
    @staticmethod
    def suffix(string):
        return string[-3:]
    @staticmethod
    def prob(string):
        # default log-probability for out-of-vocabulary words
        return -30
    @staticmethod
    def cluster(string):
        return 0
    @staticmethod
    def is_alpha(string):
        return orth.is_alpha(string)
    @staticmethod
    def is_ascii(string):
        return orth.is_ascii(string)
    @staticmethod
    def is_digit(string):
        return string.isdigit()
    @staticmethod
    def is_lower(string):
        return orth.is_lower(string)
    @staticmethod
    def is_punct(string):
        return orth.is_punct(string)
    @staticmethod
    def is_space(string):
        return string.isspace()
    @staticmethod
    def is_title(string):
        return orth.is_title(string)
    @staticmethod
    def is_upper(string):
        return orth.is_upper(string)
    @staticmethod
    def like_url(string):
        return orth.like_url(string)
    @staticmethod
    def like_number(string):
        return orth.like_number(string)
    @staticmethod
    def like_email(string):
        return orth.like_email(string)
    @staticmethod
    def is_stop(string):
        return 0
    @classmethod
    def default_lex_attrs(cls, data_dir=None):
        """Map attribute IDs to getter functions for out-of-vocabulary lexemes.

        NOTE(review): data_dir is accepted but unused, and PROB is a constant
        -10.0 here rather than cls.prob (-30) — kept as-is for compatibility.
        """
        return {
            attrs.LOWER: cls.lower,
            attrs.NORM: cls.norm,
            attrs.SHAPE: cls.shape,
            attrs.PREFIX: cls.prefix,
            attrs.SUFFIX: cls.suffix,
            attrs.CLUSTER: cls.cluster,
            attrs.PROB: lambda string: -10.0,
            attrs.IS_ALPHA: cls.is_alpha,
            attrs.IS_ASCII: cls.is_ascii,
            attrs.IS_DIGIT: cls.is_digit,
            attrs.IS_LOWER: cls.is_lower,
            attrs.IS_PUNCT: cls.is_punct,
            attrs.IS_SPACE: cls.is_space,
            attrs.IS_TITLE: cls.is_title,
            attrs.IS_UPPER: cls.is_upper,
            attrs.LIKE_URL: cls.like_url,
            attrs.LIKE_NUM: cls.like_number,
            attrs.LIKE_EMAIL: cls.like_email,
            attrs.IS_STOP: cls.is_stop,
            attrs.IS_OOV: lambda string: True
        }
    @classmethod
    def default_dep_labels(cls):
        """Dependency labels available per transition-action id."""
        return {0: {'ROOT': True}}
    @classmethod
    def default_ner_labels(cls):
        """Named-entity labels available per transition-action id."""
        return {0: {'PER': True, 'LOC': True, 'ORG': True, 'MISC': True}}
    @classmethod
    def default_data_dir(cls):
        return path.join(path.dirname(__file__), 'data')
    # --- component factories; each returns None if its data dir is absent ---
    @classmethod
    def default_vocab(cls, data_dir=None, get_lex_attr=None):
        if data_dir is None:
            data_dir = cls.default_data_dir()
        if get_lex_attr is None:
            get_lex_attr = cls.default_lex_attrs(data_dir)
        return Vocab.from_dir(
            path.join(data_dir, 'vocab'),
            get_lex_attr=get_lex_attr)
    @classmethod
    def default_tokenizer(cls, vocab, data_dir):
        if path.exists(data_dir):
            return Tokenizer.from_dir(vocab, data_dir)
        else:
            # fall back to a bare tokenizer with no special-case rules
            return Tokenizer(vocab, {}, None, None, None)
    @classmethod
    def default_tagger(cls, vocab, data_dir):
        if path.exists(data_dir):
            return Tagger.from_dir(data_dir, vocab)
        else:
            return None
    @classmethod
    def default_parser(cls, vocab, data_dir):
        if path.exists(data_dir):
            return Parser.from_dir(data_dir, vocab.strings, ArcEager)
        else:
            return None
    @classmethod
    def default_entity(cls, vocab, data_dir):
        if path.exists(data_dir):
            return Parser.from_dir(data_dir, vocab.strings, BiluoPushDown)
        else:
            return None
    @classmethod
    def default_matcher(cls, vocab, data_dir):
        if path.exists(data_dir):
            return Matcher.from_dir(data_dir, vocab)
        else:
            return None
    def __init__(self, data_dir=None, vocab=None, tokenizer=None, tagger=None,
                 parser=None, entity=None, matcher=None, serializer=None,
                 load_vectors=True):
        """Assemble the pipeline, loading any component given as None (or
        True) from the default data-directory layout.

        serializer is accepted for backward compatibility and is unused here.
        """
        if load_vectors is not True:
            warn("load_vectors is deprecated", DeprecationWarning)
        if data_dir in (None, True):
            data_dir = self.default_data_dir()
        if vocab in (None, True):
            vocab = self.default_vocab(data_dir)
        if tokenizer in (None, True):
            tokenizer = self.default_tokenizer(vocab, data_dir=path.join(data_dir, 'tokenizer'))
        if tagger in (None, True):
            tagger = self.default_tagger(vocab, data_dir=path.join(data_dir, 'pos'))
        if entity in (None, True):
            entity = self.default_entity(vocab, data_dir=path.join(data_dir, 'ner'))
        if parser in (None, True):
            parser = self.default_parser(vocab, data_dir=path.join(data_dir, 'deps'))
        if matcher in (None, True):
            matcher = self.default_matcher(vocab, data_dir=data_dir)
        # bug fix: remember the resolved data_dir.  end_training() reads
        # self.data_dir when called without an argument, which previously
        # raised AttributeError because it was never stored.
        self.data_dir = data_dir
        self.vocab = vocab
        self.tokenizer = tokenizer
        self.tagger = tagger
        self.parser = parser
        self.entity = entity
        self.matcher = matcher
    def __call__(self, text, tag=True, parse=True, entity=True):
        """Apply the pipeline to some text. The text can span multiple sentences,
        and can contain arbitrary whitespace. Alignment into the original string
        is preserved.
        Args:
            text (unicode): The text to be processed.
        Returns:
            tokens (spacy.tokens.Doc):
        >>> from spacy.en import English
        >>> nlp = English()
        >>> tokens = nlp('An example sentence. Another example sentence.')
        >>> tokens[0].orth_, tokens[0].head.tag_
        ('An', 'NN')
        """
        tokens = self.tokenizer(text)
        if self.tagger and tag:
            self.tagger(tokens)
        if self.matcher and entity:
            self.matcher(tokens)
        if self.parser and parse:
            self.parser(tokens)
        if self.entity and entity:
            self.entity(tokens)
        return tokens
    def end_training(self, data_dir=None):
        """Finalize training: write model weights, the string table and the
        per-attribute frequency tables under data_dir (defaults to the
        directory given at construction)."""
        if data_dir is None:
            data_dir = self.data_dir
        self.parser.model.end_training(path.join(data_dir, 'deps', 'model'))
        self.entity.model.end_training(path.join(data_dir, 'ner', 'model'))
        self.tagger.model.end_training(path.join(data_dir, 'pos', 'model'))
        self.vocab.strings.dump(path.join(data_dir, 'vocab', 'strings.txt'))
        with open(path.join(data_dir, 'vocab', 'serializer.json'), 'w') as file_:
            file_.write(
                json.dumps([
                    (TAG, list(self.tagger.freqs[TAG].items())),
                    (DEP, list(self.parser.moves.freqs[DEP].items())),
                    (ENT_IOB, list(self.entity.moves.freqs[ENT_IOB].items())),
                    (ENT_TYPE, list(self.entity.moves.freqs[ENT_TYPE].items())),
                    (HEAD, list(self.parser.moves.freqs[HEAD].items()))]))
|
|
from __future__ import unicode_literals
"""
Format String 2D array
2d array for compositing term-formatted strings
-autoexpanding vertically
-interesting get_item behavior (renders fmtstrs)
-caching behavior eventually
>>> a = FSArray(10, 14)
>>> a.shape
(10, 14)
>>> a[1] = 'i'
>>> a[3:4, :] = ['i' * 14]
>>> a[16:17, :] = ['j' * 14]
>>> a.shape, a[16, 0]
((17, 14), ['j'])
>>> a[200, 1] = ['i']
>>> a[200, 1]
['i']
"""
import sys
import logging
import unittest
from .formatstring import fmtstr
from .formatstring import normalize_slice
from .formatstring import FmtStr
logger = logging.getLogger(__name__)
#TODO check that strings used in arrays don't have tabs or spaces in them!
def slicesize(s):
    """Number of indices described by slice *s* (start and stop must be set)."""
    step = s.step if s.step else 1
    span = s.stop - s.start
    return int(span / step)
def fsarray(strings, *args, **kwargs):
    """fsarray(list_of_FmtStrs_or_strings, width=None) -> FSArray

    Build an FSArray whose height is len(strings) and whose width is either
    the explicit ``width`` keyword or the length of the longest string.
    When a width is given, raise ValueError if any string exceeds it."""
    strings = list(strings)
    if 'width' in kwargs:
        width = kwargs.pop('width')
        if strings and max(len(s) for s in strings) > width:
            raise ValueError("Those strings won't fit for width %d" % width)
    else:
        width = max(len(s) for s in strings) if strings else 0
    fstrings = [s if isinstance(s, FmtStr) else fmtstr(s, *args, **kwargs)
                for s in strings]
    arr = FSArray(len(fstrings), width, *args, **kwargs)
    arr.rows = [fs.setslice_with_length(0, len(s), s, width)
                for fs, s in zip(arr.rows, fstrings)]
    return arr
class FSArray(object):
    """A 2D array of colored text.
    Internally represented by a list of FmtStrs of identical size."""
    #TODO add constructor that takes fmtstrs instead of dims
    def __init__(self, num_rows, num_columns, *args, **kwargs):
        # extra args/kwargs are remembered so rows auto-created later (see
        # __setitem__'s vertical auto-expansion) get the same formatting
        self.saved_args, self.saved_kwargs = args, kwargs
        self.rows = [fmtstr('', *args, **kwargs) for _ in range(num_rows)]
        self.num_columns = num_columns
    def __getitem__(self, slicetuple):
        """Index by int (one row), slice (several rows) or a
        (rowslice, colslice) pair (rectangular region)."""
        if isinstance(slicetuple, int):
            if slicetuple < 0:
                # bug fix: a negative index is offset by *adding* the length;
                # the original subtracted, so every negative index raised.
                slicetuple = len(self.rows) + slicetuple
            if slicetuple < 0 or slicetuple >= len(self.rows):
                raise IndexError('out of bounds')
            return self.rows[slicetuple]
        if isinstance(slicetuple, slice):
            rowslice = normalize_slice(len(self.rows), slicetuple)
            return self.rows[rowslice]
        rowslice, colslice = slicetuple
        rowslice = normalize_slice(len(self.rows), rowslice)
        colslice = normalize_slice(self.num_columns, colslice)
        #TODO clean up slices
        return [fs[colslice] for fs in self.rows[rowslice]]
    def __len__(self):
        return len(self.rows)
    @property
    def shape(self):
        """numpy-style shape tuple: (number of rows, number of columns)"""
        return len(self.rows), self.num_columns
    height = property(lambda self: len(self.rows), None, None, """The number of rows""")
    width = property(lambda self: self.num_columns, None, None, """The number of columns""")
    def __setitem__(self, slicetuple, value):
        """Place a FSArray in a FSArray"""
        logger.debug('slice: %r', slicetuple)
        if isinstance(slicetuple, slice):
            rowslice, colslice = slicetuple, slice(None)
            # bug fix: the py2-only name `unicode` raised NameError on
            # Python 3; select the right string types per version so the
            # intended ValueError is raised instead.
            str_types = (bytes, str) if sys.version_info[0] == 3 else (bytes, unicode)
            if isinstance(value, str_types):
                raise ValueError('if slice is 2D, value must be 2D')
        elif isinstance(slicetuple, int):
            # normalize_slice is used purely for bounds validation here;
            # its result is discarded
            normalize_slice(self.height, slicetuple)
            self.rows[slicetuple] = value
            return
        else:
            rowslice, colslice = slicetuple
        # temp shim to allow numpy arrays as values
        if value.__class__.__name__ == 'ndarray':
            value = [fmtstr(''.join(line)) for line in value]
        rowslice = normalize_slice(sys.maxsize, rowslice)
        # auto-expand vertically when the target rows don't exist yet
        additional_rows = max(0, rowslice.stop - len(self.rows))
        self.rows.extend([fmtstr('', *self.saved_args, **self.saved_kwargs)
                          for _ in range(additional_rows)])
        logger.debug('num columns: %r', self.num_columns)
        logger.debug('colslice: %r', colslice)
        colslice = normalize_slice(self.num_columns, colslice)
        if slicesize(colslice) == 0 or slicesize(rowslice) == 0:
            return
        if slicesize(rowslice) != len(value):
            raise ValueError('row dimensions do not match: %r, %r' % (len(value), rowslice))
        self.rows = (self.rows[:rowslice.start] +
                     [fs.setslice_with_length(colslice.start, colslice.stop, v, self.num_columns) for fs, v in zip(self.rows[rowslice], value)] +
                     self.rows[rowslice.stop:])
    def dumb_display(self):
        """Prints each row followed by a newline without regard for the terminal window size"""
        for line in self.rows:
            print(line)
    @classmethod
    def diff(cls, a, b, ignore_formatting=False):
        """Return a string rendering a and b side by side with differing
        characters blinking and underlined (despite the historical
        docstring, a single string is returned, not two FSArrays)."""
        def underline(x): return u'\x1b[4m%s\x1b[0m' % (x,)
        def blink(x): return u'\x1b[5m%s\x1b[0m' % (x,)
        a_rows = []
        b_rows = []
        max_width = max([len(row) for row in a] + [len(row) for row in b])
        a_lengths = []
        b_lengths = []
        for a_row, b_row in zip(a, b):
            a_lengths.append(len(a_row))
            b_lengths.append(len(b_row))
            # pad the shorter row with backticks so differences line up
            extra_a = u'`' * (max_width - len(a_row))
            extra_b = u'`' * (max_width - len(b_row))
            a_line = u''
            b_line = u''
            for a_char, b_char in zip(a_row + extra_a, b_row + extra_b):
                if ignore_formatting:
                    a_char_for_eval = a_char.s if isinstance(a_char, FmtStr) else a_char
                    b_char_for_eval = b_char.s if isinstance(b_char, FmtStr) else b_char
                else:
                    a_char_for_eval = a_char
                    b_char_for_eval = b_char
                if a_char_for_eval == b_char_for_eval:
                    a_line += actualize(a_char)
                    b_line += actualize(b_char)
                else:
                    a_line += underline(blink(actualize(a_char)))
                    b_line += underline(blink(actualize(b_char)))
            a_rows.append(a_line)
            b_rows.append(b_line)
        hdiff = '\n'.join(a_line + u' %3d | %3d ' % (a_len, b_len) + b_line for a_line, b_line, a_len, b_len in zip(a_rows, b_rows, a_lengths, b_lengths))
        return hdiff
# Render helper: str on Python 3; on Python 2 fall back to unicode.
if sys.version_info[0] == 3:
    actualize = str
else:
    actualize = unicode
def simple_format(x):
    """Render x (an FSArray or any iterable of rows) as plain newline-joined text."""
    rendered = [actualize(line) for line in x]
    return '\n'.join(rendered)
class FormatStringTest(unittest.TestCase):
    """Mixin providing FSArray-comparison assertions for test cases."""
    def assertFSArraysEqual(self, a, b):
        """Assert a and b are FSArrays of identical shape, rows and formatting."""
        self.assertEqual(type(a), FSArray)
        self.assertEqual(type(b), FSArray)
        # bug fix: the original compared (a.width, b.height) against the very
        # same tuple, so mismatched dimensions were never detected; compare
        # the two arrays' shapes instead.
        self.assertEqual(a.shape, b.shape, 'fsarray dimensions do not match: %s %s' % (a.shape, b.shape))
        for i, (a_row, b_row) in enumerate(zip(a, b)):
            self.assertEqual(a_row, b_row, 'FSArrays differ first on line %s:\n%s' % (i, FSArray.diff(a, b)))
    def assertFSArraysEqualIgnoringFormatting(self, a, b):
        """Also accepts arrays of strings"""
        self.assertEqual(len(a), len(b), 'fsarray heights do not match: %s %s \n%s \n%s' % (len(a), len(b), simple_format(a), simple_format(b)))
        for i, (a_row, b_row) in enumerate(zip(a, b)):
            # unwrap FmtStrs so only the raw text is compared
            a_row = a_row.s if isinstance(a_row, FmtStr) else a_row
            b_row = b_row.s if isinstance(b_row, FmtStr) else b_row
            self.assertEqual(a_row, b_row, 'FSArrays differ first on line %s:\n%s' % (i, FSArray.diff(a, b, ignore_formatting=True)))
if __name__ == '__main__':
    # Demo 1: composite two formatted strings into a 3x14 blue-background array.
    a = FSArray(3, 14, bg='blue')
    a[0:2, 5:11] = fmtstr("hey", 'on_blue') + ' ' + fmtstr('yo', 'on_red'), fmtstr('qwe qw')
    a.dumb_display()
    # Demo 2: build an FSArray from plain strings and display it.
    a = fsarray(['hey', 'there'], bg='cyan')
    a.dumb_display()
    # Demo 3: print a character-level diff of two nearly-identical arrays.
    print(FSArray.diff(a, fsarray(['hey', 'there ']), ignore_formatting=True))
|
|
""" Given an xform definition, storageutility generates dynamic data tables.
    Given an xml instance, storageutility populates the data tables.
Basically, storageutility abstracts away all interaction with the database,
and it only knows about the data structures in xformdef.py
"""
import re
import os
import sys
import logging
import settings
import string
from datetime import datetime, timedelta
from lxml import etree
from MySQLdb import IntegrityError
from django.db import connection, transaction, DatabaseError
from xformmanager.models import ElementDefModel, FormDefModel, Metadata
from xformmanager.util import *
from xformmanager.xformdef import FormDef
from xformmanager.xmlrouter import process
from receiver.models import SubmissionHandlingOccurrence, SubmissionHandlingType
from stat import S_ISREG, ST_MODE
_MAX_FIELD_NAME_LENTH = 64
class StorageUtility(object):
    """ This class handles everything that touches the database - both form and instance data."""
    # should pull this out into a rsc file...
    def __init__(self):
        # our own, transient data structure
        self.formdef = ''
        # the persistent django model of this form
        self.formdefmodel = None
    @transaction.commit_on_success
    def add_schema(self, formdef):
        """Register a new xform schema: create its django models and its
        dynamic data tables.  Returns the new FormDefModel."""
        formdef.force_to_valid()
        formdefmodel = FormDefModel.create_models(formdef)
        self.formdefmodel = formdefmodel
        self.formdef = self._strip_meta_def( formdef )
        queries = XFormDBTableCreator( self.formdef, self.formdefmodel ).create()
        self._execute_queries(queries)
        return formdefmodel
    @transaction.commit_on_success
    def save_form_data_matching_formdef(self, data_stream_pointer, formdef, formdefmodel, attachment):
        """ returns True on success """
        logging.debug("StorageProvider: saving form data")
        data_tree = self._get_data_tree_from_stream(data_stream_pointer)
        self.formdef = formdef
        populator = XFormDBTablePopulator( formdef )
        queries = populator.populate( data_tree )
        if not queries:
            # we cannot put this check queries_to_populate (which is recursive)
            # since this is only an error on the top node
            raise TypeError("save_form_data called with empty form data")
        if not populator.errors.is_empty():
            if len(populator.errors.missing)>0:
                # this is quite common. esp. for metadata fields
                logging.info( "XForm instance is missing fields %s" % \
                              populator.errors.str('Missing') )
            elif len(populator.errors.bad_type)>0:
                raise populator.errors
            # TODO - add handlers for errors.duplicate and errors.extra
            # once those are implemented
        new_rawdata_id = queries.execute_insert()
        metadata_model = self._create_metadata(data_tree, formdefmodel, attachment, new_rawdata_id)
        # rl - seems like a strange place to put this message...
        # respond with the number of submissions they have
        # made today.
        startdate = datetime.now().date()
        enddate = startdate + timedelta(days=1)
        message = metadata_model.get_submission_count(startdate, enddate)
        self._add_handled(metadata_model.attachment, method="instance_data", message=message)
        return True
    def _get_data_tree_from_stream(self, stream):
        """Parse an xml stream and return its root element."""
        tree=etree.parse(stream)
        return tree.getroot()
    def _get_data_tree_from_file(self, file_name):
        """Parse an xml file and return its root element."""
        fin = open(file_name, 'r')
        root = self._get_data_tree_from_stream(fin)
        fin.close()
        return root
    def _create_metadata(self, data_tree, formdefmodel, attachment, rawdata_id):
        """Build and save the Metadata row linking this submission's raw
        data, attachment and form definition."""
        metadata_model = Metadata()
        version = case_insensitive_attribute(data_tree, "version")
        if version and version.strip().isdigit():
            metadata_model.version = version.strip()
        uiversion = case_insensitive_attribute(data_tree, "uiversion")
        if uiversion and uiversion.strip().isdigit():
            metadata_model.uiversion = uiversion.strip()
        metadata_model.init( data_tree, self.formdef.target_namespace )
        metadata_model.formdefmodel = formdefmodel
        metadata_model.attachment = attachment
        metadata_model.raw_data = rawdata_id
        metadata_model.save(self.formdef.target_namespace)
        return metadata_model
    def save_form_data(self, xml_file_name, attachment):
        """ returns True on success and false on fail """
        f = open(xml_file_name, "r")
        # should match XMLNS
        xmlns, version = self.get_xmlns_from_instance(f)
        # If there is a special way to route this form, based on the xmlns
        # then do so here.
        # czue: this is probably not the most appropriate place for this logic
        # but it keeps us from having to parse the xml multiple times.
        process(attachment, xmlns, version)
        try:
            formdefmodel = FormDefModel.objects.get(target_namespace=xmlns, version=version)
        except FormDefModel.DoesNotExist:
            raise self.XFormError("XMLNS %s could not be matched to any registered formdefmodel." % xmlns)
        if formdefmodel.xsd_file_location is None:
            # bug fix: formdefmodel is a single model instance, not a queryset;
            # the original formdefmodel[0].id raised TypeError in this path.
            raise self.XFormError("Schema for form %s could not be found on the file system." % formdefmodel.id)
        formdef = self.get_formdef_from_schema_file(formdefmodel.xsd_file_location)
        f.seek(0,0)
        status = self.save_form_data_matching_formdef(f, formdef, formdefmodel, attachment)
        f.close()
        return status
    def get_formdef_from_schema_file(self, xsd_file_location):
        """Load a FormDef from an xsd file; caches the unstripped formdef on
        self and returns the meta-stripped version."""
        g = open( xsd_file_location ,"r")
        formdef = FormDef(g)
        formdef.force_to_valid()
        stripped_formdef = self._strip_meta_def( formdef )
        g.close()
        self.formdef = formdef
        return stripped_formdef
    # note that this does not remove the file from the filesystem
    # (by design, for security)
    @transaction.commit_on_success
    def remove_instance_matching_schema(self, formdef_id, instance_id, remove_submission=False):
        """Delete one submitted instance (and its child-table rows);
        optionally delete the original submission as well."""
        fdm = FormDefModel.objects.get(pk=formdef_id)
        edm_id = fdm.element.id
        edm = ElementDefModel.objects.get(pk=edm_id)
        self._remove_instance_inner_loop(edm, instance_id)
        try:
            meta = Metadata.objects.get(raw_data=instance_id, formdefmodel=formdef_id)
        except Metadata.DoesNotExist:
            # not a problem since this simply means the data was
            # never successfully registered
            return
        # mark as intentionally handled
        self._add_handled(meta.attachment, method="deleted")
        if remove_submission:
            meta.attachment.submission.delete()
        meta.delete()
    def _add_handled(self, attachment, method, message=''):
        '''Tells the receiver that this attachment's submission was handled.
        Should only be called _after_ we are sure that we got a linked
        schema of this type.
        '''
        try:
            handle_type = SubmissionHandlingType.objects.get(app="xformmanager", method=method)
        except SubmissionHandlingType.DoesNotExist:
            handle_type = SubmissionHandlingType.objects.create(app="xformmanager", method=method)
        attachment.handled(handle_type, message)
    def _remove_handled(self, attachment):
        '''Tells the receiver that this attachment's submission was not handled.
        Only used when we are deleting data from xformmanager but not receiver
        '''
        try:
            handle_type = SubmissionHandlingType.objects.get(app="xformmanager", method="instance_data")
        except SubmissionHandlingType.DoesNotExist:
            handle_type = SubmissionHandlingType.objects.create(app="xformmanager", method="instance_data")
        attachment.unhandled(handle_type)
    def _remove_instance_inner_loop(self, elementdef, instance_id):
        """Depth-first delete of an instance row and every child-table row
        that references it via parent_id."""
        edms = ElementDefModel.objects.filter(parent=elementdef)
        cursor = connection.cursor()
        for edm in edms:
            cursor.execute( " select id, parent_id from " + edm.table_name + \
                            " where parent_id = %s ", [instance_id] )
            rows = cursor.fetchall()
            if rows:
                for row in rows:
                    self._remove_instance_inner_loop( edm, row[0] )
                query = " delete from " + edm.table_name + " where parent_id = %s "
                cursor.execute(query , [instance_id] )
        cursor.execute( " delete from " + elementdef.table_name + " where id = %s ", [instance_id] )
    @transaction.commit_on_success
    def remove_schema(self, id, remove_submissions=False, delete_xml=True):
        """Delete one registered schema: its data tables, models and
        (optionally) its submissions and xsd file."""
        fdds = FormDefModel.objects.all().filter(id=id)
        if fdds is None or len(fdds) == 0:
            logging.error(" Schema with id %s could not be found. Not deleted." % id)
            return
        # must remove tables first since removing form_meta automatically deletes some tables
        self._remove_form_tables(fdds[0])
        self._remove_form_models(fdds[0], remove_submissions, delete_xml)
        # when we delete formdefdata, django automatically deletes all associated elementdefdata
    # make sure when calling this function always to confirm with the user
    def clear(self, remove_submissions=True, delete_xml=True):
        """ removes all schemas found in XSD_REPOSITORY_PATH
        and associated tables.
        If delete_xml is true (default) it also deletes the
        contents of XFORM_SUBMISSION_PATH.
        """
        self._remove_form_tables()
        self._remove_form_models(remove_submissions=remove_submissions)
        # when we delete formdefdata, django automatically deletes all associated elementdefdata
        if delete_xml:
            # drop all xml data instance files stored in XFORM_SUBMISSION_PATH
            for file in os.listdir( settings.RAPIDSMS_APPS['receiver']['xform_submission_path'] ):
                file = os.path.join( settings.RAPIDSMS_APPS['receiver']['xform_submission_path'] , file)
                logging.debug( "Deleting " + file )
                stat = os.stat(file)
                if S_ISREG(stat[ST_MODE]) and os.access(file, os.W_OK):
                    os.remove( file )
                else:
                    logging.debug( " WARNING: Permission denied to access " + file )
                    continue
    class XFormError(SyntaxError):
        """ Generic error for XFormManager """
        pass
    def _execute_queries(self, queries):
        """Run one or more ';'-separated SQL statements against the default
        database connection."""
        # todo - rollback on fail
        if queries is None or len(queries) == 0:
            logging.error("xformmanager: storageutility - xform " + self.formdef.target_namespace + " could not be parsed")
            return
        logging.debug(queries)
        cursor = connection.cursor()
        if queries.count(';') > 0:
            simple_queries = queries.split(';')
            for query in simple_queries:
                if len(query)>0:
                    cursor.execute(query)
        else:
            cursor.execute(queries)
    #TODO: commcare-specific functionality - should pull out into separate file
    def _strip_meta_def(self, formdef):
        """ TODO: currently, we do not strip the duplicate meta information in the xformdata
            so as not to break dan's code (reporting/graphing). Should fix dan's code to
            use metadata tables now.
        root_node = formdef.child_elements[0]
        # this requires that 'meta' be the first child element within root node
        if len( root_node.child_elements ) > 0:
            meta_node = root_node.child_elements[0]
            new_meta_children = []
            if meta_node.name.lower().endswith('meta'):
                # this rather tedious construction is so that we can support metadata with missing fields but not lose metadata with wrong fields
                for element in meta_node.child_elements:
                    field = self._data_name(meta_node.name,element.name)
                    if field.lower() not in Metadata.fields:
                        new_meta_children = new_meta_children + [ element ]
                if len(new_meta_children) > 0:
                    meta_node.child_elements = new_meta_children
        """
        return formdef
    def _remove_form_models(self,form='', remove_submissions=False, delete_xml=True):
        """Drop all schemas, associated tables, and files"""
        if form == '':
            fdds = FormDefModel.objects.all().filter()
        else:
            fdds = [form]
        for fdd in fdds:
            if delete_xml:
                file = fdd.xsd_file_location
                if file is not None:
                    logging.debug( " removing file " + file )
                    if os.path.exists(file):
                        os.remove(file)
                    else:
                        logging.warn("Tried to delete schema file: %s but it wasn't found!" % file)
            logging.debug( " deleting form definition for " + fdd.target_namespace )
            all_meta = Metadata.objects.filter(formdefmodel=fdd)
            for meta in all_meta:
                if remove_submissions:
                    meta.attachment.submission.delete()
                self._remove_handled(meta.attachment)
            all_meta.delete()
            fdd.delete()
    # in theory, there should be away to *not* remove elemenetdefdata when deleting formdef
    # until we figure out how to do that, this'll work fine
    def _remove_form_tables(self,form=''):
        # drop all element definitions and associated tables
        # the reverse ordering is a horrible hack (but efficient)
        # to make sure we delete children before parents
        if form == '':
            edds = ElementDefModel.objects.all().filter().order_by("-table_name")
        else:
            edds = ElementDefModel.objects.all().filter(form=form).order_by("-table_name")
        for edd in edds:
            logging.debug( " deleting data table:" + edd.table_name )
            if self._table_exists(edd.table_name):
                self._drop_table(edd.table_name)
            else:
                logging.warn("Tried to delete %s table, but it wasn't there!" % edd.table_name)
    def _table_exists(self, table_name):
        '''Check if a table exists'''
        cursor = connection.cursor()
        cursor.execute("show tables like '%s'" % table_name)
        return len(cursor.fetchall()) == 1
    def _drop_table(self, table_name):
        '''Drop a table'''
        cursor = connection.cursor()
        cursor.execute("drop table %s" % table_name)
    #temporary measure to get target form
    # todo - fix this to be more efficient, so we don't parse the file twice
    def get_xmlns_from_instance(self, stream):
        """Return (xmlns, version) parsed from a submitted xml instance;
        version is None unless the root carries a numeric version attribute."""
        xml_string = get_xml_string(stream)
        try:
            root = etree.XML(xml_string)
        except etree.XMLSyntaxError:
            raise self.XFormError("XML Syntax Error")
        r = re.search('{[a-zA-Z0-9_\-\.\/\:]*}', root.tag)
        if r is None:
            raise self.XFormError("NO XMLNS FOUND IN SUBMITTED FORM")
        xmlns = r.group(0).strip('{').strip('}')
        version = case_insensitive_attribute(root, "version")
        if version and version.strip().isdigit():
            return (xmlns, version.strip())
        return (xmlns, None)
class Query(object):
    """ stores all the information needed to run a query """
    def __init__(self, table_name='', field_value_dict=None, child_queries=None):
        # bug fix: the original used mutable defaults ({} and []), which are
        # created once at definition time and shared by every Query built
        # without those arguments.
        self.table_name = table_name # string
        self.field_value_dict = field_value_dict if field_value_dict is not None else {} # column name -> value
        self.child_queries = child_queries if child_queries is not None else [] # list of Queries
        self.parent_id = 0
    @transaction.commit_on_success
    def execute_insert(self):
        """Insert this query's row (and, recursively, its children's rows).
        Returns the new row id, or -1 if there was nothing to insert."""
        new_id = -1
        if len( self.field_value_dict ) > 0:
            query_string = "INSERT INTO " + self.table_name + " (";
            for field in self.field_value_dict:
                query_string = query_string + field + ", "
            query_string = self._trim2chars( query_string )
            if self.parent_id > 0: query_string = query_string + ", parent_id"
            query_string = query_string + ") VALUES( "
            # we use c-style substitution to enable django-built-in
            # sql-injection protection
            for value in self.field_value_dict:
                query_string = query_string + "%s, "
            query_string = self._trim2chars( query_string )
            if self.parent_id > 0: query_string = query_string + ", " + str(self.parent_id)
            query_string = query_string + ");"
            values = []
            for value in self.field_value_dict:
                values = values + [ self.field_value_dict[ value ] ]
            new_id = self._execute(query_string, values)
        for child_query in self.child_queries:
            child_query.execute_insert()
        return new_id
    def _execute(self, queries, values):
        """Run the insert and return the database's last-insert id (-1 if
        none could be read)."""
        # todo - rollback on fail
        if queries is None or len(queries) == 0:
            # bug fix: Query has no self.formdef attribute; the original
            # message referenced self.formdef.target_namespace and raised
            # AttributeError instead of logging.
            logging.error("xformmanager: storageutility - query for table " + self.table_name + " could not be executed")
            return
        cursor = connection.cursor()
        if settings.DATABASE_ENGINE=='mysql':
            cursor.execute(queries, values)
            query = "SELECT LAST_INSERT_ID();"
            cursor.execute(query)
        else:
            cursor.execute(queries, values)
            query = "SELECT LAST_INSERT_ROWID()"
            cursor.execute(query)
        row = cursor.fetchone()
        if row is not None:
            return row[0]
        return -1
    def _trim2chars(self, string):
        """Drop the trailing ', ' from a partially-built SQL fragment."""
        return string[0:len(string)-2]
class XFormProcessor(object):
    """ Some useful utilities for any inheriting xformprocessor about how to deal with data """
    # canonical metadata column names recognised on incoming forms
    META_FIELDS = ['meta_formname','meta_commcareversion','meta_formversion','meta_deviceid',
                   'meta_timestart','meta_timeend','meta_username','meta_chw_id','meta_uid']
    def _hack_to_get_cchq_working(self, name):
        """Strip the sanitized form-name prefix from *name*; if the remaining
        tail matches a known meta field, return that canonical field name."""
        prefix = sanitize (self.formdef.name) + "_"
        if name.startswith(prefix):
            # drop the prefix plus one extra separator character
            name = name[len(prefix) + 1:]
        tail = '_'.join(name.split('_')[-2:])
        if self.META_FIELDS.count(tail) == 1:
            return tail
        return name
class XFormDBTableCreator(XFormProcessor):
""" This class is responsible for parsing a schema and generating the corresponding
db tables dynamically
If there are errors, these errors will be stored in self.errors
"""
# Data types taken from mysql.
# This should really draw from django built-in utilities which are database independent.
XSD_TO_MYSQL_TYPES = {
'string':'VARCHAR(255)',
'integer':'INT(11)',
'int':'INT(11)',
'decimal':'DECIMAL(5,2)',
'double':'DOUBLE',
'float':'DOUBLE',
'datetime':'DATETIME', # string
'date':'DATE', # string
'time':'TIME', # string
'gyear':'INT(11)',
'gmonth':'INT(11)',
'gday':'INT(11)',
'gyearmonth':'INT(11)',
'gmonthday':'INT(11)',
'boolean':'TINYINT(1)',
'base64binary':'DOUBLE', #i don't know...
'hexbinary':'DOUBLE', #..meh.
'anyuri':'VARCHAR(200)', # string
'default':'VARCHAR(255)',
}
XSD_TO_DEFAULT_TYPES = { #sqlite3 compliant
'string':'VARCHAR(255)',
'integer':'INT(11)',
'int':'INT(11)',
'decimal':'DECIMAL(5,2)',
'double':'DOUBLE',
'float':'DOUBLE',
'datetime':'DateField', # string
'date':'DateField', # string
'time':'DateField', # string
'gyear':'INT(11)',
'gmonth':'INT(11)',
'gday':'INT(11)',
'gyearmonth':'INT(11)',
'gmonthday':'INT(11)',
'boolean':'TINYINT(1)',
'base64binary':'DOUBLE', #i don't know...
'hexbinary':'DOUBLE', #..meh.
'anyuri':'VARCHAR(200)', # string
'default':'VARCHAR(255)',
}
def __init__(self, formdef, formdefmodel):
"""
formdef - in memory transition object
formdefmodel - django model which exists for each schema registered
"""
self.formdef = formdef
self.formdefmodel = formdefmodel
self.errors = XFormErrors(formdef.target_namespace)
def create(self):
return self.queries_to_create_instance_tables( self.formdef,
self.formdefmodel.element.id,
self.formdef.name, self.formdef.name)
# TODO - this should be cleaned up to use the same Query object that populate_instance_tables uses
# (rather than just passing around tuples of strings)
def queries_to_create_instance_tables(self, elementdef, parent_id, parent_name='', parent_table_name=''):
table_name = format_table_name( formatted_join(parent_name, elementdef.name), self.formdef.version )
(next_query, fields) = self._create_instance_tables_query_inner_loop(elementdef, parent_id, parent_name, parent_table_name )
# add this later - should never be called during unit tests
if not fields: return next_query
queries = ''
if settings.DATABASE_ENGINE=='mysql' :
queries = "CREATE TABLE "+ table_name +" ( id INT(11) NOT NULL AUTO_INCREMENT PRIMARY KEY, "
else:
queries = "CREATE TABLE "+ table_name +" ( id INTEGER PRIMARY KEY, "
if len(fields[0]) == 1:
queries = queries + str(fields)
else:
for field in fields:
if len(field)>0:
queries = queries + str(field)
# we don't really need a parent_id in our top-level table...
# should be NOT NULL?
if parent_name is not '':
if settings.DATABASE_ENGINE=='mysql' :
queries = queries + " parent_id INT(11), "
queries = queries + " FOREIGN KEY (parent_id) REFERENCES " + format_table_name(parent_table_name, self.formdef.version) + "(id) ON DELETE SET NULL"
else:
queries = queries + " parent_id REFERENCES " + format_table_name(parent_table_name, self.formdef.version) + "(id) ON DELETE SET NULL"
else:
queries = self._trim2chars(queries)
# most of the time, we rely on global mysql config in my.conf/ini
end_query = ");"
# we only specify default engine and character set if it's clear that
# we are already doing something against the global config
# (i.e. we're settings database_options in settings.py)
if hasattr(settings,'DATABASE_OPTIONS') and \
'init_command' in settings.DATABASE_OPTIONS:
if 'innodb' in settings.DATABASE_OPTIONS['init_command'].lower():
end_query = ") ENGINE=InnoDB;"
elif 'myisam' in settings.DATABASE_OPTIONS['init_command'].lower():
end_query = ") ENGINE=MyISAM;"
queries = queries + end_query + next_query
return queries
    def _create_instance_tables_query_inner_loop(self, elementdef, parent_id,
                                                 parent_name='', parent_table_name=''):
        """ This is 'handle' instead of 'create'(_children_tables) because not only
        are we creating children tables, we are also gathering/passing
        children/field information back to the parent.

        Returns a (next_query, fields) tuple: next_query is the SQL for the
        child tables spawned by repeatable children; fields is the column
        definitions for the caller's own table - either a list of strings, or
        (for a repeatable leaf) a single column-definition string.
        """
        if not elementdef: return
        local_fields = [];
        next_query = ''
        # a repeatable leaf gets its own table with a single value column
        if elementdef.is_repeatable and len(elementdef.child_elements)== 0 :
            return (next_query, self._db_field_definition_string(elementdef) )
        for child in elementdef.child_elements:
            # put in a check for root.isRepeatable
            next_parent_name = formatted_join(parent_name, elementdef.name)
            if child.is_repeatable :
                # repeatable elements must generate a new table
                if parent_id == '':
                    # root-level repeatable: register the element as its own parent
                    ed = ElementDefModel(form_id=self.formdefmodel.id, xpath=child.xpath,
                                         table_name = format_table_name( formatted_join(parent_name, child.name), self.formdef.version ) ) #should parent_name be next_parent_name?
                    ed.save()
                    ed.parent = ed
                else:
                    ed = ElementDefModel(parent_id=parent_id, form=self.formdefmodel, xpath=child.xpath,
                                         table_name = format_table_name( formatted_join(parent_name, child.name), self.formdef.version ) ) #next_parent_name
                    ed.save()
                query = self.queries_to_create_instance_tables(child, ed.id, parent_name, parent_table_name )
                next_query = next_query + query
            else:
                # non-repeatable children are flattened into the parent table
                if len(child.child_elements) > 0 :
                    (q, f) = self._create_instance_tables_query_inner_loop(elementdef=child, parent_id=parent_id, parent_name=formatted_join( next_parent_name, child.name ), parent_table_name=parent_table_name) #next-parent-name
                else:
                    local_fields.append( self._db_field_definition_string(child) )
                    # leaf child: recursion returns ('', []) so q/f are harmless here
                    (q,f) = self._create_instance_tables_query_inner_loop(elementdef=child, parent_id=parent_id, parent_name=next_parent_name, parent_table_name=parent_table_name ) #next-parent-name
                next_query = next_query + q
                local_fields = local_fields + f
        return (next_query, local_fields)
def _db_field_definition_string(self, elementdef):
""" generates the sql string to conform to the expected data type """
label = self._hack_to_get_cchq_working( sanitize( elementdef.name ) )
if elementdef.type == None:
# This is an issue. For now just log it as an error and default
# it to a string
logging.error("No data type found in element: %s! will use a string data type" % elementdef)
elementdef.type = "string"
if elementdef.type[0:5] == 'list.':
field = ''
simple_type = self.formdef.types[elementdef.type]
if simple_type is not None:
for value in simple_type.multiselect_values:
column_name = self._truncate(label + "_" + value)
column_type = self._get_db_type( 'boolean' )
field += "%s %s, " % (column_name, column_type)
return field
field = self._truncate(label) + " " + self._get_db_type( elementdef.type ) + ", "
return field
def _get_db_type(self, type):
type = type.lower()
if settings.DATABASE_ENGINE=='mysql' :
if type in self.XSD_TO_MYSQL_TYPES:
return self.XSD_TO_MYSQL_TYPES[type]
return self.XSD_TO_MYSQL_TYPES['default']
else:
if type in self.XSD_TO_DEFAULT_TYPES:
return self.XSD_TO_DEFAULT_TYPES[type]
return self.XSD_TO_DEFAULT_TYPES['default']
def _truncate(self, field_name):
'''Truncates a field name to _MAX_FIELD_NAME_LENTH characters, which is the max length allowed
by mysql. This is NOT smart enough to check for conflicts, so there could
be issues if an xform has two very similar, very long, fields'''
if len(field_name) > _MAX_FIELD_NAME_LENTH:
return field_name[:_MAX_FIELD_NAME_LENTH]
return field_name
def _trim2chars(self, string):
return string[0:len(string)-2]
class XFormDBTablePopulator(XFormProcessor):
    """ This class is responsible for parsing an xform instance
    and populating the corresponding db tables dynamically
    If there are errors, these errors will be stored in self.errors
    """
    # XSD types whose values are validated/normalized rather than stored as
    # plain sanitized strings
    DB_NON_STRING_TYPES = (
        'integer',
        'int',
        'decimal',
        'double',
        'float',
        'datetime',
        'date',
        'time',
        'gyear',
        'gmonthday',
        'boolean',
        'base64binary',
        'hexbinary',
    )
    # numeric XSD types mapped to the python callable used to validate them
    DB_NUMERIC_TYPES = {
        'integer': int, 'int': int, 'decimal': float, 'double' : float, 'float':float,'gyear':int
    }
    def __init__(self, formdef):
        # formdef: parsed schema describing the xform being populated
        self.formdef = formdef
        # accumulates missing-field / bad-type problems found while populating
        self.errors = XFormErrors(formdef.target_namespace)
    def populate(self, data_tree):
        """Entry point: build the Query tree for a whole instance document."""
        return self.queries_to_populate_instance_tables(data_tree=data_tree,
                                                        elementdef=self.formdef.root,
                                                        parent_name=self.formdef.name)
    def queries_to_populate_instance_tables(self, data_tree, elementdef, parent_name='', parent_table_name='', parent_id=0):
        """Build a Query for one (possibly repeated) element, first resolving
        the parent_id from the most recent row of the parent's table."""
        if data_tree is None and elementdef:
            self.errors.missing.append( "Missing element: %s" % elementdef.name )
            return
        table_name = get_registered_table_name( elementdef.xpath, self.formdef.target_namespace, self.formdef.version )
        if len( parent_table_name ) > 0:
            # todo - make sure this is thread-safe (in case someone else is updating table). ;)
            # currently this assumes that we update child elements at exactly the same time we update parents =b
            cursor = connection.cursor()
            s = "SELECT id FROM " + str(parent_table_name) + " order by id DESC"
            logging.debug(s)
            cursor.execute(s)
            row = cursor.fetchone()
            if row is not None:
                parent_id = row[0]
            else:
                # parent table is empty - assume the row about to be written is id 1
                parent_id = 1
        query = self._populate_instance_tables_inner_loop(data_tree=data_tree, elementdef=elementdef, \
                                                          parent_name=parent_name, parent_table_name=table_name, \
                                                          parent_id=parent_id )
        query.parent_id = parent_id
        return query
    def _populate_instance_tables_inner_loop(self, data_tree, elementdef, parent_name='', \
                                             parent_table_name='', parent_id=0 ):
        """Recursive worker: flattens non-repeatable children into one Query's
        field/value dict and spawns child Queries for repeatable children."""
        if data_tree is None and elementdef:
            self.errors.missing.append( "Missing element: %s" % elementdef.name )
            return
        local_field_value_dict = {};
        next_query = Query(parent_table_name)
        if len(elementdef.child_elements)== 0:
            field_value_dict = {}
            # a repeatable leaf carries its value directly in the node's text
            if elementdef.is_repeatable :
                try:
                    field_value_dict = self._get_formatted_field_and_value(elementdef,data_tree.text)
                except TypeError, e:
                    self.errors.bad_type.append( unicode(e) )
            return Query( parent_table_name, field_value_dict )
        for def_child in elementdef.child_elements:
            data_node = None
            # todo - make sure this works in a case-insensitive way
            # find the data matching the current elementdef
            # todo - put in a check for root.isRepeatable
            next_parent_name = formatted_join(parent_name, elementdef.name)
            if def_child.is_repeatable :
                for data_child in case_insensitive_iter(data_tree, '{'+self.formdef.target_namespace+'}'+ self._data_name( elementdef.name, def_child.name) ):
                    query = self.queries_to_populate_instance_tables(data_child, def_child, next_parent_name, \
                                                                     parent_table_name, parent_id )
                    if next_query is not None:
                        next_query.child_queries = next_query.child_queries + [ query ]
                    else:
                        next_query = query
            else:
                # if there are children (which are not repeatable) then flatten the table
                for data_child in case_insensitive_iter(data_tree, '{'+self.formdef.target_namespace+'}'+ self._data_name( elementdef.name, def_child.name) ):
                    data_node = data_child
                    break;
                if data_node is None:
                    # no biggie - repeatable and irrelevant fields in the schema
                    # do not show up in the instance
                    self.errors.missing.append( def_child.name )
                    continue
                if( len(def_child.child_elements)>0 ):
                    # here we are propagating, not only the list of fields and values, but also the child queries
                    query = self._populate_instance_tables_inner_loop(data_tree=data_node, \
                                                                      elementdef=def_child, \
                                                                      parent_name=parent_name, \
                                                                      parent_table_name=parent_table_name )
                    next_query.child_queries = next_query.child_queries + query.child_queries
                    local_field_value_dict.update( query.field_value_dict )
                else:
                    # if there are no children, then add values to the table
                    if data_node.text is not None :
                        try:
                            field_value_dict = self._get_formatted_field_and_value(def_child, data_node.text)
                        except TypeError, e:
                            self.errors.bad_type.append( unicode(e) )
                        # NOTE(review): if the try raises, field_value_dict may be
                        # stale from an earlier iteration - confirm intended
                        local_field_value_dict.update( field_value_dict )
                    # leaf child: this recursion returns an empty Query, kept for symmetry
                    query = self._populate_instance_tables_inner_loop(data_node, def_child, \
                                                                      next_parent_name, parent_table_name)
                    next_query.child_queries = next_query.child_queries + query.child_queries
                    local_field_value_dict.update( query.field_value_dict )
        q = Query( parent_table_name, local_field_value_dict )
        q.child_queries = q.child_queries + [ next_query ]
        return q
    def _get_formatted_field_and_value(self, elementdef, raw_value):
        """ returns a dictionary of key-value pairs """
        label = self._hack_to_get_cchq_working( sanitize(elementdef.name) )
        #don't sanitize value yet, since numbers/dates should not be sanitized in the same way
        if elementdef.type[0:5] == 'list.':
            field = ''
            value = ''
            values = raw_value.split()
            simple_type = self.formdef.types[elementdef.type]
            if simple_type is not None and simple_type.multiselect_values is not None:
                field_value = {}
                # set a '1' flag column for every selected choice
                for v in values:
                    v = sanitize(v)
                    if v in simple_type.multiselect_values:
                        field_value.update( { label + "_" + v : '1' } )
                return field_value
        return { label : self._db_format(elementdef.type, raw_value) }
    def _db_format(self, type, text):
        """Validate/normalize a raw text value for its declared XSD type.
        Raises TypeError when the value is absent or cannot be represented."""
        if type is None:
            raise TypeError("No type found for value: %s." % text)
        type = type.lower()
        if text == '' or text is None:
            raise TypeError("No value provided for element: %s." % type)
        if type in self.DB_NON_STRING_TYPES:
            #dmyung :: some additional input validation
            if self.DB_NUMERIC_TYPES.has_key(type):
                typefunc = self.DB_NUMERIC_TYPES[type]
                try:
                    val = typefunc(text.strip())
                    return str(val)
                except:
                    raise TypeError("Error validating type %s with value %s (is not %s)" % \
                                    (type,text,str(typefunc)) )
            elif type == "datetime" :
                # xsd datetime separates date and time with 'T'; SQL wants a space
                text = string.replace(text,'T',' ')
                if settings.DATABASE_ENGINE!='mysql' :
                    # truncate microseconds
                    index = text.rfind('.')
                    if index != -1:
                        text = text[0:index]
                return text.strip()
            else:
                return text.strip()
        else:
            return text.strip()
    def _data_name(self, parent_name, child_name):
        """Strip a duplicated parent-name prefix (plus one separator char)
        from child_name, case-insensitively."""
        if child_name[0:len(parent_name)].lower() == parent_name.lower():
            child_name = child_name[len(parent_name)+1:len(child_name)]
        return child_name
class XFormErrors(Exception):
    '''Exception to make dealing with xform query errors easier.'''

    def __init__(self, xform_name=None):
        self.xform_name = xform_name
        # per-category lists of problem field names gathered while processing
        self.missing = []
        self.bad_type = []
        # TODO - add checks for the following
        self.extra = [] # requires maintaining a data structure for input tree
        self.duplicate = [] # requires something better than case_insensitive_iter()

    def __str__(self):
        return unicode(self).encode('utf-8')

    def __unicode__(self):
        summaries = [
            "Missing fields: (%s)" % (",".join(self.missing)),
            "Extra fields: (%s)" % (",".join(self.extra)),
            "Duplicate fields: (%s)" % (",".join(self.duplicate)),
            "Poorly formatted fields: (%s)" % (",".join(self.bad_type)),
        ]
        # keep the joined text around; callers may inspect error_string later
        self.error_string = "\n".join(summaries)
        return "Error for instance of %s: \n%s" % (self.xform_name, self.error_string)

    def is_empty(self):
        """True when no problems of any category were recorded."""
        return not any((self.missing, self.extra, self.duplicate, self.bad_type))

    def str(self, field):
        """Render only one category of problems, e.g. errors.str('missing')."""
        if not hasattr(self, field.lower()):
            return unicode(self)
        return "%s Error for instance of %s: \n%s" % \
               (field, self.xform_name, ",".join(getattr(self, field.lower())))
def is_schema_registered(target_namespace, version=None):
    """ given a form and version is that form registered """
    try:
        FormDefModel.objects.get(target_namespace=target_namespace, version=version)
    except FormDefModel.DoesNotExist:
        return False
    return True
def get_registered_table_name(xpath, target_namespace, version=None):
    """ the correct lookup function """
    # TODO : fix - do we need to account for UI version?
    form = FormDefModel.objects.get(target_namespace=target_namespace,
                                    version=version)
    return ElementDefModel.objects.get(xpath=xpath, form=form).table_name
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.io_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os.path
import time
import contextlib
import shutil
import tempfile
import tensorflow as tf
import numpy as np
import six
from google.protobuf.any_pb2 import Any
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import queue_runner_pb2
from tensorflow.python.framework import function
from tensorflow.python.platform import gfile
def _TestDir(test_name):
  """Return a fresh, empty directory for this test under the TF temp dir."""
  dir_path = os.path.join(tf.test.get_temp_dir(), test_name)
  # wipe any leftovers from a previous run so every test starts clean
  if os.path.exists(dir_path):
    shutil.rmtree(dir_path)
  gfile.MakeDirs(dir_path)
  return dir_path
class SaverTest(tf.test.TestCase):
  """Round-trip save/restore tests for tf.train.Saver on small graphs."""

  def testBasics(self):
    """Save two scalar variables, then restore them into fresh graphs."""
    save_path = os.path.join(self.get_temp_dir(), "basics")
    with self.test_session() as sess:
      # Build a graph with 2 parameter nodes, and Save and
      # Restore nodes for them.
      v0 = tf.Variable(10.0, name="v0")
      v1 = tf.Variable(20.0, name="v1")
      save = tf.train.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
      tf.initialize_all_variables().run()
      # Check that the parameter nodes have been initialized.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
      # Save the initialized values in the file at "save_path"
      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, six.string_types))
      self.assertEqual(save_path, val)
    # Start a second session. In that session the parameter nodes
    # have not been initialized either.
    with self.test_session() as sess:
      v0 = tf.Variable(-1.0, name="v0")
      v1 = tf.Variable(-1.0, name="v1")
      save = tf.train.Saver({"v0": v0, "v1": v1})
      with self.assertRaisesWithPredicateMatch(
          tf.OpError, lambda e: "uninitialized value v0" in e.message):
        sess.run(v0)
      with self.assertRaisesWithPredicateMatch(
          tf.OpError, lambda e: "uninitialized value v1" in e.message):
        sess.run(v1)
      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
    # Build another graph with 2 nodes, initialized
    # differently, and a Restore node for them.
    with self.test_session() as sess:
      v0_2 = tf.Variable(1000.0, name="v0")
      v1_2 = tf.Variable(2000.0, name="v1")
      save2 = tf.train.Saver({"v0": v0_2, "v1": v1_2})
      tf.initialize_all_variables().run()
      # Check that the parameter nodes have been initialized.
      self.assertEqual(1000.0, v0_2.eval())
      self.assertEqual(2000.0, v1_2.eval())
      # Restore the values saved earlier in the parameter nodes.
      save2.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0_2.eval())
      self.assertEqual(20.0, v1_2.eval())

  def testInt64(self):
    """An int64 variable survives a save/restore round trip."""
    save_path = os.path.join(self.get_temp_dir(), "int64")
    with self.test_session() as sess:
      # Build a graph with 1 node, and save and restore for them.
      v = tf.Variable(np.int64(15), name="v")
      save = tf.train.Saver({"v": v}, restore_sequentially=True)
      tf.initialize_all_variables().run()
      # Save the initialized values in the file at "save_path"
      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, six.string_types))
      self.assertEqual(save_path, val)
    with self.test_session() as sess:
      v = tf.Variable(np.int64(-1), name="v")
      save = tf.train.Saver({"v": v})
      with self.assertRaisesWithPredicateMatch(
          tf.OpError, lambda e: "uninitialized value v" in e.message):
        sess.run(v)
      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(np.int64(15), v.eval())

  def testSomeErrors(self):
    """Conflicting save-slice names must be rejected by the Saver."""
    with tf.Graph().as_default():
      v0 = tf.Variable([10.0], name="v0")
      v1 = tf.Variable([20.0], name="v1")
      v2 = tf.Variable([20.0], name="v2")
      v2._set_save_slice_info(tf.Variable.SaveSliceInfo("v1", [1], [0], [1]))
      # By default the name used for "v2" will be "v1" and raise an error.
      with self.assertRaisesRegexp(ValueError, "same name: v1"):
        tf.train.Saver([v0, v1, v2])
      # The names are different and will work.
      tf.train.Saver({"vee1": v1, "other": [v2]})

  def testBasicsWithListOfVariables(self):
    """Same round trip as testBasics, passing variables as a list."""
    save_path = os.path.join(self.get_temp_dir(), "basics_with_list")
    with self.test_session(graph=tf.Graph()) as sess:
      # Build a graph with 2 parameter nodes, and Save and
      # Restore nodes for them.
      v0 = tf.Variable(10.0, name="v0")
      v1 = tf.Variable(20.0, name="v1")
      save = tf.train.Saver([v0, v1])
      tf.initialize_all_variables().run()
      # Check that the parameter nodes have been initialized.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
      # Save the initialized values in the file at "save_path"
      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, six.string_types))
      self.assertEqual(save_path, val)
    # Start a second session. In that session the variables
    # have not been initialized either.
    with self.test_session(graph=tf.Graph()) as sess:
      v0 = tf.Variable(-1.0, name="v0")
      v1 = tf.Variable(-1.0, name="v1")
      save = tf.train.Saver([v0, v1])
      with self.assertRaisesWithPredicateMatch(
          tf.OpError, lambda e: "uninitialized value v0" in e.message):
        sess.run(v0)
      with self.assertRaisesWithPredicateMatch(
          tf.OpError, lambda e: "uninitialized value v1" in e.message):
        sess.run(v1)
      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
    # Build another graph with 2 nodes, initialized
    # differently, and a Restore node for them.
    with self.test_session(graph=tf.Graph()) as sess:
      v0_2 = tf.Variable(1000.0, name="v0")
      v1_2 = tf.Variable(2000.0, name="v1")
      save2 = tf.train.Saver([v0_2, v1_2])
      tf.initialize_all_variables().run()
      # Check that the parameter nodes have been initialized.
      self.assertEqual(1000.0, v0_2.eval())
      self.assertEqual(2000.0, v1_2.eval())
      # Restore the values saved earlier in the parameter nodes.
      save2.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0_2.eval())
      self.assertEqual(20.0, v1_2.eval())

  def _SaveAndLoad(self, var_name, var_value, other_value, save_path):
    """Helper: save var_name=var_value, then restore over other_value and
    verify the original value comes back."""
    with self.test_session() as sess:
      var = tf.Variable(var_value, name=var_name)
      save = tf.train.Saver({var_name: var})
      var.initializer.run()
      val = save.save(sess, save_path)
      self.assertEqual(save_path, val)
    with self.test_session() as sess:
      var = tf.Variable(other_value, name=var_name)
      save = tf.train.Saver({var_name: var})
      save.restore(sess, save_path)
      self.assertAllClose(var_value, var.eval())

  def testCacheRereadsFile(self):
    """Reusing one checkpoint file for different variables must re-read it."""
    save_path = os.path.join(self.get_temp_dir(), "cache_rereads")
    # Save and reload one Variable named "var0".
    self._SaveAndLoad("var0", 0.0, 1.0, save_path)
    # Save and reload one Variable named "var1" in the same file.
    # The cached readers should know to re-read the file.
    self._SaveAndLoad("var1", 1.1, 2.2, save_path)

  def testGPU(self):
    """Save/restore works for variables placed on a GPU device."""
    if not tf.test.is_built_with_cuda():
      return
    save_path = os.path.join(self.get_temp_dir(), "gpu")
    with tf.Session("", graph=tf.Graph()) as sess:
      with sess.graph.device("/gpu:0"):
        v0_1 = tf.Variable(123.45)
      save = tf.train.Saver({"v0": v0_1})
      tf.initialize_all_variables().run()
      save.save(sess, save_path)
    with tf.Session("", graph=tf.Graph()) as sess:
      with sess.graph.device("/gpu:0"):
        v0_2 = tf.Variable(543.21)
      save = tf.train.Saver({"v0": v0_2})
      tf.initialize_all_variables().run()
      self.assertAllClose(543.21, v0_2.eval())
      save.restore(sess, save_path)
      self.assertAllClose(123.45, v0_2.eval())

  def testVariables(self):
    """A no-arg Saver defaults to saving/restoring all variables."""
    save_path = os.path.join(self.get_temp_dir(), "variables")
    with tf.Session("", graph=tf.Graph()) as sess:
      one = tf.Variable(1.0)
      twos = tf.Variable([2.0, 2.0, 2.0])
      init = tf.initialize_all_variables()
      save = tf.train.Saver(tf.all_variables())
      init.run()
      save.save(sess, save_path)
    with tf.Session("", graph=tf.Graph()) as sess:
      one = tf.Variable(0.0)
      twos = tf.Variable([0.0, 0.0, 0.0])
      # Saver with no arg, defaults to 'all variables'.
      save = tf.train.Saver()
      save.restore(sess, save_path)
      self.assertAllClose(1.0, one.eval())
      self.assertAllClose([2.0, 2.0, 2.0], twos.eval())

  def testSaveWithGlobalStep(self):
    """global_step (tensor or int) is appended to the checkpoint filename."""
    save_path = os.path.join(self.get_temp_dir(), "ckpt_with_global_step")
    global_step_int = 5
    # Save and reload one Variable named "var0".
    self._SaveAndLoad("var0", 0.0, 1.0, save_path)
    for use_tensor in [True, False]:
      with self.test_session() as sess:
        var = tf.Variable(1.0, name="var0")
        save = tf.train.Saver({var.op.name: var})
        var.initializer.run()
        if use_tensor:
          global_step = tf.constant(global_step_int)
          val = save.save(sess, save_path, global_step=global_step)
        else:
          val = save.save(sess, save_path, global_step=global_step_int)
        expected_save_path = "%s-%d" % (save_path, global_step_int)
        self.assertEqual(expected_save_path, val)
class SaveRestoreShardedTest(tf.test.TestCase):
  """Tests for sharded (per-device) checkpoint files."""

  def testBasics(self):
    """Save two variables on separate CPU devices into two shards, then
    restore from individual shards and from the sharded pattern."""
    save_path = os.path.join(self.get_temp_dir(), "sharded")
    # Build a graph with 2 parameter nodes on different devices.
    with tf.Session(
        target="",
        config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
      with sess.graph.device("/cpu:0"):
        v0 = tf.Variable(10, name="v0")
      with sess.graph.device("/cpu:1"):
        v1 = tf.Variable(20, name="v1")
      save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True)
      tf.initialize_all_variables().run()
      val = save.save(sess, save_path)
      # sharded save returns the filename pattern, not a single file
      self.assertEqual(save_path + "-?????-of-00002", val)
      meta_graph_filename = save._MetaGraphFilename(val)
      self.assertEqual(save_path + ".meta", meta_graph_filename)
    # Restore a different "v0" from shard 0 of the saved files.
    with tf.Session(
        target="",
        config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
      with sess.graph.device("/cpu:0"):
        v0 = tf.Variable(111, name="v0")
        save = tf.train.Saver({"v0": v0}, sharded=True)
        tf.initialize_all_variables().run()
        self.assertEqual(111, v0.eval())
        save.restore(sess, save_path + "-00000-of-00002")
        self.assertEqual(10, v0.eval())
    # Restore a different "v1" from shard 1 of the saved files.
    with tf.Session(
        target="",
        config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
      with sess.graph.device("/cpu:0"):
        v1 = tf.Variable(222)
        save = tf.train.Saver({"v1": v1}, sharded=True)
        tf.initialize_all_variables().run()
        self.assertEqual(222, v1.eval())
        save.restore(sess, save_path + "-00001-of-00002")
        self.assertEqual(20, v1.eval())
    # Now try a restore with the sharded filename.
    with tf.Session(
        target="",
        config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
      with sess.graph.device("/cpu:0"):
        v0 = tf.Variable(111, name="v0")
      with sess.graph.device("/cpu:1"):
        v1 = tf.Variable(222, name="v1")
      save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True)
      tf.initialize_all_variables().run()
      self.assertEqual(111, v0.eval())
      self.assertEqual(222, v1.eval())
      save_path = os.path.join(self.get_temp_dir(), "sharded")
      save.restore(sess, save_path + "-?????-of-?????")
      self.assertEqual(10, v0.eval())
      self.assertEqual(20, v1.eval())
    self.assertEqual(
        tf.train.latest_checkpoint(self.get_temp_dir()),
        os.path.join(self.get_temp_dir(), "sharded-?????-of-00002"))

  def testSaverDef(self):
    """The exported SaverDef proto records the sharded flag."""
    with self.test_session():
      v0 = tf.Variable(123, name="v0")
      save = tf.train.Saver({"v0": v0}, sharded=True)
      sd = save.as_saver_def()
      self.assertTrue(sd.sharded)
class MaxToKeepTest(tf.test.TestCase):
  """Tests for the Saver's max_to_keep checkpoint-rotation behavior."""

  def testNonSharded(self):
    """max_to_keep=2 keeps only the two newest checkpoints; state is shared
    or not depending on how helper Savers are constructed."""
    save_dir = _TestDir("max_to_keep_non_sharded")
    with self.test_session() as sess:
      v = tf.Variable(10.0, name="v")
      save = tf.train.Saver({"v": v}, max_to_keep=2)
      tf.initialize_all_variables().run()
      self.assertEqual([], save.last_checkpoints)
      s1 = save.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([s1], save.last_checkpoints)
      self.assertTrue(gfile.Exists(s1))
      s2 = save.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s1, s2], save.last_checkpoints)
      self.assertTrue(gfile.Exists(s1))
      self.assertTrue(gfile.Exists(s2))
      # third save evicts the oldest checkpoint (s1)
      s3 = save.save(sess, os.path.join(save_dir, "s3"))
      self.assertEqual([s2, s3], save.last_checkpoints)
      self.assertFalse(gfile.Exists(s1))
      self.assertTrue(gfile.Exists(s2))
      self.assertTrue(gfile.Exists(s3))
      # Create a second helper, identical to the first.
      save2 = tf.train.Saver(saver_def=save.as_saver_def())
      save2.set_last_checkpoints(save.last_checkpoints)
      # Create a third helper, with the same configuration but no knowledge of
      # previous checkpoints.
      save3 = tf.train.Saver(saver_def=save.as_saver_def())
      # Exercise the first helper.
      # Adding s2 again (old s2 is removed first, then new s2 appended)
      s2 = save.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s3, s2], save.last_checkpoints)
      self.assertFalse(gfile.Exists(s1))
      self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
      self.assertTrue(gfile.Exists(s3))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3)))
      self.assertTrue(gfile.Exists(s2))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
      # Adding s1 (s3 should now be deleted as oldest in list)
      s1 = save.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([s2, s1], save.last_checkpoints)
      self.assertFalse(gfile.Exists(s3))
      self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
      self.assertTrue(gfile.Exists(s2))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
      self.assertTrue(gfile.Exists(s1))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
      # Exercise the second helper.
      # Adding s2 again (old s2 is removed first, then new s2 appended)
      s2 = save2.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s3, s2], save2.last_checkpoints)
      # Created by the first helper.
      self.assertTrue(gfile.Exists(s1))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
      # Deleted by the first helper.
      self.assertFalse(gfile.Exists(s3))
      self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
      self.assertTrue(gfile.Exists(s2))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
      # Adding s1 (s3 should now be deleted as oldest in list)
      s1 = save2.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([s2, s1], save2.last_checkpoints)
      self.assertFalse(gfile.Exists(s3))
      self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
      self.assertTrue(gfile.Exists(s2))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
      self.assertTrue(gfile.Exists(s1))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
      # Exercise the third helper.
      # Adding s2 again (but helper is unaware of previous s2)
      s2 = save3.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s2], save3.last_checkpoints)
      # Created by the first helper.
      self.assertTrue(gfile.Exists(s1))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
      # Deleted by the first helper.
      self.assertFalse(gfile.Exists(s3))
      self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
      self.assertTrue(gfile.Exists(s2))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
      # Adding s1 (s3 should not be deleted because helper is unaware of it)
      s1 = save3.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([s2, s1], save3.last_checkpoints)
      self.assertFalse(gfile.Exists(s3))
      self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
      self.assertTrue(gfile.Exists(s2))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
      self.assertTrue(gfile.Exists(s1))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))

  def testSharded(self):
    """max_to_keep rotation also removes all shards of an evicted checkpoint."""
    save_dir = _TestDir("max_to_keep_sharded")
    with tf.Session(
        target="",
        config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
      with sess.graph.device("/cpu:0"):
        v0 = tf.Variable(111, name="v0")
      with sess.graph.device("/cpu:1"):
        v1 = tf.Variable(222, name="v1")
      save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True, max_to_keep=2)
      tf.initialize_all_variables().run()
      self.assertEqual([], save.last_checkpoints)
      s1 = save.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([s1], save.last_checkpoints)
      # each checkpoint consists of 2 shard files matched by Glob
      self.assertEqual(2, len(gfile.Glob(s1)))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
      s2 = save.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s1, s2], save.last_checkpoints)
      self.assertEqual(2, len(gfile.Glob(s1)))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
      self.assertEqual(2, len(gfile.Glob(s2)))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
      s3 = save.save(sess, os.path.join(save_dir, "s3"))
      self.assertEqual([s2, s3], save.last_checkpoints)
      self.assertEqual(0, len(gfile.Glob(s1)))
      self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
      self.assertEqual(2, len(gfile.Glob(s2)))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
      self.assertEqual(2, len(gfile.Glob(s3)))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3)))

  def testNoMaxToKeep(self):
    """max_to_keep of None or 0 disables rotation and tracking entirely."""
    save_dir = _TestDir("no_max_to_keep")
    save_dir2 = _TestDir("max_to_keep_0")
    with self.test_session() as sess:
      v = tf.Variable(10.0, name="v")
      tf.initialize_all_variables().run()
      # Test max_to_keep being None.
      save = tf.train.Saver({"v": v}, max_to_keep=None)
      self.assertEqual([], save.last_checkpoints)
      s1 = save.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([], save.last_checkpoints)
      self.assertTrue(gfile.Exists(s1))
      s2 = save.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([], save.last_checkpoints)
      self.assertTrue(gfile.Exists(s2))
      # Test max_to_keep being 0.
      save2 = tf.train.Saver({"v": v}, max_to_keep=0)
      self.assertEqual([], save2.last_checkpoints)
      s1 = save2.save(sess, os.path.join(save_dir2, "s1"))
      self.assertEqual([], save2.last_checkpoints)
      self.assertTrue(gfile.Exists(s1))
      s2 = save2.save(sess, os.path.join(save_dir2, "s2"))
      self.assertEqual([], save2.last_checkpoints)
      self.assertTrue(gfile.Exists(s2))
class KeepCheckpointEveryNHoursTest(tf.test.TestCase):
  """Tests for keep_checkpoint_every_n_hours interacting with max_to_keep."""

  def testNonSharded(self):
    """A checkpoint older than the keep-every interval survives rotation."""
    save_dir = _TestDir("keep_checkpoint_every_n_hours")
    with self.test_session() as sess:
      v = tf.Variable([10.0], name="v")
      # Run the initializer NOW to avoid the 0.5s overhead of the first Run()
      # call, which throws the test timing off in fastbuild mode.
      tf.initialize_all_variables().run()
      # Create a saver that will keep the last 2 checkpoints plus one every 0.7
      # seconds.
      start_time = time.time()
      save = tf.train.Saver({"v": v}, max_to_keep=2,
                            keep_checkpoint_every_n_hours=0.7 / 3600)
      self.assertEqual([], save.last_checkpoints)
      # Wait till 0.7 second have elapsed so s1 will be old enough to keep.
      time.sleep((time.time() + 0.7) - start_time)
      s1 = save.save(sess, os.path.join(save_dir, "s1"))
      self.assertEqual([s1], save.last_checkpoints)
      s2 = save.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s1, s2], save.last_checkpoints)
      # We now have 2 'last_checkpoints': [s1, s2]. The next call to Save(),
      # would normally delete s1, because max_to_keep is 2. However, s1 is
      # older than 0.7s so we must keep it.
      s3 = save.save(sess, os.path.join(save_dir, "s3"))
      self.assertEqual([s2, s3], save.last_checkpoints)
      # s1 should still be here, we are Not checking now to reduce time
      # variance in the test.
      # We now have 2 'last_checkpoints': [s2, s3], and s1 on disk. The next
      # call to Save(), will delete s2, because max_to_keep is 2, and because
      # we already kept the old s1. s2 is very close in time to s1 so it gets
      # deleted.
      s4 = save.save(sess, os.path.join(save_dir, "s4"))
      self.assertEqual([s3, s4], save.last_checkpoints)
      # Check that s1 is still here, but s2 is gone.
      self.assertTrue(gfile.Exists(s1))
      self.assertFalse(gfile.Exists(s2))
      self.assertTrue(gfile.Exists(s3))
      self.assertTrue(gfile.Exists(s4))
class SaveRestoreWithVariableNameMap(tf.test.TestCase):
  """Tests saving/restoring when checkpoint tensor names are remapped."""

  def testNonReshape(self):
    """A name map at save time controls the names stored in the checkpoint."""
    save_path = os.path.join(self.get_temp_dir(), "basics")
    with self.test_session() as sess:
      # Build a graph with 2 parameter nodes, and Save and
      # Restore nodes for them.
      v0 = tf.Variable(10.0, name="v0")
      v1 = tf.Variable(20.0, name="v1")
      save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
      tf.initialize_all_variables().run()
      # Check that the parameter nodes have been initialized.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
      # Save the initialized values in the file at "save_path"
      # Use a variable name map to set the saved tensor names
      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, six.string_types))
      self.assertEqual(save_path, val)
      # Verify that the original names are not in the Saved file
      save = tf.train.Saver({"v0": v0, "v1": v1})
      with self.assertRaisesOpError("not found in checkpoint"):
        save.restore(sess, save_path)
    # Verify that the mapped names are present in the Saved file and can be
    # Restored using remapped names.
    with self.test_session() as sess:
      v0 = tf.Variable(-1.0, name="v0")
      v1 = tf.Variable(-1.0, name="v1")
      with self.assertRaisesOpError("uninitialized value v0"):
        sess.run(v0)
      with self.assertRaisesOpError("uninitialized value v1"):
        sess.run(v1)
      save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
      save.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
    # Add a prefix to the node names in the current graph and Restore using
    # remapped names.
    with self.test_session() as sess:
      v0 = tf.Variable(-1.0, name="restore_prefix/v0")
      v1 = tf.Variable(-1.0, name="restore_prefix/v1")
      with self.assertRaisesOpError("uninitialized value restore_prefix/v0"):
        sess.run(v0)
      with self.assertRaisesOpError("uninitialized value restore_prefix/v1"):
        sess.run(v1)
      # Restore the saved values in the parameter nodes.
      save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
      save.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
class LatestCheckpointWithRelativePaths(tf.test.TestCase):
  """Tests tf.train.latest_checkpoint with a relative checkpoint directory."""

  @staticmethod
  @contextlib.contextmanager
  def tempWorkingDir(temppath):
    """Temporarily chdir into *temppath*, restoring the old cwd on exit."""
    cwd = os.getcwd()
    os.chdir(temppath)
    try:
      yield
    finally:
      os.chdir(cwd)

  @staticmethod
  @contextlib.contextmanager
  def tempDir():
    """Yield a fresh temporary directory, deleting it on exit."""
    tempdir = tempfile.mkdtemp()
    try:
      yield tempdir
    finally:
      shutil.rmtree(tempdir)

  def testRelativePath(self):
    """Checkpoints written via a relative path can be found and restored."""
    # Make sure we have a clean directory to work in.
    with self.tempDir() as tempdir:
      # Jump to that directory until this test is done.
      with self.tempWorkingDir(tempdir):
        # Save training snapshots to a relative path.
        traindir = "train/"
        os.mkdir(traindir)
        filename = "snapshot"
        filepath = os.path.join(traindir, filename)
        with self.test_session() as sess:
          # Build a simple graph.
          v0 = tf.Variable(0.0)
          inc = v0.assign_add(1.0)
          save = tf.train.Saver({"v0": v0})
          # Record a short training history.
          tf.initialize_all_variables().run()
          save.save(sess, filepath, global_step=0)
          inc.eval()
          save.save(sess, filepath, global_step=1)
          inc.eval()
          save.save(sess, filepath, global_step=2)
        with self.test_session() as sess:
          # Build a new graph with different initialization.
          v0 = tf.Variable(-1.0)
          # Create a new saver.
          save = tf.train.Saver({"v0": v0})
          tf.initialize_all_variables().run()
          # Get the most recent checkpoint name from the training history file.
          name = tf.train.latest_checkpoint(traindir)
          self.assertIsNotNone(name)
          # Restore "v0" from that checkpoint.
          save.restore(sess, name)
          self.assertEqual(v0.eval(), 2.0)
class CheckpointStateTest(tf.test.TestCase):
  """Tests generation and updating of CheckpointState protos."""

  def testAbsPath(self):
    """Absolute model paths are stored verbatim in the proto."""
    save_dir = _TestDir("abs_paths")
    abs_path = os.path.join(save_dir, "model-0")
    ckpt = tf.train.generate_checkpoint_state_proto(save_dir, abs_path)
    self.assertEqual(ckpt.model_checkpoint_path, abs_path)
    self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
    self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
    self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)

  def testRelPath(self):
    """Paths under save_dir are stored relative to it."""
    train_dir = "train"
    model = os.path.join(train_dir, "model-0")
    # model_checkpoint_path should have no "train" directory part.
    new_rel_path = "model-0"
    ckpt = tf.train.generate_checkpoint_state_proto(train_dir, model)
    self.assertEqual(ckpt.model_checkpoint_path, new_rel_path)
    self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
    self.assertEqual(ckpt.all_model_checkpoint_paths[-1], new_rel_path)

  def testAllModelCheckpointPaths(self):
    """The current path is always appended to all_model_checkpoint_paths."""
    save_dir = _TestDir("all_models_test")
    abs_path = os.path.join(save_dir, "model-0")
    for paths in [None, [], ["model-2"]]:
      ckpt = tf.train.generate_checkpoint_state_proto(
          save_dir,
          abs_path,
          all_model_checkpoint_paths=paths)
      self.assertEqual(ckpt.model_checkpoint_path, abs_path)
      self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
      self.assertEqual(
          len(ckpt.all_model_checkpoint_paths), len(paths) if paths else 1)
      self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)

  def testUpdateCheckpointState(self):
    """update_checkpoint_state round-trips through get_checkpoint_state."""
    save_dir = _TestDir("update_checkpoint_state")
    # NOTE(review): the working directory is changed here and never restored,
    # which can leak into subsequently run tests -- confirm intent.
    os.chdir(save_dir)
    # Make a temporary train directory.
    train_dir = "train"
    os.mkdir(train_dir)
    abs_path = os.path.join(save_dir, "model-0")
    rel_path = "train/model-2"
    tf.train.update_checkpoint_state(
        train_dir,
        rel_path,
        all_model_checkpoint_paths=[abs_path, rel_path])
    ckpt = tf.train.get_checkpoint_state(train_dir)
    self.assertEqual(ckpt.model_checkpoint_path, rel_path)
    self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
    self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path)
    self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path)
class MetaGraphTest(tf.test.TestCase):
  """Tests MetaGraphDef export/import: collections, multiple savers,
  binary/text formats, graph extension after restore, and stripped op lists.
  """

  def testAddCollectionDef(self):
    """Collections of many value types round-trip through a MetaGraphDef."""
    test_dir = _TestDir("good_collection")
    filename = os.path.join(test_dir, "metafile")
    with self.test_session():
      # Creates a graph.
      v0 = tf.Variable(10.0, name="v0")
      var = tf.Variable(tf.constant(0, dtype=tf.int64))
      count_up_to = var.count_up_to(3)
      input_queue = tf.FIFOQueue(30, tf.float32, shared_name="collection_queue")
      qr = tf.train.QueueRunner(input_queue, [count_up_to])
      tf.initialize_all_variables()
      # Creates a saver.
      save = tf.train.Saver({"v0": v0})
      # Adds a set of collections.
      tf.add_to_collection("int_collection", 3)
      tf.add_to_collection("float_collection", 3.5)
      tf.add_to_collection("string_collection", "hello")
      tf.add_to_collection("variable_collection", v0)
      # Add QueueRunners.
      tf.train.add_queue_runner(qr)
      # Adds user_defined proto in three formats: string, bytes and Any.
      queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
      tf.add_to_collection("user_defined_string_collection", str(queue_runner))
      tf.add_to_collection("user_defined_bytes_collection",
                           queue_runner.SerializeToString())
      any_buf = Any()
      any_buf.Pack(queue_runner)
      tf.add_to_collection("user_defined_any_collection", any_buf)
      # Generates MetaGraphDef.
      meta_graph_def = save.export_meta_graph(filename)
      self.assertTrue(meta_graph_def.HasField("saver_def"))
      self.assertTrue(meta_graph_def.HasField("graph_def"))
      collection_def = meta_graph_def.collection_def
      self.assertEqual(len(collection_def), 10)
    with tf.Graph().as_default():
      # Restores from MetaGraphDef.
      new_saver = tf.train.import_meta_graph(filename)
      # Generates a new MetaGraphDef.
      new_meta_graph_def = new_saver.export_meta_graph()
      # It should be the same as the original.
      self.assertProtoEquals(meta_graph_def, new_meta_graph_def)

  def testAddCollectionDefFails(self):
    """Unsupported collection keys / mixed-type collections are skipped."""
    with self.test_session():
      # Creates a graph.
      v0 = tf.Variable(10.0, name="v0")
      # Creates a saver.
      save = tf.train.Saver({"v0": v0})
      # Generates MetaGraphDef.
      meta_graph_def = meta_graph_pb2.MetaGraphDef()
      # Verifies that collection with unsupported key will not be added.
      tf.add_to_collection(save, 3)
      save._add_collection_def(meta_graph_def, save)
      self.assertEqual(len(meta_graph_def.collection_def), 0)
      # Verifies that collection where item type does not match expected
      # type will not be added.
      tf.add_to_collection("int_collection", 3)
      tf.add_to_collection("int_collection", 3.5)
      save._add_collection_def(meta_graph_def, "int_collection")
      self.assertEqual(len(meta_graph_def.collection_def), 0)

  def _testMultiSaverCollectionSave(self):
    """Save half: exports a graph containing two savers in a collection."""
    test_dir = _TestDir("saver_collection")
    filename = os.path.join(test_dir, "metafile")
    saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
    saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
    with self.test_session(graph=tf.Graph()) as sess:
      # Creates a graph.
      v0 = tf.Variable(10.0, name="v0")
      v1 = tf.Variable(11.0, name="v1")
      # Creates 2 savers.
      saver0 = tf.train.Saver({"v0": v0}, name="saver0")
      saver1 = tf.train.Saver({"v1": v1}, name="saver1")
      tf.add_to_collection("savers", saver0)
      tf.add_to_collection("savers", saver1)
      tf.initialize_all_variables().run()
      # Saves to different checkpoints.
      saver0.save(sess, saver0_ckpt)
      saver1.save(sess, saver1_ckpt)
      # Generates MetaGraphDef.
      meta_graph_def = tf.train.export_meta_graph(filename)
      meta_graph_def0 = saver0.export_meta_graph()
      meta_graph_def1 = saver1.export_meta_graph()
      # Verifies that there is no saver_def in meta_graph_def.
      self.assertFalse(meta_graph_def.HasField("saver_def"))
      # Verifies that there is saver_def in meta_graph_def0 and 1.
      self.assertTrue(meta_graph_def0.HasField("saver_def"))
      self.assertTrue(meta_graph_def1.HasField("saver_def"))
      # Verifies SAVERS is saved as bytes_list for meta_graph_def.
      collection_def = meta_graph_def.collection_def["savers"]
      kind = collection_def.WhichOneof("kind")
      self.assertEqual(kind, "bytes_list")
      # Verifies that there are 2 entries in SAVERS collection.
      savers = getattr(collection_def, kind)
      self.assertEqual(2, len(savers.value))
      # Verifies SAVERS collection is saved as bytes_list for meta_graph_def0.
      collection_def = meta_graph_def0.collection_def["savers"]
      kind = collection_def.WhichOneof("kind")
      self.assertEqual(kind, "bytes_list")
      # Verifies that there are 2 entries in SAVERS collection.
      savers = getattr(collection_def, kind)
      self.assertEqual(2, len(savers.value))

  def _testMultiSaverCollectionRestore(self):
    """Restore half: reimports the graph and uses both savers independently."""
    test_dir = os.path.join(self.get_temp_dir(), "saver_collection")
    filename = os.path.join(test_dir, "metafile")
    saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
    saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
    with self.test_session(graph=tf.Graph()) as sess:
      # Imports from meta_graph.
      tf.train.import_meta_graph(filename)
      # Retrieves SAVERS collection. Verifies there are 2 entries.
      savers = tf.get_collection("savers")
      self.assertEqual(2, len(savers))
      # Retrieves saver0. Verifies that new_saver0 can restore v0, but not v1.
      new_saver0 = savers[0]
      new_saver0.restore(sess, saver0_ckpt)
      v0 = sess.graph.get_tensor_by_name("v0:0")
      v1 = sess.graph.get_tensor_by_name("v1:0")
      self.assertEqual(10.0, v0.eval())
      with self.assertRaisesWithPredicateMatch(
          tf.OpError, lambda e: "uninitialized value v1" in e.message):
        sess.run(v1)
      # Retrieves saver1. Verifies that new_saver1 can restore v1.
      new_saver1 = savers[1]
      new_saver1.restore(sess, saver1_ckpt)
      v1 = sess.graph.get_tensor_by_name("v1:0")
      self.assertEqual(11.0, v1.eval())

  def testMultiSaverCollection(self):
    # Save and restore must run in this order to share files on disk.
    self._testMultiSaverCollectionSave()
    self._testMultiSaverCollectionRestore()

  def testBinaryAndTextFormat(self):
    """MetaGraphDefs can be exported/imported in both binary and text form."""
    test_dir = _TestDir("binary_and_text")
    filename = os.path.join(test_dir, "metafile")
    with self.test_session(graph=tf.Graph()):
      # Creates a graph.
      tf.Variable(10.0, name="v0")
      # Exports the graph as binary format.
      tf.train.export_meta_graph(filename, as_text=False)
    with self.test_session(graph=tf.Graph()):
      # Imports the binary format graph.
      saver = tf.train.import_meta_graph(filename)
      # Exports the graph as text format.
      saver.export_meta_graph(filename, as_text=True)
    with self.test_session(graph=tf.Graph()):
      # Imports the text format graph.
      tf.train.import_meta_graph(filename)
      # Writes wrong contents to the file.
      tf.train.write_graph(saver.as_saver_def(), os.path.dirname(filename),
                           os.path.basename(filename))
    with self.test_session(graph=tf.Graph()):
      # Import should fail.
      with self.assertRaisesWithPredicateMatch(
          IOError, lambda e: "Cannot parse file"):
        tf.train.import_meta_graph(filename)
      # Deletes the file
      gfile.Remove(filename)
      with self.assertRaisesWithPredicateMatch(
          IOError, lambda e: "does not exist"):
        tf.train.import_meta_graph(filename)

  def testSliceVariable(self):
    """Sliced variables survive a MetaGraphDef round trip."""
    test_dir = _TestDir("slice_saver")
    filename = os.path.join(test_dir, "metafile")
    with self.test_session():
      v1 = tf.Variable([20.0], name="v1")
      v2 = tf.Variable([20.0], name="v2")
      v2._set_save_slice_info(tf.Variable.SaveSliceInfo("v1", [1], [0], [1]))
      # The names are different and will work.
      slice_saver = tf.train.Saver({"first": v1, "second": v2})
      tf.initialize_all_variables().run()
      # Exports to meta_graph
      meta_graph_def = slice_saver.export_meta_graph(filename)
    with tf.Graph().as_default():
      # Restores from MetaGraphDef.
      new_saver = tf.train.import_meta_graph(filename)
      # Generates a new MetaGraphDef.
      new_meta_graph_def = new_saver.export_meta_graph()
      # It should be the same as the original.
      self.assertProtoEquals(meta_graph_def, new_meta_graph_def)

  def _testGraphExtensionSave(self):
    """Save half: exports an inference graph (3-layer MLP) and a checkpoint."""
    test_dir = _TestDir("graph_extension")
    filename = os.path.join(test_dir, "metafile")
    saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
    with self.test_session(graph=tf.Graph()) as sess:
      # Creates an inference graph.
      # Hidden 1
      images = tf.constant(1.2, tf.float32, shape=[100, 28])
      with tf.name_scope("hidden1"):
        weights = tf.Variable(
            tf.truncated_normal([28, 128],
                                stddev=1.0 / math.sqrt(float(28))),
            name="weights")
        biases = tf.Variable(tf.zeros([128]),
                             name="biases")
        hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
      # Hidden 2
      with tf.name_scope("hidden2"):
        weights = tf.Variable(
            tf.truncated_normal([128, 32],
                                stddev=1.0 / math.sqrt(float(128))),
            name="weights")
        biases = tf.Variable(tf.zeros([32]),
                             name="biases")
        hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
      # Linear
      with tf.name_scope("softmax_linear"):
        weights = tf.Variable(
            tf.truncated_normal([32, 10],
                                stddev=1.0 / math.sqrt(float(32))),
            name="weights")
        biases = tf.Variable(tf.zeros([10]),
                             name="biases")
        logits = tf.matmul(hidden2, weights) + biases
        tf.add_to_collection("logits", logits)
      # Runs to logit.
      tf.initialize_all_variables().run()
      sess.run(logits)
      # Creates a saver.
      saver0 = tf.train.Saver()
      saver0.save(sess, saver0_ckpt)
      # Generates MetaGraphDef.
      saver0.export_meta_graph(filename)

  def _testGraphExtensionRestore(self):
    """Restore half: reimports the graph and extends it with loss/training."""
    test_dir = os.path.join(self.get_temp_dir(), "graph_extension")
    filename = os.path.join(test_dir, "metafile")
    saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
    with self.test_session(graph=tf.Graph()) as sess:
      # Restores from MetaGraphDef.
      new_saver = tf.train.import_meta_graph(filename)
      # Generates a new MetaGraphDef.
      new_saver.export_meta_graph()
      # Restores from checkpoint.
      new_saver.restore(sess, saver0_ckpt)
      # Adds loss and train ops.
      labels = tf.constant(0, tf.int32, shape=[100], name="labels")
      batch_size = tf.size(labels)
      labels = tf.expand_dims(labels, 1)
      indices = tf.expand_dims(tf.range(0, batch_size), 1)
      concated = tf.concat(1, [indices, labels])
      onehot_labels = tf.sparse_to_dense(
          concated, tf.pack([batch_size, 10]), 1.0, 0.0)
      logits = tf.get_collection("logits")[0]
      cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits,
                                                              onehot_labels,
                                                              name="xentropy")
      loss = tf.reduce_mean(cross_entropy, name="xentropy_mean")
      tf.scalar_summary(loss.op.name, loss)
      # Creates the gradient descent optimizer with the given learning rate.
      optimizer = tf.train.GradientDescentOptimizer(0.01)
      # Runs train_op.
      train_op = optimizer.minimize(loss)
      sess.run(train_op)

  def testGraphExtension(self):
    # Save and restore must run in this order to share files on disk.
    self._testGraphExtensionSave()
    self._testGraphExtensionRestore()

  def testStrippedOpListDef(self):
    """The stripped op list contains exactly the ops used by the graph."""
    with self.test_session():
      # Creates a graph.
      v0 = tf.Variable(0.0)
      var = tf.Variable(10.0)
      tf.add(v0, var)
      @function.Defun(x=tf.float32)
      def minus_one(x):
        return x - 1
      minus_one(tf.identity(v0))
      save = tf.train.Saver({"v0": v0})
      tf.initialize_all_variables()
      # Generates MetaGraphDef.
      meta_graph_def = save.export_meta_graph()
      ops = [o.name for o in meta_graph_def.meta_info_def.stripped_op_list.op]
      self.assertEqual(ops, ["Add", "Assign", "Const", "Identity", "NoOp",
                             "RestoreSlice", "SaveSlices", "Sub", "Variable"])
      # Test calling stripped_op_list_for_graph directly
      op_list = tf.contrib.util.stripped_op_list_for_graph(
          meta_graph_def.graph_def)
      self.assertEqual(ops, [o.name for o in op_list.op])
      for o in op_list.op:
        self.assertEqual(o.summary, "")
        self.assertEqual(o.description, "")

  def testStrippedOpListNestedFunctions(self):
    """Ops used only inside (called) nested functions are still counted."""
    with self.test_session():
      # Square two levels deep
      def f0(x):
        return tf.square(x)
      f0 = function.define_function(f0, {"x": tf.int32})
      def f1(x):
        return function.call_function(f0, x)
      f1 = function.define_function(f1, {"x": tf.int32})
      # At this point we've defined two functions but haven't called them, so
      # there should be no used ops.
      op_list = tf.contrib.util.stripped_op_list_for_graph(
          tf.get_default_graph().as_graph_def())
      self.assertEquals(len(op_list.op), 0)
      # If we call the function on a constant, there should be two ops
      function.call_function(f1, tf.constant(7))
      op_list = tf.contrib.util.stripped_op_list_for_graph(
          tf.get_default_graph().as_graph_def())
      self.assertEquals(["Const", "Square"], [op.name for op in op_list.op])

  def testStrippedOpListRecursiveFunctions(self):
    """Mutually recursive function libraries do not hang the op walker."""
    # The function module doesn't support recursive functions, so we build a
    # recursive function situation by ourselves: A calls B calls A and Const.
    graph = graph_pb2.GraphDef()
    a = graph.library.function.add()
    b = graph.library.function.add()
    a.signature.name = "A"
    b.signature.name = "B"
    a.node.add().op = "B"
    b.node.add().op = "Const"
    b.node.add().op = "A"
    # Use A in the graph
    graph.node.add().op = "A"
    # The stripped op list should contain just Const.
    op_list = tf.contrib.util.stripped_op_list_for_graph(graph)
    self.assertEquals(["Const"], [op.name for op in op_list.op])
# Run all test cases in this module when executed directly.
if __name__ == "__main__":
  tf.test.main()
|
|
# ===============================================================================
# Copyright 2016 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
import re
import yaml
# ============= enthought library imports =======================
from apptools.preferences.preference_binding import bind_preference
from traits.api import HasTraits, Str, Bool, Property, Event, cached_property, \
Button, String, Instance, List, Float, on_trait_change
from traitsui.api import UItem, Item, VGroup, HGroup, EnumEditor
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.core.pychron_traits import EmailStr
from pychron.core.yaml import yload
from pychron.dvc.dvc_irradiationable import DVCAble
from pychron.entry.providers.macrostrat import get_lithology_values
from pychron.entry.tasks.sample.sample_edit_view import SampleEditModel, LatFloat, LonFloat, SAMPLE_ATTRS
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.paths import paths
# Accepts capitalized surname-style PI names, optionally followed by one or
# more ", X" initial groups, e.g. "Ross" or "Ross, J".
PI_REGEX = re.compile(r'^[A-Z]+\w+(, ?[A-Z]{1})*$')
# MATERIAL_REGEX = re.compile(r'^[A-Z]+[\w%/\+-_]+$')
# Project names must begin with a letter; digits, dash and underscore follow.
PROJECT_REGEX = re.compile(r'^[a-zA-Z]+[\-\d_\w]*$')
class RString(String):
    """A String trait whose values must match a subclass-supplied ``regex``."""

    def validate(self, obj, name, value):
        """Return *value* when it matches ``self.regex``; otherwise raise a trait error."""
        if self.regex.match(value):
            return value
        return self.error(obj, name, value)
# Principal-investigator names that are accepted even if they fail PI_REGEX.
PI_NAMES = ('NMGRL',)
# An optional YAML file may replace the default whitelist.
if os.path.isfile(paths.valid_pi_names):
    PI_NAMES = yload(paths.valid_pi_names)
class PIStr(String):
    """A String trait for principal-investigator names.

    A value is valid when it matches PI_REGEX or is one of the
    whitelisted PI_NAMES.
    """

    def validate(self, obj, name, value):
        if PI_REGEX.match(value) or value in PI_NAMES:
            return value
        return self.error(obj, name, value)
# class MaterialStr(RString):
# regex = MATERIAL_REGEX
class ProjectStr(String):
    # Validated against PROJECT_REGEX; presumably handled by the String
    # trait's built-in ``regex`` support -- TODO confirm against traits docs.
    regex = PROJECT_REGEX
class Spec(HasTraits):
    """Base record for an entity staged for entry into the database."""
    name = Str
    # True once this record has been committed to the database.
    added = Bool
class PISpec(Spec):
    """Staged principal-investigator record."""

    affiliation = Str
    email = EmailStr

    def todump(self):
        """Serialize to a plain dict for the YAML backup file."""
        return {key: str(getattr(self, key))
                for key in ('name', 'affiliation', 'email')}

    @classmethod
    def fromdump(cls, d):
        """Rebuild a PISpec from a dict produced by ``todump``."""
        spec = cls()
        spec.name = d['name']
        # Older backups may lack the optional fields.
        spec.email = d.get('email', '')
        spec.affiliation = d.get('affiliation', '')
        return spec
class MaterialSpec(Spec):
    """Staged material record (name plus optional grainsize)."""

    name = Str
    grainsize = Str

    def todump(self):
        """Serialize to a plain dict for the YAML backup file."""
        return {'name': str(self.name), 'grainsize': str(self.grainsize)}

    @classmethod
    def fromdump(cls, d):
        """Rebuild a MaterialSpec from a dict produced by ``todump``.

        Fix: ``grainsize`` is optional, so default it to '' instead of
        raising KeyError -- consistent with PISpec.fromdump's handling of
        optional fields.
        """
        obj = cls()
        obj.name = d['name']
        obj.grainsize = d.get('grainsize', '')
        return obj
class ProjectSpec(Spec):
    """Staged project record tied to a principal investigator."""

    principal_investigator = Instance(PISpec)
    lab_contact = Str
    comment = Str
    institution = Str

    @property
    def optionals(self):
        """Optional DB columns, keyed by the dvc.add_project keyword names."""
        return {'lab_contact': self.lab_contact,
                'comment': self.comment,
                'institution': self.institution}

    def todump(self):
        """Serialize to a plain dict for the YAML backup file.

        Fix: the optional attributes (lab_contact, comment, institution)
        were previously not dumped, so they were silently lost across a
        backup/restore cycle.
        """
        d = {'name': str(self.name),
             'principal_investigator': self.principal_investigator.todump()}
        for k, v in self.optionals.items():
            d[k] = str(v)
        return d

    @classmethod
    def fromdump(cls, d, ps):
        """Rebuild a ProjectSpec, relinking its PI from the list *ps*."""
        obj = cls()
        obj.name = d['name']
        # Optionals may be absent in backups written by older versions.
        for attr in ('lab_contact', 'comment', 'institution'):
            setattr(obj, attr, d.get(attr, ''))
        pi = d['principal_investigator']['name']
        for pp in ps:
            if pp.name == pi:
                obj.principal_investigator = pp
                break
        return obj
class SampleSpec(Spec):
    """Staged sample record with location and lithology metadata."""

    project = Instance(ProjectSpec)
    material = Instance(MaterialSpec)
    note = Str
    lat = Float
    lon = Float
    igsn = Str
    unit = Str
    storage_location = Str
    lithology = Str
    lithology_class = Str
    lithology_group = Str
    lithology_type = Str
    location = Str
    approximate_age = Float
    elevation = Float

    def todump(self):
        """Serialize to a plain dict for the YAML backup file.

        Fix: the assembled dict was built but never returned, so sample
        backups serialized as ``None`` and could not be reloaded.
        """
        d = {'name': str(self.name), 'project': self.project.todump(),
             'material': self.material.todump()}
        for attr in SAMPLE_ATTRS:
            d[attr] = getattr(self, attr)
        return d

    @classmethod
    def fromdump(cls, d, pps, ms):
        """Rebuild a SampleSpec, relinking project/material from *pps*/*ms*."""
        obj = cls()
        for attr in SAMPLE_ATTRS:
            try:
                setattr(obj, attr, d[attr])
            except KeyError:
                # Older backups may not contain every attribute.
                pass
        project = d['project']
        pname = project['name']
        piname = project['principal_investigator']['name']
        for pp in pps:
            if pp.name == pname and pp.principal_investigator.name == piname:
                obj.project = pp
                break
        m = d['material']
        mname, grainsize = m['name'], m['grainsize']
        for mi in ms:
            if mi.name == mname and mi.grainsize == grainsize:
                obj.material = mi
                break
        return obj
class SampleEntry(DVCAble):
    """Pane model for staging and committing new principal-investigator,
    project, material and sample records to the database."""

    # --- principal investigator form fields
    principal_investigator = PIStr(enter_set=True, auto_set=False)
    principal_investigators = Property(depends_on='refresh_pis')
    email = EmailStr
    affiliation = Str
    refresh_pis = Event

    # --- project form fields
    project = ProjectStr(enter_set=True, auto_set=False)
    projects = Property(depends_on='refresh_projects')
    refresh_projects = Event

    # --- material form fields
    material = Str
    materials = Property(depends_on='refresh_materials')
    refresh_materials = Event
    grainsize = Str
    grainsizes = Property(depends_on='refresh_grainsizes')
    refresh_grainsizes = Event

    # --- sample form fields
    sample = Str
    note = Str
    lat = LatFloat
    lon = LonFloat
    igsn = Str
    unit = Str
    lithology = Str
    lithology_class = Str
    lithology_group = Str
    lithology_type = Str
    # pick-list values loaded from MacroStrat (see _load_lithologies)
    lithologies = List
    lithology_classes = List
    lithology_groups = List
    lithology_types = List
    location = Str
    storage_location = Str
    approximate_age = Float
    elevation = Float

    # --- UI buttons
    clear_sample_attributes_button = Button
    configure_sample_button = Button
    configure_pi_button = Button
    add_principal_investigator_button = Button
    add_project_button = Button
    add_sample_button = Button
    add_sample_enabled = Property(depends_on='sample, _add_sample_enabled')
    _add_sample_enabled = Bool
    add_button = Button
    add_material_button = Button
    generate_project_button = Button('Generate Name')
    set_optionals_button = Button('Set Optionals')

    # --- optional project metadata (cleared after use unless locked)
    project_comment = Str
    project_institution = Str
    project_lab_contact = Str
    lab_contacts = List
    lock_project_comment = Bool
    lock_project_institution = Bool
    lock_project_lab_contact = Bool

    project_enabled = Property(depends_on='principal_investigator')
    sample_enabled = Property(depends_on='principal_investigator, project, material')
    refresh_table = Event

    # --- database sample browsing/filtering
    db_samples = List
    sample_filter = String(enter_set=True, auto_set=False)
    sample_filter_attr = Str('name')
    sample_filter_attrs = List(('name', 'project', 'material', 'principal_investigator') + SAMPLE_ATTRS)
    selected_db_samples = List

    # --- staged (not yet committed) entries
    _samples = List
    _projects = List
    _materials = List
    _principal_investigators = List
    _default_project_count = 0

    selected_samples = List
    selected_projects = List
    selected_principal_investigators = List
    selected_materials = List

    sample_edit_model = Instance(SampleEditModel, ())
    auto_add_project_repository = Bool
def activated(self):
bind_preference(self, 'auto_add_project_repository', 'pychron.entry.sample.auto_add_project_repository')
self.refresh_pis = True
self.refresh_materials = True
self.refresh_projects = True
self.refresh_grainsizes = True
self.dvc.create_session()
self.sample_edit_model.dvc = self.dvc
self._load_lithologies()
    def prepare_destroy(self):
        """Persist staged entries to the backup file, then close the DB session."""
        self._backup()
        self.dvc.close_session()
def import_sample_from_file(self):
from pyface.file_dialog import FileDialog
from pyface.constant import OK
dlg = FileDialog(action='open', default_directory=paths.root_dir,
wildcard=FileDialog.create_wildcard('Excel', ('*.xls', '*.xlsx')))
if dlg.open() == OK:
path = dlg.path
if path:
from pychron.entry.sample_loader import XLSSampleLoader
sample_loader = XLSSampleLoader(dvc=self.dvc)
sample_loader.load(path)
sample_loader.do_import()
def clear(self):
if self.selected_principal_investigators:
for p in self.selected_principal_investigators:
if not p.added:
self._principal_investigators.remove(p)
self._projects = [p for p in self._projects
if p.principal_investigator not in self.selected_principal_investigators]
self._samples = [s for s in self._samples
if s.project.principal_investigator not in self.selected_principal_investigators]
self.selected_principal_investigators = []
if self.selected_projects:
for p in self.selected_projects:
if not p.added:
try:
self._projects.remove(p)
except ValueError:
pass
self._samples = [s for s in self._samples if s.project not in self.selected_projects]
self.selected_projects = []
if self.selected_materials:
for mi in self.selected_materials:
if not mi.added:
try:
self._materials.remove(mi)
except ValueError:
pass
self._samples = [s for s in self._samples if s.material not in self.selected_materials]
self.selected_materials = []
if self.selected_samples:
for ri in self.selected_samples:
if not ri.added:
try:
self._samples.remove(ri)
except ValueError:
pass
self.selected_samples = []
def save(self):
msg = None
if not self.sample_edit_model.save():
self._backup()
if self._save():
msg = 'Samples added to database'
else:
# refresh samples in display table
self._handle_sample_filter()
msg = 'Changes saved to database'
if msg:
self.information_dialog(msg)
def load(self, p):
obj = yload(p)
self._principal_investigators = ps = [PISpec.fromdump(p) for p in obj['principal_investigators'] if
p is not None]
self._materials = ms = [MaterialSpec.fromdump(p) for p in obj['materials'] if p is not None]
self._projects = pps = [ProjectSpec.fromdump(p, ps) for p in obj['projects'] if p is not None]
self._samples = [SampleSpec.fromdump(p, pps, ms) for p in obj['samples'] if p is not None]
def dump(self, p):
"""
only dump if at least one value is not null
:param p:
:return:
"""
obj = self._assemble()
if obj:
with open(p, 'w') as wfile:
yaml.dump(obj, wfile)
# private
def _selected_db_samples_changed(self, new):
if new:
self.sample_edit_model.init()
self.sample_edit_model.set_sample(new[0])
@on_trait_change('sample_filter_attr, sample_filter')
def _handle_sample_filter(self):
if self.sample_filter and self.sample_filter_attr:
sams = self.dvc.get_samples_filter(self.sample_filter_attr, self.sample_filter)
self.db_samples = sams
def _load_lithologies(self):
liths, groups, classes, types = get_lithology_values()
self.lithologies = liths
self.lithology_groups = groups
self.lithology_classes = classes
self.lithology_types = types
    def _backup(self):
        """Dump the staged entries to the hidden '.last.yaml' recovery file."""
        p = os.path.join(paths.sample_dir, '.last.yaml')
        self.dump(p)
def _assemble(self):
ps = [p.todump() for p in self._principal_investigators]
ms = [p.todump() for p in self._materials]
pps = [p.todump() for p in self._projects]
ss = [p.todump() for p in self._samples]
if ps or ms or pps or ss:
obj = {'principal_investigators': ps,
'projects': pps,
'materials': ms,
'samples': ss}
return obj
    def _save(self):
        """Commit all staged PIs, projects, materials and samples to the database.

        Each category is committed in its own session context; dependents
        (projects, samples) are committed after their prerequisites so
        foreign references resolve. Returns True after a commit pass, or
        None when nothing is staged.
        """
        # Nothing staged in any category: nothing to do.
        if not any((getattr(self, attr) for attr in ('_principal_investigators', '_materials', '_projects',
                                                     '_samples'))):
            return

        self.debug('saving sample info')
        dvc = self.dvc
        with dvc.session_ctx(use_parent_session=False):
            for p in self._principal_investigators:
                if dvc.add_principal_investigator(p.name, email=p.email, affiliation=p.affiliation):
                    p.added = True
                    dvc.commit()

        for p in self._projects:
            with dvc.session_ctx(use_parent_session=False):
                if p.name.startswith('?'):
                    # Placeholder ('?'-prefixed) projects get a final name
                    # generated from the new database row's id.
                    if dvc.add_project(p.name, p.principal_investigator.name,
                                       **p.optionals):
                        dbproject = dvc.get_project(p.name, p.principal_investigator.name)
                        p.added = True
                        dvc.commit()
                        # NOTE(review): [1:-2] strips the leading '?' AND the
                        # last two characters before appending the db id --
                        # confirm the placeholder name format requires this.
                        dbproject.name = p.name = '{}{}'.format(p.name[1:-2], dbproject.id)
                        if self.project.startswith('?'):
                            # Keep the form field in sync with the generated name.
                            self.project = p.name
                        dvc.commit()
                else:
                    if dvc.add_project(p.name, p.principal_investigator.name, **p.optionals):
                        p.added = True
                        dvc.commit()
                if self.auto_add_project_repository:
                    # Optionally create a matching data repository for the project.
                    dvc.add_repository(p.name, p.principal_investigator.name, inform=False)

        for m in self._materials:
            with dvc.session_ctx(use_parent_session=False):
                if dvc.add_material(m.name, m.grainsize or None):
                    m.added = True
                    dvc.commit()

        for s in self._samples:
            with dvc.session_ctx(use_parent_session=False):
                # Validate required fields; skip (not abort) invalid samples.
                if not s.name:
                    self.warning_dialog('A Sample name is required')
                    continue
                if (s.project and not s.project.name) or not s.project:
                    self.warning_dialog('A project name is required. Skipping {}'.format(s.name))
                    continue
                if (s.material and not s.material.name) or not s.material:
                    self.warning_dialog('A material is required. Skipping {}'.format(s.name))
                    continue

                if dvc.add_sample(s.name, s.project.name, s.project.principal_investigator.name,
                                  s.material.name,
                                  s.material.grainsize or None,
                                  igsn=s.igsn,
                                  unit=s.unit,
                                  storage_location=s.storage_location,
                                  lithology=s.lithology,
                                  lithology_class=s.lithology_class,
                                  lithology_group=s.lithology_group,
                                  lithology_type=s.lithology_type,
                                  location=s.location,
                                  approximate_age=s.approximate_age,
                                  elevation=s.elevation,
                                  lat=s.lat, lon=s.lon,
                                  note=s.note):
                    s.added = True
                    dvc.commit()

        self.refresh_table = True
        return True
def _check_for_similar_sample(self, s):
dvc = self.dvc
sims = []
sams = dvc.get_fuzzy_samples(s.name)
if sams:
sims = ['{}({})'.format(si.name, si.project.name) for si in sams]
return ','.join(sims)
def _principal_investigator_factory(self):
p = PISpec(name=self.principal_investigator,
email=self.email,
affiliation=self.affiliation)
self._principal_investigators.append(p)
return p
def _get_principal_investigator_spec(self):
for p in self._principal_investigators:
if p.name == self.principal_investigator:
return p
else:
p = self._principal_investigator_factory()
return p
def _get_project_spec(self):
if self.project:
pspec = self._get_principal_investigator_spec()
for p in self._projects:
if p.name == self.project and p.principal_investigator.name == pspec.name:
return p
else:
p = self._new_project_spec(pspec)
return p
def _new_project_spec(self, principal_investigator_spec):
project_spec = ProjectSpec(name=self.project,
principal_investigator=principal_investigator_spec)
for attr in ('lab_contact', 'comment', 'institution'):
name = 'project_{}'.format(attr)
setattr(project_spec, attr, getattr(self, name))
if not getattr(self, 'lock_{}'.format(name)):
setattr(self, name, '')
self._projects.append(project_spec)
return project_spec
def _get_material_spec(self):
if self.material:
for p in self._materials:
if p.name == self.material:
if not self.grainsize or self.grainsize == p.grainsize:
return p
else:
m = MaterialSpec(name=self.material, grainsize=self.grainsize)
self._materials.append(m)
return m
# handlers
def _clear_sample_attributes_button_fired(self):
self.storage_location = ''
self.lithology = ''
self.lithology_class = ''
self.lithology_group = ''
self.lithology_type = ''
self.lat = 0
self.lon = 0
self.location = ''
self.elevation = 0
self.igsn = ''
self.note = ''
self.approximate_age = 0
self.unit = ''
def _configure_pi_button_fired(self):
    """Open a dialog for editing principal-investigator attributes."""
    name_group = VGroup(UItem('principal_investigator'),
                        label='Name', show_border=True)
    optional_group = VGroup(Item('affiliation', label='Affiliation'),
                            Item('email', label='Email'),
                            label='Optional', show_border=True)
    view = okcancel_view(VGroup(name_group, optional_group),
                         title='Set Principal Investigator Attributes')
    self.edit_traits(view=view)
def _configure_sample_button_fired(self):
    """Open a dialog for editing extended sample attributes."""
    clear_row = HGroup(icon_button_editor('clear_sample_attributes_button',
                                          'clear'))
    name_group = VGroup(UItem('sample'), label='Name', show_border=True)
    location_group = VGroup(Item('lat', label='Latitude'),
                            Item('lon', label='Longitude'),
                            Item('location'),
                            Item('elevation'),
                            label='Location', show_border=True)
    geology_group = VGroup(Item('lithology', editor=EnumEditor(name='lithologies')),
                           Item('lithology_class', label='Class',
                                editor=EnumEditor(name='lithology_classes')),
                           Item('lithology_group', label='Group',
                                editor=EnumEditor(name='lithology_groups')),
                           Item('lithology_type', label='Type',
                                editor=EnumEditor(name='lithology_types')),
                           Item('approximate_age', label='Approx. Age (Ma)'),
                           Item('storage_location'),
                           show_border=True)
    view = okcancel_view(VGroup(clear_row, name_group, location_group,
                                geology_group),
                         title='Set Sample Attributes')
    self.edit_traits(view=view)
def _add_sample_button_fired(self):
    """Validate the current entry fields and queue a SampleSpec.

    A material and a project are required; when a similar-named sample
    already exists in the database the user is asked (yes/no) whether to
    add it anyway.
    """
    if self.sample:
        material_spec = self._get_material_spec()
        if not material_spec or not material_spec.name:
            self.information_dialog('Please enter a material for this sample')
            return
        project_spec = self._get_project_spec()
        if not project_spec or not project_spec.name:
            self.information_dialog('Please enter a project for this sample')
            return
        kw = {'project': project_spec, 'material': material_spec}
        # Entries are either a plain attribute name, or a
        # (spec attribute, model attribute) pair when the two names differ.
        for attr in (('name', 'sample'),
                     'lat', 'lon', 'igsn', 'note', 'unit',
                     'lithology', 'lithology_class', 'lithology_type', 'lithology_group'):
            if isinstance(attr, tuple):
                specattr, attr = attr
            else:
                specattr, attr = attr, attr
            kw[specattr] = getattr(self, attr)
        add = True
        spec = SampleSpec(**kw)
        sim = self._check_for_similar_sample(spec)
        if sim:
            add = False
            # a similar sample exists; let the user decide
            msg = '''A similar sample already exists.
Yes= Add anyways
No = Skip sample
Current Sample: {}
Existing Sample: {}'''.format(spec.name, sim)
            if self.confirmation_dialog(msg):
                add = True
        if add:
            self._samples.append(spec)
            # Disable the add button until the sample name changes again.
            self._add_sample_enabled = False
            self._backup()
def _add_project_button_fired(self):
if self.project:
pispec = self._get_principal_investigator_spec()
for p in self._projects:
if p.name == self.project and p.principal_investigator.name == pispec.name:
break
else:
self._new_project_spec(pispec)
self._backup()
def _add_material_button_fired(self):
    """Cache the current material/grainsize pair, first offering the
    normalized material name suggested by the Mapper."""
    if not self.material:
        return
    from pychron.entry.dvc_import.model import Mapper
    suggestion = Mapper().material(self.material)
    if suggestion != self.material:
        msg = 'Pychron suggests changing "{}" to "{}". \n\n' \
              'Would you like to continue?'.format(self.material, suggestion)
        if not self.confirmation_dialog(msg):
            return
        self.material = suggestion
    known = any(m.name == self.material and m.grainsize == self.grainsize
                for m in self._materials)
    if not known:
        self._materials.append(MaterialSpec(name=self.material,
                                            grainsize=self.grainsize))
    self._backup()
def _add_principal_investigator_button_fired(self):
if self.principal_investigator:
for p in self._principal_investigators:
if p.name == self.principal_investigator:
break
else:
self._principal_investigator_factory()
self._backup()
def _generate_project_button_fired(self):
piname = self.principal_investigator
if ',' in piname:
piname = piname.split(',')[0]
self.project = '?{}{:03n}'.format(piname, self._default_project_count)
self._default_project_count += 1
def _set_optionals_button_fired(self):
    """Open the optional project-metadata editor."""
    self.lab_contacts = self.dvc.get_usernames()
    from pychron.entry.tasks.sample.project_optionals_view import ProjectOptionalsView
    ProjectOptionalsView(model=self).edit_traits()
def _sample_changed(self):
    # Any edit to the sample name re-enables the "add sample" action.
    self._add_sample_enabled = True
@cached_property
def _get_project_enabled(self):
    # Project entry is only meaningful once a PI has been chosen.
    return bool(self.principal_investigator)
@cached_property
def _get_add_sample_enabled(self):
    # Requires a sample name and that it changed since the last add.
    return bool(self.sample) and self._add_sample_enabled
@cached_property
def _get_sample_enabled(self):
    """Sample entry requires material, project and PI all being set."""
    return all((self.material, self.project, self.principal_investigator))
@cached_property
def _get_principal_investigators(self):
    # PI names fetched once from the database within a session.
    with self.dvc.session_ctx():
        return self.dvc.get_principal_investigator_names()
@cached_property
def _get_materials(self):
    """Material names known to the database."""
    with self.dvc.session_ctx():
        return self.dvc.get_material_names()
@cached_property
def _get_projects(self):
    """Project names known to the database."""
    with self.dvc.session_ctx():
        return self.dvc.get_project_names()
@cached_property
def _get_grainsizes(self):
    """Known grainsizes, prefixed with an empty "no grainsize" entry."""
    with self.dvc.session_ctx():
        return [''] + self.dvc.get_grainsizes()
# ============= EOF =============================================
|
|
"""Test the Tradfri config flow."""
from unittest.mock import patch
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.tradfri import config_flow
from tests.common import mock_coro, MockConfigEntry
@pytest.fixture
def mock_auth():
    """Mock authenticate."""
    target = 'homeassistant.components.tradfri.config_flow.authenticate'
    with patch(target) as mocked:
        yield mocked
@pytest.fixture
def mock_entry_setup():
    """Mock entry setup."""
    target = 'homeassistant.components.tradfri.async_setup_entry'
    with patch(target) as mocked:
        mocked.return_value = mock_coro(True)
        yield mocked
async def test_user_connection_successful(hass, mock_auth, mock_entry_setup):
    """Test a successful connection."""
    # authenticate() returns the config dict used to create the entry.
    mock_auth.side_effect = lambda hass, host, code: mock_coro({
        'host': host,
        'gateway_id': 'bla'
    })
    flow = await hass.config_entries.flow.async_init(
        'tradfri', context={'source': 'user'})
    result = await hass.config_entries.flow.async_configure(flow['flow_id'], {
        'host': '123.123.123.123',
        'security_code': 'abcd',
    })
    # The created entry must be set up exactly once.
    assert len(mock_entry_setup.mock_calls) == 1
    assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result['result'].data == {
        'host': '123.123.123.123',
        'gateway_id': 'bla',
        'import_groups': False
    }
async def test_user_connection_timeout(hass, mock_auth, mock_entry_setup):
    """Test a connection timeout."""
    mock_auth.side_effect = config_flow.AuthError('timeout')
    flow = await hass.config_entries.flow.async_init(
        'tradfri', context={'source': 'user'})
    result = await hass.config_entries.flow.async_configure(flow['flow_id'], {
        'host': '127.0.0.1',
        'security_code': 'abcd',
    })
    # No entry should be created; the form is re-shown with a base error.
    assert len(mock_entry_setup.mock_calls) == 0
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['errors'] == {
        'base': 'timeout'
    }
async def test_user_connection_bad_key(hass, mock_auth, mock_entry_setup):
    """Test a connection with bad key."""
    mock_auth.side_effect = config_flow.AuthError('invalid_security_code')
    flow = await hass.config_entries.flow.async_init(
        'tradfri', context={'source': 'user'})
    result = await hass.config_entries.flow.async_configure(flow['flow_id'], {
        'host': '127.0.0.1',
        'security_code': 'abcd',
    })
    # The error is attached to the security_code field, not the form base.
    assert len(mock_entry_setup.mock_calls) == 0
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['errors'] == {
        'security_code': 'invalid_security_code'
    }
async def test_discovery_connection(hass, mock_auth, mock_entry_setup):
    """Test a connection via discovery."""
    mock_auth.side_effect = lambda hass, host, code: mock_coro({
        'host': host,
        'gateway_id': 'bla'
    })
    # Zeroconf discovery pre-fills the host, so the user only enters a code.
    flow = await hass.config_entries.flow.async_init(
        'tradfri', context={'source': 'zeroconf'}, data={
            'host': '123.123.123.123'
        })
    result = await hass.config_entries.flow.async_configure(flow['flow_id'], {
        'security_code': 'abcd',
    })
    assert len(mock_entry_setup.mock_calls) == 1
    assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result['result'].data == {
        'host': '123.123.123.123',
        'gateway_id': 'bla',
        'import_groups': False
    }
async def test_import_connection(hass, mock_auth, mock_entry_setup):
    """Test a connection via import."""
    mock_auth.side_effect = lambda hass, host, code: mock_coro({
        'host': host,
        'gateway_id': 'bla',
        'identity': 'mock-iden',
        'key': 'mock-key',
    })
    # YAML import supplies host and import_groups; the security code is
    # still asked from the user.
    flow = await hass.config_entries.flow.async_init(
        'tradfri', context={'source': 'import'}, data={
            'host': '123.123.123.123',
            'import_groups': True
        })
    result = await hass.config_entries.flow.async_configure(flow['flow_id'], {
        'security_code': 'abcd',
    })
    assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result['result'].data == {
        'host': '123.123.123.123',
        'gateway_id': 'bla',
        'identity': 'mock-iden',
        'key': 'mock-key',
        'import_groups': True
    }
    assert len(mock_entry_setup.mock_calls) == 1
async def test_import_connection_no_groups(hass, mock_auth, mock_entry_setup):
    """Test a connection via import and no groups allowed."""
    mock_auth.side_effect = lambda hass, host, code: mock_coro({
        'host': host,
        'gateway_id': 'bla',
        'identity': 'mock-iden',
        'key': 'mock-key',
    })
    # Same as the import test above, but import_groups is disabled and must
    # be carried through to the created entry unchanged.
    flow = await hass.config_entries.flow.async_init(
        'tradfri', context={'source': 'import'}, data={
            'host': '123.123.123.123',
            'import_groups': False
        })
    result = await hass.config_entries.flow.async_configure(flow['flow_id'], {
        'security_code': 'abcd',
    })
    assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result['result'].data == {
        'host': '123.123.123.123',
        'gateway_id': 'bla',
        'identity': 'mock-iden',
        'key': 'mock-key',
        'import_groups': False
    }
    assert len(mock_entry_setup.mock_calls) == 1
async def test_import_connection_legacy(hass, mock_gateway_info,
                                        mock_entry_setup):
    """Test a connection via import."""
    mock_gateway_info.side_effect = \
        lambda hass, host, identity, key: mock_coro({
            'host': host,
            'identity': identity,
            'key': key,
            'gateway_id': 'mock-gateway'
        })
    # Legacy configs provide a pre-shared key, so no user step is needed
    # and the default identity 'homeassistant' is used.
    result = await hass.config_entries.flow.async_init(
        'tradfri', context={'source': 'import'}, data={
            'host': '123.123.123.123',
            'key': 'mock-key',
            'import_groups': True
        })
    assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result['result'].data == {
        'host': '123.123.123.123',
        'gateway_id': 'mock-gateway',
        'identity': 'homeassistant',
        'key': 'mock-key',
        'import_groups': True
    }
    assert len(mock_gateway_info.mock_calls) == 1
    assert len(mock_entry_setup.mock_calls) == 1
async def test_import_connection_legacy_no_groups(
        hass, mock_gateway_info, mock_entry_setup):
    """Test a connection via legacy import and no groups allowed."""
    mock_gateway_info.side_effect = \
        lambda hass, host, identity, key: mock_coro({
            'host': host,
            'identity': identity,
            'key': key,
            'gateway_id': 'mock-gateway'
        })
    # Same legacy path as above but with group import disabled.
    result = await hass.config_entries.flow.async_init(
        'tradfri', context={'source': 'import'}, data={
            'host': '123.123.123.123',
            'key': 'mock-key',
            'import_groups': False
        })
    assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result['result'].data == {
        'host': '123.123.123.123',
        'gateway_id': 'mock-gateway',
        'identity': 'homeassistant',
        'key': 'mock-key',
        'import_groups': False
    }
    assert len(mock_gateway_info.mock_calls) == 1
    assert len(mock_entry_setup.mock_calls) == 1
async def test_discovery_duplicate_aborted(hass):
    """Test a duplicate discovery host is ignored."""
    # An entry for this host already exists, so discovery must abort.
    MockConfigEntry(
        domain='tradfri',
        data={'host': 'some-host'}
    ).add_to_hass(hass)
    flow = await hass.config_entries.flow.async_init(
        'tradfri', context={'source': 'zeroconf'}, data={
            'host': 'some-host'
        })
    assert flow['type'] == data_entry_flow.RESULT_TYPE_ABORT
    assert flow['reason'] == 'already_configured'
async def test_import_duplicate_aborted(hass):
    """Test a duplicate import host is ignored."""
    # An entry for this host already exists, so the import must abort.
    MockConfigEntry(
        domain='tradfri',
        data={'host': 'some-host'}
    ).add_to_hass(hass)
    flow = await hass.config_entries.flow.async_init(
        'tradfri', context={'source': 'import'}, data={
            'host': 'some-host'
        })
    assert flow['type'] == data_entry_flow.RESULT_TYPE_ABORT
    assert flow['reason'] == 'already_configured'
async def test_duplicate_discovery(hass, mock_auth, mock_entry_setup):
    """Test a duplicate discovery in progress is ignored."""
    # First discovery opens a form; a second one for the same host while
    # the first flow is still in progress must abort.
    result = await hass.config_entries.flow.async_init(
        'tradfri', context={'source': 'zeroconf'}, data={
            'host': '123.123.123.123'
        })
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    result2 = await hass.config_entries.flow.async_init(
        'tradfri', context={'source': 'zeroconf'}, data={
            'host': '123.123.123.123'
        })
    assert result2['type'] == data_entry_flow.RESULT_TYPE_ABORT
|
|
# Copyright 2015 VMware.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
from django.core.urlresolvers import reverse
from django import template
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from horizon import forms
from horizon import workflows
import six
from congress_dashboard.api import congress
# Display formats and validation regexes for "<datasource><sep><table>"
# style names used throughout the rule-creation workflow.
COLUMN_FORMAT = '<datasource>%s<table> <column>' % congress.TABLE_SEPARATOR
COLUMN_PATTERN = r'\s*[\w.]+%s[\w.]+\s+[\w.]+\s*$' % congress.TABLE_SEPARATOR
COLUMN_PATTERN_ERROR = 'Column name must be in "%s" format' % COLUMN_FORMAT
TABLE_FORMAT = '<datasource>%s<table>' % congress.TABLE_SEPARATOR
TABLE_PATTERN = r'\s*[\w.]+%s[\w.]+\s*$' % congress.TABLE_SEPARATOR
TABLE_PATTERN_ERROR = 'Table name must be in "%s" format' % TABLE_FORMAT
LOG = logging.getLogger(__name__)
class CreateOutputAction(workflows.Action):
    """Form action collecting the rule name/comment and output table shape."""
    policy_name = forms.CharField(widget=forms.HiddenInput(), required=False)
    rule_name = forms.CharField(label=_('Rule Name'), max_length=255,
                                initial='', required=False)
    comment = forms.CharField(label=_('Rule Comment'), initial='',
                              required=False)
    policy_table = forms.CharField(label=_("Policy Table Name"), initial='',
                                   max_length=255)
    policy_columns = forms.CharField(
        label=_('Policy Table Columns'), initial='',
        help_text=_('Name the columns in the output table, one per textbox.'))
    failure_url = 'horizon:admin:policies:detail'

    def __init__(self, request, context, *args, **kwargs):
        super(CreateOutputAction, self).__init__(request, context, *args,
                                                 **kwargs)
        # Carry the policy chosen on the previous page into the hidden field.
        self.fields['policy_name'].initial = context['policy_name']

    class Meta(object):
        name = _('Output')
class CreateOutput(workflows.Step):
    """Workflow step describing the rule and the policy table it creates."""
    action_class = CreateOutputAction
    contributes = ('policy_name', 'rule_name', 'comment', 'policy_table',
                   'policy_columns')
    template_name = 'admin/policies/rules/_create_output.html'
    help_text = _('Information about the rule and the policy table '
                  'being created.')

    def render(self):
        # Overriding parent method to add extra template context variables.
        step_template = template.loader.get_template(self.template_name)
        extra_context = {"form": self.action,
                         "step": self}
        context = template.RequestContext(self.workflow.request, extra_context)
        # Data needed to re-create policy column inputs after an error occurs.
        policy_columns = self.workflow.request.POST.get('policy_columns', '')
        columns_list = policy_columns.split(', ')
        context['policy_columns_list'] = columns_list
        context['policy_columns_count'] = len(columns_list)
        return step_template.render(context)
class CreateConditionsAction(workflows.Action):
    """Form action holding the serialized policy-column mappings."""
    mappings = forms.CharField(label=_('Policy table columns:'), initial='')

    class Meta(object):
        name = _('Conditions')
class CreateConditions(workflows.Step):
    """Workflow step gathering the data sources and constraints that make
    up the body of the new rule."""
    action_class = CreateConditionsAction
    contributes = ('mappings',)
    template_name = 'admin/policies/rules/_create_conditions.html'
    help_text = _('Sources from which the output policy table will get its '
                  'data, plus any constraints.')

    def _compare_mapping_columns(self, x, y):
        # x = "mapping_column_<int>", y = "mapping_column_<int>"
        # cmp()-style comparison by trailing index, implemented without the
        # builtin cmp(), which does not exist on Python 3.
        ix = int(x.split('_')[-1])
        iy = int(y.split('_')[-1])
        return (ix > iy) - (ix < iy)

    def render(self):
        """Render the step, re-populating dynamic inputs from POST data."""
        # Overriding parent method to add extra template context variables.
        step_template = template.loader.get_template(self.template_name)
        extra_context = {"form": self.action,
                         "step": self}
        context = template.RequestContext(self.workflow.request, extra_context)
        # Data needed to re-create mapping column inputs after an error occurs.
        post = self.workflow.request.POST
        mappings = []
        policy_columns = post.get('policy_columns')
        policy_columns_list = []
        # Policy column to data source mappings.
        if policy_columns:
            policy_columns_list = policy_columns.split(', ')
            # 'mapping_column_0' is the hidden template row; skip it.
            mapping_columns = [param for param in post
                               if (param.startswith('mapping_column_') and
                                   param != 'mapping_column_0')]
            # Mapping columns should be in the same order as the policy
            # columns above to which they match. Sort with a key function
            # (not cmp=) so this works on both Python 2 and 3.
            sorted_mapping_columns = sorted(
                mapping_columns, key=lambda name: int(name.split('_')[-1]))
            mapping_columns_list = [post.get(c)
                                    for c in sorted_mapping_columns]
            # Materialize: on Python 3 zip() returns an iterator, which
            # would break len() below.
            mappings = list(zip(policy_columns_list, mapping_columns_list))
        context['mappings'] = mappings
        # Add one for the hidden template row.
        context['mappings_count'] = len(mappings) + 1
        # Data needed to re-create join, negation, and alias inputs.
        joins = []
        negations = []
        aliases = []
        for param, value in post.items():
            if param.startswith('join_left_') and value:
                join_num = param.split('_')[-1]
                other_value = post.get('join_right_%s' % join_num)
                join_op = post.get('join_op_%s' % join_num)
                if other_value and join_op is not None:
                    joins.append((value, join_op, other_value))
            elif param.startswith('negation_value_') and value:
                negation_num = param.split('_')[-1]
                negation_column = post.get('negation_column_%s' %
                                           negation_num)
                if negation_column:
                    negations.append((value, negation_column))
            elif param.startswith('alias_column_') and value:
                alias_num = param.split('_')[-1]
                alias_name = post.get('alias_name_%s' % alias_num)
                if alias_name:
                    aliases.append((value, alias_name))
        # Make sure there's at least one empty row.
        context['joins'] = joins or [('', '')]
        context['joins_count'] = len(joins) or 1
        context['negations'] = negations or [('', '')]
        context['negations_count'] = len(negations) or 1
        context['aliases'] = aliases or [('', '')]
        context['aliases_count'] = len(aliases) or 1
        # Input validation attributes.
        context['column_pattern'] = COLUMN_PATTERN
        context['column_pattern_error'] = COLUMN_PATTERN_ERROR
        context['table_pattern'] = TABLE_PATTERN
        context['table_pattern_error'] = TABLE_PATTERN_ERROR
        return step_template.render(context)
def _underscore_slugify(name):
    """Slugify *name*, but join words with underscores instead of hyphens."""
    return slugify(name).replace('-', '_')
class CreateRule(workflows.Workflow):
    """Two-step wizard that builds and submits a Datalog rule.

    The Output step names the rule and the policy table it populates; the
    Conditions step maps policy-table columns onto data source columns and
    collects joins, negations and aliases. ``handle`` assembles the final
    rule string and creates it via the Congress API.
    """
    slug = 'create_rule'
    name = _('Create Rule')
    finalize_button_name = _('Create')
    success_message = _('Created rule%(rule_name)s.%(error)s')
    failure_message = _('Unable to create rule%(rule_name)s: %(error)s')
    default_steps = (CreateOutput, CreateConditions)
    wizard = True

    def get_success_url(self):
        # Return to the detail page of the policy the rule was added to.
        policy_name = self.context.get('policy_name')
        return reverse('horizon:admin:policies:detail', args=(policy_name,))

    def get_failure_url(self):
        policy_name = self.context.get('policy_name')
        return reverse('horizon:admin:policies:detail', args=(policy_name,))

    def format_status_message(self, message):
        # Prefer the user-supplied rule name; fall back to the server-assigned
        # rule id, or no identifier at all.
        rule_name = self.context.get('rule_name')
        name_str = ''
        if rule_name:
            name_str = ' "%s"' % rule_name
        else:
            rule_id = self.context.get('rule_id')
            if rule_id:
                name_str = ' %s' % rule_id
        return message % {'rule_name': name_str,
                          'error': self.context.get('error', '')}

    def _get_schema_columns(self, request, table):
        """Return the column list for ``table`` ("<datasource><sep><table>").

        Returns the error message string instead when the schema cannot be
        retrieved from either the datasource or the policy APIs.
        """
        table_parts = table.split(congress.TABLE_SEPARATOR)
        datasource = table_parts[0]
        table_name = table_parts[1]
        try:
            schema = congress.datasource_table_schema_get_by_name(
                request, datasource, table_name)
        except Exception:
            # Maybe it's a policy table, not a service.
            try:
                schema = congress.policy_table_schema_get(
                    request, datasource, table_name)
            except Exception as e:
                # Nope.
                LOG.error('Unable to get schema for table "%s", '
                          'datasource "%s": %s',
                          table_name, datasource, str(e))
                return str(e)
        return schema['columns']

    def handle(self, request, data):
        """Validate the wizard data, assemble the rule string and create it.

        Returns True on success; on failure stores a message under
        ``self.context['error']`` and returns False.
        """
        policy_name = data['policy_name']
        username = request.user.username
        project_name = request.user.tenant_name
        # Output data.
        rule_name = data.get('rule_name')
        comment = data.get('comment')
        policy_table = _underscore_slugify(data['policy_table'])
        if not data['policy_columns']:
            self.context['error'] = 'Missing policy table columns'
            return False
        policy_columns = data['policy_columns'].split(', ')
        # Conditions data.
        if not data['mappings']:
            self.context['error'] = ('Missing data source column mappings for '
                                     'policy table columns')
            return False
        mapping_columns = [c.strip() for c in data['mappings'].split(', ')]
        if len(policy_columns) != len(mapping_columns):
            self.context['error'] = ('Missing data source column mappings for '
                                     'some policy table columns')
            return False
        # Map columns used in rule's head. Every column in the head must also
        # appear in the body.
        head_columns = [_underscore_slugify(c).strip() for c in policy_columns]
        column_variables = dict(zip(mapping_columns, head_columns))
        # All tables needed in the body.
        body_tables = set()
        negation_tables = set()
        # Keep track of the tables from the head that need to be in the body.
        for column in mapping_columns:
            if re.match(COLUMN_PATTERN, column) is None:
                self.context['error'] = '%s: %s' % (COLUMN_PATTERN_ERROR,
                                                    column)
                return False
            table = column.split()[0]
            body_tables.add(table)
        # Make sure columns that are given a significant variable name are
        # unique names by adding name_count as a suffix.
        name_count = 0
        for param, value in request.POST.items():
            if param.startswith('join_left_') and value:
                if re.match(COLUMN_PATTERN, value) is None:
                    self.context['error'] = '%s: %s' % (COLUMN_PATTERN_ERROR,
                                                        value)
                    return False
                value = value.strip()
                # Get operator and other column used in join.
                join_num = param.split('_')[-1]
                join_op = request.POST.get('join_op_%s' % join_num)
                other_value = request.POST.get('join_right_%s' % join_num)
                other_value = other_value.strip()
                if join_op == '=':
                    try:
                        # Check if static value is a number, but keep it as a
                        # string, to be used later.
                        int(other_value)
                        column_variables[value] = other_value
                    except ValueError:
                        # Pass it along as a quoted string.
                        column_variables[value] = '"%s"' % other_value
                else:
                    # Join between two columns.
                    if not other_value:
                        # Ignore incomplete pairing.
                        continue
                    if re.match(COLUMN_PATTERN, other_value) is None:
                        self.context['error'] = ('%s: %s' %
                                                 (COLUMN_PATTERN_ERROR,
                                                  other_value))
                        return False
                    # Tables used in the join need to be in the body.
                    value_parts = value.split()
                    body_tables.add(value_parts[0])
                    body_tables.add(other_value.split()[0])
                    # Arbitrarily name the right column the same as the left.
                    column_name = value_parts[1]
                    # Use existing variable name if there is already one for
                    # either column in this join.
                    if other_value in column_variables:
                        column_variables[value] = column_variables[other_value]
                    elif value in column_variables:
                        column_variables[other_value] = column_variables[value]
                    else:
                        variable = '%s_%s' % (column_name, name_count)
                        name_count += 1
                        column_variables[value] = variable
                        column_variables[other_value] = variable
            elif param.startswith('negation_value_') and value:
                if re.match(COLUMN_PATTERN, value) is None:
                    self.context['error'] = '%s: %s' % (COLUMN_PATTERN_ERROR,
                                                        value)
                    return False
                value = value.strip()
                # Get operator and other column used in negation.
                negation_num = param.split('_')[-1]
                negation_column = request.POST.get('negation_column_%s' %
                                                   negation_num)
                if not negation_column:
                    # Ignore incomplete pairing.
                    continue
                if re.match(COLUMN_PATTERN, negation_column) is None:
                    self.context['error'] = '%s: %s' % (COLUMN_PATTERN_ERROR,
                                                        negation_column)
                    return False
                negation_column = negation_column.strip()
                # Tables for columns referenced by the negation table must
                # appear in the body.
                value_parts = value.split()
                body_tables.add(value_parts[0])
                negation_tables.add(negation_column.split()[0])
                # Use existing variable name if there is already one for either
                # column in this negation.
                if negation_column in column_variables:
                    column_variables[value] = column_variables[negation_column]
                elif value in column_variables:
                    column_variables[negation_column] = column_variables[value]
                else:
                    # Arbitrarily name the negated table's column the same as
                    # the value column.
                    column_name = value_parts[1]
                    variable = '%s_%s' % (column_name, name_count)
                    name_count += 1
                    column_variables[value] = variable
                    column_variables[negation_column] = variable
        LOG.debug('column_variables for rule: %s', column_variables)
        # Form the literals for all the tables needed in the body. Make sure
        # column that have no relation to any other columns are given a unique
        # variable name, using column_count.
        column_count = 0
        literals = []
        for table in body_tables:
            # Replace column names with variable names that join related
            # columns together.
            columns = self._get_schema_columns(request, table)
            if isinstance(columns, six.string_types):
                self.context['error'] = columns
                return False
            literal_columns = []
            if columns:
                for column in columns:
                    table_column = '%s %s' % (table, column['name'])
                    literal_columns.append(
                        column_variables.get(table_column, 'col_%s' %
                                             column_count))
                    column_count += 1
                literals.append('%s(%s)' % (table, ', '.join(literal_columns)))
            else:
                # Just the table name, such as for classification:true.
                literals.append(table)
        # Form the negated tables.
        for table in negation_tables:
            columns = self._get_schema_columns(request, table)
            if isinstance(columns, six.string_types):
                self.context['error'] = columns
                return False
            literal_columns = []
            num_variables = 0
            for column in columns:
                table_column = '%s %s' % (table, column['name'])
                if table_column in column_variables:
                    literal_columns.append(column_variables[table_column])
                    num_variables += 1
                else:
                    literal_columns.append('col_%s' % column_count)
                    column_count += 1
            literal = 'not %s(%s)' % (table, ', '.join(literal_columns))
            literals.append(literal)
            # Every column in the negated table must appear in a non-negated
            # literal in the body. If there are some variables that have not
            # been used elsewhere, repeat the literal in its non-negated form.
            if num_variables != len(columns) and table not in body_tables:
                literals.append(literal.replace('not ', ''))
        # All together now.
        rule = '%s(%s) %s %s' % (policy_table, ', '.join(head_columns),
                                 congress.RULE_SEPARATOR, ', '.join(literals))
        LOG.info('User %s creating policy "%s" rule "%s" in tenant %s: %s',
                 username, policy_name, rule_name, project_name, rule)
        try:
            params = {
                'name': rule_name,
                'comment': comment,
                'rule': rule,
            }
            rule = congress.policy_rule_create(request, policy_name,
                                               body=params)
            LOG.info('Created rule %s', rule['id'])
            self.context['rule_id'] = rule['id']
        except Exception as e:
            LOG.error('Error creating policy "%s" rule "%s": %s',
                      policy_name, rule_name, str(e))
            self.context['error'] = str(e)
            return False
        return True
|
|
# -*- coding: utf-8 -*-
"""
Runtime configuration and launch logic for third party tools.
This module contains logic to configure and launch third-party tools;
setting environment variables and configuration files as required to
assure interoperability with DA norms and processes.
---
type:
python_module
validation_level:
v00_minimum
protection:
k00_public
copyright:
"Copyright 2016 High Integrity Artificial Intelligence Systems"
license:
"Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."
...
"""
import copy
import logging
import os
import sys
import subprocess
import da.lwc.discover
import da.lwc.env
# -----------------------------------------------------------------------------
def python2(arglist,  # pylint: disable=R0913
            stdout=None, stderr=None, cwd=None, env=None,
            dirpath_lwc_root=None):
    """
    Launch the python 2 interpreter in a subprocess.

    Set the PYTHONPATH environment variable so DA dependencies can be used.

    """
    return _python(arglist,
                   dependency_id='pyrun2',
                   libraries_interface='lib_python2',
                   stdout=stdout,
                   stderr=stderr,
                   cwd=cwd,
                   env=env,
                   dirpath_lwc_root=dirpath_lwc_root)
# -----------------------------------------------------------------------------
def python3(arglist,  # pylint: disable=R0913
            stdout=None, stderr=None, cwd=None, env=None,
            dirpath_lwc_root=None):
    """
    Launch the python 3 interpreter in a subprocess.

    Set the PYTHONPATH environment variable so DA dependencies can be used.

    """
    return _python(arglist,
                   dependency_id='pyrun3',
                   libraries_interface='lib_python3',
                   stdout=stdout,
                   stderr=stderr,
                   cwd=cwd,
                   env=env,
                   dirpath_lwc_root=dirpath_lwc_root)
# -----------------------------------------------------------------------------
def _python(arglist,  # pylint: disable=R0913
            dependency_id,
            libraries_interface,
            stdout=None,
            stderr=None,
            cwd=None,
            env=None,
            dirpath_lwc_root=None):
    """
    Launch the python interpreter in a subprocess.

    Set the PYTHONPATH environment variable so DA dependencies can be used.
    When no environment is supplied, a copy of the current one is used with
    PYTHONPATH pointing at the libraries for ``libraries_interface``.

    Raises OSError when the configured interpreter cannot be launched.

    """
    if env is None:
        python_import_path = da.lwc.env.python_import_path(
            iface_name=libraries_interface,
            dirpath_lwc_root=dirpath_lwc_root)
        env = copy.copy(os.environ)
        env['PYTHONPATH'] = os.pathsep.join(python_import_path)
    filepath_python = os.path.join(da.lwc.env.cli_path(
        dependency_id=dependency_id,
        application_name='pyrun',
        dirpath_lwc_root=dirpath_lwc_root))
    try:
        return _subprocess_call(
            [filepath_python] + arglist,
            stdout=stdout,
            stderr=stderr,
            cwd=cwd,
            env=env)
    except OSError:
        # This is the exception raised when the interpreter binary is
        # missing -- give the developer an actionable message before
        # propagating it.
        logging.error(
            'Could not launch the interpreter "%s". Check that the runtime '
            'environment has been installed and configured.',
            filepath_python)
        raise
# -----------------------------------------------------------------------------
def bash(dirpath_lwc_root=None):
    """
    Launch the bash interpreter in a subprocess.

    Set the PATH and PYTHONPATH environment variables so DA dependencies can
    be used.

    """
    dirpath_env = da.lwc.discover.path(
        'current_env',
        dirpath_lwc_root=dirpath_lwc_root)
    register = da.lwc.env.dependencies_register(
        dirpath_lwc_root=dirpath_lwc_root)
    # One PATH entry per registered command-line application.
    path_cli = [
        os.path.normpath(os.path.dirname(os.path.join(dirpath_env,
                                                      dep['dirname'],
                                                      dep['policy'],
                                                      dep['cli'][app_name])))
        for dep in register.values()
        for app_name in dep['cli']]
    env = copy.copy(os.environ)
    env['PYTHONPATH'] = os.pathsep.join(sys.path)
    env['PATH'] = os.pathsep.join([env['PATH']] + path_cli)
    return _subprocess_call(['/bin/bash'], env=env)
# -----------------------------------------------------------------------------
def subl(filepath=None, line_number=1, dirpath_lwc_root=None):
    """
    Open the specified file in Sublime Text.

    With no filepath the editor is simply launched. Returns the editor
    subprocess status, or 1 when the file cannot be opened.

    """
    if dirpath_lwc_root is None:
        dirpath_lwc_root = da.lwc.discover.path(key='root')
    filepath_subl = da.lwc.env.cli_path(
        dependency_id='subl',
        application_name='sublime_text',
        dirpath_lwc_root=dirpath_lwc_root)
    if filepath is None:
        logging.debug('Run sublime text')
        return _subprocess_call([filepath_subl])
    # Stack traces produced inside PyRun's built-in libraries can carry
    # pseudo-filenames of the form "<pyrun>/filename.py", which no editor
    # can open. This is anomalous but requires no remedial action from the
    # developer, so rather than raising we log the fact and return
    # normally. Defensively, the same treatment is applied to any filepath
    # that does not indicate a valid file.
    if filepath.startswith('<pyrun>') or not os.path.isfile(filepath):
        logging.warning('Cannot open file: "%s"', filepath)
        return 1
    argument = '{filepath}:{line_number}'.format(
        filepath=filepath,
        line_number=line_number)
    logging.debug('Open file in sublime text: %s', argument)
    return _subprocess_call([filepath_subl, '-a', argument])
# -----------------------------------------------------------------------------
def _subprocess_call(*args, **kwargs):
"""
Wrap subprocess.call so we have somewhere we can monkey-patch during test.
"""
return subprocess.call(*args, **kwargs)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from oslo_utils import timeutils
from senlin.common import exception
from senlin.common.i18n import _
from senlin.common import utils as common_utils
from senlin.db.sqlalchemy import api as db_api
from senlin.engine import node as nodem
from senlin.profiles import base as profiles_base
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
class TestNode(base.SenlinTestCase):
    """Unit tests for senlin.engine.node.Node (imported as nodem)."""
    def setUp(self):
        super(TestNode, self).setUp()
        self.context = utils.dummy_context(project='node_test_project')
        self.profile = self._create_profile('PROFILE_ID')
        self.cluster = self._create_cluster('CLUSTER_ID')
    def _create_profile(self, profile_id):
        """Create and return a DB profile record with the given id."""
        values = {
            'id': profile_id,
            'type': 'os.nova.server-1.0',
            'name': 'test-profile',
            'spec': {
                'type': 'os.nova.server',
                'version': '1.0',
            },
            'user': self.context.user,
            'project': self.context.project
        }
        return db_api.profile_create(self.context, values)
    def _create_cluster(self, cluster_id):
        """Create and return a DB cluster record owned by self.profile."""
        values = {
            'id': cluster_id,
            'profile_id': self.profile.id,
            'name': 'test-cluster',
            'user': self.context.user,
            'project': self.context.project,
            'next_index': 1,
        }
        return db_api.cluster_create(self.context, values)
    def _create_node(self, node_id):
        """Create and return a DB node record in self.cluster."""
        values = {
            'id': node_id,
            'profile_id': self.profile.id,
            'cluster_id': self.cluster.id,
            'project': self.context.project,
            'name': 'node1',
            'role': 'test_node',
        }
        return db_api.node_create(self.context, values)
    def test_node_init(self):
        node = nodem.Node('node1', self.profile.id, self.cluster.id,
                          role='first_node')
        self.assertIsNone(node.id)
        self.assertEqual('node1', node.name)
        self.assertEqual('', node.physical_id)
        self.assertEqual(self.profile.id, node.profile_id)
        self.assertEqual('', node.user)
        self.assertEqual('', node.project)
        self.assertEqual('', node.domain)
        self.assertEqual(self.cluster.id, node.cluster_id)
        self.assertEqual(-1, node.index)
        self.assertEqual('first_node', node.role)
        self.assertIsNone(node.init_at)
        self.assertIsNone(node.created_at)
        self.assertIsNone(node.updated_at)
        self.assertEqual('INIT', node.status)
        self.assertEqual('Initializing', node.status_reason)
        self.assertEqual({}, node.data)
        self.assertEqual({}, node.metadata)
        self.assertEqual({}, node.rt)
    def test_node_init_random_name(self):
        node = nodem.Node(None, self.profile.id, None)
        self.assertIsNotNone(node.name)
        self.assertEqual(13, len(node.name))
    def test_node_store_init(self):
        node = nodem.Node('node1', self.profile.id, self.cluster.id,
                          self.context, role='first_node',
                          index=1)
        self.assertIsNone(node.id)
        node_id = node.store(self.context)
        self.assertIsNotNone(node_id)
        node_info = db_api.node_get(self.context, node_id)
        self.assertIsNotNone(node_info)
        self.assertEqual('node1', node_info.name)
        self.assertEqual('', node_info.physical_id)
        self.assertEqual(self.cluster.id, node_info.cluster_id)
        self.assertEqual(self.profile.id, node_info.profile_id)
        self.assertEqual(self.context.user, node_info.user)
        self.assertEqual(self.context.project, node_info.project)
        self.assertEqual(self.context.domain, node_info.domain)
        self.assertEqual(1, node_info.index)
        self.assertEqual('first_node', node.role)
        self.assertIsNotNone(node_info.init_at)
        self.assertIsNone(node_info.created_at)
        self.assertIsNone(node_info.updated_at)
        self.assertEqual('INIT', node_info.status)
        self.assertEqual('Initializing', node_info.status_reason)
        self.assertEqual({}, node_info.meta_data)
        self.assertEqual({}, node_info.data)
    def test_node_store_update(self):
        # A second store() on an already-persisted node must update in
        # place rather than create a new DB record.
        node = nodem.Node('node1', self.profile.id, None)
        node_id = node.store(self.context)
        node.name = 'new_name'
        new_node_id = node.store(self.context)
        self.assertEqual(node_id, new_node_id)
    def test_node_load(self):
        ex = self.assertRaises(exception.NodeNotFound,
                               nodem.Node.load,
                               self.context, 'non-existent', None)
        self.assertEqual('The node (non-existent) could not be found.',
                         six.text_type(ex))
        node = self._create_node('NODE_ID')
        node_info = nodem.Node.load(self.context, 'NODE_ID')
        self.assertEqual(node.id, node_info.id)
        self.assertEqual(node.name, node_info.name)
        self.assertEqual(node.physical_id, node_info.physical_id)
        self.assertEqual(node.cluster_id, node_info.cluster_id)
        self.assertEqual(node.profile_id, node_info.profile_id)
        self.assertEqual(node.user, node_info.user)
        self.assertEqual(node.project, node_info.project)
        self.assertEqual(node.domain, node_info.domain)
        self.assertEqual(node.index, node_info.index)
        self.assertEqual(node.role, node_info.role)
        self.assertEqual(node.init_at, node_info.init_at)
        self.assertEqual(node.created_at, node_info.created_at)
        self.assertEqual(node.updated_at, node_info.updated_at)
        self.assertEqual(node.status, node_info.status)
        self.assertEqual(node.status_reason, node_info.status_reason)
        self.assertEqual(node.meta_data, node_info.metadata)
        self.assertEqual(node.data, node_info.data)
        self.assertEqual(self.profile.name, node_info.rt['profile'].name)
    def test_node_load_diff_project(self):
        # Loading from another project fails unless project_safe=False.
        self._create_node('NODE_ID')
        new_ctx = utils.dummy_context(project='a-different-project')
        ex = self.assertRaises(exception.NodeNotFound,
                               nodem.Node.load,
                               new_ctx, 'NODE_ID', None)
        self.assertEqual('The node (NODE_ID) could not be found.',
                         six.text_type(ex))
        res = nodem.Node.load(new_ctx, 'NODE_ID', project_safe=False)
        self.assertIsNotNone(res)
        self.assertEqual('NODE_ID', res.id)
    def test_node_load_all(self):
        node_info = nodem.Node.load_all(self.context)
        self.assertEqual([], [c for c in node_info])
        node1 = self._create_node('NODE1')
        node2 = self._create_node('NODE2')
        # NOTE: we don't test all other parameters because the db api tests
        # already covered that
        nodes = nodem.Node.load_all(self.context)
        self.assertEqual(2, len(nodes))
        self.assertEqual(node1.id, nodes[0].id)
        self.assertEqual(node2.id, nodes[1].id)
    def test_node_to_dict(self):
        node = self._create_node('NODE1')
        self.assertIsNotNone(node.id)
        expected = {
            'id': node.id,
            'name': node.name,
            'cluster_id': node.cluster_id,
            'physical_id': node.physical_id,
            'profile_id': node.profile_id,
            'user': node.user,
            'project': node.project,
            'domain': node.domain,
            'index': node.index,
            'role': node.role,
            'init_at': common_utils.format_time(node.init_at),
            'created_at': common_utils.format_time(node.created_at),
            'updated_at': common_utils.format_time(node.updated_at),
            'status': node.status,
            'status_reason': node.status_reason,
            'data': node.data,
            'metadata': node.meta_data,
            'profile_name': self.profile.name,
        }
        result = nodem.Node.load(self.context, 'NODE1')
        dt = result.to_dict()
        self.assertEqual(expected, dt)
    @mock.patch.object(db_api, 'profile_get')
    def test_node_to_dict_no_profile(self, mock_profile_get):
        # When the profile cannot be resolved, to_dict() reports the
        # profile name as 'Unknown'.
        node = self._create_node('NODE1')
        self.assertIsNotNone(node.id)
        expected = {
            'id': node.id,
            'name': node.name,
            'cluster_id': node.cluster_id,
            'physical_id': node.physical_id,
            'profile_id': node.profile_id,
            'user': node.user,
            'project': node.project,
            'domain': node.domain,
            'index': node.index,
            'role': node.role,
            'init_at': common_utils.format_time(node.init_at),
            'created_at': common_utils.format_time(node.created_at),
            'updated_at': common_utils.format_time(node.updated_at),
            'status': node.status,
            'status_reason': node.status_reason,
            'data': node.data,
            'metadata': node.meta_data,
            'profile_name': 'Unknown',
        }
        mock_profile_get.return_value = None
        result = nodem.Node.load(self.context, 'NODE1')
        dt = result.to_dict()
        self.assertEqual(expected, dt)
    def test_node_set_status(self):
        # created_at is set on the CREATING->ACTIVE transition and
        # updated_at on subsequent transitions to ACTIVE.
        node = nodem.Node('node1', self.profile.id, self.cluster.id,
                          self.context)
        node.store(self.context)
        self.assertEqual(nodem.Node.INIT, node.status)
        self.assertIsNotNone(node.init_at)
        self.assertIsNone(node.created_at)
        self.assertIsNone(node.updated_at)
        # create
        node.set_status(self.context, node.CREATING,
                        reason='Creation in progress')
        self.assertEqual('CREATING', node.status)
        self.assertEqual('Creation in progress', node.status_reason)
        self.assertIsNone(node.created_at)
        self.assertIsNone(node.updated_at)
        node.set_status(self.context, node.ACTIVE,
                        reason='Creation succeeded')
        self.assertEqual('ACTIVE', node.status)
        self.assertEqual('Creation succeeded', node.status_reason)
        self.assertIsNotNone(node.created_at)
        self.assertIsNone(node.updated_at)
        # update
        node.set_status(self.context, node.UPDATING,
                        reason='Update in progress')
        self.assertEqual('UPDATING', node.status)
        self.assertEqual('Update in progress', node.status_reason)
        self.assertIsNotNone(node.created_at)
        self.assertIsNone(node.updated_at)
        node.set_status(self.context, node.ACTIVE,
                        reason='Update succeeded')
        self.assertEqual('ACTIVE', node.status)
        self.assertEqual('Update succeeded', node.status_reason)
        self.assertIsNotNone(node.created_at)
        self.assertIsNotNone(node.updated_at)
        node.set_status(self.context, node.ACTIVE)
        self.assertEqual('ACTIVE', node.status)
        self.assertIsNotNone(node.created_at)
        self.assertIsNotNone(node.updated_at)
        # delete
        node.set_status(self.context, node.DELETING,
                        reason='Deletion in progress')
        self.assertEqual('DELETING', node.status)
        self.assertEqual('Deletion in progress', node.status_reason)
        self.assertIsNotNone(node.created_at)
    @mock.patch.object(profiles_base.Profile, 'get_details')
    def test_node_get_details(self, mock_details):
        node = nodem.Node('node1', self.profile.id, None)
        # Without a physical id there is nothing to query.
        for physical_id in (None, ''):
            node.physical_id = physical_id
            self.assertEqual({}, node.get_details(self.context))
            self.assertEqual(0, mock_details.call_count)
        node.physical_id = 'FAKE_ID'
        mock_details.return_value = {'foo': 'bar'}
        res = node.get_details(self.context)
        mock_details.assert_called_once_with(self.context, node)
        self.assertEqual({'foo': 'bar'}, res)
    def test_node_handle_exception(self):
        ex = exception.ResourceStatusError(resource_id='FAKE_ID',
                                           status='FAKE_STATUS',
                                           reason='FAKE_REASON')
        node = nodem.Node('node1', self.profile.id, None, self.context)
        node.store(self.context)
        node._handle_exception(self.context, 'ACTION', 'STATUS', ex)
        db_node = db_api.node_get(self.context, node.id)
        self.assertEqual(node.ERROR, db_node.status)
        self.assertEqual('Profile failed in ACTIOing resource '
                         '(FAKE_ID) due to: %s' % six.text_type(ex),
                         db_node.status_reason)
        self.assertEqual('FAKE_ID', db_node.physical_id)
        # Exception happens before physical node creation started.
        ex = exception.ResourceCreationFailure(rtype='stack',
                                               code=400,
                                               message='Bad request')
        node = nodem.Node('node1', self.profile.id, None, self.context)
        node.store(self.context)
        node._handle_exception(self.context, 'CREATE', 'STATUS', ex)
        db_node = db_api.node_get(self.context, node.id)
        self.assertEqual(node.ERROR, db_node.status)
        self.assertEqual('Profile failed in creating node due to: '
                         '%s' % six.text_type(ex), db_node.status_reason)
        self.assertIsNone(db_node.physical_id)
    @mock.patch.object(nodem.Node, 'store')
    @mock.patch.object(nodem.Node, 'set_status')
    @mock.patch.object(profiles_base.Profile, 'create_object')
    def test_node_create(self, mock_create, mock_status, mock_store):
        node = nodem.Node('node1', self.profile.id, self.cluster.id,
                          self.context)
        physical_id = 'fake_id'
        mock_create.return_value = physical_id
        res = node.do_create(self.context)
        self.assertTrue(res)
        mock_status.assert_any_call(self.context, node.CREATING,
                                    reason='Creation in progress')
        mock_status.assert_any_call(self.context, node.ACTIVE,
                                    'Creation succeeded')
        mock_store.assert_called_once_with(self.context)
        self.assertEqual(physical_id, node.physical_id)
    def test_node_create_not_init(self):
        # do_create is only valid from the INIT status.
        node = nodem.Node('node1', self.profile.id, self.cluster.id,
                          self.context)
        node.status = 'NOT_INIT'
        res = node.do_create(self.context)
        self.assertFalse(res)
    @mock.patch.object(nodem.Node, 'set_status')
    @mock.patch.object(profiles_base.Profile, 'create_object')
    def test_node_create_not_created(self, mock_create, mock_status):
        node = nodem.Node('node1', self.profile.id, self.cluster.id,
                          self.context)
        mock_create.return_value = None
        res = node.do_create(self.context)
        self.assertFalse(res)
        mock_status.assert_called_once_with(self.context, node.CREATING,
                                            reason='Creation in progress')
    @mock.patch.object(nodem.Node, 'store')
    @mock.patch.object(nodem.Node, 'set_status')
    @mock.patch.object(profiles_base.Profile, 'create_object')
    def test_node_create_internal_error(self, mock_create, mock_status,
                                        mock_store):
        ex = exception.InternalError(code=500, message='internal error')
        mock_create.side_effect = ex
        node = nodem.Node('node1', self.profile.id, self.cluster.id,
                          self.context)
        res = node.do_create(self.context)
        self.assertFalse(res)
        mock_status.assert_any_call(self.context, node.CREATING,
                                    reason='Creation in progress')
        reason = _('Profile failed in creating node due to: %(msg)s') % {
            'msg': six.text_type(ex)}
        mock_status.assert_any_call(self.context, node.ERROR, reason)
    @mock.patch.object(db_api, 'node_delete')
    @mock.patch.object(nodem.Node, 'set_status')
    @mock.patch.object(profiles_base.Profile, 'delete_object')
    def test_node_delete(self, mock_delete, mock_status, mock_db_delete):
        node = nodem.Node('node1', self.profile.id, self.cluster.id,
                          self.context)
        node.physical_id = 'fake_id'
        res = node.do_delete(self.context)
        self.assertTrue(res)
        mock_delete.assert_called_once_with(mock.ANY, node)
        mock_db_delete.assert_called_once_with(mock.ANY, node.id, False)
        mock_status.assert_called_once_with(self.context, node.DELETING,
                                            reason='Deletion in progress')
    @mock.patch.object(db_api, 'node_delete')
    @mock.patch.object(profiles_base.Profile, 'delete_object')
    def test_node_delete_not_created(self, mock_delete, mock_db_delete):
        # No physical object exists, so only the DB record is removed.
        node = nodem.Node('node1', self.profile.id, self.cluster.id,
                          self.context)
        self.assertEqual('', node.physical_id)
        res = node.do_delete(self.context)
        self.assertTrue(res)
        self.assertFalse(mock_delete.called)
        self.assertTrue(mock_db_delete.called)
    @mock.patch.object(nodem.Node, '_handle_exception')
    @mock.patch.object(nodem.Node, 'set_status')
    @mock.patch.object(profiles_base.Profile, 'delete_object')
    def test_node_delete_resource_status_error(self, mock_delete, mock_status,
                                               mock_handle_exception):
        ex = exception.ResourceStatusError(resource_id='id', status='ERROR',
                                           reason='some reason')
        mock_delete.side_effect = ex
        node = nodem.Node('node1', self.profile.id, self.cluster.id,
                          self.context)
        node.physical_id = 'fake_id'
        res = node.do_delete(self.context)
        self.assertFalse(res)
        mock_delete.assert_called_once_with(self.context, node)
        mock_handle_exception.assert_called_once_with(self.context, 'delete',
                                                      'ERROR', ex)
        mock_status.assert_any_call(self.context, 'ERROR',
                                    reason='Deletion failed')
    @mock.patch.object(nodem.Node, 'set_status')
    @mock.patch.object(profiles_base.Profile, 'update_object')
    def test_node_update_new_profile(self, mock_update, mock_status):
        node = nodem.Node('node1', self.profile.id, self.cluster.id,
                          self.context)
        new_profile = self._create_profile('NEW_PROFILE_ID')
        node.physical_id = 'fake_id'
        res = node.do_update(self.context, {'new_profile_id': new_profile.id})
        self.assertTrue(res)
        mock_update.assert_called_once_with(self.context, node,
                                            new_profile.id)
        self.assertEqual('NEW_PROFILE_ID', node.profile_id)
        self.assertEqual('NEW_PROFILE_ID', node.rt['profile'].id)
        mock_status.assert_any_call(self.context, 'UPDATING',
                                    reason='Update in progress')
        mock_status.assert_any_call(self.context, 'ACTIVE',
                                    reason='Update succeeded')
    @mock.patch.object(nodem.Node, 'set_status')
    def test_node_update_name(self, mock_status):
        node = nodem.Node('node1', self.profile.id, self.cluster.id,
                          self.context)
        node.physical_id = 'fake_id'
        res = node.do_update(self.context, {'name': 'new_name'})
        self.assertTrue(res)
        self.assertEqual(node.name, 'new_name')
        mock_status.assert_any_call(self.context, 'UPDATING',
                                    reason='Update in progress')
        mock_status.assert_any_call(self.context, 'ACTIVE',
                                    reason='Update succeeded')
    def test_node_update_not_created(self):
        # Updating a node with no physical backing object is rejected.
        node = nodem.Node('node1', self.profile.id, self.cluster.id,
                          self.context)
        self.assertEqual('', node.physical_id)
        res = node.do_update(self.context, 'new_profile_id')
        self.assertFalse(res)
    @mock.patch.object(nodem.Node, '_handle_exception')
    @mock.patch.object(nodem.Node, 'set_status')
    @mock.patch.object(profiles_base.Profile, 'update_object')
    def test_node_update_resource_status_error(self, mock_update, mock_status,
                                               mock_handle_exception):
        node = nodem.Node('node1', self.profile.id, self.cluster.id,
                          self.context)
        ex = exception.ResourceStatusError(resource_id='id', status='ERROR',
                                           reason='some reason')
        mock_update.side_effect = ex
        new_profile = self._create_profile('NEW_PROFILE_ID')
        node.physical_id = 'fake_id'
        res = node.do_update(self.context, {'new_profile_id': new_profile.id})
        self.assertFalse(res)
        mock_handle_exception.assert_called_once_with(self.context, 'update',
                                                      'ERROR', ex)
        self.assertNotEqual('NEW_PROFILE_ID', node.profile_id)
    @mock.patch.object(db_api, 'node_migrate')
    def test_node_join_same_cluster(self, mock_migrate):
        # Joining the cluster the node is already in is a no-op.
        node = nodem.Node('node1', self.profile.id, self.cluster.id,
                          self.context)
        node.index = 1
        res = node.do_join(self.context, self.cluster.id)
        self.assertTrue(res)
        self.assertEqual(1, node.index)
        self.assertIsNone(node.updated_at)
        self.assertFalse(mock_migrate.called)
    @mock.patch.object(timeutils, 'utcnow')
    @mock.patch.object(profiles_base.Profile, 'join_cluster')
    @mock.patch.object(db_api, 'node_migrate')
    def test_node_join(self, mock_migrate, mock_join_cluster, mock_time):
        node = nodem.Node('node1', self.profile.id, self.cluster.id,
                          self.context)
        mock_join_cluster.return_value = True
        res = node.do_join(self.context, 'NEW_CLUSTER_ID')
        self.assertTrue(res)
        mock_migrate.assert_called_once_with(self.context, node.id,
                                             'NEW_CLUSTER_ID', mock_time(),
                                             None)
        mock_join_cluster.assert_called_once_with(self.context, node,
                                                  'NEW_CLUSTER_ID')
        self.assertEqual('NEW_CLUSTER_ID', node.cluster_id)
        self.assertEqual(mock_migrate.return_value.index, node.index)
        self.assertIsNotNone(node.updated_at)
    @mock.patch.object(profiles_base.Profile, 'join_cluster')
    def test_node_join_fail_update_server_metadata(self, mock_join):
        node = nodem.Node('node1', self.profile.id, None, self.context)
        mock_join.return_value = False
        res = node.do_join(self.context, 'NEW_CLUSTER_ID')
        self.assertFalse(res)
        self.assertEqual('', node.cluster_id)
        self.assertEqual(-1, node.index)
        self.assertIsNone(node.updated_at)
        mock_join.assert_called_once_with(self.context, node,
                                          'NEW_CLUSTER_ID')
    @mock.patch.object(db_api, 'node_migrate')
    def test_node_leave_no_cluster(self, mock_migrate):
        # Leaving when not a member of any cluster is a no-op.
        node = nodem.Node('node1', self.profile.id, '', self.context)
        self.assertTrue(node.do_leave(self.context))
        self.assertFalse(mock_migrate.called)
        self.assertEqual('', node.cluster_id)
        self.assertIsNone(node.updated_at)
    @mock.patch.object(timeutils, 'utcnow')
    @mock.patch.object(profiles_base.Profile, 'leave_cluster')
    @mock.patch.object(db_api, 'node_migrate')
    def test_node_leave(self, mock_migrate, mock_leave_cluster, mock_time):
        node = nodem.Node('node1', self.profile.id, self.cluster.id,
                          self.context)
        mock_leave_cluster.return_value = True
        res = node.do_leave(self.context)
        self.assertTrue(res)
        self.assertEqual('', node.cluster_id)
        self.assertIsNotNone(node.updated_at)
        self.assertEqual(-1, node.index)
        mock_migrate.assert_called_once_with(self.context, node.id,
                                             None, mock_time(), None)
        mock_leave_cluster.assert_called_once_with(self.context, node)
    @mock.patch.object(profiles_base.Profile, 'leave_cluster')
    def test_node_leave_fail_update_server_metadata(self, mock_leave):
        node = nodem.Node('node1', self.profile.id, self.cluster.id,
                          self.context, index=1)
        mock_leave.return_value = False
        res = node.do_leave(self.context)
        self.assertFalse(res)
        self.assertNotEqual('', node.cluster_id)
        self.assertIsNone(node.updated_at)
        self.assertEqual(1, node.index)
    @mock.patch.object(profiles_base.Profile, 'check_object')
    def test_node_check(self, mock_check):
        node = nodem.Node('node1', self.profile.id, '')
        node.physical_id = 'fake_id'
        mock_check.return_value = True
        res = node.do_check(self.context)
        self.assertTrue(res)
        mock_check.assert_called_once_with(self.context, node)
    @mock.patch.object(nodem.Node, 'store')
    @mock.patch.object(profiles_base.Profile, 'check_object')
    def test_node_check_failed_check(self, mock_check, mock_store):
        node = nodem.Node('node1', self.profile.id, '')
        node.physical_id = 'fake_id'
        mock_check.return_value = False
        res = node.do_check(self.context)
        self.assertFalse(res)
        self.assertEqual('ERROR', node.status)
    def test_node_check_no_physical_id(self):
        node = nodem.Node('node1', self.profile.id, '')
        res = node.do_check(self.context)
        self.assertFalse(res)
    @mock.patch.object(nodem.Node, 'set_status')
    @mock.patch.object(profiles_base.Profile, 'recover_object')
    def test_node_recover_new_object(self, mock_recover, mock_status):
        # Recovery may replace the physical object with a new one.
        node = nodem.Node('node1', self.profile.id, '')
        node.physical_id = 'fake_id'
        mock_recover.return_value = 'new_physical_id'
        res = node.do_recover(self.context)
        self.assertTrue(res)
        mock_recover.assert_called_once_with(self.context, node)
        self.assertEqual('node1', node.name)
        self.assertEqual('new_physical_id', node.physical_id)
        self.assertEqual(self.profile.id, node.profile_id)
        mock_status.assert_has_calls([
            mock.call(self.context, 'RECOVERING',
                      reason='Recover in progress'),
            mock.call(self.context, node.ACTIVE,
                      reason='Recover succeeded')])
    @mock.patch.object(nodem.Node, 'set_status')
    @mock.patch.object(profiles_base.Profile, 'recover_object')
    def test_node_recover_in_place(self, mock_recover, mock_status):
        # Recovery may also repair the object in place (same physical id).
        node = nodem.Node('node1', self.profile.id, '')
        node.physical_id = 'fake_id'
        mock_recover.return_value = 'fake_id'
        res = node.do_recover(self.context)
        self.assertTrue(res)
        mock_recover.assert_called_once_with(self.context, node)
        self.assertEqual('node1', node.name)
        self.assertEqual('fake_id', node.physical_id)
        self.assertEqual(self.profile.id, node.profile_id)
        mock_status.assert_has_calls([
            mock.call(self.context, 'RECOVERING',
                      reason='Recover in progress'),
            mock.call(self.context, node.ACTIVE,
                      reason='Recover succeeded')])
    @mock.patch.object(nodem.Node, 'set_status')
    @mock.patch.object(profiles_base.Profile, 'recover_object')
    def test_node_recover_failed_recover(self, mock_recover, mock_status):
        node = nodem.Node('node1', self.profile.id, '')
        node.physical_id = 'fake_id'
        mock_recover.return_value = None
        res = node.do_recover(self.context)
        self.assertFalse(res)
        mock_status.assert_has_calls([
            mock.call(self.context, 'RECOVERING',
                      reason='Recover in progress'),
            mock.call(self.context, node.ERROR,
                      reason='Recover failed')])
    def test_node_recover_no_physical_id(self):
        node = nodem.Node('node1', self.profile.id, '')
        res = node.do_recover(self.context)
        self.assertFalse(res)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#########################################################################
# Copyright/License Notice (BSD License) #
#########################################################################
#########################################################################
# Copyright (c) 2010-2012, Daniel Knaggs - 2E0DPK/M6DPK #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: - #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the author nor the names of its contributors #
# may be used to endorse or promote products derived from this #
# software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
#########################################################################
from danlog import DanLog
from ddp import *
import os
import sys
from xml.dom import minidom
###########
# Logging #
###########
log = DanLog("BroadcastClient")
#############
# Constants #
#############
# Defaults below may be overridden by broadcastclient-settings.xml
# (see xmlBROADCASTSettingsRead / xmlBROADCASTSettingsWrite).
ALLOW_UNSIGNED_PACKETS = False
BACKEND_DATAMODE = "PSK500R"
BACKEND_HOSTNAME = "localhost"
BACKEND_PORT = 7362
DB_VERSION = 1000
DEBUG_MODE = False
DISABLE_CRYPTO = False
SPECIFICATION = 0
USE_TCP = 0
# Settings file read/written next to the script.
XML_SETTINGS_FILE = "broadcastclient-settings.xml"
###############
# Subroutines #
###############
def cBool(value):
    """
    Interpret value as a boolean: "true"/"1" (any case) map to True,
    anything else - including "false"/"0" - maps to False.
    """
    return str(value).lower() in ("true", "1")
def exitProgram():
    """Terminate the process with a zero (success) exit status."""
    sys.exit(0)
def main():
    # NOTE: this banner is a runtime log message, not a comment block.
    log.info("""
#########################################################################
# Copyright/License Notice (BSD License)                                #
#########################################################################
#########################################################################
# Copyright (c) 2010-2012, Daniel Knaggs - 2E0DPK/M6DPK                 #
# All rights reserved.                                                  #
#                                                                       #
# Redistribution and use in source and binary forms, with or without   #
# modification, are permitted provided that the following conditions   #
# are met: -                                                            #
#                                                                       #
#   * Redistributions of source code must retain the above copyright   #
#     notice, this list of conditions and the following disclaimer.    #
#                                                                       #
#   * Redistributions in binary form must reproduce the above copyright #
#     notice, this list of conditions and the following disclaimer in  #
#     the documentation and/or other materials provided with the       #
#     distribution.                                                     #
#                                                                       #
#   * Neither the name of the author nor the names of its contributors #
#     may be used to endorse or promote products derived from this     #
#     software without specific prior written permission.              #
#                                                                       #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS  #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT    #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT     #
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT  #
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
#########################################################################
""")
    log.info("")
    log.info("Broadcast Client")
    log.info("================")
    log.info("Checking settings...")
    # First run: write a default settings file and ask the user to edit it.
    if os.path.exists(XML_SETTINGS_FILE) == False:
        log.warn("The XML settings file doesn't exist, create one...")
        xmlBROADCASTSettingsWrite()
        log.info("The XML settings file has been created using the default settings.  Please edit it and restart the broadcast client once you're happy with the settings.")
        exitProgram()
    else:
        log.info("Reading XML settings...")
        xmlBROADCASTSettingsRead()
        # This will ensure it will have any new settings in
        # (re-write the file after reading so newly-added options appear;
        # the previous file is kept as a one-deep .bak backup).
        if os.path.exists(XML_SETTINGS_FILE + ".bak"):
            os.unlink(XML_SETTINGS_FILE + ".bak")
        os.rename(XML_SETTINGS_FILE, XML_SETTINGS_FILE + ".bak")
        xmlBROADCASTSettingsWrite()
    log.info("Setting up DDP...")
    ddp = DDP(hostname = BACKEND_HOSTNAME, port = BACKEND_PORT, data_mode = BACKEND_DATAMODE, timeout = 60., ack_timeout = 30., tx_hangtime = 1.25, data_length = 512, specification = SPECIFICATION, disable_ec = False, disable_crypto = DISABLE_CRYPTO, allow_unsigned_packets = ALLOW_UNSIGNED_PACKETS, application = "DDP Example: Broadcast Client", ignore_broadcast_packets = False, debug_mode = DEBUG_MODE)
    log.info("Listening for broadcast packets...")
    # Receive loop: runs until Ctrl+C (KeyboardInterrupt) breaks out.
    while True:
        try:
            data = ddp.receiveDataFromAny(ddp.BROADCAST_CALLSIGN, True)
            if data is not None:
                # Check the flags
                d = data[0]
                packet = data[1]
                if len(d) > 0:
                    if packet[ddp.SECTION_DESTINATION] == ddp.BROADCAST_CALLSIGN:
                        log.info("")
                        log.info("*********************")
                        log.info("* BROADCAST MESSAGE *")
                        log.info("*********************")
                        log.info("From:    %s" % packet[ddp.SECTION_SOURCE])
                        log.info("Message: %s" % str(d))
                        log.info("")
                else:
                    log.warn("Unable to receive the message from %s as nothing was received." % packet[ddp.SECTION_SOURCE])
            else:
                log.warn("No packets have been received.")
        except KeyboardInterrupt:
            break
        except Exception, ex:
            # NOTE: Python 2 except syntax; this module is Python 2 only.
            log.fatal(ex)
    log.info("Cleaning up...")
    ddp.dispose()
    ddp = None
    log.info("Exiting...")
    sys.exit(0)
def xmlBROADCASTSettingsRead():
    """
    Load settings from XML_SETTINGS_FILE into the module-level globals.

    Each <Setting> element carries one option as an XML attribute.
    Unknown attributes are logged and ignored; if the file does not
    exist the function silently does nothing.
    """
    global ALLOW_UNSIGNED_PACKETS, BACKEND_DATAMODE, BACKEND_HOSTNAME, BACKEND_PORT, DEBUG_MODE, DISABLE_CRYPTO, SPECIFICATION, USE_TCP
    if os.path.exists(XML_SETTINGS_FILE):
        xmldoc = minidom.parse(XML_SETTINGS_FILE)
        myvars = xmldoc.getElementsByTagName("Setting")
        for var in myvars:
            for key in var.attributes.keys():
                val = str(var.attributes[key].value)
                # Now put the correct values to correct key
                if key == "BackendDataMode":
                    BACKEND_DATAMODE = val.upper()
                elif key == "BackendHostname":
                    BACKEND_HOSTNAME = val
                elif key == "BackendPort":
                    # BUG FIX: the port is numeric (default 7362) and was
                    # previously assigned as an upper-cased *string*
                    # (val.upper()), which would break the backend socket
                    # connection.  Parse it as an integer like the other
                    # numeric settings.
                    BACKEND_PORT = int(val)
                elif key == "Specification":
                    SPECIFICATION = int(val)
                elif key == "UseTCP":
                    USE_TCP = int(val)
                elif key == "AllowUnsignedPackets":
                    ALLOW_UNSIGNED_PACKETS = cBool(val)
                elif key == "DisableCrypto":
                    DISABLE_CRYPTO = cBool(val)
                elif key == "DebugMode":
                    DEBUG_MODE = cBool(val)
                else:
                    log.warn("XML setting attribute \"%s\" isn't known.  Ignoring..." % key)
def xmlBROADCASTSettingsWrite():
    """
    Write XML_SETTINGS_FILE with the current settings.

    Does nothing if the file already exists (an existing file is never
    overwritten).  Each option is stored as an attribute on its own
    <Setting> element so the file is easy to hand-edit.
    """
    if os.path.exists(XML_SETTINGS_FILE):
        return
    xmldoc = minidom.Document()
    # Create header
    settings = xmldoc.createElement("Broadcast")
    xmldoc.appendChild(settings)
    # Write each of the details one at a time, makes it easier for someone to alter the file using a text editor
    for attribute, value in (
            ("BackendDataMode", BACKEND_DATAMODE),
            ("BackendHostname", BACKEND_HOSTNAME),
            ("BackendPort", BACKEND_PORT),
            ("Specification", SPECIFICATION),
            ("UseTCP", USE_TCP),
            ("AllowUnsignedPackets", ALLOW_UNSIGNED_PACKETS),
            ("DisableCrypto", DISABLE_CRYPTO),
            ("DebugMode", DEBUG_MODE)):
        var = xmldoc.createElement("Setting")
        var.setAttribute(attribute, str(value))
        settings.appendChild(var)
    # Finally, save to the file.  BUG FIX: use open() instead of the
    # Python 2-only file() builtin (removed in Python 3), and close the
    # handle even if the write raises, so the descriptor cannot leak.
    xmloutput = open(XML_SETTINGS_FILE, "w")
    try:
        xmloutput.write(xmldoc.toprettyxml())
    finally:
        xmloutput.close()
########
# Main #
########
# Script entry point: run main() only when executed directly, not when
# this module is imported.
if __name__ == "__main__":
    main()
|
|
import pandas as pd
import numpy as np
import xlrd
import ceo
from ceo import data_cleaning as dc
import os
import os.path as op
import inspect
#Location for Original Data
# Resolve the packaged data directory relative to the installed `ceo` package.
data_path = op.join(ceo.__path__[0], 'Data')
data_path = op.join(data_path, 'Original Data')
#Input data
# NOTE(review): `sheetname` is the legacy pandas keyword (renamed to
# `sheet_name` in pandas 0.21) — confirm the pinned pandas version supports it.
df = pd.read_excel(op.join(data_path, 'State Energy Data System.xlsx'),sheetname=3)
gdp1=pd.read_csv(op.join(data_path, 'GDP.csv'),skiprows=4,index_col=1)
climate_data = pd.read_csv(op.join(data_path, 'climate_annual.txt'),sep = ' ',encoding = 'utf-8')
oil = pd.read_excel(op.join(data_path, 'Annual Average Crude Oil Price.xlsx'),skiprows=4)
# Workbook handle (not a DataFrame) — add_clprb is exercised with it below.
clprb = xlrd.open_workbook(op.join(data_path, 'CLPRB.xlsx'))
emfdb=pd.read_excel(op.join(data_path, 'EMFDB.xlsx'))
enprp=pd.read_excel(op.join(data_path, 'ENPRP.xlsx'))
ngmpb=pd.read_excel(op.join(data_path, 'NGMPB.xlsx'))
paprb=pd.read_excel(op.join(data_path, 'PAPRB.xlsx'))
# Small synthetic frame used by the extraction tests: three states, three
# MSN codes, yearly columns.
data = pd.DataFrame([['a','aa',1,2,3,4,5],['a','bb',6,7,8,9,10],['a','cc',11,12,13,14,15],['b','aa',1,2,3,4,5],['b','bb',6,7,8,9,10],['b','cc',11,12,13,14,15],['c','aa',1,2,3,4,5],['c','bb',6,7,8,9,10],['c','cc',11,12,13,14,15]],columns=['State','MSN',1960,1970,1980,1990,2000])
# Deliberately incomplete state list (UT and WY are absent compared with the
# full lists used in the tests below) — used to trigger coverage failures.
US_states_missing = ["AL","AK","AZ","AR","CA","CO","CT","DE","DC","FL","GA","HI","ID","IL","IN","IA","KS","KY","LA","ME","MD","MA","MI","MN","MS","MO","MT","NE","NV","NH","NJ","NM","NY","NC","ND","OH","OK","OR","PA","RI","SC","SD","TN","TX","VT","VA","WA","WV","WI"]
def test_data_extract():
    """
    Function to test data_extract

    Checks that a known extraction matches the expected frame and that
    invalid argument types raise TypeError.

    Input:
        None
    Returns:
        None
    """
    data = pd.DataFrame([['a','aa',1,2,3,4,5],['a','bb',6,7,8,9,10],['a','cc',11,12,13,14,15],['b','aa',1,2,3,4,5],['b','bb',6,7,8,9,10],['b','cc',11,12,13,14,15],['c','aa',1,2,3,4,5],['c','bb',6,7,8,9,10],['c','cc',11,12,13,14,15]],columns=['State','MSN',1960,1970,1980,1990,2000])
    result1 = pd.DataFrame([[1960,1,6],[1970,2,7],[1980,3,8],[1990,4,9],[2000,5,10]],columns=['Year','aa','bb'])
    df1 = dc.data_extract(data,'a',['aa','bb'])
    # Compare element-wise and reduce to a single boolean.  The previous
    # check, all(df1 == result1), iterated over the *column labels* of the
    # boolean frame (non-empty strings are always truthy), so it could
    # never fail.
    assert (df1 == result1).all().all(), 'Data extraction incorrect!'
    # A non-DataFrame first argument must raise TypeError.
    try:
        dc.data_extract([[1,2,3]],1,['aa','bb'])
        assert False, 'Error not raised'
    except TypeError:
        pass
    # A non-string state must raise TypeError.
    try:
        dc.data_extract(data,1,['aa','bb'])
        assert False, 'Error not raised'
    except TypeError:
        pass
    # A non-list MSN argument must raise TypeError.
    try:
        dc.data_extract(data,'state','aa')
        assert False, 'Error not raised'
    except TypeError:
        pass
    return
def test_data_extract_all():
    """
    Function to test data_extract_all

    Verifies that invalid state/MSN argument types raise TypeError, then
    removes any CSV files created in the test directory.

    Input:
        None
    Returns:
        None
    """
    #Location of Test Data
    path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    path = op.join(path, 'Data')
    path_test = op.join(path, 'Test Data')
    if not os.path.exists(path_test):
        os.makedirs(path_test)
    # A non-string entry in the state list must raise TypeError.
    try:
        dc.data_extract_all(data,['a',2],['aa','bb'],path_test)
        assert False, 'Error not raised'
    except TypeError:
        pass
    # A non-string entry in the MSN list must raise TypeError.
    try:
        dc.data_extract_all(data,['a','b'],[['a'],'bb'],path_test)
        assert False, 'Error not raised'
    except TypeError:
        pass
    names = os.listdir(path_test)
    #Removing created csv files.  op.join replaces the previous hard-coded
    #'\\' separator so cleanup also works on non-Windows platforms.
    for i in names:
        os.remove(op.join(path_test, i))
    return
def test_add_clprb():
    """
    Function to test add_clprb

    add_clprb must raise AssertionError when the extracted files do not
    cover every state in the supplied list (US_states_missing omits some
    states on purpose).

    Input:
        None
    Returns:
        None
    """
    #Location of Test Data
    path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    path = op.join(path, 'Data')
    path_test = op.join(path, 'Test Data')
    if not os.path.exists(path_test):
        os.makedirs(path_test)
    dc.data_extract_all(df,US_states_missing,['HYTCP'],path_test)
    # Record whether the expected error was raised instead of asserting
    # inside the try-block: the previous `assert False` inside `try` was
    # itself caught by `except AssertionError`, so the test could never fail.
    raised = False
    try:
        dc.add_clprb(clprb,US_states_missing,path_test)
    except AssertionError:
        raised = True
    assert raised, 'Error not raised'
    names = os.listdir(path_test)
    #Removing created csv files (portable path join)
    for i in names:
        os.remove(op.join(path_test, i))
    return
def test_add_msn():
    """
    Function to test add_msn

    Extracts per-state CSVs, adds the PAPRB series via add_msn, and checks
    that every generated file carries a PAPRB column.

    Input:
        None
    Returns:
        None
    """
    #Location of Test Data
    path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    path = op.join(path, 'Data')
    path_test = op.join(path, 'Test Data')
    if not os.path.exists(path_test):
        os.makedirs(path_test)
    statelist=["AL","AK","AZ","AR","CA","CO","CT","DE","DC","FL","GA","HI","ID","IL","IN","IA","KS","KY","LA","ME","MD","MA","MI","MN","MS","MO","MT","NE","NV","NH","NJ","NM","NY","NC","ND","OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VT","VA","WA","WV","WI","WY"]
    #Extracting data, adding MSN
    dc.data_extract_all(df,statelist,["HYTCP","WYTCP","SOEGP","NUETP"],path_test)
    dc.add_msn(paprb,statelist,'PAPRB',path_test)
    names = os.listdir(path_test)
    for i in names:
        # op.join replaces the hard-coded '\\' separator (portable).
        d = pd.read_csv(op.join(path_test, i))
        assert 'PAPRB' in d.columns,'Data Cleaning Incorrect'
    names = os.listdir(path_test)
    #Removing created csv files
    for i in names:
        os.remove(op.join(path_test, i))
    return
def test_climate():
    """
    Function to test climate

    Extracts per-state CSVs, adds GDP and climate (PCP) data, and checks
    that every generated file carries a PCP column.

    Input:
        None
    Returns:
        None
    """
    #Location of Test Data
    path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    path = op.join(path, 'Data')
    path_test = op.join(path, 'Test Data')
    if not os.path.exists(path_test):
        os.makedirs(path_test)
    statelist=["AL","AK","AZ","AR","CA","CO","CT","DE","DC","FL","GA","HI","ID","IL","IN","IA","KS","KY","LA","ME","MD","MA","MI","MN","MS","MO","MT","NE","NV","NH","NJ","NM","NY","NC","ND","OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VT","VA","WA","WV","WI","WY"]
    #Extracting data, adding GDP and Climate data
    dc.data_extract_all(df,statelist,["HYTCP","WYTCP","SOEGP","NUETP"],path_test)
    dc.add_gdp(gdp1,statelist,path_test)
    dc.climate(climate_data,'PCP',statelist,path_test)
    names = os.listdir(path_test)
    for i in names:
        # op.join replaces the hard-coded '\\' separator (portable).
        d = pd.read_csv(op.join(path_test, i))
        assert 'PCP' in d.columns,'Data Cleaning Incorrect'
    names = os.listdir(path_test)
    #Removing created csv files
    for i in names:
        os.remove(op.join(path_test, i))
    return
def test_oil_price():
    """
    Function to test oil_price

    Extracts per-state CSVs, adds GDP and oil price data, and checks that
    every generated file carries an 'Inflation Adjusted Price' column.

    Input:
        None
    Returns:
        None
    """
    #Location of Test Data
    path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    path = op.join(path, 'Data')
    path_test = op.join(path, 'Test Data')
    if not os.path.exists(path_test):
        os.makedirs(path_test)
    statelist=["AL","AK","AZ","AR","CA","CO","CT","DE","DC","FL","GA","HI","ID","IL","IN","IA","KS","KY","LA","ME","MD","MA","MI","MN","MS","MO","MT","NE","NV","NH","NJ","NM","NY","NC","ND","OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VT","VA","WA","WV","WI","WY"]
    #Extracting data, adding GDP and Oil Price
    dc.data_extract_all(df,statelist,["HYTCP","WYTCP","SOEGP","NUETP"],path_test)
    dc.add_gdp(gdp1,statelist,path_test)
    dc.oil_price(oil,statelist,path_test)
    names = os.listdir(path_test)
    for i in names:
        # op.join replaces the hard-coded '\\' separator (portable).
        d = pd.read_csv(op.join(path_test, i))
        #Checking column 'Inflation Adjusted Price' present in CSV
        assert 'Inflation Adjusted Price' in d.columns,'Data Cleaning Incorrect'
    names = os.listdir(path_test)
    #Removing created csv files
    for i in names:
        os.remove(op.join(path_test, i))
    return
def test_add_gdp():
    """
    Function to test add_gdp

    Extracts per-state CSVs, adds GDP data, and checks that every
    generated file carries a GDP column.

    Input:
        None
    Returns:
        None
    """
    #Location of Test Data
    path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    path = op.join(path, 'Data')
    path_test = op.join(path, 'Test Data')
    if not os.path.exists(path_test):
        os.makedirs(path_test)
    statelist=["AL","AK","AZ","AR","CA","CO","CT","DE","DC","FL","GA","HI","ID","IL","IN","IA","KS","KY","LA","ME","MD","MA","MI","MN","MS","MO","MT","NE","NV","NH","NJ","NM","NY","NC","ND","OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VT","VA","WA","WV","WI","WY"]
    #Extracting data, adding GDP
    dc.data_extract_all(df,statelist,["HYTCP","WYTCP","SOEGP","NUETP"],path_test)
    dc.add_gdp(gdp1,statelist,path_test)
    names = os.listdir(path_test)
    for i in names:
        # op.join replaces the hard-coded '\\' separator (portable).
        d = pd.read_csv(op.join(path_test, i))
        #Checking column 'GDP' present in CSV
        assert 'GDP' in d.columns,'Data Cleaning Incorrect'
    names = os.listdir(path_test)
    #Removing created csv files
    for i in names:
        os.remove(op.join(path_test, i))
    return
def test_clean_all_data():
    """
    Function to test clean_all_data

    Runs the full cleaning pipeline and checks that every per-state output
    CSV carries the expected predictor columns.

    Input:
        None
    Returns:
        None
    """
    dc.clean_all_data()
    #Location of Missing Data
    path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    path = op.join(path, 'Data')
    path_clean = op.join(path, 'Cleaned Data with Missing Predictors')
    states = os.listdir(path_clean)
    for i in states:
        path_csv = op.join(path_clean,i)
        pred = pd.read_csv(path_csv)
        #Checking column name in CSV.  `in` on DataFrame.columns is the
        #idiomatic membership test; the failure message previously said
        #'predict_all' (copied from another test).
        assert 'EMFDB' in pred.columns, 'clean_all_data incorrect'
        assert 'HYTCP' in pred.columns, 'clean_all_data incorrect'
        assert 'NGMPB' in pred.columns, 'clean_all_data incorrect'
        assert 'PCP' in pred.columns, 'clean_all_data incorrect'
    return
|
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import logging
import optparse
import os
import shlex
import socket
import sys
from telemetry.core import platform
from telemetry.core import util
from telemetry.internal.browser import browser_finder
from telemetry.internal.browser import browser_finder_exceptions
from telemetry.internal.browser import profile_types
from telemetry.internal.platform import device_finder
from telemetry.internal.platform.profiler import profiler_finder
from telemetry.util import wpr_modes
import net_configs
class BrowserFinderOptions(optparse.Values):
  """Options to be used for discovering a browser."""
  def __init__(self, browser_type=None):
    optparse.Values.__init__(self)
    self.browser_type = browser_type
    self.browser_executable = None
    self.chrome_root = None
    self.device = None
    self.cros_ssh_identity = None
    self.extensions_to_load = []
    # If set, copy the generated profile to this path on exit.
    self.output_profile_path = None
    self.cros_remote = None
    self.profiler = None
    self.verbosity = 0
    # Browser launch options live on a separate object; see BrowserOptions.
    self.browser_options = BrowserOptions()
    self.output_file = None
    self.android_rndis = False
    self.no_performance_mode = False
  def __repr__(self):
    return str(sorted(self.__dict__.items()))
  def Copy(self):
    """Returns a deep copy of these options."""
    return copy.deepcopy(self)
  def CreateParser(self, *args, **kwargs):
    """Returns an optparse.OptionParser with browser-discovery options.

    The returned parser's parse_args is wrapped (see ParseArgs below) so
    parsed values are merged into this BrowserFinderOptions instance.
    """
    parser = optparse.OptionParser(*args, **kwargs)
    # Selection group
    group = optparse.OptionGroup(parser, 'Which browser to use')
    group.add_option('--browser',
        dest='browser_type',
        default=None,
        help='Browser type to run, '
             'in order of priority. Supported values: list,%s' %
             ','.join(browser_finder.FindAllBrowserTypes(self)))
    group.add_option('--browser-executable',
        dest='browser_executable',
        help='The exact browser to run.')
    group.add_option('--chrome-root',
        dest='chrome_root',
        help='Where to look for chrome builds.'
             'Defaults to searching parent dirs by default.')
    group.add_option('--device',
        dest='device',
        help='The device ID to use.'
             'If not specified, only 0 or 1 connected devices are supported. If'
             'specified as "android", all available Android devices are used.')
    group.add_option('--target-arch',
        dest='target_arch',
        help='The target architecture of the browser. Options available are: '
             'x64, x86_64, arm, arm64 and mips. '
             'Defaults to the default architecture of the platform if omitted.')
    group.add_option(
        '--remote',
        dest='cros_remote',
        help='The hostname of a remote ChromeOS device to use.')
    group.add_option(
        '--remote-ssh-port',
        type=int,
        default=socket.getservbyname('ssh'),
        dest='cros_remote_ssh_port',
        help='The SSH port of the remote ChromeOS device (requires --remote).')
    # Default the ssh identity to the checked-in chromite test key when it
    # is present in the Telemetry third_party directory.
    identity = None
    testing_rsa = os.path.join(
        util.GetTelemetryThirdPartyDir(), 'chromite', 'ssh_keys', 'testing_rsa')
    if os.path.exists(testing_rsa):
      identity = testing_rsa
    group.add_option('--identity',
        dest='cros_ssh_identity',
        default=identity,
        help='The identity file to use when ssh\'ing into the ChromeOS device')
    parser.add_option_group(group)
    # Debugging options
    group = optparse.OptionGroup(parser, 'When things go wrong')
    profiler_choices = profiler_finder.GetAllAvailableProfilers()
    group.add_option(
        '--profiler', default=None, type='choice',
        choices=profiler_choices,
        help='Record profiling data using this tool. Supported values: ' +
        ', '.join(profiler_choices))
    group.add_option(
        '-v', '--verbose', action='count', dest='verbosity',
        help='Increase verbosity level (repeat as needed)')
    group.add_option('--print-bootstrap-deps',
        action='store_true',
        help='Output bootstrap deps list.')
    parser.add_option_group(group)
    # Platform options
    group = optparse.OptionGroup(parser, 'Platform options')
    group.add_option('--no-performance-mode', action='store_true',
        help='Some platforms run on "full performance mode" where the '
        'test is executed at maximum CPU speed in order to minimize noise '
        '(specially important for dashboards / continuous builds). '
        'This option prevents Telemetry from tweaking such platform settings.')
    group.add_option('--android-rndis', dest='android_rndis', default=False,
        action='store_true', help='Use RNDIS forwarding on Android.')
    group.add_option('--no-android-rndis', dest='android_rndis',
        action='store_false', help='Do not use RNDIS forwarding on Android.'
        ' [default]')
    parser.add_option_group(group)
    # Browser options.
    self.browser_options.AddCommandLineArgs(parser)
    # Wrap parse_args so the parsed values land on this options object and
    # the 'list' pseudo-values are handled before returning to the caller.
    real_parse = parser.parse_args
    def ParseArgs(args=None):
      # Seed unset attributes with parser defaults before parsing.
      defaults = parser.get_default_values()
      for k, v in defaults.__dict__.items():
        if k in self.__dict__ and self.__dict__[k] != None:
          continue
        self.__dict__[k] = v
      ret = real_parse(args, self) # pylint: disable=E1121
      # Map -v counts onto logging levels.
      if self.verbosity >= 2:
        logging.getLogger().setLevel(logging.DEBUG)
      elif self.verbosity:
        logging.getLogger().setLevel(logging.INFO)
      else:
        logging.getLogger().setLevel(logging.WARNING)
      # '--device list' prints the available devices and exits.
      if self.device == 'list':
        devices = device_finder.GetDevicesMatchingOptions(self)
        print 'Available devices:'
        for device in devices:
          print ' ', device.name
        sys.exit(0)
      if self.browser_executable and not self.browser_type:
        self.browser_type = 'exact'
      # '--browser list' prints the browsers found per device and exits.
      if self.browser_type == 'list':
        devices = device_finder.GetDevicesMatchingOptions(self)
        if not devices:
          sys.exit(0)
        browser_types = {}
        for device in devices:
          try:
            possible_browsers = browser_finder.GetAllAvailableBrowsers(self,
                                                                       device)
            browser_types[device.name] = sorted(
              [browser.browser_type for browser in possible_browsers])
          except browser_finder_exceptions.BrowserFinderException as ex:
            print >> sys.stderr, 'ERROR: ', ex
            sys.exit(1)
        print 'Available browsers:'
        if len(browser_types) == 0:
          print ' No devices were found.'
        for device_name in sorted(browser_types.keys()):
          print ' ', device_name
          for browser_type in browser_types[device_name]:
            print ' ', browser_type
        sys.exit(0)
      # Parse browser options.
      self.browser_options.UpdateFromParseResults(self)
      return ret
    parser.parse_args = ParseArgs
    return parser
  def AppendExtraBrowserArgs(self, args):
    """Forwards extra browser args to the underlying BrowserOptions."""
    self.browser_options.AppendExtraBrowserArgs(args)
  def MergeDefaultValues(self, defaults):
    """Adopts attributes from `defaults` that are not already set here."""
    for k, v in defaults.__dict__.items():
      self.ensure_value(k, v)
class BrowserOptions(object):
  """Options to be used for launching a browser."""
  def __init__(self):
    self.browser_type = None
    self.show_stdout = False
    # When set to True, the browser will use the default profile. Telemetry
    # will not provide an alternate profile directory.
    self.dont_override_profile = False
    self.profile_dir = None
    self.profile_type = None
    self._extra_browser_args = set()
    self.extra_wpr_args = []
    self.wpr_mode = wpr_modes.WPR_OFF
    self.netsim = None
    self.full_performance_mode = True
    # The amount of time Telemetry should wait for the browser to start.
    # This property is not exposed as a command line option.
    self._browser_startup_timeout = 60
    self.disable_background_networking = True
    self.no_proxy_server = False
    self.browser_user_agent_type = None
    # NOTE(review): attribute name misspells "system" but is part of the
    # public interface — renaming would break external callers.
    self.clear_sytem_cache_for_browser_and_profile_on_start = False
    self.startup_url = 'about:blank'
    # Background pages of built-in component extensions can interfere with
    # performance measurements.
    self.disable_component_extensions_with_background_pages = True
    # Disable default apps.
    self.disable_default_apps = True
    # Whether to use the new code path for choosing an ephemeral port for
    # DevTools. The bots set this to true. When Chrome 37 reaches stable,
    # remove this setting and the old code path. http://crbug.com/379980
    self.use_devtools_active_port = False
    self.enable_logging = False
    # The cloud storage bucket & path for uploading logs data produced by the
    # browser to.
    self.logs_cloud_bucket = None
    self.logs_cloud_remote_path = None
    # TODO(danduong): Find a way to store target_os here instead of
    # finder_options.
    self._finder_options = None
  def __repr__(self):
    # This works around the infinite loop caused by the introduction of a
    # circular reference with _finder_options.
    obj = self.__dict__.copy()
    del obj['_finder_options']
    return str(sorted(obj.items()))
  def IsCrosBrowserOptions(self):
    # Overridden to return True by CrosBrowserOptions.
    return False
  @classmethod
  def AddCommandLineArgs(cls, parser):
    ############################################################################
    # Please do not add any more options here without first discussing with   #
    # a telemetry owner. This is not the right place for platform-specific    #
    # options.                                                                #
    ############################################################################
    group = optparse.OptionGroup(parser, 'Browser options')
    profile_choices = profile_types.GetProfileTypes()
    group.add_option('--profile-type',
        dest='profile_type',
        type='choice',
        default='clean',
        choices=profile_choices,
        help=('The user profile to use. A clean profile is used by default. '
              'Supported values: ' + ', '.join(profile_choices)))
    group.add_option('--profile-dir',
        dest='profile_dir',
        help='Profile directory to launch the browser with. '
             'A clean profile is used by default')
    group.add_option('--extra-browser-args',
        dest='extra_browser_args_as_string',
        help='Additional arguments to pass to the browser when it starts')
    group.add_option('--extra-wpr-args',
        dest='extra_wpr_args_as_string',
        help=('Additional arguments to pass to Web Page Replay. '
              'See third_party/webpagereplay/replay.py for usage.'))
    group.add_option('--netsim', default=None, type='choice',
        choices=net_configs.NET_CONFIG_NAMES,
        help=('Run benchmark under simulated network conditions. '
              'Will prompt for sudo. Supported values: ' +
              ', '.join(net_configs.NET_CONFIG_NAMES)))
    group.add_option('--show-stdout',
        action='store_true',
        help='When possible, will display the stdout of the process')
    # This hidden option is to be removed, and the older code path deleted,
    # once Chrome 37 reaches Stable. http://crbug.com/379980
    group.add_option('--use-devtools-active-port',
        action='store_true',
        help=optparse.SUPPRESS_HELP)
    group.add_option('--enable-browser-logging',
        dest='enable_logging',
        action='store_true',
        help=('Enable browser logging. The log file is saved in temp directory.'
              "Note that enabling this flag affects the browser's "
              'performance'))
    parser.add_option_group(group)
    group = optparse.OptionGroup(parser, 'Compatibility options')
    group.add_option('--gtest_output',
        help='Ignored argument for compatibility with runtest.py harness')
    parser.add_option_group(group)
  def UpdateFromParseResults(self, finder_options):
    """Copies our options from finder_options"""
    # Attributes parsed onto finder_options that actually belong here; each
    # is moved over and removed from finder_options.
    browser_options_list = [
        'extra_browser_args_as_string',
        'extra_wpr_args_as_string',
        'enable_logging',
        'netsim',
        'profile_dir',
        'profile_type',
        'show_stdout',
        'use_devtools_active_port',
    ]
    for o in browser_options_list:
      a = getattr(finder_options, o, None)
      if a is not None:
        setattr(self, o, a)
        delattr(finder_options, o)
    self.browser_type = finder_options.browser_type
    self._finder_options = finder_options
    # Split the raw --extra-browser-args string shell-style, then drop the
    # temporary *_as_string attribute.
    if hasattr(self, 'extra_browser_args_as_string'): # pylint: disable=E1101
      tmp = shlex.split(
          self.extra_browser_args_as_string) # pylint: disable=E1101
      self.AppendExtraBrowserArgs(tmp)
      delattr(self, 'extra_browser_args_as_string')
    if hasattr(self, 'extra_wpr_args_as_string'): # pylint: disable=E1101
      tmp = shlex.split(
          self.extra_wpr_args_as_string) # pylint: disable=E1101
      self.extra_wpr_args.extend(tmp)
      delattr(self, 'extra_wpr_args_as_string')
    if self.profile_type == 'default':
      self.dont_override_profile = True
    if self.profile_dir and self.profile_type != 'clean':
      logging.critical(
          "It's illegal to specify both --profile-type and --profile-dir.\n"
          "For more information see: http://goo.gl/ngdGD5")
      sys.exit(1)
    if self.profile_dir and not os.path.isdir(self.profile_dir):
      logging.critical(
          "Directory specified by --profile-dir (%s) doesn't exist "
          "or isn't a directory.\n"
          "For more information see: http://goo.gl/ngdGD5" % self.profile_dir)
      sys.exit(1)
    if not self.profile_dir:
      self.profile_dir = profile_types.GetProfileDir(self.profile_type)
    # This deferred import is necessary because browser_options is imported in
    # telemetry/telemetry/__init__.py.
    finder_options.browser_options = CreateChromeBrowserOptions(self)
  @property
  def finder_options(self):
    return self._finder_options
  @property
  def extra_browser_args(self):
    return self._extra_browser_args
  @property
  def browser_startup_timeout(self):
    return self._browser_startup_timeout
  @browser_startup_timeout.setter
  def browser_startup_timeout(self, value):
    self._browser_startup_timeout = value
  def AppendExtraBrowserArgs(self, args):
    # Accept either a single argument string or a list of them.
    if isinstance(args, list):
      self._extra_browser_args.update(args)
    else:
      self._extra_browser_args.add(args)
def CreateChromeBrowserOptions(br_options):
  """Wraps `br_options` in CrosBrowserOptions when targeting ChromeOS.

  The options are returned unchanged for every other host OS / browser
  type combination.
  """
  requested = br_options.browser_type
  targets_cros_browser = bool(requested) and requested.startswith('cros')
  on_cros_host = platform.GetHostPlatform().GetOSName() == 'chromeos'
  if on_cros_host or targets_cros_browser:
    return CrosBrowserOptions(br_options)
  return br_options
class ChromeBrowserOptions(BrowserOptions):
  """Browser options specialized for Chrome.

  Construction starts from the BrowserOptions defaults and then adopts
  every attribute of the source options object wholesale.
  """
  def __init__(self, br_options):
    super(ChromeBrowserOptions, self).__init__()
    # Adopt all attributes from the source options object.
    self.__dict__.update(vars(br_options))
class CrosBrowserOptions(ChromeBrowserOptions):
  """Browser options specialized for ChromeOS."""
  def __init__(self, br_options):
    super(CrosBrowserOptions, self).__init__(br_options)
    # Login behaviour: log in automatically with a stub (non-GAIA) account.
    self.auto_login = True
    self.gaia_login = False
    self.username = 'test@test.test'
    self.password = ''
    # Session setup: start on the plain login screen (no OOBE browser),
    # wipe enterprise policy first, and keep GAIA/enterprise services off.
    self.create_browser_with_oobe = False
    self.clear_enterprise_policy = True
    self.disable_gaia_services = True
  def IsCrosBrowserOptions(self):
    return True
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for BigQuery sources and sinks."""
# pytype: skip-file
import datetime
import decimal
import json
import logging
import os
import pickle
import random
import re
import time
import unittest
import uuid
import hamcrest as hc
import mock
import pytz
from nose.plugins.attrib import attr
from parameterized import param
from parameterized import parameterized
import apache_beam as beam
from apache_beam.internal import pickler
from apache_beam.internal.gcp.json_value import to_json_value
from apache_beam.io.filebasedsink_test import _TestCaseWithTempDirCleanUp
from apache_beam.io.gcp import bigquery_tools
from apache_beam.io.gcp.bigquery import TableRowJsonCoder
from apache_beam.io.gcp.bigquery import WriteToBigQuery
from apache_beam.io.gcp.bigquery import _StreamToBigQuery
from apache_beam.io.gcp.bigquery_file_loads_test import _ELEMENTS
from apache_beam.io.gcp.bigquery_read_internal import _JsonToDictCoder
from apache_beam.io.gcp.bigquery_read_internal import bigquery_export_destination_uri
from apache_beam.io.gcp.bigquery_tools import JSON_COMPLIANCE_ERROR
from apache_beam.io.gcp.bigquery_tools import BigQueryWrapper
from apache_beam.io.gcp.bigquery_tools import RetryStrategy
from apache_beam.io.gcp.internal.clients import bigquery
from apache_beam.io.gcp.pubsub import ReadFromPubSub
from apache_beam.io.gcp.tests import utils
from apache_beam.io.gcp.tests.bigquery_matcher import BigqueryFullResultMatcher
from apache_beam.io.gcp.tests.bigquery_matcher import BigqueryFullResultStreamingMatcher
from apache_beam.io.gcp.tests.bigquery_matcher import BigQueryTableMatcher
from apache_beam.options import value_provider
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.options.value_provider import StaticValueProvider
from apache_beam.runners.dataflow.test_dataflow_runner import TestDataflowRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.testing import test_utils
from apache_beam.testing.pipeline_verifiers import PipelineStateMatcher
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.test_stream import TestStream
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.display_test import DisplayDataItemMatcher
# Protect against environments where bigquery library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apitools.base.py.exceptions import HttpError
from google.cloud import bigquery as gcp_bigquery
except ImportError:
gcp_bigquery = None
HttpError = None
# pylint: enable=wrong-import-order, wrong-import-position
# Module-scoped logger, named after this test module.
_LOGGER = logging.getLogger(__name__)
@unittest.skipIf(
    HttpError is None or gcp_bigquery is None,
    'GCP dependencies are not installed')
class TestTableRowJsonCoder(unittest.TestCase):
  """Round-trip and error-handling tests for TableRowJsonCoder."""
  def test_row_as_table_row(self):
    # (name, BigQuery type) pairs covering the major scalars plus RECORD.
    field_specs = [('s', 'STRING'), ('i', 'INTEGER'), ('f', 'FLOAT'),
                   ('b', 'BOOLEAN'), ('n', 'NUMERIC'), ('r', 'RECORD'),
                   ('g', 'GEOGRAPHY')]
    cell_values = [
        'abc',
        123,
        123.456,
        True,
        decimal.Decimal('987654321.987654321'), {
            'a': 'b'
        },
        'LINESTRING(1 2, 3 4, 5 6, 7 8)'
    ]
    expected_json = (
        '{"s": "abc", '
        '"i": 123, '
        '"f": 123.456, '
        '"b": true, '
        '"n": "987654321.987654321", '
        '"r": {"a": "b"}, '
        '"g": "LINESTRING(1 2, 3 4, 5 6, 7 8)"}')
    schema = bigquery.TableSchema(
        fields=[
            bigquery.TableFieldSchema(name=name, type=typ)
            for name, typ in field_specs
        ])
    coder = TableRowJsonCoder(table_schema=schema)
    def to_cell_json(val):
      # NUMERIC values travel as strings; everything else as-is.
      if isinstance(val, decimal.Decimal):
        return to_json_value(str(val))
      return to_json_value(val)
    test_row = bigquery.TableRow(
        f=[bigquery.TableCell(v=to_cell_json(v)) for v in cell_values])
    self.assertEqual(expected_json, coder.encode(test_row))
    self.assertEqual(test_row, coder.decode(coder.encode(test_row)))
    # A coder without schema can still decode.
    self.assertEqual(
        test_row, TableRowJsonCoder().decode(coder.encode(test_row)))
  def test_row_and_no_schema(self):
    # Encoding requires a schema; constructing the coder does not.
    coder = TableRowJsonCoder()
    test_row = bigquery.TableRow(
        f=[
            bigquery.TableCell(v=to_json_value(value))
            for value in ['abc', 123, 123.456, True]
        ])
    with self.assertRaisesRegex(AttributeError,
                                r'^The TableRowJsonCoder requires'):
      coder.encode(test_row)
  def json_compliance_exception(self, value):
    # Helper: encoding `value` as a FLOAT cell must raise the JSON
    # compliance error (NaN/inf cannot be represented in JSON).
    with self.assertRaisesRegex(ValueError, re.escape(JSON_COMPLIANCE_ERROR)):
      schema = bigquery.TableSchema(
          fields=[bigquery.TableFieldSchema(name='f', type='FLOAT')])
      coder = TableRowJsonCoder(table_schema=schema)
      test_row = bigquery.TableRow(
          f=[bigquery.TableCell(v=to_json_value(value))])
      coder.encode(test_row)
  def test_invalid_json_nan(self):
    self.json_compliance_exception(float('nan'))
  def test_invalid_json_inf(self):
    self.json_compliance_exception(float('inf'))
  def test_invalid_json_neg_inf(self):
    self.json_compliance_exception(float('-inf'))
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestBigQuerySource(unittest.TestCase):
  """Construction and display-data tests for the native BigQuery source."""
  def _check_display_data(self, source, expected_pairs):
    # Assert that `source` exposes exactly the given (key, value) display
    # data items, in any order.
    dd = DisplayData.create_from(source)
    matchers = [DisplayDataItemMatcher(k, v) for k, v in expected_pairs]
    hc.assert_that(dd.items, hc.contains_inanyorder(*matchers))
  def test_display_data_item_on_validate_true(self):
    source = beam.io.BigQuerySource(
        'dataset.table', validate=True, use_dataflow_native_source=True)
    self._check_display_data(
        source, [('validation', True), ('table', 'dataset.table')])
  def test_table_reference_display_data(self):
    # Every supported table-spec flavour surfaces verbatim in display data.
    for table_spec in ('dataset.table',
                       'project:dataset.table',
                       'xyz.com:project:dataset.table'):
      source = beam.io.BigQuerySource(
          table_spec, use_dataflow_native_source=True)
      self._check_display_data(
          source, [('validation', False), ('table', table_spec)])
  def test_parse_table_reference(self):
    source = beam.io.BigQuerySource(
        'dataset.table', use_dataflow_native_source=True)
    self.assertEqual(source.table_reference.datasetId, 'dataset')
    self.assertEqual(source.table_reference.tableId, 'table')
    source = beam.io.BigQuerySource(
        'project:dataset.table', use_dataflow_native_source=True)
    self.assertEqual(source.table_reference.projectId, 'project')
    self.assertEqual(source.table_reference.datasetId, 'dataset')
    self.assertEqual(source.table_reference.tableId, 'table')
    source = beam.io.BigQuerySource(
        'xyz.com:project:dataset.table', use_dataflow_native_source=True)
    self.assertEqual(source.table_reference.projectId, 'xyz.com:project')
    self.assertEqual(source.table_reference.datasetId, 'dataset')
    self.assertEqual(source.table_reference.tableId, 'table')
    # A query-only source has no table reference and defaults to legacy SQL.
    source = beam.io.BigQuerySource(
        query='my_query', use_dataflow_native_source=True)
    self.assertEqual(source.query, 'my_query')
    self.assertIsNone(source.table_reference)
    self.assertTrue(source.use_legacy_sql)
  def test_query_only_display_data(self):
    source = beam.io.BigQuerySource(
        query='my_query', use_dataflow_native_source=True)
    self._check_display_data(
        source, [('validation', False), ('query', 'my_query')])
  def test_specify_query_sql_format(self):
    source = beam.io.BigQuerySource(
        query='my_query',
        use_standard_sql=True,
        use_dataflow_native_source=True)
    self.assertEqual(source.query, 'my_query')
    self.assertFalse(source.use_legacy_sql)
  def test_specify_query_flattened_records(self):
    source = beam.io.BigQuerySource(
        query='my_query',
        flatten_results=False,
        use_dataflow_native_source=True)
    self.assertFalse(source.flatten_results)
  def test_specify_query_unflattened_records(self):
    source = beam.io.BigQuerySource(
        query='my_query', flatten_results=True, use_dataflow_native_source=True)
    self.assertTrue(source.flatten_results)
  def test_specify_query_without_table(self):
    source = beam.io.BigQuerySource(
        query='my_query', use_dataflow_native_source=True)
    self.assertEqual(source.query, 'my_query')
    self.assertIsNone(source.table_reference)
  def test_date_partitioned_table_name(self):
    # Partition decorators ($YYYYMMDD) are accepted and shown verbatim.
    source = beam.io.BigQuerySource(
        'dataset.table$20030102',
        validate=True,
        use_dataflow_native_source=True)
    self._check_display_data(
        source, [('validation', True), ('table', 'dataset.table$20030102')])
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestJsonToDictCoder(unittest.TestCase):
  """Tests for _JsonToDictCoder, which decodes BigQuery JSON export rows."""
  @staticmethod
  def _make_schema(fields):
    """Build a bigquery.TableSchema from (name, type, mode, subfields) tuples.

    Each entry of *fields* is a 4-tuple; *subfields* holds nested tuples of
    the same shape for RECORD fields (empty list for scalar fields).
    """
    def _to_field_schemas(specs):
      for name, field_type, mode, subfields in specs:
        field_schema = bigquery.TableFieldSchema()
        field_schema.name = name
        field_schema.type = field_type
        field_schema.mode = mode
        if subfields:
          # RECORD fields carry their children recursively.
          field_schema.fields = list(_to_field_schemas(subfields))
        yield field_schema
    schema = bigquery.TableSchema()
    schema.fields = list(_to_field_schemas(fields))
    return schema
  def test_coder_is_pickable(self):
    """The coder is shipped to workers, so it must survive pickling."""
    try:
      schema = self._make_schema([
          ('record', 'RECORD', 'NULLABLE',
           [('float', 'FLOAT', 'NULLABLE', [])]),
          ('integer', 'INTEGER', 'NULLABLE', []),
      ])
      coder = _JsonToDictCoder(schema)
      pickler.loads(pickler.dumps(coder))
    except pickle.PicklingError:
      self.fail('{} is not pickable'.format(coder.__class__.__name__))
  def test_values_are_converted(self):
    """String-encoded values are coerced to the schema's field types."""
    schema = self._make_schema([('float', 'FLOAT', 'NULLABLE', []),
                                ('string', 'STRING', 'NULLABLE', [])])
    decoded = _JsonToDictCoder(schema).decode(
        b'{"float": "10.5", "string": "abc"}')
    self.assertEqual({'float': 10.5, 'string': 'abc'}, decoded)
  def test_null_fields_are_preserved(self):
    """Fields absent from the payload decode to None rather than vanish."""
    schema = self._make_schema([('float', 'FLOAT', 'NULLABLE', []),
                                ('string', 'STRING', 'NULLABLE', [])])
    decoded = _JsonToDictCoder(schema).decode(b'{"float": "10.5"}')
    self.assertEqual({'float': 10.5, 'string': None}, decoded)
  def test_record_field_is_properly_converted(self):
    """Nested RECORD fields are decoded recursively."""
    schema = self._make_schema([
        ('record', 'RECORD', 'NULLABLE',
         [('float', 'FLOAT', 'NULLABLE', [])]),
        ('integer', 'INTEGER', 'NULLABLE', []),
    ])
    decoded = _JsonToDictCoder(schema).decode(
        b'{"record": {"float": "55.5"}, "integer": 10}')
    self.assertEqual({'record': {'float': 55.5}, 'integer': 10}, decoded)
  def test_record_and_repeatable_field_is_properly_converted(self):
    """REPEATED RECORD fields decode to a list of converted dicts."""
    schema = self._make_schema([
        ('record', 'RECORD', 'REPEATED',
         [('float', 'FLOAT', 'NULLABLE', [])]),
        ('integer', 'INTEGER', 'NULLABLE', []),
    ])
    decoded = _JsonToDictCoder(schema).decode(
        b'{"record": [{"float": "55.5"}, {"float": "65.5"}], '
        b'"integer": 10}')
    self.assertEqual({'record': [{'float': 55.5}, {'float': 65.5}],
                      'integer': 10},
                     decoded)
  def test_repeatable_field_is_properly_converted(self):
    """REPEATED scalar fields decode each element to the field type."""
    schema = self._make_schema([
        ('repeated', 'FLOAT', 'REPEATED', []),
        ('integer', 'INTEGER', 'NULLABLE', []),
    ])
    decoded = _JsonToDictCoder(schema).decode(
        b'{"repeated": ["55.5", "65.5"], "integer": "10"}')
    self.assertEqual({'repeated': [55.5, 65.5], 'integer': 10}, decoded)
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestReadFromBigQuery(unittest.TestCase):
  """Tests for resolving the GCS export destination URI and the temp dataset
  used by ReadFromBigQuery."""
  @classmethod
  def setUpClass(cls):
    # Options subclass exposing --gcs_location as a ValueProvider, so the
    # tests can exercise both runtime-provided and unset values.
    class UserDefinedOptions(PipelineOptions):
      @classmethod
      def _add_argparse_args(cls, parser):
        parser.add_value_provider_argument('--gcs_location')
    cls.UserDefinedOptions = UserDefinedOptions
  def tearDown(self):
    # Reset runtime options to avoid side-effects caused by other tests.
    RuntimeValueProvider.set_runtime_options(None)
  def test_get_destination_uri_empty_runtime_vp(self):
    """An unset runtime gcs_location with no fallback raises ValueError."""
    with self.assertRaisesRegex(ValueError,
                                '^ReadFromBigQuery requires a GCS '
                                'location to be provided'):
      # Don't provide any runtime values.
      RuntimeValueProvider.set_runtime_options({})
      options = self.UserDefinedOptions()
      bigquery_export_destination_uri(
          options.gcs_location, None, uuid.uuid4().hex)
  def test_get_destination_uri_none(self):
    """No gcs_location and no temp_location at all raises ValueError."""
    with self.assertRaisesRegex(ValueError,
                                '^ReadFromBigQuery requires a GCS '
                                'location to be provided'):
      bigquery_export_destination_uri(None, None, uuid.uuid4().hex)
  def test_get_destination_uri_runtime_vp(self):
    """A gcs_location supplied at job-execution time shapes the export URI."""
    # Provide values at job-execution time.
    RuntimeValueProvider.set_runtime_options({'gcs_location': 'gs://bucket'})
    options = self.UserDefinedOptions()
    unique_id = uuid.uuid4().hex
    uri = bigquery_export_destination_uri(options.gcs_location, None, unique_id)
    self.assertEqual(
        uri, 'gs://bucket/' + unique_id + '/bigquery-table-dump-*.json')
  def test_get_destination_uri_static_vp(self):
    """A StaticValueProvider gcs_location shapes the export URI."""
    unique_id = uuid.uuid4().hex
    uri = bigquery_export_destination_uri(
        StaticValueProvider(str, 'gs://bucket'), None, unique_id)
    self.assertEqual(
        uri, 'gs://bucket/' + unique_id + '/bigquery-table-dump-*.json')
  def test_get_destination_uri_fallback_temp_location(self):
    """When gcs_location is unset, temp_location is used and a DEBUG logged."""
    # Don't provide any runtime values.
    RuntimeValueProvider.set_runtime_options({})
    options = self.UserDefinedOptions()
    with self.assertLogs('apache_beam.io.gcp.bigquery_read_internal',
                         level='DEBUG') as context:
      bigquery_export_destination_uri(
          options.gcs_location, 'gs://bucket', uuid.uuid4().hex)
    self.assertEqual(
        context.output,
        [
            'DEBUG:apache_beam.io.gcp.bigquery_read_internal:gcs_location is '
            'empty, using temp_location instead'
        ])
  @mock.patch.object(BigQueryWrapper, '_delete_dataset')
  @mock.patch('apache_beam.io.gcp.internal.clients.bigquery.BigqueryV2')
  def test_temp_dataset_location_is_configurable(self, api, delete_dataset):
    """A caller-supplied temp dataset is created, used, and cleaned up."""
    temp_dataset = bigquery.DatasetReference(
        projectId='temp-project', datasetId='bq_dataset')
    bq = BigQueryWrapper(client=api, temp_dataset_id=temp_dataset.datasetId)
    gcs_location = 'gs://gcs_location'
    # bq.get_or_create_dataset.return_value = temp_dataset
    c = beam.io.gcp.bigquery._CustomBigQuerySource(
        query='select * from test_table',
        gcs_location=gcs_location,
        validate=True,
        pipeline_options=beam.options.pipeline_options.PipelineOptions(),
        job_name='job_name',
        step_name='step_name',
        project='execution_project',
        **{'temp_dataset': temp_dataset})
    # Simulate a missing dataset (404) so setup must Insert it.
    api.datasets.Get.side_effect = HttpError({
        'status_code': 404, 'status': 404
    },
                                             '',
                                             '')
    c._setup_temporary_dataset(bq)
    api.datasets.Insert.assert_called_with(
        bigquery.BigqueryDatasetsInsertRequest(
            dataset=bigquery.Dataset(datasetReference=temp_dataset),
            projectId=temp_dataset.projectId))
    # Now pretend the dataset exists so cleanup takes the delete path.
    api.datasets.Get.return_value = temp_dataset
    api.datasets.Get.side_effect = None
    bq.clean_up_temporary_dataset(temp_dataset.projectId)
    delete_dataset.assert_called_with(
        temp_dataset.projectId, temp_dataset.datasetId, True)
    self.assertEqual(
        bq._get_temp_table(temp_dataset.projectId),
        bigquery.TableReference(
            projectId=temp_dataset.projectId,
            datasetId=temp_dataset.datasetId,
            tableId=BigQueryWrapper.TEMP_TABLE + bq._temporary_table_suffix))
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestBigQuerySink(unittest.TestCase):
  """Tests for the native BigQuerySink: display data and schema handling."""
  def test_table_spec_display_data(self):
    """A dataset.table spec appears in display data, validation defaults off."""
    sink = beam.io.BigQuerySink('dataset.table')
    dd = DisplayData.create_from(sink)
    expected_items = [
        DisplayDataItemMatcher('table', 'dataset.table'),
        DisplayDataItemMatcher('validation', False)
    ]
    hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
  def test_parse_schema_descriptor(self):
    """A 'name:TYPE, ...' schema string is parsed into typed fields."""
    sink = beam.io.BigQuerySink('dataset.table', schema='s:STRING, n:INTEGER')
    self.assertEqual(sink.table_reference.datasetId, 'dataset')
    self.assertEqual(sink.table_reference.tableId, 'table')
    result_schema = {
        field.name: field.type
        for field in sink.table_schema.fields
    }
    self.assertEqual({'n': 'INTEGER', 's': 'STRING'}, result_schema)
  def test_project_table_display_data(self):
    """A PROJECT:dataset.table spec is shown verbatim in display data."""
    # Local renamed from 'sinkq' to 'sink' for consistency with sibling tests.
    sink = beam.io.BigQuerySink('PROJECT:dataset.table')
    dd = DisplayData.create_from(sink)
    expected_items = [
        DisplayDataItemMatcher('table', 'PROJECT:dataset.table'),
        DisplayDataItemMatcher('validation', False)
    ]
    hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
  def test_simple_schema_as_json(self):
    """schema_as_json serializes a flat schema; fields default to NULLABLE."""
    sink = beam.io.BigQuerySink(
        'PROJECT:dataset.table', schema='s:STRING, n:INTEGER')
    self.assertEqual(
        json.dumps({
            'fields': [{
                'name': 's', 'type': 'STRING', 'mode': 'NULLABLE'
            }, {
                'name': 'n', 'type': 'INTEGER', 'mode': 'NULLABLE'
            }]
        }),
        sink.schema_as_json())
  def test_nested_schema_as_json(self):
    """schema_as_json round-trips a nested RECORD schema with descriptions."""
    string_field = bigquery.TableFieldSchema(
        name='s', type='STRING', mode='NULLABLE', description='s description')
    number_field = bigquery.TableFieldSchema(
        name='n', type='INTEGER', mode='REQUIRED', description='n description')
    record_field = bigquery.TableFieldSchema(
        name='r',
        type='RECORD',
        mode='REQUIRED',
        description='r description',
        fields=[string_field, number_field])
    schema = bigquery.TableSchema(fields=[record_field])
    sink = beam.io.BigQuerySink('dataset.table', schema=schema)
    self.assertEqual({
        'fields': [{
            'name': 'r',
            'type': 'RECORD',
            'mode': 'REQUIRED',
            'description': 'r description',
            'fields': [{
                'name': 's',
                'type': 'STRING',
                'mode': 'NULLABLE',
                'description': 's description'
            },
                       {
                           'name': 'n',
                           'type': 'INTEGER',
                           'mode': 'REQUIRED',
                           'description': 'n description'
                       }]
        }]
    },
                     json.loads(sink.schema_as_json()))
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestWriteToBigQuery(unittest.TestCase):
  """Tests for WriteToBigQuery schema parsing and transform serialization."""
  def _cleanup_files(self):
    # Remove files that streaming-insert test callbacks may leave in cwd.
    if os.path.exists('insert_calls1'):
      os.remove('insert_calls1')
    if os.path.exists('insert_calls2'):
      os.remove('insert_calls2')
  def setUp(self):
    self._cleanup_files()
  def tearDown(self):
    self._cleanup_files()
  def test_noop_schema_parsing(self):
    """A None schema stays None (schema may arrive via side inputs)."""
    expected_table_schema = None
    table_schema = beam.io.gcp.bigquery.BigQueryWriteFn.get_table_schema(
        schema=None)
    self.assertEqual(expected_table_schema, table_schema)
  def test_dict_schema_parsing(self):
    """A dict schema converts into an equivalent bigquery.TableSchema."""
    schema = {
        'fields': [{
            'name': 's', 'type': 'STRING', 'mode': 'NULLABLE'
        }, {
            'name': 'n', 'type': 'INTEGER', 'mode': 'NULLABLE'
        },
                   {
                       'name': 'r',
                       'type': 'RECORD',
                       'mode': 'NULLABLE',
                       'fields': [{
                           'name': 'x', 'type': 'INTEGER', 'mode': 'NULLABLE'
                       }]
                   }]
    }
    table_schema = beam.io.gcp.bigquery.BigQueryWriteFn.get_table_schema(schema)
    string_field = bigquery.TableFieldSchema(
        name='s', type='STRING', mode='NULLABLE')
    nested_field = bigquery.TableFieldSchema(
        name='x', type='INTEGER', mode='NULLABLE')
    number_field = bigquery.TableFieldSchema(
        name='n', type='INTEGER', mode='NULLABLE')
    record_field = bigquery.TableFieldSchema(
        name='r', type='RECORD', mode='NULLABLE', fields=[nested_field])
    expected_table_schema = bigquery.TableSchema(
        fields=[string_field, number_field, record_field])
    self.assertEqual(expected_table_schema, table_schema)
  def test_string_schema_parsing(self):
    """A 'name:TYPE, ...' string becomes a dict schema with NULLABLE mode."""
    schema = 's:STRING, n:INTEGER'
    expected_dict_schema = {
        'fields': [{
            'name': 's', 'type': 'STRING', 'mode': 'NULLABLE'
        }, {
            'name': 'n', 'type': 'INTEGER', 'mode': 'NULLABLE'
        }]
    }
    dict_schema = (
        beam.io.gcp.bigquery.WriteToBigQuery.get_dict_table_schema(schema))
    self.assertEqual(expected_dict_schema, dict_schema)
  def test_table_schema_parsing(self):
    """A bigquery.TableSchema converts into an equivalent dict schema."""
    string_field = bigquery.TableFieldSchema(
        name='s', type='STRING', mode='NULLABLE')
    nested_field = bigquery.TableFieldSchema(
        name='x', type='INTEGER', mode='NULLABLE')
    number_field = bigquery.TableFieldSchema(
        name='n', type='INTEGER', mode='NULLABLE')
    record_field = bigquery.TableFieldSchema(
        name='r', type='RECORD', mode='NULLABLE', fields=[nested_field])
    schema = bigquery.TableSchema(
        fields=[string_field, number_field, record_field])
    expected_dict_schema = {
        'fields': [{
            'name': 's', 'type': 'STRING', 'mode': 'NULLABLE'
        }, {
            'name': 'n', 'type': 'INTEGER', 'mode': 'NULLABLE'
        },
                   {
                       'name': 'r',
                       'type': 'RECORD',
                       'mode': 'NULLABLE',
                       'fields': [{
                           'name': 'x', 'type': 'INTEGER', 'mode': 'NULLABLE'
                       }]
                   }]
    }
    dict_schema = (
        beam.io.gcp.bigquery.WriteToBigQuery.get_dict_table_schema(schema))
    self.assertEqual(expected_dict_schema, dict_schema)
  def test_table_schema_parsing_end_to_end(self):
    """TableSchema -> dict -> TableSchema round-trips to the original."""
    string_field = bigquery.TableFieldSchema(
        name='s', type='STRING', mode='NULLABLE')
    nested_field = bigquery.TableFieldSchema(
        name='x', type='INTEGER', mode='NULLABLE')
    number_field = bigquery.TableFieldSchema(
        name='n', type='INTEGER', mode='NULLABLE')
    record_field = bigquery.TableFieldSchema(
        name='r', type='RECORD', mode='NULLABLE', fields=[nested_field])
    schema = bigquery.TableSchema(
        fields=[string_field, number_field, record_field])
    table_schema = beam.io.gcp.bigquery.BigQueryWriteFn.get_table_schema(
        beam.io.gcp.bigquery.WriteToBigQuery.get_dict_table_schema(schema))
    self.assertEqual(table_schema, schema)
  def test_none_schema_parsing(self):
    """A None schema converts to None (no dict produced)."""
    schema = None
    expected_dict_schema = None
    dict_schema = (
        beam.io.gcp.bigquery.WriteToBigQuery.get_dict_table_schema(schema))
    self.assertEqual(expected_dict_schema, dict_schema)
  def test_noop_dict_schema_parsing(self):
    """A dict schema is passed through unchanged."""
    schema = {
        'fields': [{
            'name': 's', 'type': 'STRING', 'mode': 'NULLABLE'
        }, {
            'name': 'n', 'type': 'INTEGER', 'mode': 'NULLABLE'
        }]
    }
    expected_dict_schema = schema
    dict_schema = (
        beam.io.gcp.bigquery.WriteToBigQuery.get_dict_table_schema(schema))
    self.assertEqual(expected_dict_schema, dict_schema)
  def test_schema_autodetect_not_allowed_with_avro_file_loads(self):
    """AVRO temp files require an explicit schema; autodetect must raise."""
    with TestPipeline() as p:
      pc = p | beam.Impulse()
      with self.assertRaisesRegex(ValueError, '^A schema must be provided'):
        _ = (
            pc
            | 'No Schema' >> beam.io.gcp.bigquery.WriteToBigQuery(
                "dataset.table",
                schema=None,
                temp_file_format=bigquery_tools.FileFormat.AVRO))
      with self.assertRaisesRegex(ValueError,
                                  '^Schema auto-detection is not supported'):
        _ = (
            pc
            | 'Schema Autodetected' >> beam.io.gcp.bigquery.WriteToBigQuery(
                "dataset.table",
                schema=beam.io.gcp.bigquery.SCHEMA_AUTODETECT,
                temp_file_format=bigquery_tools.FileFormat.AVRO))
  def test_to_from_runner_api(self):
    """Tests that serialization of WriteToBigQuery is correct.
    This is not intended to be a change-detector test. As such, this only tests
    the more complicated serialization logic of parameters: ValueProviders,
    callables, and side inputs.
    """
    FULL_OUTPUT_TABLE = 'test_project:output_table'
    p = TestPipeline()
    # Used for testing side input parameters.
    table_record_pcv = beam.pvalue.AsDict(
        p | "MakeTable" >> beam.Create([('table', FULL_OUTPUT_TABLE)]))
    # Used for testing value provider parameters.
    schema = value_provider.StaticValueProvider(str, '"a:str"')
    original = WriteToBigQuery(
        table=lambda _,
        side_input: side_input['table'],
        table_side_inputs=(table_record_pcv, ),
        schema=schema)
    # pylint: disable=expression-not-assigned
    p | 'MyWriteToBigQuery' >> original
    # Run the pipeline through to generate a pipeline proto from an empty
    # context. This ensures that the serialization code ran.
    pipeline_proto, context = TestPipeline.from_runner_api(
        p.to_runner_api(), p.runner, p.get_pipeline_options()).to_runner_api(
            return_context=True)
    # Find the transform from the context.
    write_to_bq_id = [
        k for k,
        v in pipeline_proto.components.transforms.items()
        if v.unique_name == 'MyWriteToBigQuery'
    ][0]
    deserialized_node = context.transforms.get_by_id(write_to_bq_id)
    deserialized = deserialized_node.transform
    self.assertIsInstance(deserialized, WriteToBigQuery)
    # Test that the serialization of a value provider is correct.
    self.assertEqual(original.schema, deserialized.schema)
    # Test that the serialization of a callable is correct.
    self.assertEqual(
        deserialized._table(None, {'table': FULL_OUTPUT_TABLE}),
        FULL_OUTPUT_TABLE)
    # Test that the serialization of a side input is correct.
    self.assertEqual(
        len(original.table_side_inputs), len(deserialized.table_side_inputs))
    original_side_input_data = original.table_side_inputs[0]._side_input_data()
    deserialized_side_input_data = deserialized.table_side_inputs[
        0]._side_input_data()
    self.assertEqual(
        original_side_input_data.access_pattern,
        deserialized_side_input_data.access_pattern)
    self.assertEqual(
        original_side_input_data.window_mapping_fn,
        deserialized_side_input_data.window_mapping_fn)
    self.assertEqual(
        original_side_input_data.view_fn, deserialized_side_input_data.view_fn)
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class BigQueryStreamingInsertTransformTests(unittest.TestCase):
  """Unit tests for BigQueryWriteFn batching and flushing (mocked client)."""
  def test_dofn_client_process_performs_batching(self):
    """With batch_size=2, a single row must not trigger an insert call."""
    client = mock.Mock()
    client.tables.Get.return_value = bigquery.Table(
        tableReference=bigquery.TableReference(
            projectId='project_id', datasetId='dataset_id', tableId='table_id'))
    client.insert_rows_json.return_value = []
    create_disposition = beam.io.BigQueryDisposition.CREATE_NEVER
    write_disposition = beam.io.BigQueryDisposition.WRITE_APPEND
    fn = beam.io.gcp.bigquery.BigQueryWriteFn(
        batch_size=2,
        create_disposition=create_disposition,
        write_disposition=write_disposition,
        kms_key=None,
        test_client=client)
    fn.process(('project_id:dataset_id.table_id', {'month': 1}))
    # InsertRows not called as batch size is not hit yet
    self.assertFalse(client.insert_rows_json.called)
  def test_dofn_client_process_flush_called(self):
    """Hitting batch_size within a bundle triggers an immediate flush."""
    client = mock.Mock()
    client.tables.Get.return_value = bigquery.Table(
        tableReference=bigquery.TableReference(
            projectId='project_id', datasetId='dataset_id', tableId='table_id'))
    client.insert_rows_json.return_value = []
    create_disposition = beam.io.BigQueryDisposition.CREATE_NEVER
    write_disposition = beam.io.BigQueryDisposition.WRITE_APPEND
    fn = beam.io.gcp.bigquery.BigQueryWriteFn(
        batch_size=2,
        create_disposition=create_disposition,
        write_disposition=write_disposition,
        kms_key=None,
        test_client=client)
    fn.start_bundle()
    fn.process(('project_id:dataset_id.table_id', ({'month': 1}, 'insertid1')))
    fn.process(('project_id:dataset_id.table_id', ({'month': 2}, 'insertid2')))
    # InsertRows called as batch size is hit
    self.assertTrue(client.insert_rows_json.called)
  def test_dofn_client_finish_bundle_flush_called(self):
    """Rows buffered below batch_size are flushed by finish_bundle."""
    client = mock.Mock()
    client.tables.Get.return_value = bigquery.Table(
        tableReference=bigquery.TableReference(
            projectId='project_id', datasetId='dataset_id', tableId='table_id'))
    client.insert_rows_json.return_value = []
    create_disposition = beam.io.BigQueryDisposition.CREATE_IF_NEEDED
    write_disposition = beam.io.BigQueryDisposition.WRITE_APPEND
    fn = beam.io.gcp.bigquery.BigQueryWriteFn(
        batch_size=2,
        create_disposition=create_disposition,
        write_disposition=write_disposition,
        kms_key=None,
        test_client=client)
    fn.start_bundle()
    # Destination is a tuple of (destination, schema) to ensure the table is
    # created.
    fn.process(('project_id:dataset_id.table_id', ({'month': 1}, 'insertid3')))
    self.assertTrue(client.tables.Get.called)
    # InsertRows not called as batch size is not hit
    self.assertFalse(client.insert_rows_json.called)
    fn.finish_bundle()
    # InsertRows called in finish bundle
    self.assertTrue(client.insert_rows_json.called)
  def test_dofn_client_no_records(self):
    """An empty bundle must not issue any insert calls."""
    client = mock.Mock()
    client.tables.Get.return_value = bigquery.Table(
        tableReference=bigquery.TableReference(
            projectId='project_id', datasetId='dataset_id', tableId='table_id'))
    client.tabledata.InsertAll.return_value = \
      bigquery.TableDataInsertAllResponse(insertErrors=[])
    create_disposition = beam.io.BigQueryDisposition.CREATE_NEVER
    write_disposition = beam.io.BigQueryDisposition.WRITE_APPEND
    fn = beam.io.gcp.bigquery.BigQueryWriteFn(
        batch_size=2,
        create_disposition=create_disposition,
        write_disposition=write_disposition,
        kms_key=None,
        test_client=client)
    fn.start_bundle()
    # InsertRows not called as batch size is not hit
    self.assertFalse(client.tabledata.InsertAll.called)
    fn.finish_bundle()
    # InsertRows not called in finish bundle as no records
    self.assertFalse(client.tabledata.InsertAll.called)
  def test_with_batched_input(self):
    """With with_batched_input=True a pre-batched element inserts at once."""
    client = mock.Mock()
    client.tables.Get.return_value = bigquery.Table(
        tableReference=bigquery.TableReference(
            projectId='project_id', datasetId='dataset_id', tableId='table_id'))
    client.insert_rows_json.return_value = []
    create_disposition = beam.io.BigQueryDisposition.CREATE_IF_NEEDED
    write_disposition = beam.io.BigQueryDisposition.WRITE_APPEND
    fn = beam.io.gcp.bigquery.BigQueryWriteFn(
        batch_size=10,
        create_disposition=create_disposition,
        write_disposition=write_disposition,
        kms_key=None,
        with_batched_input=True,
        test_client=client)
    fn.start_bundle()
    # Destination is a tuple of (destination, schema) to ensure the table is
    # created.
    fn.process((
        'project_id:dataset_id.table_id',
        [({
            'month': 1
        }, 'insertid3'), ({
            'month': 2
        }, 'insertid2'), ({
            'month': 3
        }, 'insertid1')]))
    # InsertRows called since the input is already batched.
    self.assertTrue(client.insert_rows_json.called)
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class PipelineBasedStreamingInsertTest(_TestCaseWithTempDirCleanUp):
  """Pipeline-level streaming-insert tests driven by a mocked BQ client."""
  def test_failure_has_same_insert_ids(self):
    """A retried bundle must reuse the same insert ids (dedup on retry)."""
    tempdir = '%s%s' % (self._new_tempdir(), os.sep)
    file_name_1 = os.path.join(tempdir, 'file1')
    file_name_2 = os.path.join(tempdir, 'file2')
    # Records each insert attempt to a file; the first attempt fails so the
    # runner retries, and the second attempt is recorded separately.
    def store_callback(table, **kwargs):
      insert_ids = [r for r in kwargs['row_ids']]
      colA_values = [r['columnA'] for r in kwargs['json_rows']]
      json_output = {'insertIds': insert_ids, 'colA_values': colA_values}
      # The first time we try to insert, we save those insertions in
      # file insert_calls1.
      if not os.path.exists(file_name_1):
        with open(file_name_1, 'w') as f:
          json.dump(json_output, f)
        raise RuntimeError()
      else:
        with open(file_name_2, 'w') as f:
          json.dump(json_output, f)
      return []
    client = mock.Mock()
    client.insert_rows_json = mock.Mock(side_effect=store_callback)
    # Using the bundle based direct runner to avoid pickling problems
    # with mocks.
    with beam.Pipeline(runner='BundleBasedDirectRunner') as p:
      _ = (
          p
          | beam.Create([{
              'columnA': 'value1', 'columnB': 'value2'
          }, {
              'columnA': 'value3', 'columnB': 'value4'
          }, {
              'columnA': 'value5', 'columnB': 'value6'
          }])
          | _StreamToBigQuery(
              table_reference='project:dataset.table',
              table_side_inputs=[],
              schema_side_inputs=[],
              schema='anyschema',
              batch_size=None,
              create_disposition='CREATE_NEVER',
              write_disposition=None,
              kms_key=None,
              retry_strategy=None,
              additional_bq_parameters=[],
              ignore_insert_ids=False,
              with_auto_sharding=False,
              test_client=client))
    # Both attempts must have recorded identical ids and values.
    with open(file_name_1) as f1, open(file_name_2) as f2:
      self.assertEqual(json.load(f1), json.load(f2))
  @parameterized.expand([
      param(with_auto_sharding=False),
      param(with_auto_sharding=True),
  ])
  def test_batch_size_with_auto_sharding(self, with_auto_sharding):
    """batch_size=2 splits three input rows into two insert calls."""
    tempdir = '%s%s' % (self._new_tempdir(), os.sep)
    file_name_1 = os.path.join(tempdir, 'file1')
    file_name_2 = os.path.join(tempdir, 'file2')
    def store_callback(table, **kwargs):
      insert_ids = [r for r in kwargs['row_ids']]
      colA_values = [r['columnA'] for r in kwargs['json_rows']]
      json_output = {'insertIds': insert_ids, 'colA_values': colA_values}
      # Expect two batches of rows will be inserted. Store them separately.
      if not os.path.exists(file_name_1):
        with open(file_name_1, 'w') as f:
          json.dump(json_output, f)
      else:
        with open(file_name_2, 'w') as f:
          json.dump(json_output, f)
      return []
    client = mock.Mock()
    client.insert_rows_json = mock.Mock(side_effect=store_callback)
    # Using the bundle based direct runner to avoid pickling problems
    # with mocks.
    with beam.Pipeline(runner='BundleBasedDirectRunner') as p:
      _ = (
          p
          | beam.Create([{
              'columnA': 'value1', 'columnB': 'value2'
          }, {
              'columnA': 'value3', 'columnB': 'value4'
          }, {
              'columnA': 'value5', 'columnB': 'value6'
          }])
          | _StreamToBigQuery(
              table_reference='project:dataset.table',
              table_side_inputs=[],
              schema_side_inputs=[],
              schema='anyschema',
              # Set a batch size such that the input elements will be inserted
              # in 2 batches.
              batch_size=2,
              create_disposition='CREATE_NEVER',
              write_disposition=None,
              kms_key=None,
              retry_strategy=None,
              additional_bq_parameters=[],
              ignore_insert_ids=False,
              with_auto_sharding=with_auto_sharding,
              test_client=client))
    with open(file_name_1) as f1, open(file_name_2) as f2:
      out1 = json.load(f1)
      self.assertEqual(out1['colA_values'], ['value1', 'value3'])
      out2 = json.load(f2)
      self.assertEqual(out2['colA_values'], ['value5'])
class BigQueryStreamingInsertTransformIntegrationTests(unittest.TestCase):
  """Integration tests writing to real BigQuery via streaming inserts."""
  BIG_QUERY_DATASET_ID = 'python_bq_streaming_inserts_'
  # Prevent nose from finding and running tests that were not
  # specified in the Gradle file.
  # See "More tests may be found" in:
  # https://nose.readthedocs.io/en/latest/doc_tests/test_multiprocess
  # /multiprocess.html#other-differences-in-test-running
  _multiprocess_can_split_ = True
  def setUp(self):
    """Create a uniquely-named dataset for this test run."""
    self.test_pipeline = TestPipeline(is_integration_test=True)
    self.runner_name = type(self.test_pipeline.runner).__name__
    self.project = self.test_pipeline.get_option('project')
    # Timestamp + random suffix keeps concurrent runs from colliding.
    self.dataset_id = '%s%s%d' % (
        self.BIG_QUERY_DATASET_ID,
        str(int(time.time())),
        random.randint(0, 10000))
    self.bigquery_client = bigquery_tools.BigQueryWrapper()
    self.bigquery_client.get_or_create_dataset(self.project, self.dataset_id)
    self.output_table = "%s.output_table" % (self.dataset_id)
    _LOGGER.info(
        "Created dataset %s in project %s", self.dataset_id, self.project)
  @attr('IT')
  def test_value_provider_transform(self):
    """Table/schema given as ValueProviders write to both destinations."""
    output_table_1 = '%s%s' % (self.output_table, 1)
    output_table_2 = '%s%s' % (self.output_table, 2)
    schema = {
        'fields': [{
            'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'
        }, {
            'name': 'language', 'type': 'STRING', 'mode': 'NULLABLE'
        }]
    }
    additional_bq_parameters = {
        'timePartitioning': {
            'type': 'DAY'
        },
        'clustering': {
            'fields': ['language']
        }
    }
    table_ref = bigquery_tools.parse_table_reference(output_table_1)
    table_ref2 = bigquery_tools.parse_table_reference(output_table_2)
    # Verify both table properties (partitioning/clustering) and contents.
    pipeline_verifiers = [
        BigQueryTableMatcher(
            project=self.project,
            dataset=table_ref.datasetId,
            table=table_ref.tableId,
            expected_properties=additional_bq_parameters),
        BigQueryTableMatcher(
            project=self.project,
            dataset=table_ref2.datasetId,
            table=table_ref2.tableId,
            expected_properties=additional_bq_parameters),
        BigqueryFullResultMatcher(
            project=self.project,
            query="SELECT name, language FROM %s" % output_table_1,
            data=[(d['name'], d['language']) for d in _ELEMENTS
                  if 'language' in d]),
        BigqueryFullResultMatcher(
            project=self.project,
            query="SELECT name, language FROM %s" % output_table_2,
            data=[(d['name'], d['language']) for d in _ELEMENTS
                  if 'language' in d])
    ]
    args = self.test_pipeline.get_full_options_as_args(
        on_success_matcher=hc.all_of(*pipeline_verifiers))
    with beam.Pipeline(argv=args) as p:
      input = p | beam.Create([row for row in _ELEMENTS if 'language' in row])
      _ = (
          input
          | "WriteWithMultipleDests" >> beam.io.gcp.bigquery.WriteToBigQuery(
              table=value_provider.StaticValueProvider(
                  str, '%s:%s' % (self.project, output_table_1)),
              schema=value_provider.StaticValueProvider(dict, schema),
              additional_bq_parameters=additional_bq_parameters,
              method='STREAMING_INSERTS'))
      _ = (
          input
          | "WriteWithMultipleDests2" >> beam.io.gcp.bigquery.WriteToBigQuery(
              table=value_provider.StaticValueProvider(
                  str, '%s:%s' % (self.project, output_table_2)),
              schema=beam.io.gcp.bigquery.SCHEMA_AUTODETECT,
              additional_bq_parameters=lambda _: additional_bq_parameters,
              method='FILE_LOADS'))
  @attr('IT')
  def test_multiple_destinations_transform(self):
    """Dynamic destinations route rows per element; bad rows hit FAILED_ROWS."""
    streaming = self.test_pipeline.options.view_as(StandardOptions).streaming
    if streaming and isinstance(self.test_pipeline.runner, TestDataflowRunner):
      self.skipTest("TestStream is not supported on TestDataflowRunner")
    output_table_1 = '%s%s' % (self.output_table, 1)
    output_table_2 = '%s%s' % (self.output_table, 2)
    full_output_table_1 = '%s:%s' % (self.project, output_table_1)
    full_output_table_2 = '%s:%s' % (self.project, output_table_2)
    schema1 = {
        'fields': [{
            'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'
        }, {
            'name': 'language', 'type': 'STRING', 'mode': 'NULLABLE'
        }]
    }
    schema2 = {
        'fields': [{
            'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'
        }, {
            'name': 'foundation', 'type': 'STRING', 'mode': 'NULLABLE'
        }]
    }
    # Matches neither schema, so streaming insert of this row must fail.
    bad_record = {'language': 1, 'manguage': 2}
    if streaming:
      pipeline_verifiers = [
          PipelineStateMatcher(PipelineState.RUNNING),
          BigqueryFullResultStreamingMatcher(
              project=self.project,
              query="SELECT name, language FROM %s" % output_table_1,
              data=[(d['name'], d['language']) for d in _ELEMENTS
                    if 'language' in d]),
          BigqueryFullResultStreamingMatcher(
              project=self.project,
              query="SELECT name, foundation FROM %s" % output_table_2,
              data=[(d['name'], d['foundation']) for d in _ELEMENTS
                    if 'foundation' in d])
      ]
    else:
      pipeline_verifiers = [
          BigqueryFullResultMatcher(
              project=self.project,
              query="SELECT name, language FROM %s" % output_table_1,
              data=[(d['name'], d['language']) for d in _ELEMENTS
                    if 'language' in d]),
          BigqueryFullResultMatcher(
              project=self.project,
              query="SELECT name, foundation FROM %s" % output_table_2,
              data=[(d['name'], d['foundation']) for d in _ELEMENTS
                    if 'foundation' in d])
      ]
    args = self.test_pipeline.get_full_options_as_args(
        on_success_matcher=hc.all_of(*pipeline_verifiers))
    with beam.Pipeline(argv=args) as p:
      if streaming:
        _SIZE = len(_ELEMENTS)
        test_stream = (
            TestStream().advance_watermark_to(0).add_elements(
                _ELEMENTS[:_SIZE // 2]).advance_watermark_to(100).add_elements(
                    _ELEMENTS[_SIZE // 2:]).advance_watermark_to_infinity())
        input = p | test_stream
      else:
        input = p | beam.Create(_ELEMENTS)
      # Side inputs mapping destination tables to schemas / table names.
      schema_table_pcv = beam.pvalue.AsDict(
          p | "MakeSchemas" >> beam.Create([(full_output_table_1, schema1),
                                            (full_output_table_2, schema2)]))
      table_record_pcv = beam.pvalue.AsDict(
          p | "MakeTables" >> beam.Create([('table1', full_output_table_1),
                                           ('table2', full_output_table_2)]))
      input2 = p | "Broken record" >> beam.Create([bad_record])
      input = (input, input2) | beam.Flatten()
      r = (
          input
          | "WriteWithMultipleDests" >> beam.io.gcp.bigquery.WriteToBigQuery(
              table=lambda x,
              tables:
              (tables['table1'] if 'language' in x else tables['table2']),
              table_side_inputs=(table_record_pcv, ),
              schema=lambda dest,
              table_map: table_map.get(dest, None),
              schema_side_inputs=(schema_table_pcv, ),
              insert_retry_strategy=RetryStrategy.RETRY_ON_TRANSIENT_ERROR,
              method='STREAMING_INSERTS'))
      assert_that(
          r[beam.io.gcp.bigquery.BigQueryWriteFn.FAILED_ROWS],
          equal_to([(full_output_table_1, bad_record)]))
  def tearDown(self):
    """Delete the per-run dataset; best-effort (logs on failure)."""
    request = bigquery.BigqueryDatasetsDeleteRequest(
        projectId=self.project, datasetId=self.dataset_id, deleteContents=True)
    try:
      _LOGGER.info(
          "Deleting dataset %s in project %s", self.dataset_id, self.project)
      self.bigquery_client.client.datasets.Delete(request)
    except HttpError:
      _LOGGER.debug(
          'Failed to clean up dataset %s in project %s',
          self.dataset_id,
          self.project)
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class PubSubBigQueryIT(unittest.TestCase):
INPUT_TOPIC = 'psit_topic_output'
INPUT_SUB = 'psit_subscription_input'
BIG_QUERY_DATASET_ID = 'python_pubsub_bq_'
SCHEMA = {
'fields': [{
'name': 'number', 'type': 'INTEGER', 'mode': 'NULLABLE'
}]
}
_SIZE = 4
WAIT_UNTIL_FINISH_DURATION = 15 * 60 * 1000
def setUp(self):
# Set up PubSub
self.test_pipeline = TestPipeline(is_integration_test=True)
self.runner_name = type(self.test_pipeline.runner).__name__
self.project = self.test_pipeline.get_option('project')
self.uuid = str(uuid.uuid4())
from google.cloud import pubsub
self.pub_client = pubsub.PublisherClient()
self.input_topic = self.pub_client.create_topic(
self.pub_client.topic_path(self.project, self.INPUT_TOPIC + self.uuid))
self.sub_client = pubsub.SubscriberClient()
self.input_sub = self.sub_client.create_subscription(
self.sub_client.subscription_path(
self.project, self.INPUT_SUB + self.uuid),
self.input_topic.name)
# Set up BQ
self.dataset_ref = utils.create_bq_dataset(
self.project, self.BIG_QUERY_DATASET_ID)
self.output_table = "%s.output_table" % (self.dataset_ref.dataset_id)
def tearDown(self):
# Tear down PubSub
test_utils.cleanup_topics(self.pub_client, [self.input_topic])
test_utils.cleanup_subscriptions(self.sub_client, [self.input_sub])
# Tear down BigQuery
utils.delete_bq_dataset(self.project, self.dataset_ref)
def _run_pubsub_bq_pipeline(self, method, triggering_frequency=None):
l = [i for i in range(self._SIZE)]
matchers = [
PipelineStateMatcher(PipelineState.RUNNING),
BigqueryFullResultStreamingMatcher(
project=self.project,
query="SELECT number FROM %s" % self.output_table,
data=[(i, ) for i in l])
]
args = self.test_pipeline.get_full_options_as_args(
on_success_matcher=hc.all_of(*matchers),
wait_until_finish_duration=self.WAIT_UNTIL_FINISH_DURATION,
streaming=True,
allow_unsafe_triggers=True)
def add_schema_info(element):
yield {'number': element}
messages = [str(i).encode('utf-8') for i in l]
for message in messages:
self.pub_client.publish(self.input_topic.name, message)
with beam.Pipeline(argv=args) as p:
mesages = (
p
| ReadFromPubSub(subscription=self.input_sub.name)
| beam.ParDo(add_schema_info))
_ = mesages | WriteToBigQuery(
self.output_table,
schema=self.SCHEMA,
method=method,
triggering_frequency=triggering_frequency)
@attr('IT')
def test_streaming_inserts(self):
  # End-to-end check of the streaming-inserts write path.
  self._run_pubsub_bq_pipeline(WriteToBigQuery.Method.STREAMING_INSERTS)
@attr('IT')
def test_file_loads(self):
  # End-to-end check of the file-loads write path with periodic triggering.
  self._run_pubsub_bq_pipeline(
      WriteToBigQuery.Method.FILE_LOADS, triggering_frequency=20)
class BigQueryFileLoadsIntegrationTests(unittest.TestCase):
  """Integration tests for WriteToBigQuery's FILE_LOADS path."""

  BIG_QUERY_DATASET_ID = 'python_bq_file_loads_'

  def setUp(self):
    """Create a uniquely named BigQuery dataset for this test run."""
    self.test_pipeline = TestPipeline(is_integration_test=True)
    self.runner_name = type(self.test_pipeline.runner).__name__
    self.project = self.test_pipeline.get_option('project')
    # Timestamp + random suffix keeps concurrent runs from colliding.
    self.dataset_id = '%s%s%s' % (
        self.BIG_QUERY_DATASET_ID,
        str(int(time.time())),
        random.randint(0, 10000))
    self.bigquery_client = bigquery_tools.BigQueryWrapper()
    self.bigquery_client.get_or_create_dataset(self.project, self.dataset_id)
    self.output_table = '%s.output_table' % (self.dataset_id)
    self.table_ref = bigquery_tools.parse_table_reference(self.output_table)
    _LOGGER.info(
        'Created dataset %s in project %s', self.dataset_id, self.project)

  @attr('IT')
  def test_avro_file_load(self):
    # Construct elements such that they can be written via Avro but not via
    # JSON. See BEAM-8841.
    from apache_beam.io.gcp import bigquery_file_loads

    # Patch the module knobs so each element lands in its own tiny file,
    # forcing multiple load jobs through the Avro path.
    old_max_files = bigquery_file_loads._MAXIMUM_SOURCE_URIS
    old_max_file_size = bigquery_file_loads._DEFAULT_MAX_FILE_SIZE
    bigquery_file_loads._MAXIMUM_SOURCE_URIS = 1
    bigquery_file_loads._DEFAULT_MAX_FILE_SIZE = 100

    elements = [
        {
            'name': u'Negative infinity',
            'value': -float('inf'),
            'timestamp': datetime.datetime(1970, 1, 1, tzinfo=pytz.utc),
        },
        {
            'name': u'Not a number',
            'value': float('nan'),
            'timestamp': datetime.datetime(2930, 12, 9, tzinfo=pytz.utc),
        },
    ]

    schema = beam.io.gcp.bigquery.WriteToBigQuery.get_dict_table_schema(
        bigquery.TableSchema(
            fields=[
                bigquery.TableFieldSchema(
                    name='name', type='STRING', mode='REQUIRED'),
                bigquery.TableFieldSchema(
                    name='value', type='FLOAT', mode='REQUIRED'),
                bigquery.TableFieldSchema(
                    name='timestamp', type='TIMESTAMP', mode='REQUIRED'),
            ]))

    pipeline_verifiers = [
        # Some gymnastics here to avoid comparing NaN since NaN is not equal to
        # anything, including itself.
        BigqueryFullResultMatcher(
            project=self.project,
            query="SELECT name, value, timestamp FROM {} WHERE value<0".format(
                self.output_table),
            data=[(d['name'], d['value'], d['timestamp'])
                  for d in elements[:1]],
        ),
        BigqueryFullResultMatcher(
            project=self.project,
            query="SELECT name, timestamp FROM {}".format(self.output_table),
            data=[(d['name'], d['timestamp']) for d in elements],
        ),
    ]

    args = self.test_pipeline.get_full_options_as_args(
        on_success_matcher=hc.all_of(*pipeline_verifiers),
    )

    try:
      with beam.Pipeline(argv=args) as p:
        input = p | 'CreateInput' >> beam.Create(elements)
        schema_pc = p | 'CreateSchema' >> beam.Create([schema])
        _ = (
            input
            | 'WriteToBigQuery' >> beam.io.gcp.bigquery.WriteToBigQuery(
                table='%s:%s' % (self.project, self.output_table),
                schema=lambda _,
                schema: schema,
                schema_side_inputs=(beam.pvalue.AsSingleton(schema_pc), ),
                method='FILE_LOADS',
                temp_file_format=bigquery_tools.FileFormat.AVRO,
            ))
    finally:
      # BUGFIX: restore the patched module globals even when the pipeline
      # raises; previously they were only restored on success, leaking the
      # tiny-file settings into later tests in the same process.
      bigquery_file_loads._MAXIMUM_SOURCE_URIS = old_max_files
      bigquery_file_loads._DEFAULT_MAX_FILE_SIZE = old_max_file_size

  def tearDown(self):
    """Best-effort delete of the dataset created in setUp."""
    request = bigquery.BigqueryDatasetsDeleteRequest(
        projectId=self.project, datasetId=self.dataset_id, deleteContents=True)
    try:
      _LOGGER.info(
          "Deleting dataset %s in project %s", self.dataset_id, self.project)
      self.bigquery_client.client.datasets.Delete(request)
    except HttpError:
      # Cleanup failures should not fail the test itself.
      _LOGGER.debug(
          'Failed to clean up dataset %s in project %s',
          self.dataset_id,
          self.project)
if __name__ == '__main__':
  # Surface pipeline progress logs when the tests are run directly.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
|
|
# MIT License
#
# Copyright (c) 2020 Tri Minh Cao
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Useful functions for DEF/LEF parsers.
Author: Tri Minh Cao
Email: tricao@utdallas.edu
Date: August 2016
"""
SCALE = 2000
import math
def nCr(n, r):
    """Return the binomial coefficient C(n, r).

    Uses integer floor division so the result is exact for large n;
    the previous true-division form returned a float and could lose
    precision once the factorials exceed 2**53.

    :param n: total number of items (non-negative int)
    :param r: number of items chosen (0 <= r <= n)
    :return: number of combinations, as an int
    """
    f = math.factorial
    # n! / r! and the subsequent division are both exact, so floor
    # division never truncates a fractional part here.
    return f(n) // f(r) // f(n - r)
def str_to_list(s):
    """Split a whitespace-separated string into a list of words.

    :param s: input string
    :return: list of words
    """
    # str.split() with no argument splits on any run of whitespace and
    # ignores leading/trailing whitespace, so no extra cleanup is needed.
    return s.split()
def scalePts(pts, alpha):
    """Scale every point in a list by the factor alpha.

    :param pts: iterable of (x, y) points (index 0 and 1 are used)
    :param alpha: scale factor
    :return: new list of scaled [x, y] points
    """
    return [[alpha * pt[0], alpha * pt[1]] for pt in pts]
def rect_to_polygon(rect_pts):
    """Convert a two-corner rectangle into four polygon corner points.

    :param rect_pts: [(x1, y1), (x2, y2)] opposite rectangle corners
    :return: list of four [x, y] corners, suitable for plotting
    """
    x1, y1 = rect_pts[0][0], rect_pts[0][1]
    x2, y2 = rect_pts[1][0], rect_pts[1][1]
    # Corner order matches the original: (x1,y1) -> (x1,y2) -> (x2,y2) -> (x2,y1)
    return [[x1, y1], [x1, y2], [x2, y2], [x2, y1]]
def split_parentheses(info):
    """Collapse every "(" ... ")" run of tokens into a nested sub-list.

    Tokens outside parentheses are copied through unchanged; each
    parenthesised run becomes one list element in the output.

    :param info: list of string tokens
    :return: new list with parenthesised runs replaced by sub-lists
    """
    result = []
    group = None  # active group while inside parentheses, else None
    for token in info:
        if token == "(":
            if group is None:
                group = []
        elif token == ")":
            result.append(group if group is not None else [])
            group = None
        elif group is not None:
            group.append(token)
        else:
            result.append(token)
    return result
def split_plus(line):
    """Split a line on the "+" (plus) sign.

    :param line: input string
    :return: list of substrings between "+" signs
    """
    return line.split("+")
def split_space(line):
    """Split a line on runs of whitespace.

    :param line: input string
    :return: list of whitespace-separated tokens
    """
    return line.split()
def compare_metal(metal_a, metal_b):
    """Compare two metal-layer names like a classic comparator.

    "poly" sorts below every metal layer; two metal layers compare by
    their numeric suffix.

    :param metal_a: first layer name ("poly" or "metalN")
    :param metal_b: second layer name ("poly" or "metalN")
    :return: negative, zero or positive integer
    """
    a_is_poly = metal_a == "poly"
    b_is_poly = metal_b == "poly"
    if a_is_poly and b_is_poly:
        return 0
    if a_is_poly:
        return -1
    if b_is_poly:
        return 1
    return get_metal_num(metal_a) - get_metal_num(metal_b)
def get_metal_num(metal):
    """Extract the layer number from a metal name such as "metal10".

    :param metal: string of the form "metal<N>"
    :return: the trailing layer number as an int
    """
    # Everything after the literal "metal" prefix is the layer number.
    return int(metal[len("metal"):])
def inside_area(location, corners):
    """Check whether a point lies strictly inside a rectangle.

    :param location: (x, y) point
    :param corners: [(x1, y1), (x2, y2)] opposite rectangle corners
    :return: True if the point is strictly inside (boundary excluded)
    """
    x, y = location[0], location[1]
    x1, y1 = corners[0][0], corners[0][1]
    x2, y2 = corners[1][0], corners[1][1]
    # Chained comparisons express the strict bounding-box test directly.
    return x1 < x < x2 and y1 < y < y2
def relocate_area(left_pt, corners):
    """Translate corner points relative to a new bottom-left reference.

    :param left_pt: new bottom-left (x, y) reference point
    :param corners: list of (x, y) corner points
    :return: new list of translated [x, y] points
    """
    dx, dy = left_pt[0], left_pt[1]
    return [[pt[0] + dx, pt[1] + dy] for pt in corners]
def macro_and_via1(def_info, via_type):
    """Collect per-component macro names plus matching end-via locations.

    :param def_info: parsed DEF data with .components.comps and .nets.nets
    :param via_type: via type name prefix, e.g. "via1" or "M2_M1_via"
    :return: dict mapping component name -> {"MACRO": macro,
             pin_name: [(via_loc, via_name), ...]}
    """
    comp_dict = {}
    # Seed the dictionary with every component and its macro.
    for comp in def_info.components.comps:
        comp_dict[comp.name] = {"MACRO": comp.macro}
    # Attach each matching end-via to the component pins of its net.
    for net in def_info.nets.nets:
        for route in net.routed:
            end_via = route.end_via
            if end_via is None or not end_via.startswith(via_type):
                continue
            via_info = (route.end_via_loc, end_via)
            for pair in net.comp_pin:
                comp_name, pin_name = pair[0], pair[1]
                if comp_name in comp_dict:
                    comp_dict[comp_name].setdefault(pin_name, []).append(via_info)
    return comp_dict
def predict_score(predicts, actuals):
    """
    Find the number of correct cell predictions.

    The lists are compared element-wise, but a mismatch is treated as a
    possible insertion/deletion: an offset ("gap") is advanced on the longer
    side so later elements can re-align.

    :param predicts: a list of predictions.
    :param actuals: a list of actual cells.
    :return: # correct predictions, # cells
    """
    len_preds = len(predicts)
    len_actuals = len(actuals)
    shorter_len = min(len_preds, len_actuals)
    gap_predict = 0
    gap_actual = 0
    num_correct = 0
    # print (shorter_len)
    for i in range(shorter_len):
        # print (i)
        # print (gap_predict)
        # print (gap_actual)
        # print ()
        if predicts[i + gap_predict] == actuals[i + gap_actual]:
            num_correct += 1
        else:
            # On a mismatch, assume the shorter list dropped an element:
            # skip one position on the longer side and bump the tracked
            # length so the lists are treated as re-balanced afterwards.
            # NOTE(review): for many consecutive mismatches, i + gap can run
            # past the end of the longer list — TODO confirm callers keep the
            # length difference small.
            if len_preds < len_actuals:
                gap_actual += 1
                len_preds += 1
            elif len_preds > len_actuals:
                gap_predict += 1
                len_actuals += 1
    return num_correct, len(actuals)
def get_all_vias(def_info, via_type):
    """List every end-via of the given type found in the routed nets.

    :param def_info: parsed DEF data with .nets.nets
    :param via_type: via type name prefix
    :return: list of [via_loc, via_name, net_name, io_type] entries where
             io_type starts at -1 (unknown; 0 = input, 1 = output)
    """
    found = []
    for net in def_info.nets.nets:
        for route in net.routed:
            end_via = route.end_via
            if end_via is not None and end_via.startswith(via_type):
                # -1 marks the I/O direction as not yet classified.
                found.append([route.end_via_loc, end_via, net.name, -1])
    return found
def sort_vias_by_row(layout_area, row_height, vias):
    """Bucket vias into placement rows and sort each row left-to-right.

    :param layout_area: [x, y] overall layout extent
    :param row_height: height of one placement row
    :param vias: list of vias; via[0] is the (x, y) location
    :return: list of rows, each a list of vias sorted by x coordinate
    """
    row_count = layout_area[1] // row_height + 1
    rows = [[] for _ in range(row_count)]
    for via in vias:
        # The via's y coordinate decides its row bucket.
        rows[via[0][1] // row_height].append(via)
    for row in rows:
        row.sort(key=lambda v: v[0][0])
    return rows
def group_via(via_list, max_number, max_distance):
    """Build candidate via groups that may belong to a single cell.

    For each seed via, collect every run of 2..max_number consecutive vias
    whose horizontal span from the seed stays under max_distance.

    :param via_list: list of vias (via[0][0] is the x coordinate)
    :param max_number: largest group size to consider
    :param max_distance: maximum x distance from the seed to the last via
    :return: list of non-empty candidate-group lists, one per seed via
    """
    groups = []
    count = len(via_list)
    for start in range(count):
        anchor_x = via_list[start][0][0]
        candidates = []
        for size in range(2, max_number + 1):
            last = start + size - 1
            if last < count and via_list[last][0][0] - anchor_x < max_distance:
                candidates.append(via_list[start:start + size])
        # Only record seeds that produced at least one candidate group.
        if candidates:
            groups.append(candidates)
    return groups
def sorted_components(layout_area, row_height, comps):
    """Bucket components into placement rows and sort each row left-to-right.

    :param layout_area: [x, y] overall layout extent
    :param row_height: height of one placement row
    :param comps: components exposing a .placed (x, y) attribute
    :return: list of rows, each a list of components sorted by x coordinate
    """
    row_count = layout_area[1] // row_height + 1
    rows = [[] for _ in range(row_count)]
    for comp in comps:
        # The placed y coordinate decides the component's row bucket.
        rows[comp.placed[1] // row_height].append(comp)
    for row in rows:
        row.sort(key=lambda c: c.placed[0])
    return rows
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2018-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Download or build previous releases.
# Needs curl and tar to download a release, or the build dependencies when
# building a release.
import argparse
import contextlib
from fnmatch import fnmatch
import os
from pathlib import Path
import re
import shutil
import subprocess
import sys
import hashlib
SHA256_SUMS = {
"d40f18b4e43c6e6370ef7db9131f584fbb137276ec2e3dba67a4b267f81cb644": "bitcoin-0.15.2-aarch64-linux-gnu.tar.gz",
"54fb877a148a6ad189a1e1ab1ff8b11181e58ff2aaf430da55b3fd46ae549a6b": "bitcoin-0.15.2-arm-linux-gnueabihf.tar.gz",
"87e9340ff3d382d543b2b69112376077f0c8b4f7450d372e83b68f5a1e22b2df": "bitcoin-0.15.2-osx64.tar.gz",
"566be44190fd76daa01f13d428939dadfb8e3daacefc8fa17f433cad28f73bd5": "bitcoin-0.15.2-x86_64-linux-gnu.tar.gz",
#
"0768c6c15caffbaca6524824c9563b42c24f70633c681c2744649158aa3fd484": "bitcoin-0.16.3-aarch64-linux-gnu.tar.gz",
"fb2818069854a6ad20ea03b28b55dbd35d8b1f7d453e90b83eace5d0098a2a87": "bitcoin-0.16.3-arm-linux-gnueabihf.tar.gz",
"78c3bff3b619a19aed575961ea43cc9e142959218835cf51aede7f0b764fc25d": "bitcoin-0.16.3-osx64.tar.gz",
"5d422a9d544742bc0df12427383f9c2517433ce7b58cf672b9a9b17c2ef51e4f": "bitcoin-0.16.3-x86_64-linux-gnu.tar.gz",
#
"5a6b35d1a348a402f2d2d6ab5aed653a1a1f13bc63aaaf51605e3501b0733b7a": "bitcoin-0.17.2-aarch64-linux-gnu.tar.gz",
"d1913a5d19c8e8da4a67d1bd5205d03c8614dfd2e02bba2fe3087476643a729e": "bitcoin-0.17.2-arm-linux-gnueabihf.tar.gz",
"a783ba20706dbfd5b47fbedf42165fce70fbbc7d78003305d964f6b3da14887f": "bitcoin-0.17.2-osx64.tar.gz",
"943f9362b9f11130177839116f48f809d83478b4c28591d486ee9a7e35179da6": "bitcoin-0.17.2-x86_64-linux-gnu.tar.gz",
#
"88f343af72803b851c7da13874cc5525026b0b55e63e1b5e1298390c4688adc6": "bitcoin-0.18.1-aarch64-linux-gnu.tar.gz",
"cc7d483e4b20c5dabd4dcaf304965214cf4934bcc029ca99cbc9af00d3771a1f": "bitcoin-0.18.1-arm-linux-gnueabihf.tar.gz",
"b7bbcee7a7540f711b171d6981f939ca8482005fde22689bc016596d80548bb1": "bitcoin-0.18.1-osx64.tar.gz",
"425ee5ec631ae8da71ebc1c3f5c0269c627cf459379b9b030f047107a28e3ef8": "bitcoin-0.18.1-riscv64-linux-gnu.tar.gz",
"600d1db5e751fa85903e935a01a74f5cc57e1e7473c15fd3e17ed21e202cfe5a": "bitcoin-0.18.1-x86_64-linux-gnu.tar.gz",
#
"3a80431717842672df682bdb619e66523b59541483297772a7969413be3502ff": "bitcoin-0.19.1-aarch64-linux-gnu.tar.gz",
"657f28213823d240dd3324d14829702f9ad6f0710f8bdd1c379cb3c447197f48": "bitcoin-0.19.1-arm-linux-gnueabihf.tar.gz",
"1ae1b87de26487075cd2fd22e0d4ead87d969bd55c44f2f1d873ecdc6147ebb3": "bitcoin-0.19.1-osx64.tar.gz",
"aa7a9563b48aa79252c8e7b6a41c07a5441bd9f14c5e4562cc72720ea6cb0ee5": "bitcoin-0.19.1-riscv64-linux-gnu.tar.gz",
"5fcac9416e486d4960e1a946145566350ca670f9aaba99de6542080851122e4c": "bitcoin-0.19.1-x86_64-linux-gnu.tar.gz",
#
"60c93e3462c303eb080be7cf623f1a7684b37fd47a018ad3848bc23e13c84e1c": "bitcoin-0.20.1-aarch64-linux-gnu.tar.gz",
"55b577e0fb306fb429d4be6c9316607753e8543e5946b542d75d876a2f08654c": "bitcoin-0.20.1-arm-linux-gnueabihf.tar.gz",
"b9024dde373ea7dad707363e07ec7e265383204127539ae0c234bff3a61da0d1": "bitcoin-0.20.1-osx64.tar.gz",
"fa71cb52ee5e0459cbf5248cdec72df27995840c796f58b304607a1ed4c165af": "bitcoin-0.20.1-riscv64-linux-gnu.tar.gz",
"376194f06596ecfa40331167c39bc70c355f960280bd2a645fdbf18f66527397": "bitcoin-0.20.1-x86_64-linux-gnu.tar.gz",
"43416854330914992bbba2d0e9adf2a6fff4130be9af8ae2ef1186e743d9a3fe": "bitcoin-0.21.0-aarch64-linux-gnu.tar.gz",
"f028af308eda45a3c4c90f9332f96b075bf21e3495c945ebce48597151808176": "bitcoin-0.21.0-arm-linux-gnueabihf.tar.gz",
"695fb624fa6423f5da4f443b60763dd1d77488bfe5ef63760904a7b54e91298d": "bitcoin-0.21.0-osx64.tar.gz",
"f8b2adfeae021a672effbc7bd40d5c48d6b94e53b2dd660f787340bf1a52e4e9": "bitcoin-0.21.0-riscv64-linux-gnu.tar.gz",
"da7766775e3f9c98d7a9145429f2be8297c2672fe5b118fd3dc2411fb48e0032": "bitcoin-0.21.0-x86_64-linux-gnu.tar.gz",
"ac718fed08570a81b3587587872ad85a25173afa5f9fbbd0c03ba4d1714cfa3e": "bitcoin-22.0-aarch64-linux-gnu.tar.gz",
"b8713c6c5f03f5258b54e9f436e2ed6d85449aa24c2c9972f91963d413e86311": "bitcoin-22.0-arm-linux-gnueabihf.tar.gz",
"2744d199c3343b2d94faffdfb2c94d75a630ba27301a70e47b0ad30a7e0155e9": "bitcoin-22.0-osx64.tar.gz",
"2cca5f99007d060aca9d8c7cbd035dfe2f040dd8200b210ce32cdf858479f70d": "bitcoin-22.0-powerpc64-linux-gnu.tar.gz",
"91b1e012975c5a363b5b5fcc81b5b7495e86ff703ec8262d4b9afcfec633c30d": "bitcoin-22.0-powerpc64le-linux-gnu.tar.gz",
"9cc3a62c469fe57e11485fdd32c916f10ce7a2899299855a2e479256ff49ff3c": "bitcoin-22.0-riscv64-linux-gnu.tar.gz",
"59ebd25dd82a51638b7a6bb914586201e67db67b919b2a1ff08925a7936d1b16": "bitcoin-22.0-x86_64-linux-gnu.tar.gz",
# Particl
"fc649cb46d9f4ea4919bb87be8b685474f95f89ae82996dd1e36f2089b69f90d": "particl-0.18.1.7-aarch64-linux-gnu.tar.gz",
"779e57c7e4d680736f972de07276a1037de6c2aa8a2c95c8087c43c56927dc60": "particl-0.18.1.7-arm-linux-gnueabihf.tar.gz",
"d5a2ac8dac2b3d262a1684c21b444890837ad51f2b93e2372f54fc51d7fccbcd": "particl-0.18.1.7-i686-pc-linux-gnu.tar.gz",
"f85b7ee98dab3fbccc7a2de0d560c6861df05d7fbb11664f1da1a5f24d4dc58a": "particl-0.18.1.7-riscv64-linux-gnu.tar.gz",
"e758db39812dd2edf2c4aec215dfce4802c37e3a881d81233d24afff9d61af32": "particl-0.18.1.7-x86_64-linux-gnu.tar.gz",
"44c9f60a1f5fc8377cc1cef278a99b401a67ef0ff64429ce1aa21ca71bf73c04": "particl-0.18.1.7-osx64.tar.gz",
"6b240634cbf589cfc26266bdb73a546bb35449b23fdb5ae052005f9ae9335b1e": "particl-0.19.2.19-aarch64-linux-gnu.tar.gz",
"34daa533252016194762695c05f5e3e44813a92b25a85846f908869ec5630816": "particl-0.19.2.19-arm-linux-gnueabihf.tar.gz",
"b776e16d32674e98fb8ace0c731f59524ebc209ece4088c97486fd52b06cabfb": "particl-0.19.2.19-i686-pc-linux-gnu.tar.gz",
"66d131968f4b81cb7647492e4e6158f2a09acdd4b7c19d14f6495d3da33af4cc": "particl-0.19.2.19-riscv64-linux-gnu.tar.gz",
"853421893c0cfc6d6baf204bc15b5dfb2ba61be08b87868e86b76891f7d85bbc": "particl-0.19.2.19-x86_64-linux-gnu.tar.gz",
"f5670b245048eb6aebe787589b6f7bc9167a9741cd8ab3e4bdcbda7134808b43": "particl-0.19.2.19-osx64.tar.gz",
"074ea147fa3535f29c6cec34d57d491d6a6431fa01a8bbd7df53a10523b661b3": "particl-0.21.2.7-aarch64-linux-gnu.tar.gz",
"2f153b59fd37ede6d274c48fd4c82b5cd8f7065132231936a461ee135fa133a1": "particl-0.21.2.7-arm-linux-gnueabihf.tar.gz",
"f2dba69aed3902c8c958b1c2ff64c1ecb8ca6d79a9ef983d72e732b28e0af2e5": "particl-0.21.2.7-osx64.tar.gz",
"bdcd3c520f196d27a0d2fbfeb22f51b8e01be83556d764e15d836cca3b90a20c": "particl-0.21.2.7-riscv64-linux-gnu.tar.gz",
"973e3594820e74d6ce0d11d6090a5395d81d7018a56d2c074933ec73f98e25e9": "particl-0.21.2.7-x86_64-linux-gnu.tar.gz",
}
@contextlib.contextmanager
def pushd(new_dir) -> None:
    """Context manager: chdir into new_dir, restoring the previous cwd on exit."""
    previous_dir = os.getcwd()
    os.chdir(new_dir)
    try:
        yield
    finally:
        # Restore even if the body raised.
        os.chdir(previous_dir)
def download_binary(tag, args) -> int:
    """Download, checksum-verify and extract the release tarball for `tag`.

    The tarball is fetched into ./<tag> unless a cached directory already
    exists (and --remove-dir was not given).

    :return: 0 on success, otherwise 1 or the failing curl/tar exit code.
    """
    if Path(tag).is_dir():
        if not args.remove_dir:
            print('Using cached {}'.format(tag))
            return 0
        shutil.rmtree(tag)
    Path(tag).mkdir()
    bin_path = 'bin/particl-core-{}'.format(tag[1:])
    # Release candidates (…rcN) live under a different directory layout.
    match = re.compile('v(.*)(rc[0-9]+)$').search(tag)
    if match:
        bin_path = 'bin/particl-core-{}/test.{}'.format(
            match.group(1), match.group(2))
    tarball = 'particl-{tag}-{platform}.tar.gz'.format(
        tag=tag[1:], platform=args.platform)
    #tarballUrl = 'https://bitcoincore.org/{bin_path}/{tarball}'.format(
    #    bin_path=bin_path, tarball=tarball)
    tarballUrl = 'https://github.com/particl/particl-core/releases/download/v{tag}/{tarball}'.format(
        tag=tag[1:], tarball=tarball)
    print('Fetching: {tarballUrl}'.format(tarballUrl=tarballUrl))
    # HEAD request first so a missing tag fails fast without a full download.
    header, status = subprocess.Popen(
        ['curl', '--head', tarballUrl], stdout=subprocess.PIPE).communicate()
    if re.search("404 Not Found", header.decode("utf-8")):
        print("Binary tag was not found")
        return 1
    curlCmds = [
        ['curl', '-L', '--remote-name', tarballUrl]
    ]
    for cmd in curlCmds:
        ret = subprocess.run(cmd).returncode
        if ret:
            return ret
    # Verify the download against the pinned SHA256 sums before extracting.
    hasher = hashlib.sha256()
    with open(tarball, "rb") as afile:
        hasher.update(afile.read())
    tarballHash = hasher.hexdigest()
    if tarballHash not in SHA256_SUMS or SHA256_SUMS[tarballHash] != tarball:
        if tarball in SHA256_SUMS.values():
            print("Checksum did not match")
            return 1
        print("Checksum for given version doesn't exist")
        return 1
    print("Checksum matched")
    # Extract tarball, stripping the embedded top-level directory.
    ret = subprocess.run(['tar', '-zxf', tarball, '-C', tag,
                          '--strip-components=1',
                          'particl-{tag}'.format(tag=tag[1:])]).returncode
    if ret:
        return ret
    Path(tarball).unlink()
    return 0
def build_release(tag, args) -> int:
    """Clone and build release `tag` from source into ./<tag>.

    Optionally builds the depends tree first (--depends) and uses it as the
    configure prefix.

    :return: 0 on success, otherwise 1 or the failing command's exit code.
    """
    githubUrl = "https://github.com/particl/particl-core"
    if args.remove_dir:
        if Path(tag).is_dir():
            shutil.rmtree(tag)
    if not Path(tag).is_dir():
        # fetch new tags
        subprocess.run(
            ["git", "fetch", githubUrl, "--tags"])
        output = subprocess.check_output(['git', 'tag', '-l', tag])
        if not output:
            print('Tag {} not found'.format(tag))
            return 1
    ret = subprocess.run([
        'git', 'clone', githubUrl, tag
    ]).returncode
    if ret:
        return ret
    with pushd(tag):
        ret = subprocess.run(['git', 'checkout', tag]).returncode
        if ret:
            return ret
        host = args.host
        if args.depends:
            # Build the dependency tree first; it becomes the configure prefix.
            with pushd('depends'):
                ret = subprocess.run(['make', 'NO_QT=1']).returncode
                if ret:
                    return ret
                host = os.environ.get(
                    'HOST', subprocess.check_output(['./config.guess']))
        config_flags = '--prefix={pwd}/depends/{host} '.format(
            pwd=os.getcwd(),
            host=host) + args.config_flags
        cmds = [
            './autogen.sh',
            './configure {}'.format(config_flags),
            'make',
        ]
        for cmd in cmds:
            ret = subprocess.run(cmd.split()).returncode
            if ret:
                return ret
        # Move binaries, so they're in the same place as in the
        # release download
        Path('bin').mkdir(exist_ok=True)
        files = ['particld', 'particl-cli', 'particl-tx']
        for f in files:
            Path('src/'+f).rename('bin/'+f)
    return 0
def check_host(args) -> int:
    """Determine the host triplet and, for binary downloads, the platform name.

    Sets ``args.host`` from $HOST, falling back to ``./depends/config.guess``
    only when $HOST is unset. (BUGFIX: the original passed the subprocess
    call as the eagerly-evaluated default of ``os.environ.get``, spawning
    config.guess even when HOST was already set.)

    :return: 0 on success, 1 when no download platform matches the host.
    """
    args.host = os.environ.get('HOST', '')
    if not args.host:
        args.host = subprocess.check_output(
            './depends/config.guess').decode()
    if args.download_binary:
        platforms = {
            'aarch64-*-linux*': 'aarch64-linux-gnu',
            'x86_64-*-linux*': 'x86_64-linux-gnu',
            'x86_64-apple-darwin*': 'osx64',
            'aarch64-apple-darwin*': 'osx64',
        }
        args.platform = ''
        for pattern, target in platforms.items():
            if fnmatch(args.host, pattern):
                args.platform = target
        if not args.platform:
            print('Not sure which binary to download for {}'.format(args.host))
            return 1
    return 0
def main(args) -> int:
    """Create the target directory and download or build every requested tag.

    :return: 0 on success, or the first non-zero step status.
    """
    Path(args.target_dir).mkdir(exist_ok=True, parents=True)
    print("Releases directory: {}".format(args.target_dir))
    ret = check_host(args)
    if ret:
        return ret
    if args.download_binary:
        with pushd(args.target_dir):
            for tag in args.tags:
                ret = download_binary(tag, args)
                if ret:
                    return ret
        return 0
    # Source build: common configure flags, extendable via $CONFIG_FLAGS.
    args.config_flags = os.environ.get('CONFIG_FLAGS', '')
    args.config_flags += ' --without-gui --disable-tests --disable-bench'
    with pushd(args.target_dir):
        for tag in args.tags:
            ret = build_release(tag, args)
            if ret:
                return ret
    return 0
if __name__ == '__main__':
    # Command-line entry point: parse options and fetch/build each tag.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-r', '--remove-dir', action='store_true',
                        help='remove existing directory.')
    parser.add_argument('-d', '--depends', action='store_true',
                        help='use depends.')
    parser.add_argument('-b', '--download-binary', action='store_true',
                        help='download release binary.')
    parser.add_argument('-t', '--target-dir', action='store',
                        help='target directory.', default='releases')
    parser.add_argument('tags', nargs='+',
                        help="release tags. e.g.: v0.18.1 v0.20.0rc2")
    args = parser.parse_args()
    sys.exit(main(args))
|
|
import time
import numpy as np
import tensorflow as tf
import utils
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import zipfile
dataset_folder_path = 'data'
dataset_filename = 'text8.zip'
dataset_name = 'Text8 Dataset'
class DLProgress(tqdm):
    """tqdm progress bar adapter for urlretrieve's reporthook callback."""
    last_block = 0

    def hook(self, block_num=1, block_size=1, total_size=None):
        """Advance the bar by the bytes read since the previous callback."""
        self.total = total_size
        # urlretrieve reports cumulative block counts; convert to a delta.
        blocks_read = block_num - self.last_block
        self.update(blocks_read * block_size)
        self.last_block = block_num
# Download the Text8 corpus once, showing a progress bar while fetching.
if not isfile(dataset_filename):
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:
        urlretrieve(
            'http://mattmahoney.net/dc/text8.zip',
            dataset_filename,
            pbar.hook)

# Unpack the archive into ./data on first run.
if not isdir(dataset_folder_path):
    with zipfile.ZipFile(dataset_filename) as zip_ref:
        zip_ref.extractall(dataset_folder_path)

with open('data/text8') as f:
    text = f.read()

################################################3
# preprocessing
# Tokenize the corpus and build word <-> integer-id lookup tables.
words = utils.preprocess(text)
print(words[:30])
print("Total words: {}".format(len(words)))
print("Unique words: {}".format(len(set(words))))

vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
#############################################################
# subsampling
# Discard very frequent words (e.g. "the") with probability
# 1 - sqrt(th / freq(word)), following Mikolov et al. (2013).
th=1e-5

# my implementation - very inefficient !!
# def freq(word,corpus):
#     return corpus.count(word)/len(corpus)
#
# def prob(word,freqs,th):
#     return 1-np.sqrt(th/freqs[word])
#
# freqs = {word:freq(word,int_words) for word in int_words}
# p_drop = {word: prob(word,freqs,th) for word in int_words}
# train_words = {w for w in int_words if p_drop[w]>np.random.rand()}

# reference implementation
from collections import Counter
import random

word_counts=Counter(int_words) # dictionary like with k:v=int_words:count
total_count = len(int_words)
freqs={word: count/total_count for word,count in word_counts.items()}
p_drop={word: 1-np.sqrt(th/freqs[word]) for word in word_counts}
# Keep a word when a fresh uniform draw exceeds its drop probability.
train_words = [word for word in int_words if p_drop[word]<random.random()]
##############################################################
# Making Batches
def get_target(words, idx, window_size=5):
    '''Return the context words in a random-sized window around idx.

    A radius R is drawn uniformly from [1, window_size]; the words within
    R positions of idx (excluding words[idx] itself) are returned, clipped
    to the list boundaries. (The reference solution additionally wrapped
    the result in set(); duplicates are kept here, as in the original.)
    '''
    radius = random.randint(1, window_size)
    lo = max(idx - radius, 0)
    hi = min(idx + radius + 1, len(words))
    return words[lo:idx] + words[idx + 1:hi]
def get_batches(words, batch_size, window_size=5):
    '''Yield (inputs, targets) skip-gram batches as parallel lists.

    Only full batches are produced; the ragged tail of `words` is dropped.
    Implemented as a generator so batches are built lazily.
    '''
    full = (len(words) // batch_size) * batch_size
    for start in range(0, full, batch_size):
        chunk = words[start:start + batch_size]
        xs, ys = [], []
        for pos, center in enumerate(chunk):
            context = get_target(chunk, pos, window_size)
            ys.extend(context)
            # Repeat the center word once per context word it pairs with.
            xs.extend([center] * len(context))
        yield xs, ys
###############################################################################
# Building the graph
train_graph = tf.Graph()
with train_graph.as_default():
    inputs = tf.placeholder(dtype=tf.int32,shape=[None], name='inputs')
    # Second dimension left as None: sampled_softmax_loss expects rank-2
    # labels of shape [batch, num_true].
    labels = tf.placeholder(dtype=tf.int32,shape=[None,None],name='labels')

# Embedding
n_vocab = len(int_to_vocab)
n_embedding = 200
with train_graph.as_default():
    # One embedding row per vocabulary word, initialized uniform in [-1, 1).
    embedding = tf.Variable(tf.random_uniform((n_vocab,n_embedding),-1,1))
    embed = tf.nn.embedding_lookup(embedding,inputs)
################################################################################
# Negative sampling
# Number of negative labels to sample
n_sampled = 100
with train_graph.as_default():
    softmax_w = tf.Variable(tf.truncated_normal((n_vocab,n_embedding),stddev=0.1))
    softmax_b = tf.Variable(tf.zeros(n_vocab))
    # Calculate the loss using negative sampling
    loss = tf.nn.sampled_softmax_loss(softmax_w,softmax_b,labels,embed,n_sampled,n_vocab)
    cost = tf.reduce_mean(loss)
    optimizer = tf.train.AdamOptimizer().minimize(cost)

with train_graph.as_default():
    ## From Thushan Ganegedara's implementation
    valid_size = 16 # Random set of words to evaluate similarity on.
    valid_window = 100
    # pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent
    valid_examples = np.array(random.sample(range(valid_window), valid_size // 2))
    valid_examples = np.append(valid_examples,
                               random.sample(range(1000, 1000 + valid_window), valid_size // 2))
    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
    # We use the cosine distance:
    # NOTE(review): keep_dims is the TF1-era spelling (renamed keepdims in
    # later TF releases) — confirm the installed TF version accepts it.
    norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
    normalized_embedding = embedding / norm
    valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
    similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))
# If the checkpoints directory doesn't exist, create it. The original used
# the IPython shell magic `!mkdir checkpoints`, which is a syntax error in a
# plain .py file; os.makedirs is the portable equivalent and, unlike bare
# mkdir, does not fail when the directory already exists.
import os
os.makedirs('checkpoints', exist_ok=True)
################################################################################################
# Training
epochs = 10
batch_size = 1000
window_size = 10

with train_graph.as_default():
    saver = tf.train.Saver()

with tf.Session(graph=train_graph) as sess:
    iteration = 1
    loss = 0
    sess.run(tf.global_variables_initializer())
    for e in range(1, epochs + 1):
        batches = get_batches(train_words, batch_size, window_size)
        start = time.time()
        for x, y in batches:
            # Labels need shape [batch, 1] for sampled_softmax_loss.
            feed = {inputs: x,
                    labels: np.array(y)[:, None]}
            train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)
            loss += train_loss
            if iteration % 100 == 0:
                end = time.time()
                # Report the running average loss over the last 100 steps.
                print("Epoch {}/{}".format(e, epochs),
                      "Iteration: {}".format(iteration),
                      "Avg. Training loss: {:.4f}".format(loss / 100),
                      "{:.4f} sec/batch".format((end - start) / 100))
                loss = 0
                start = time.time()
            if iteration % 1000 == 0:
                ## From Thushan Ganegedara's implementation
                # note that this is expensive (~20% slowdown if computed every 500 steps)
                sim = similarity.eval()
                for i in range(valid_size):
                    valid_word = int_to_vocab[valid_examples[i]]
                    top_k = 8 # number of nearest neighbors
                    # Skip index 0: a word is always most similar to itself.
                    nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                    log = 'Nearest to %s:' % valid_word
                    for k in range(top_k):
                        close_word = int_to_vocab[nearest[k]]
                        log = '%s %s,' % (log, close_word)
                    print(log)
            iteration += 1
    save_path = saver.save(sess, "checkpoints/text8.ckpt")
    embed_mat = sess.run(normalized_embedding)

###############################################################################################
# save checkpoint to be restored later
with train_graph.as_default():
    saver = tf.train.Saver()

with tf.Session(graph=train_graph) as sess:
    # Reload the latest checkpoint and export the raw embedding matrix.
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    embed_mat = sess.run(embedding)
#################################################################################################
# Visualization of the learned embeddings with t-SNE.
# NOTE: the original notebook export contained the IPython magic
# `%config InlineBackend.figure_format = 'retina'`, which is a syntax error
# in a plain .py file; it only affects inline figure rendering in Jupyter
# and is safe to drop here.
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

viz_words = 500
tsne = TSNE()
# Project the first viz_words embedding rows down to 2-D for plotting.
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])
fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
    plt.scatter(*embed_tsne[idx, :], color='steelblue')
    plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
|
|
"""
This file contains the class definition for the sampler MCMCSample classes.
"""
__author__ = 'Brandon C. Kelly'
import numpy as np
from matplotlib import pyplot as plt
import scipy.stats
import acor
class MCMCSample(object):
"""
Class for parameter samples generated by a yamcmc++ sampler. This class contains a dictionary of samples
generated by an MCMC sampler for a set of parameters, as well as methods for plotting and summarizing the results.
In general, the MCMCSample object is empty upon instantiation. One adds parameters to the dictionary through the
AddStep method of a Sampler object. Running a Sampler object then fills the dictionary up with the parameter values.
After running a Sampler object, the MCMCSample object will contain the parameter values, which can then be analyzed
further.
Alternatively, one can load the parameters and their values from a file. This is done through the method
generate_from_file. This is helpful if one has a set of MCMC samples generated by a different program.
"""
def __init__(self, filename=None, logpost=None, trace=None):
"""
Constructor for an MCMCSample object. If no arguments are supplied, then this just creates an empty dictionary
that will contain the MCMC samples. In this case parameters are added to the dictionary through the addstep
method of a Sampler object, and the values are generated by running the Sampler object. Otherwise, if a
filename is supplied then the parameter names and MCMC samples are read in from that file.
:param filename: A string giving the name of an asciifile containing the MCMC samples.
"""
self._samples = dict() # Empty dictionary. We will place the samples for each tracked parameter here.
if logpost is not None:
self.set_logpost(logpost)
if trace is not None:
self.generate_from_trace(trace)
elif filename is not None:
# Construct MCMCSample object by reading in MCMC samples from one or more asciifiles.
self.generate_from_file([filename])
def get_samples(self, name):
"""
Returns a copy of the numpy array containing the samples for a parameter. This is safer then directly
accessing the dictionary object containing the samples to prevent one from inadvertently changes the values of
the samples output from an MCMC sampler.
:param name: The name of the parameter for which the samples are desired.
"""
return self._samples[name].copy()
def generate_from_file(self, filename):
"""
Build the dictionary of parameter samples from an ascii file of MCMC samples. The first line of this file
should contain the parameter names.
:param filename: The name of the file containing the MCMC samples.
"""
# TODO: put in exceptions to make sure files are ready correctly
for fname in filename:
file = open(fname, 'r')
name = file.readline()
# Grab the MCMC output
trace = np.genfromtxt(fname, skip_header=1)
if name not in self._samples:
# Parameter is not already in the dictionary, so add it. Otherwise do nothing.
self._samples[name] = trace
def autocorr_timescale(self, trace):
"""
Compute the autocorrelation time scale as estimated by the `acor` module.
:param trace: The parameter trace, a numpy array.
"""
acors = []
for i in range(trace.shape[1]):
tau, mean, sigma = acor.acor(trace[:, i].real) # Warning, does not work with numpy.complex
acors.append(tau)
return np.array(acors)
def effective_samples(self, name):
"""
Return the effective number of independent samples of the MCMC sampler.
:param name: The name of the parameter to compute the effective number of independent samples for.
"""
if not self._samples.has_key(name):
print "WARNING: sampler does not have", name
return
else:
print "Calculating effective number of samples"
traces = self._samples[name] # Get the sampled parameter values
npts = traces.shape[0]
timescale = self.autocorr_timescale(traces)
return npts / timescale
def plot_trace(self, name, doShow=False):
"""
Plot the trace of the values, a time series showing the evolution of the parameter values for the MCMC sampler.
Only a single parameter element trace is shown per plot, and all plots are shown on the same plotting window. In
particular, if a parameter is array-valued, then the traces for each element of its array are plotted on a
separate subplot.
:param name: The parameter name.
:param doShow: If true, then show the plot.
"""
if not self._samples.has_key(name):
print "WARNING: sampler does not have", name
return
else:
print "Plotting Trace"
fig = plt.figure()
traces = self._samples[name] # Get the sampled parameter values
ntrace = traces.shape[1]
spN = plt.subplot(ntrace, 1, ntrace)
spN.plot(traces[:,-1], ".", markersize=2)
spN.set_xlabel("Step")
spN.set_ylabel("par %d" % (ntrace-1))
for i in range(ntrace-1):
sp = plt.subplot(ntrace, 1, i+1, sharex=spN)
sp.plot(traces[:,i], ".", markersize=2)
sp.set_ylabel("par %d" % (i))
plt.setp(sp.get_xticklabels(), visible=False)
plt.suptitle(name)
if doShow:
plt.show()
def plot_1dpdf(self, name, doShow=False):
"""
Plot histograms of the parameter values generated by the MCMC sampler. If the parameter is array valued then
histograms of all of the parameter's elements will be plotted.
:param name: The parameter name.
:param doShow: If true, then show the plot.
"""
if not self._samples.has_key(name):
print "WARNING: sampler does not have", name
return
else:
print "Plotting 1d PDF"
fig = plt.figure()
traces = self._samples[name] # Get the sampled parameter values
ntrace = traces.shape[1]
for i in range(ntrace):
sp = plt.subplot(ntrace, 1, i+1)
sp.hist(traces[:,i], bins=50, normed=True)
sp.set_ylabel("par %d" % (i))
if i == ntrace-1:
sp.set_xlabel("val")
plt.suptitle(name)
if doShow:
plt.show()
def plot_2dpdf(self, name1, name2, pindex1=0, pindex2=0, doShow=False):
"""
Plot joint distribution of the parameter values generated by the MCMC sampler.
:param name1: The parameter name along x-axis
:param name2: The parameter name along y-axis
:param pindex1: Which element of the array to plot
:param pindex2: Which element of the array to plot
:param doShow: Call plt.show()
"""
if (not self._samples.has_key(name1)) or (not self._samples.has_key(name2)) :
print "WARNING: sampler does not have", name1, name2
return
if pindex1 >= self._samples[name1].shape[1]:
print "WARNING: not enough data in", name1
return
if pindex2 >= self._samples[name2].shape[1]:
print "WARNING: not enough data in", name2
return
print "Plotting 2d PDF"
fig = plt.figure()
trace1 = self._samples[name1][:,pindex1]
trace2 = self._samples[name2][:,pindex2]
# joint distribution
axJ = fig.add_axes([0.1, 0.1, 0.7, 0.7]) # [left, bottom, width, height]
# y histogram
axY = fig.add_axes([0.8, 0.1, 0.125, 0.7], sharey=axJ)
# x histogram
axX = fig.add_axes([0.1, 0.8, 0.7, 0.125], sharex=axJ)
axJ.plot(trace1, trace2, 'ro', ms=1, alpha=0.5)
axX.hist(trace1, bins=100)
axY.hist(trace2, orientation='horizontal', bins=100)
axJ.set_xlabel("%s %d" % (name1, pindex1))
axJ.set_ylabel("%s %d" % (name2, pindex2))
plt.setp(axX.get_xticklabels()+axX.get_yticklabels(), visible=False)
plt.setp(axY.get_xticklabels()+axY.get_yticklabels(), visible=False)
if doShow:
plt.show()
def plot_2dkde(self, name1, name2, pindex1=0, pindex2=0,
nbins=100, doPlotStragglers=True, doShow=False):
"""
Plot joint distribution of the parameter values generated by the MCMC sampler using a kernel density estimate.
:param name1: The parameter name along x-axis
:param name2: The parameter name along y-axis
:param pindex1: Which element of the array to plot
:param pindex2: Which element of the array to plot
:param doShow: Call plt.show()
:param nbins: Number of bins along each axis for KDE
:param doPlotStragglers: Plot individual data points outside KDE contours. Works poorly for small samples.
"""
if (not self._samples.has_key(name1)) or (not self._samples.has_key(name2)) :
print "WARNING: sampler does not have", name1, name2
return
if pindex1 >= self._samples[name1].shape[1]:
print "WARNING: not enough data in", name1
return
if pindex2 >= self._samples[name2].shape[1]:
print "WARNING: not enough data in", name2
return
print "Plotting 2d PDF w KDE"
fig = plt.figure()
trace1 = self._samples[name1][:,pindex1].real # JIC we get something imaginary?
trace2 = self._samples[name2][:,pindex2].real
npts = trace1.shape[0]
kde = scipy.stats.gaussian_kde((trace1, trace2))
bins1 = np.linspace(trace1.min(), trace1.max(), nbins)
bins2 = np.linspace(trace2.min(), trace2.max(), nbins)
mesh1, mesh2 = np.meshgrid(bins1, bins2)
hist = kde([mesh1.ravel(), mesh2.ravel()]).reshape(mesh1.shape)
clevels = []
for frac in [0.9973, 0.9545, 0.6827]:
hfrac = lambda level, hist=hist, frac=frac: hist[hist>=level].sum()/hist.sum() - frac
level = scipy.optimize.bisect(hfrac, hist.min(), hist.max())
clevels.append(level)
# joint distribution
axJ = fig.add_axes([0.1, 0.1, 0.7, 0.7]) # [left, bottom, width, height]
# y histogram
axY = fig.add_axes([0.8, 0.1, 0.125, 0.7], sharey=axJ)
# x histogram
axX = fig.add_axes([0.1, 0.8, 0.7, 0.125], sharex=axJ)
cont = axJ.contour(mesh1, mesh2, hist, clevels, linestyles="solid", cmap=plt.cm.jet)
axX.hist(trace1, bins=100)
axY.hist(trace2, orientation='horizontal', bins=100)
axJ.set_xlabel(name1 + '[' + str(pindex1) + ']')
axJ.set_ylabel(name2 + '[' + str(pindex2) + ']')
plt.setp(axX.get_xticklabels()+axX.get_yticklabels(), visible=False)
plt.setp(axY.get_xticklabels()+axY.get_yticklabels(), visible=False)
# Note to self: you need to set up the contours above to have
# the outer one first, for collections[0] to work below.
#
# Also a note: this does not work if the outer contour is not
# fully connected.
if doPlotStragglers:
outer = cont.collections[0]._paths
sx = []
sy = []
for i in range(npts):
found = [o.contains_point((trace1[i], trace2[i])) for o in outer]
if not (True in found):
sx.append(trace1[i])
sy.append(trace2[i])
axJ.plot(sx, sy, 'k.', ms = 1, alpha = 0.1)
if doShow:
plt.show()
def plot_autocorr(self, name, acorrFac = 10.0, doShow=False):
"""
Plot the autocorrelation functions of the traces for a parameter. If the parameter is array-value then
autocorrelation plots for each of the parameter's elements will be plotted.
:param name: The parameter name.
:param acorrFac: The maximum number of lags to plot, in terms of the autocorrelation time scale of the MCMC
samples. The default is 10 autocorrelation time scales.
:param doShow:
"""
if not self._samples.has_key(name):
print "WARNING: sampler does not have", name
return
else:
print "Plotting autocorrelation function (this make take a while)"
fig = plt.figure()
traces = self._samples[name] # Get the sampled parameter values
mtrace = np.mean(traces, axis=0)
ntrace = traces.shape[1]
acorr = self.autocorr_timescale(traces)
for i in range(ntrace):
sp = plt.subplot(ntrace, 1, i+1)
lags, acf, not_needed1, not_needed2 = plt.acorr(traces[:, i] - mtrace[i], maxlags=traces.shape[0]-1, lw=2)
sp.set_xlim(-0.5, acorrFac * acorr[i])
sp.set_ylim(-0.01, 1.01)
sp.axhline(y=0.5, c='k', linestyle='--')
sp.axvline(x=acorr[i], c='r', linestyle='--')
sp.set_ylabel("par %d autocorr" % (i))
if i == ntrace-1:
sp.set_xlabel("lag")
plt.suptitle(name)
if doShow:
plt.show()
def plot_parameter(self, name, pindex=0, doShow=False):
"""
Simultaneously plots the trace, histogram, and autocorrelation of this parameter's values. If the parameter
is array-valued, then the user must specify the index of the array to plot, as these are all 1-d plots on a
single plotting window.
:param name: The name of the parameter that the plots are made for.
:param pindex: If the parameter is array-valued, then this is the index of the array that the plots are made
for.
:param doShow: Call plt.show().
"""
if not self._samples.has_key(name):
print "WARNING: sampler does not have", name
return
else:
print "Plotting parameter summary"
fig = plt.figure()
traces = self._samples[name]
plot_title = name
if traces.ndim > 1:
# Parameter is array valued, grab the column corresponding to pindex
if traces.ndim > 2:
# Parameter values are at least matrix-valued, reshape to a vector
traces = traces.reshape(traces.shape[0], np.prod(traces.shape[1:]))
traces = traces[:, pindex]
plot_title = name + "[" + str(pindex) + "]"
# First plot the trace
plt.subplot(211)
plt.plot(traces, '.', markersize=2, alpha=0.5, rasterized=(len(traces) > 1e4))
plt.xlim(0, traces.size)
plt.xlabel("Iteration")
plt.ylabel("Value")
plt.title(plot_title)
# Now add the histogram of values to the trace plot axes
pdf, bin_edges = np.histogram(traces, bins=25)
bin_edges = bin_edges[0:pdf.size]
# Stretch the PDF so that it is readable on the trace plot when plotted horizontally
pdf = pdf / float(pdf.max()) * 0.34 * traces.size
# Add the histogram to the plot
plt.barh(bin_edges, pdf, height=bin_edges[1] - bin_edges[0], alpha=0.5, color='DarkOrange')
# Finally, plot the autocorrelation function of the trace
plt.subplot(212)
centered_trace = traces - traces.mean()
lags, acf, not_needed1, not_needed2 = plt.acorr(centered_trace, maxlags=traces.size - 1, lw=2)
plt.ylabel("ACF")
plt.xlabel("Lag")
# Compute the autocorrelation timescale, and then reset the x-axis limits accordingly
acf_timescale = self.autocorr_timescale(traces[:, np.newaxis])
plt.xlim(0, np.min([5 * acf_timescale[0], len(traces)]))
if doShow:
plt.show()
def posterior_summaries(self, name):
"""
Print out the posterior medians, standard deviations, and 68th, 95th, and 99th credibility intervals.
:param name: The name of the parameter for which the summaries are desired.
"""
traces = self._samples[name] # Get the sampled parameter values
effective_nsamples = self.effective_samples(name) # Get the effective number of independent samples
if traces.ndim == 1:
# Parameter is scalar valued, so this is easy
print "Posterior summary for parameter", name
print "----------------------------------------------"
print "Effective number of independent samples:", effective_nsamples
print "Median:", np.median(traces)
print "Standard deviation:", np.std(traces)
print "68% credibility interval:", np.percentile(traces, (16.0, 84.0))
print "95% credibility interval:", np.percentile(traces, (2.5, 97.5))
print "99% credibility interval:", np.percentile(traces, (0.5, 99.5))
else:
if traces.ndim > 2:
# Parameter values are at least matrix-valued, reshape to a vector.
traces = traces.reshape(traces.shape[0], np.prod(traces.shape[1:]))
for i in xrange(traces.shape[1]):
# give summary for each element of this parameter separately
# Parameter is scalar valued, so this is easy
print "Posterior summary for parameter", name, " element", i
print "----------------------------------------------"
print "Effective number of independent samples:", effective_nsamples[i]
print "Median:", np.median(traces[:, i])
print "Standard deviation:", np.std(traces[:, i])
print "68% credibility interval:", np.percentile(traces[:, i], (16.0, 84.0))
print "95% credibility interval:", np.percentile(traces[:, i], (2.5, 97.5))
print "99% credibility interval:", np.percentile(traces[:, i], (0.5, 99.5))
def newaxis(self):
for key in self._samples.keys():
if len(self._samples[key].shape) == 1:
self._samples[key] = self._samples[key][:,np.newaxis]
|
|
#
# Copyright (c) 2001-2014, Scott D. Peckham
#
# Sept 2014. Moved some functions into outlets.py to avoid cyclic
# dependencies between BMI_base.py and basins.py.
#
# January, August 2009
# May 2010 (changes to unit_test(), initialize(), etc.)
#
################################################################
#
# NB! The update_volume_in() method ONLY tracks precip now.
# "channels_base.py" now has update_volume_out() also.
#
################################################################
#-----------------------------------------------------------------------
#
# unit_test()
#
# class basins_component (inherits from BMI_base.py)
#
# initialize()
# update() # (non-OpenMI arguments)
# finalize()
# read_config_file()
# -----------------------
# update_volume_in() # (commented out)
# update_volume_out() # (commented out)
#
#-----------------------------------------------------------------------
## import numpy as np # (no longer needed)
import os
import os.path
import BMI_base
import outlets
import tf_utils
## from topoflow.utils import BMI_base
## from topoflow.utils import outlets
## from topoflow.utils import tf_utils
#-----------------------------------------------------------------------
def unit_test():
    # Smoke test: build a basins_component, point it at the TopoFlow test
    # data set, initialize it in driver mode, and print its attributes.
    b = basins_component()
    b.CCA = False
    b.DEBUG = False
    #-----------------------------------------
    # This function adjusts for the platform
    # and can be changed in "tf_utils.py".
    #-----------------------------------------
    cfg_directory = tf_utils.TF_Test_Directory()
    ## os.chdir( cfg_directory )
    b.cfg_directory = cfg_directory
    b.site_prefix = 'Treynor'  ###########
    ## cfg_prefix = tf_utils.TF_Test_case_prefix()
    # NOTE(review): cfg_file=None relies on initialize() being able to build
    # the config path itself from cfg_directory/site_prefix -- confirm.
    cfg_file = None  #####
    b.initialize( cfg_file=cfg_file, mode='driver' )
    # Attributes of the first (main) outlet/basin.
    print 'outlet_ID =', b.outlet_ID
    print 'basin_area =', b.basin_area
    print 'basin_relief =', b.basin_relief
    print ' '
    # Attributes of all monitored outlets.
    print 'n_outlets =', b.n_outlets
    print 'outlet_cols =', b.outlet_cols
    print 'outlet_rows =', b.outlet_rows
    print 'reliefs =', b.basin_reliefs
    print 'areas =', b.basin_areas
    print ' '
    # Grid dimensions read from the grid-info file.
    print 'nx =', b.nx
    print 'ny =', b.ny
    print ' '
    # Exercise a few BMI-style accessors inherited from BMI_base.
    print "get_status() = ", b.get_status()
    print "is_scalar('n_outlets') = ", b.is_scalar('n_outlets')
    print "is_grid('n_outlets') = ", b.is_grid('n_outlets')
    # Next one has double size, since its really a tuple.
    print 'b.outlet_IDs =', b.outlet_IDs
    print 'b.basin_area =', b.basin_area
    print 'Finished with unit_test().'
    print ' '
# unit_test()
#-----------------------------------------------------------------------
class basins_component( BMI_base.BMI_component ):
    # BMI-style component that reads the monitored-outlet data for a model
    # run and (optionally) tracks total water volume in/out of the main
    # basin.  Inherits config/grid handling from BMI_base.BMI_component.

    def initialize(self, cfg_file=None, mode="nondriver",
                   SILENT=False):
        # Read config + grid info, then load the outlet IDs and their
        # attributes (area, relief) via outlets.read_outlet_data().
        if not(SILENT):
            print 'Basins component: Initializing...'
        self.status = 'initializing'  # (OpenMI 2.0 convention)
        self.mode = mode
        self.cfg_file = cfg_file
        #-----------------------------------------------
        # Load component parameters from a config file
        #-----------------------------------------------
        ## self.set_constants()
        self.initialize_config_vars()
        self.read_grid_info()
        #---------------------------------------------
        # Read outlet IDs (IDs of monitored pixels)
        # and their attributes like area and relief.
        # Then read IDs of all cells in the first
        # (or main) basin, i.e. above first outlet.
        #---------------------------------------------
        outlets.read_outlet_data( self )   # (uses nx and ny)
        # outlets.read_main_basin_IDs( self )
        #-------------------------------------------
        # Prepare to track total water in and out
        # of the main basin (using basin RTM file)
        #-------------------------------------------
        # Volume tracking is currently disabled (get_pvolume forced False).
        self.get_pvolume = False  ####
        TRACK_VOLUME = (self.get_pvolume and (self.basin_RTM_file != ''))
        self.TRACK_VOLUME = TRACK_VOLUME
        if (TRACK_VOLUME):
            #-------------------------------------------
            # Prepare to track total water in and out
            # of the main basin (using basin RTM file)
            #----------------------------------------------
            # This requires knowing the IDs of the pixels
            # that lie within the basin (basin_IDs).
            #----------------------------------------------
            outlets.read_main_basin_IDs( self )
            self.volume_in = self.initialize_scalar( 0, dtype='float64')
            self.volume_out = self.initialize_scalar( 0, dtype='float64')
        self.status = 'initialized'
    #   initialize()
    #-------------------------------------------------------------------
    def update(self, Q, time, dt, da, pv):
        # Advance the volume-tracking bookkeeping by one step.
        # NOTE(review): update_volume_out()/update_volume_in() are commented
        # out below, so this would raise AttributeError if TRACK_VOLUME were
        # ever True -- confirm before re-enabling volume tracking.
        self.status = 'updating'  # (OpenMI)
        if (self.TRACK_VOLUME):
            self.update_volume_out(Q, dt)
            self.update_volume_in(time, dt, da, pv)
        #------------------------
        # Update internal clock
        #------------------------
        # self.update_time()
        self.status = 'updated'  # (OpenMI)
    #   update()
    #-------------------------------------------------------------------
    def finalize(self):
        # No resources to release; just flip the OpenMI status flag.
        self.status = 'finalized'  # (OpenMI)
    #   finalize()
    #-------------------------------------------------------------------
    def read_config_file(self):
        #---------------------------------------------------
        # 6/28/10. Need this, since there is no CFG file.
        #---------------------------------------------------
        # Deliberate no-op override of the BMI_base hook.
        pass
    #   read_config_file()
#-------------------------------------------------------------------
#-------------------------------------------------------------------
# NOTE: May be better to move this into "precip_base.py".
#-------------------------------------------------------------------
## def update_volume_in(self, time, dt, da, pv):
##
## #----------------------------------------------------------
## # Notes: This procedure integrates precip. over the main
## # basin using the model vs. sampling timestep.
##
## # Recall that da is a grid [km^2].
## #----------------------------------------------------------
## if (pv.method == 0): return
##
## if (pv.method == 1):
## #------------------------------------------------
## # In this case, pv.rates and pv.durations are
## # 1D vectors (vs. scalar or grid), which does
## # not conform to the general approach now used
## # throughout TopoFlow and by PRECIP_METHOD 2.
## #------------------------------------------------
## wd = where(time < pv.duration_sums)
## nwd = size(wd[0])
## if (nwd != 0):
## # rate = pv.rates[wd[0]] ########
## rate = pv.rates[wd[0][0]] ######################
## dvol = dt * rate * pv.basin_area * float64(1000000)
## self.volume_in += dvol
## else:
## #----------------------------------------------------
## # If pv.durations is a scalar, then duration_sums
## # is equal to the same scalar, but this still works
## # as written (3/20/07)
## #----------------------------------------------------
## n_rates = size(pv.rates)
## if (n_rates == 1):
## P_rates = pv.rates
## else:
## P_rates = pv.rates[self.basin_IDs]
## #-------------------------------------------------------
## n_durs = size(pv.duration_sums)
## if (time <= pv.duration_sums[n_durs - 1]):
## if (size(da) == 1):
## nb = size(self.basin_IDs[0]) ### BUG FIX.
## dvol = dt * sum(double(P_rates * da * nb))
## else:
## dvol = dt * sum(double(P_rates * da[self.basin_IDs]))
## self.volume_in += dvol
##
## # update_volume_in()
## #-------------------------------------------------------------------
## def update_volume_out(self, Q, dt):
##
## self.volume_out += (Q[self.outlet_ID] * dt)
##
## # update_volume_out()
#-------------------------------------------------------------------
|
|
"""Convert between frames and higher-level AMQP methods"""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
from __future__ import absolute_import
from collections import defaultdict, deque
from struct import pack, unpack
from .basic_message import Message
from .exceptions import AMQPError, UnexpectedFrame
from .five import range, string
from .serialization import AMQPReader
__all__ = ['MethodReader']
#
# MethodReader needs to know which methods are supposed
# to be followed by content headers and bodies.
#
# Each entry is a (class_id, method_id) pair from the AMQP 0-9-1 spec.
_CONTENT_METHODS = [
    (60, 50),  # Basic.return
    (60, 60),  # Basic.deliver
    (60, 71),  # Basic.get_ok
]
class _PartialMessage(object):
    """Accumulates one content-carrying method across multiple frames.

    A content method arrives as a method frame, then a content-header
    frame, then zero or more content-body frames.  Instances collect
    the pieces until ``complete`` becomes True.
    """

    def __init__(self, method_sig, args, channel):
        self.method_sig = method_sig
        self.args = args
        self.msg = Message()
        self.body_parts = []
        self.body_received = 0
        self.body_size = None   # unknown until the header frame arrives
        self.complete = False

    def add_header(self, payload):
        """Parse a content-header frame: sizes first, then properties."""
        class_id, weight, self.body_size = unpack('>HHQ', payload[:12])
        self.msg._load_properties(payload[12:])
        # A declared body size of zero means no body frames will follow.
        self.complete = self.body_size == 0

    def add_payload(self, payload):
        """Append one content-body frame; finish once every byte is in."""
        self.body_received += len(payload)
        if self.body_received != self.body_size:
            # Still waiting on more body frames.
            self.body_parts.append(payload)
            return
        if self.body_parts:
            self.body_parts.append(payload)
            self.msg.body = b''.join(self.body_parts)
        else:
            # Entire body fit in a single frame: no join needed.
            self.msg.body = payload
        self.complete = True
class MethodReader(object):
    """Assemble incoming frames into complete AMQP methods.

    Frames are pulled from ``source`` and combined, when necessary, with
    their content-header and content-body frames.  A finished method is
    queued as a ``(channel, method_sig, args, content)`` tuple.

    In the case of a framing error, an :exc:`ConnectionError` is placed
    in the queue.  In the case of unexpected frames, a
    ``(channel, ChannelError)`` tuple is placed in the queue.
    :meth:`read_method` re-raises either kind.
    """

    def __init__(self, source):
        self.source = source
        self.queue = deque()            # finished methods (or queued errors)
        self.running = False
        self.partial_messages = {}      # channel -> in-progress _PartialMessage
        self.heartbeats = 0
        # Which frame type each channel must send next (method frame, 1,
        # by default).
        self.expected_types = defaultdict(lambda: 1)
        # Not an actual byte count -- incremented once per frame received.
        self.bytes_recv = 0
        self._quick_put = self.queue.append
        self._quick_get = self.queue.popleft

    def _next_method(self):
        """Pull frames from the transport until one complete method (or an
        error) has been placed on the internal queue."""
        put = self._quick_put
        read_frame = self.source.read_frame
        while not self.queue:
            try:
                frame_type, channel, payload = read_frame()
            except Exception as exc:
                # Connection closed or framing error: hand the exception
                # to whoever calls read_method().
                put(exc)
                break
            self.bytes_recv += 1
            expected = self.expected_types[channel]
            # Heartbeat frames (type 8) are legal at any point.
            if frame_type != expected and frame_type != 8:
                put((
                    channel,
                    UnexpectedFrame(
                        'Received frame {0} while expecting type: {1}'.format(
                            frame_type, expected))))
            elif frame_type == 1:
                self._process_method_frame(channel, payload)
            elif frame_type == 2:
                self._process_content_header(channel, payload)
            elif frame_type == 3:
                self._process_content_body(channel, payload)
            elif frame_type == 8:
                self._process_heartbeat(channel, payload)

    def _process_heartbeat(self, channel, payload):
        """Count a heartbeat frame; its payload carries no information."""
        self.heartbeats += 1

    def _process_method_frame(self, channel, payload):
        """Decode a method frame, deferring content-carrying methods."""
        method_sig = unpack('>HH', payload[:4])
        args = AMQPReader(payload[4:])
        if method_sig not in _CONTENT_METHODS:
            self._quick_put((channel, method_sig, args, None))
        else:
            # Content follows: stash what we have so far and expect a
            # content-header frame next on this channel.
            self.partial_messages[channel] = _PartialMessage(
                method_sig, args, channel,
            )
            self.expected_types[channel] = 2

    def _process_content_header(self, channel, payload):
        """Attach a content-header frame to the pending method."""
        pending = self.partial_messages[channel]
        pending.add_header(payload)
        if pending.complete:
            # Body-less message: deliver it right away and go back to
            # expecting method frames on this channel.
            self._quick_put((channel, pending.method_sig,
                             pending.args, pending.msg))
            self.partial_messages.pop(channel, None)
            self.expected_types[channel] = 1
        else:
            # Content-body frames follow.
            self.expected_types[channel] = 3

    def _process_content_body(self, channel, payload):
        """Attach a content-body frame; deliver once the body is whole."""
        pending = self.partial_messages[channel]
        pending.add_payload(payload)
        if pending.complete:
            # Message fully assembled: queue it and resume expecting
            # method frames.
            self._quick_put((channel, pending.method_sig,
                             pending.args, pending.msg))
            self.partial_messages.pop(channel, None)
            self.expected_types[channel] = 1

    def read_method(self):
        """Return the next complete method, re-raising queued errors."""
        self._next_method()
        m = self._quick_get()
        if isinstance(m, Exception):
            raise m
        if isinstance(m, tuple) and isinstance(m[1], AMQPError):
            raise m[1]
        return m
class MethodWriter(object):
    """Convert AMQP methods into frames and send them to the peer."""

    def __init__(self, dest, frame_max):
        self.dest = dest            # transport with a write_frame(type, channel, payload) method
        self.frame_max = frame_max  # negotiated maximum frame size
        self.bytes_sent = 0         # counts methods written, not bytes

    def write_method(self, channel, method_sig, args, content=None):
        """Serialize one method (plus optional content) into frames.

        Emits a method frame, and -- when ``content`` is given -- a
        content-header frame followed by the body split into chunks that
        fit within ``frame_max``.
        """
        send = self.dest.write_frame
        method_payload = pack('>HH', *method_sig) + args
        if content:
            # Serialize the content up front so any problem with the
            # content properties raises before the first frame is sent.
            body = content.body
            if isinstance(body, string):
                coding = content.properties.get('content_encoding', None)
                if coding is None:
                    coding = content.properties['content_encoding'] = 'UTF-8'
                body = body.encode(coding)
            properties = content._serialize_properties()
        send(1, channel, method_payload)
        if content:
            header = pack('>HHQ', method_sig[0], 0, len(body)) + properties
            send(2, channel, header)
            # Reserve 8 bytes per frame for the frame envelope.
            step = self.frame_max - 8
            for start in range(0, len(body), step):
                send(3, channel, body[start:start + step])
        self.bytes_sent += 1
|
|
# Generated from C.bnf by ANTLR 4.5.1
from antlr4 import *
from io import StringIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2s")
buf.write("\u04e7\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\t")
buf.write("C\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\t")
buf.write("L\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4U\t")
buf.write("U\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4\\\t\\\4]\t]\4")
buf.write("^\t^\4_\t_\4`\t`\4a\ta\4b\tb\4c\tc\4d\td\4e\te\4f\tf\4")
buf.write("g\tg\4h\th\4i\ti\4j\tj\4k\tk\4l\tl\4m\tm\4n\tn\4o\to\4")
buf.write("p\tp\4q\tq\4r\tr\4s\ts\4t\tt\4u\tu\4v\tv\4w\tw\4x\tx\4")
buf.write("y\ty\4z\tz\4{\t{\4|\t|\4}\t}\4~\t~\4\177\t\177\4\u0080")
buf.write("\t\u0080\4\u0081\t\u0081\4\u0082\t\u0082\4\u0083\t\u0083")
buf.write("\4\u0084\t\u0084\4\u0085\t\u0085\4\u0086\t\u0086\4\u0087")
buf.write("\t\u0087\4\u0088\t\u0088\4\u0089\t\u0089\4\u008a\t\u008a")
buf.write("\4\u008b\t\u008b\4\u008c\t\u008c\4\u008d\t\u008d\4\u008e")
buf.write("\t\u008e\4\u008f\t\u008f\4\u0090\t\u0090\4\u0091\t\u0091")
buf.write("\4\u0092\t\u0092\4\u0093\t\u0093\4\u0094\t\u0094\4\u0095")
buf.write("\t\u0095\4\u0096\t\u0096\4\u0097\t\u0097\4\u0098\t\u0098")
buf.write("\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3")
buf.write("\2\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3")
buf.write("\3\3\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3")
buf.write("\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\5\3\5\3\5\3\5")
buf.write("\3\5\3\5\3\5\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\7\3\7\3")
buf.write("\7\3\7\3\7\3\7\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b")
buf.write("\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3")
buf.write("\t\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\13\3\13\3")
buf.write("\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\f\3\f\3")
buf.write("\f\3\f\3\f\3\f\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r")
buf.write("\3\r\3\r\3\r\3\r\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3")
buf.write("\16\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17")
buf.write("\3\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21")
buf.write("\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\23\3\23\3\23")
buf.write("\3\23\3\23\3\24\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25")
buf.write("\3\25\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\26")
buf.write("\3\26\3\26\3\26\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\30")
buf.write("\3\30\3\30\3\31\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32")
buf.write("\3\32\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\34\3\34\3\34")
buf.write("\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\36\3\36\3\36\3\36")
buf.write("\3\36\3\37\3\37\3\37\3 \3 \3 \3 \3 \3 \3 \3!\3!\3!\3!")
buf.write("\3\"\3\"\3\"\3\"\3\"\3#\3#\3#\3#\3#\3#\3#\3#\3#\3$\3$")
buf.write("\3$\3$\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%\3%\3%\3&\3&\3&\3")
buf.write("&\3&\3&\3\'\3\'\3\'\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3(")
buf.write("\3(\3)\3)\3)\3)\3)\3)\3)\3*\3*\3*\3*\3*\3*\3*\3+\3+\3")
buf.write("+\3+\3+\3+\3+\3,\3,\3,\3,\3,\3,\3,\3,\3-\3-\3-\3-\3-\3")
buf.write("-\3.\3.\3.\3.\3.\3.\3.\3.\3.\3/\3/\3/\3/\3/\3\60\3\60")
buf.write("\3\60\3\60\3\60\3\60\3\60\3\60\3\60\3\61\3\61\3\61\3\61")
buf.write("\3\61\3\61\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62")
buf.write("\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\65\3\65\3\65\3\65\3\65")
buf.write("\3\65\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\67")
buf.write("\3\67\3\67\3\67\3\67\3\67\3\67\3\67\3\67\38\38\38\38\3")
buf.write("8\38\38\38\38\38\38\39\39\39\39\39\39\39\39\39\39\3:\3")
buf.write(":\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3;\3;\3;\3;\3")
buf.write(";\3;\3;\3;\3;\3;\3;\3;\3;\3;\3<\3<\3=\3=\3>\3>\3?\3?\3")
buf.write("@\3@\3A\3A\3B\3B\3C\3C\3C\3D\3D\3E\3E\3E\3F\3F\3F\3G\3")
buf.write("G\3G\3H\3H\3I\3I\3I\3J\3J\3K\3K\3K\3L\3L\3M\3M\3N\3N\3")
buf.write("O\3O\3P\3P\3Q\3Q\3Q\3R\3R\3R\3S\3S\3T\3T\3U\3U\3V\3V\3")
buf.write("W\3W\3X\3X\3Y\3Y\3Z\3Z\3[\3[\3[\3\\\3\\\3\\\3]\3]\3]\3")
buf.write("^\3^\3^\3_\3_\3_\3`\3`\3`\3`\3a\3a\3a\3a\3b\3b\3b\3c\3")
buf.write("c\3c\3d\3d\3d\3e\3e\3e\3f\3f\3f\3g\3g\3g\3h\3h\3i\3i\3")
buf.write("i\3i\3j\3j\3j\7j\u0381\nj\fj\16j\u0384\13j\3k\3k\5k\u0388")
buf.write("\nk\3l\3l\3m\3m\3n\3n\3n\3n\3n\3n\3n\3n\3n\3n\5n\u0398")
buf.write("\nn\3o\3o\3o\3o\3o\3p\3p\3p\5p\u03a2\np\3q\3q\5q\u03a6")
buf.write("\nq\3q\3q\5q\u03aa\nq\3q\3q\5q\u03ae\nq\5q\u03b0\nq\3")
buf.write("r\3r\7r\u03b4\nr\fr\16r\u03b7\13r\3s\3s\7s\u03bb\ns\f")
buf.write("s\16s\u03be\13s\3t\3t\6t\u03c2\nt\rt\16t\u03c3\3u\3u\3")
buf.write("u\3v\3v\3w\3w\3x\3x\3y\3y\5y\u03d1\ny\3y\3y\3y\3y\3y\5")
buf.write("y\u03d8\ny\3y\3y\5y\u03dc\ny\5y\u03de\ny\3z\3z\3{\3{\3")
buf.write("|\3|\3|\3|\5|\u03e8\n|\3}\3}\5}\u03ec\n}\3~\3~\5~\u03f0")
buf.write("\n~\3~\5~\u03f3\n~\3~\3~\3~\5~\u03f8\n~\5~\u03fa\n~\3")
buf.write("\177\3\177\3\177\3\177\5\177\u0400\n\177\3\177\3\177\3")
buf.write("\177\3\177\5\177\u0406\n\177\5\177\u0408\n\177\3\u0080")
buf.write("\5\u0080\u040b\n\u0080\3\u0080\3\u0080\3\u0080\3\u0080")
buf.write("\3\u0080\5\u0080\u0412\n\u0080\3\u0081\3\u0081\5\u0081")
buf.write("\u0416\n\u0081\3\u0081\3\u0081\3\u0081\5\u0081\u041b\n")
buf.write("\u0081\3\u0081\5\u0081\u041e\n\u0081\3\u0082\3\u0082\3")
buf.write("\u0083\6\u0083\u0423\n\u0083\r\u0083\16\u0083\u0424\3")
buf.write("\u0084\5\u0084\u0428\n\u0084\3\u0084\3\u0084\3\u0084\3")
buf.write("\u0084\3\u0084\5\u0084\u042f\n\u0084\3\u0085\3\u0085\5")
buf.write("\u0085\u0433\n\u0085\3\u0085\3\u0085\3\u0085\5\u0085\u0438")
buf.write("\n\u0085\3\u0085\5\u0085\u043b\n\u0085\3\u0086\6\u0086")
buf.write("\u043e\n\u0086\r\u0086\16\u0086\u043f\3\u0087\3\u0087")
buf.write("\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write("\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write("\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write("\3\u0088\5\u0088\u045a\n\u0088\3\u0089\6\u0089\u045d\n")
buf.write("\u0089\r\u0089\16\u0089\u045e\3\u008a\3\u008a\5\u008a")
buf.write("\u0463\n\u008a\3\u008b\3\u008b\3\u008b\3\u008b\5\u008b")
buf.write("\u0469\n\u008b\3\u008c\3\u008c\3\u008c\3\u008d\3\u008d")
buf.write("\3\u008d\3\u008d\3\u008d\3\u008d\3\u008d\3\u008d\3\u008d")
buf.write("\3\u008d\3\u008d\5\u008d\u0479\n\u008d\3\u008e\3\u008e")
buf.write("\3\u008e\3\u008e\6\u008e\u047f\n\u008e\r\u008e\16\u008e")
buf.write("\u0480\3\u008f\5\u008f\u0484\n\u008f\3\u008f\3\u008f\5")
buf.write("\u008f\u0488\n\u008f\3\u008f\3\u008f\3\u0090\3\u0090\3")
buf.write("\u0090\5\u0090\u048f\n\u0090\3\u0091\6\u0091\u0492\n\u0091")
buf.write("\r\u0091\16\u0091\u0493\3\u0092\3\u0092\5\u0092\u0498")
buf.write("\n\u0092\3\u0093\3\u0093\5\u0093\u049c\n\u0093\3\u0093")
buf.write("\3\u0093\5\u0093\u04a0\n\u0093\3\u0093\3\u0093\7\u0093")
buf.write("\u04a4\n\u0093\f\u0093\16\u0093\u04a7\13\u0093\3\u0093")
buf.write("\3\u0093\3\u0094\3\u0094\5\u0094\u04ad\n\u0094\3\u0094")
buf.write("\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094")
buf.write("\3\u0094\7\u0094\u04b8\n\u0094\f\u0094\16\u0094\u04bb")
buf.write("\13\u0094\3\u0094\3\u0094\3\u0095\6\u0095\u04c0\n\u0095")
buf.write("\r\u0095\16\u0095\u04c1\3\u0095\3\u0095\3\u0096\3\u0096")
buf.write("\5\u0096\u04c8\n\u0096\3\u0096\5\u0096\u04cb\n\u0096\3")
buf.write("\u0096\3\u0096\3\u0097\3\u0097\3\u0097\3\u0097\7\u0097")
buf.write("\u04d3\n\u0097\f\u0097\16\u0097\u04d6\13\u0097\3\u0097")
buf.write("\3\u0097\3\u0097\3\u0097\3\u0097\3\u0098\3\u0098\3\u0098")
buf.write("\3\u0098\7\u0098\u04e1\n\u0098\f\u0098\16\u0098\u04e4")
buf.write("\13\u0098\3\u0098\3\u0098\3\u04d4\2\u0099\3\3\5\4\7\5")
buf.write("\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35")
buf.write("\20\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33")
buf.write("\65\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[")
buf.write("/]\60_\61a\62c\63e\64g\65i\66k\67m8o9q:s;u<w=y>{?}@\177")
buf.write("A\u0081B\u0083C\u0085D\u0087E\u0089F\u008bG\u008dH\u008f")
buf.write("I\u0091J\u0093K\u0095L\u0097M\u0099N\u009bO\u009dP\u009f")
buf.write("Q\u00a1R\u00a3S\u00a5T\u00a7U\u00a9V\u00abW\u00adX\u00af")
buf.write("Y\u00b1Z\u00b3[\u00b5\\\u00b7]\u00b9^\u00bb_\u00bd`\u00bf")
buf.write("a\u00c1b\u00c3c\u00c5d\u00c7e\u00c9f\u00cbg\u00cdh\u00cf")
buf.write("i\u00d1j\u00d3k\u00d5\2\u00d7\2\u00d9\2\u00db\2\u00dd")
buf.write("\2\u00dfl\u00e1\2\u00e3\2\u00e5\2\u00e7\2\u00e9\2\u00eb")
buf.write("\2\u00ed\2\u00ef\2\u00f1\2\u00f3\2\u00f5\2\u00f7\2\u00f9")
buf.write("\2\u00fb\2\u00fd\2\u00ff\2\u0101\2\u0103\2\u0105\2\u0107")
buf.write("\2\u0109\2\u010b\2\u010d\2\u010f\2\u0111\2\u0113\2\u0115")
buf.write("\2\u0117\2\u0119\2\u011b\2\u011dm\u011f\2\u0121\2\u0123")
buf.write("\2\u0125n\u0127o\u0129p\u012bq\u012dr\u012fs\3\2\22\5")
buf.write("\2C\\aac|\3\2\62;\4\2ZZzz\3\2\63;\3\2\629\5\2\62;CHch")
buf.write("\4\2WWww\4\2NNnn\4\2--//\6\2HHNNhhnn\6\2\f\f\17\17))^")
buf.write("^\f\2$$))AA^^cdhhppttvvxx\5\2NNWWww\6\2\f\f\17\17$$^^")
buf.write("\4\2\f\f\17\17\4\2\13\13\"\"\u0503\2\3\3\2\2\2\2\5\3\2")
buf.write("\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2")
buf.write("\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2")
buf.write("\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37")
buf.write("\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2")
buf.write("\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2")
buf.write("\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2")
buf.write("\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2")
buf.write("\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2")
buf.write("\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3")
buf.write("\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2\2a")
buf.write("\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3\2\2\2\2i\3\2\2\2\2")
buf.write("k\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\2s\3\2\2\2")
buf.write("\2u\3\2\2\2\2w\3\2\2\2\2y\3\2\2\2\2{\3\2\2\2\2}\3\2\2")
buf.write("\2\2\177\3\2\2\2\2\u0081\3\2\2\2\2\u0083\3\2\2\2\2\u0085")
buf.write("\3\2\2\2\2\u0087\3\2\2\2\2\u0089\3\2\2\2\2\u008b\3\2\2")
buf.write("\2\2\u008d\3\2\2\2\2\u008f\3\2\2\2\2\u0091\3\2\2\2\2\u0093")
buf.write("\3\2\2\2\2\u0095\3\2\2\2\2\u0097\3\2\2\2\2\u0099\3\2\2")
buf.write("\2\2\u009b\3\2\2\2\2\u009d\3\2\2\2\2\u009f\3\2\2\2\2\u00a1")
buf.write("\3\2\2\2\2\u00a3\3\2\2\2\2\u00a5\3\2\2\2\2\u00a7\3\2\2")
buf.write("\2\2\u00a9\3\2\2\2\2\u00ab\3\2\2\2\2\u00ad\3\2\2\2\2\u00af")
buf.write("\3\2\2\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2\2\2\u00b5\3\2\2")
buf.write("\2\2\u00b7\3\2\2\2\2\u00b9\3\2\2\2\2\u00bb\3\2\2\2\2\u00bd")
buf.write("\3\2\2\2\2\u00bf\3\2\2\2\2\u00c1\3\2\2\2\2\u00c3\3\2\2")
buf.write("\2\2\u00c5\3\2\2\2\2\u00c7\3\2\2\2\2\u00c9\3\2\2\2\2\u00cb")
buf.write("\3\2\2\2\2\u00cd\3\2\2\2\2\u00cf\3\2\2\2\2\u00d1\3\2\2")
buf.write("\2\2\u00d3\3\2\2\2\2\u00df\3\2\2\2\2\u011d\3\2\2\2\2\u0125")
buf.write("\3\2\2\2\2\u0127\3\2\2\2\2\u0129\3\2\2\2\2\u012b\3\2\2")
buf.write("\2\2\u012d\3\2\2\2\2\u012f\3\2\2\2\3\u0131\3\2\2\2\5\u013f")
buf.write("\3\2\2\2\7\u0150\3\2\2\2\t\u0163\3\2\2\2\13\u016a\3\2")
buf.write("\2\2\r\u0172\3\2\2\2\17\u017a\3\2\2\2\21\u0185\3\2\2\2")
buf.write("\23\u0190\3\2\2\2\25\u019a\3\2\2\2\27\u01a5\3\2\2\2\31")
buf.write("\u01ab\3\2\2\2\33\u01b9\3\2\2\2\35\u01c1\3\2\2\2\37\u01ce")
buf.write("\3\2\2\2!\u01d3\3\2\2\2#\u01d9\3\2\2\2%\u01de\3\2\2\2")
buf.write("\'\u01e3\3\2\2\2)\u01e9\3\2\2\2+\u01f2\3\2\2\2-\u01fa")
buf.write("\3\2\2\2/\u01fd\3\2\2\2\61\u0204\3\2\2\2\63\u0209\3\2")
buf.write("\2\2\65\u020e\3\2\2\2\67\u0215\3\2\2\29\u021b\3\2\2\2")
buf.write(";\u021f\3\2\2\2=\u0224\3\2\2\2?\u0227\3\2\2\2A\u022e\3")
buf.write("\2\2\2C\u0232\3\2\2\2E\u0237\3\2\2\2G\u0240\3\2\2\2I\u0249")
buf.write("\3\2\2\2K\u0250\3\2\2\2M\u0256\3\2\2\2O\u025d\3\2\2\2")
buf.write("Q\u0264\3\2\2\2S\u026b\3\2\2\2U\u0272\3\2\2\2W\u0279\3")
buf.write("\2\2\2Y\u0281\3\2\2\2[\u0287\3\2\2\2]\u0290\3\2\2\2_\u0295")
buf.write("\3\2\2\2a\u029e\3\2\2\2c\u02a4\3\2\2\2e\u02ad\3\2\2\2")
buf.write("g\u02b6\3\2\2\2i\u02be\3\2\2\2k\u02c4\3\2\2\2m\u02cd\3")
buf.write("\2\2\2o\u02d6\3\2\2\2q\u02e1\3\2\2\2s\u02eb\3\2\2\2u\u02fa")
buf.write("\3\2\2\2w\u0308\3\2\2\2y\u030a\3\2\2\2{\u030c\3\2\2\2")
buf.write("}\u030e\3\2\2\2\177\u0310\3\2\2\2\u0081\u0312\3\2\2\2")
buf.write("\u0083\u0314\3\2\2\2\u0085\u0316\3\2\2\2\u0087\u0319\3")
buf.write("\2\2\2\u0089\u031b\3\2\2\2\u008b\u031e\3\2\2\2\u008d\u0321")
buf.write("\3\2\2\2\u008f\u0324\3\2\2\2\u0091\u0326\3\2\2\2\u0093")
buf.write("\u0329\3\2\2\2\u0095\u032b\3\2\2\2\u0097\u032e\3\2\2\2")
buf.write("\u0099\u0330\3\2\2\2\u009b\u0332\3\2\2\2\u009d\u0334\3")
buf.write("\2\2\2\u009f\u0336\3\2\2\2\u00a1\u0338\3\2\2\2\u00a3\u033b")
buf.write("\3\2\2\2\u00a5\u033e\3\2\2\2\u00a7\u0340\3\2\2\2\u00a9")
buf.write("\u0342\3\2\2\2\u00ab\u0344\3\2\2\2\u00ad\u0346\3\2\2\2")
buf.write("\u00af\u0348\3\2\2\2\u00b1\u034a\3\2\2\2\u00b3\u034c\3")
buf.write("\2\2\2\u00b5\u034e\3\2\2\2\u00b7\u0351\3\2\2\2\u00b9\u0354")
buf.write("\3\2\2\2\u00bb\u0357\3\2\2\2\u00bd\u035a\3\2\2\2\u00bf")
buf.write("\u035d\3\2\2\2\u00c1\u0361\3\2\2\2\u00c3\u0365\3\2\2\2")
buf.write("\u00c5\u0368\3\2\2\2\u00c7\u036b\3\2\2\2\u00c9\u036e\3")
buf.write("\2\2\2\u00cb\u0371\3\2\2\2\u00cd\u0374\3\2\2\2\u00cf\u0377")
buf.write("\3\2\2\2\u00d1\u0379\3\2\2\2\u00d3\u037d\3\2\2\2\u00d5")
buf.write("\u0387\3\2\2\2\u00d7\u0389\3\2\2\2\u00d9\u038b\3\2\2\2")
buf.write("\u00db\u0397\3\2\2\2\u00dd\u0399\3\2\2\2\u00df\u03a1\3")
buf.write("\2\2\2\u00e1\u03af\3\2\2\2\u00e3\u03b1\3\2\2\2\u00e5\u03b8")
buf.write("\3\2\2\2\u00e7\u03bf\3\2\2\2\u00e9\u03c5\3\2\2\2\u00eb")
buf.write("\u03c8\3\2\2\2\u00ed\u03ca\3\2\2\2\u00ef\u03cc\3\2\2\2")
buf.write("\u00f1\u03dd\3\2\2\2\u00f3\u03df\3\2\2\2\u00f5\u03e1\3")
buf.write("\2\2\2\u00f7\u03e7\3\2\2\2\u00f9\u03eb\3\2\2\2\u00fb\u03f9")
buf.write("\3\2\2\2\u00fd\u0407\3\2\2\2\u00ff\u0411\3\2\2\2\u0101")
buf.write("\u041d\3\2\2\2\u0103\u041f\3\2\2\2\u0105\u0422\3\2\2\2")
buf.write("\u0107\u042e\3\2\2\2\u0109\u043a\3\2\2\2\u010b\u043d\3")
buf.write("\2\2\2\u010d\u0441\3\2\2\2\u010f\u0459\3\2\2\2\u0111\u045c")
buf.write("\3\2\2\2\u0113\u0462\3\2\2\2\u0115\u0468\3\2\2\2\u0117")
buf.write("\u046a\3\2\2\2\u0119\u0478\3\2\2\2\u011b\u047a\3\2\2\2")
buf.write("\u011d\u0483\3\2\2\2\u011f\u048e\3\2\2\2\u0121\u0491\3")
buf.write("\2\2\2\u0123\u0497\3\2\2\2\u0125\u0499\3\2\2\2\u0127\u04aa")
buf.write("\3\2\2\2\u0129\u04bf\3\2\2\2\u012b\u04ca\3\2\2\2\u012d")
buf.write("\u04ce\3\2\2\2\u012f\u04dc\3\2\2\2\u0131\u0132\7a\2\2")
buf.write("\u0132\u0133\7a\2\2\u0133\u0134\7g\2\2\u0134\u0135\7z")
buf.write("\2\2\u0135\u0136\7v\2\2\u0136\u0137\7g\2\2\u0137\u0138")
buf.write("\7p\2\2\u0138\u0139\7u\2\2\u0139\u013a\7k\2\2\u013a\u013b")
buf.write("\7q\2\2\u013b\u013c\7p\2\2\u013c\u013d\7a\2\2\u013d\u013e")
buf.write("\7a\2\2\u013e\4\3\2\2\2\u013f\u0140\7a\2\2\u0140\u0141")
buf.write("\7a\2\2\u0141\u0142\7d\2\2\u0142\u0143\7w\2\2\u0143\u0144")
buf.write("\7k\2\2\u0144\u0145\7n\2\2\u0145\u0146\7v\2\2\u0146\u0147")
buf.write("\7k\2\2\u0147\u0148\7p\2\2\u0148\u0149\7a\2\2\u0149\u014a")
buf.write("\7x\2\2\u014a\u014b\7c\2\2\u014b\u014c\7a\2\2\u014c\u014d")
buf.write("\7c\2\2\u014d\u014e\7t\2\2\u014e\u014f\7i\2\2\u014f\6")
buf.write("\3\2\2\2\u0150\u0151\7a\2\2\u0151\u0152\7a\2\2\u0152\u0153")
buf.write("\7d\2\2\u0153\u0154\7w\2\2\u0154\u0155\7k\2\2\u0155\u0156")
buf.write("\7n\2\2\u0156\u0157\7v\2\2\u0157\u0158\7k\2\2\u0158\u0159")
buf.write("\7p\2\2\u0159\u015a\7a\2\2\u015a\u015b\7q\2\2\u015b\u015c")
buf.write("\7h\2\2\u015c\u015d\7h\2\2\u015d\u015e\7u\2\2\u015e\u015f")
buf.write("\7g\2\2\u015f\u0160\7v\2\2\u0160\u0161\7q\2\2\u0161\u0162")
buf.write("\7h\2\2\u0162\b\3\2\2\2\u0163\u0164\7a\2\2\u0164\u0165")
buf.write("\7a\2\2\u0165\u0166\7o\2\2\u0166\u0167\7\63\2\2\u0167")
buf.write("\u0168\7\64\2\2\u0168\u0169\7:\2\2\u0169\n\3\2\2\2\u016a")
buf.write("\u016b\7a\2\2\u016b\u016c\7a\2\2\u016c\u016d\7o\2\2\u016d")
buf.write("\u016e\7\63\2\2\u016e\u016f\7\64\2\2\u016f\u0170\7:\2")
buf.write("\2\u0170\u0171\7f\2\2\u0171\f\3\2\2\2\u0172\u0173\7a\2")
buf.write("\2\u0173\u0174\7a\2\2\u0174\u0175\7o\2\2\u0175\u0176\7")
buf.write("\63\2\2\u0176\u0177\7\64\2\2\u0177\u0178\7:\2\2\u0178")
buf.write("\u0179\7k\2\2\u0179\16\3\2\2\2\u017a\u017b\7a\2\2\u017b")
buf.write("\u017c\7a\2\2\u017c\u017d\7v\2\2\u017d\u017e\7{\2\2\u017e")
buf.write("\u017f\7r\2\2\u017f\u0180\7g\2\2\u0180\u0181\7q\2\2\u0181")
buf.write("\u0182\7h\2\2\u0182\u0183\7a\2\2\u0183\u0184\7a\2\2\u0184")
buf.write("\20\3\2\2\2\u0185\u0186\7a\2\2\u0186\u0187\7a\2\2\u0187")
buf.write("\u0188\7k\2\2\u0188\u0189\7p\2\2\u0189\u018a\7n\2\2\u018a")
buf.write("\u018b\7k\2\2\u018b\u018c\7p\2\2\u018c\u018d\7g\2\2\u018d")
buf.write("\u018e\7a\2\2\u018e\u018f\7a\2\2\u018f\22\3\2\2\2\u0190")
buf.write("\u0191\7a\2\2\u0191\u0192\7a\2\2\u0192\u0193\7u\2\2\u0193")
buf.write("\u0194\7v\2\2\u0194\u0195\7f\2\2\u0195\u0196\7e\2\2\u0196")
buf.write("\u0197\7c\2\2\u0197\u0198\7n\2\2\u0198\u0199\7n\2\2\u0199")
buf.write("\24\3\2\2\2\u019a\u019b\7a\2\2\u019b\u019c\7a\2\2\u019c")
buf.write("\u019d\7f\2\2\u019d\u019e\7g\2\2\u019e\u019f\7e\2\2\u019f")
buf.write("\u01a0\7n\2\2\u01a0\u01a1\7u\2\2\u01a1\u01a2\7r\2\2\u01a2")
buf.write("\u01a3\7g\2\2\u01a3\u01a4\7e\2\2\u01a4\26\3\2\2\2\u01a5")
buf.write("\u01a6\7a\2\2\u01a6\u01a7\7a\2\2\u01a7\u01a8\7c\2\2\u01a8")
buf.write("\u01a9\7u\2\2\u01a9\u01aa\7o\2\2\u01aa\30\3\2\2\2\u01ab")
buf.write("\u01ac\7a\2\2\u01ac\u01ad\7a\2\2\u01ad\u01ae\7c\2\2\u01ae")
buf.write("\u01af\7v\2\2\u01af\u01b0\7v\2\2\u01b0\u01b1\7t\2\2\u01b1")
buf.write("\u01b2\7k\2\2\u01b2\u01b3\7d\2\2\u01b3\u01b4\7w\2\2\u01b4")
buf.write("\u01b5\7v\2\2\u01b5\u01b6\7g\2\2\u01b6\u01b7\7a\2\2\u01b7")
buf.write("\u01b8\7a\2\2\u01b8\32\3\2\2\2\u01b9\u01ba\7a\2\2\u01ba")
buf.write("\u01bb\7a\2\2\u01bb\u01bc\7c\2\2\u01bc\u01bd\7u\2\2\u01bd")
buf.write("\u01be\7o\2\2\u01be\u01bf\7a\2\2\u01bf\u01c0\7a\2\2\u01c0")
buf.write("\34\3\2\2\2\u01c1\u01c2\7a\2\2\u01c2\u01c3\7a\2\2\u01c3")
buf.write("\u01c4\7x\2\2\u01c4\u01c5\7q\2\2\u01c5\u01c6\7n\2\2\u01c6")
buf.write("\u01c7\7c\2\2\u01c7\u01c8\7v\2\2\u01c8\u01c9\7k\2\2\u01c9")
buf.write("\u01ca\7n\2\2\u01ca\u01cb\7g\2\2\u01cb\u01cc\7a\2\2\u01cc")
buf.write("\u01cd\7a\2\2\u01cd\36\3\2\2\2\u01ce\u01cf\7c\2\2\u01cf")
buf.write("\u01d0\7w\2\2\u01d0\u01d1\7v\2\2\u01d1\u01d2\7q\2\2\u01d2")
buf.write(" \3\2\2\2\u01d3\u01d4\7d\2\2\u01d4\u01d5\7t\2\2\u01d5")
buf.write("\u01d6\7g\2\2\u01d6\u01d7\7c\2\2\u01d7\u01d8\7m\2\2\u01d8")
buf.write("\"\3\2\2\2\u01d9\u01da\7e\2\2\u01da\u01db\7c\2\2\u01db")
buf.write("\u01dc\7u\2\2\u01dc\u01dd\7g\2\2\u01dd$\3\2\2\2\u01de")
buf.write("\u01df\7e\2\2\u01df\u01e0\7j\2\2\u01e0\u01e1\7c\2\2\u01e1")
buf.write("\u01e2\7t\2\2\u01e2&\3\2\2\2\u01e3\u01e4\7e\2\2\u01e4")
buf.write("\u01e5\7q\2\2\u01e5\u01e6\7p\2\2\u01e6\u01e7\7u\2\2\u01e7")
buf.write("\u01e8\7v\2\2\u01e8(\3\2\2\2\u01e9\u01ea\7e\2\2\u01ea")
buf.write("\u01eb\7q\2\2\u01eb\u01ec\7p\2\2\u01ec\u01ed\7v\2\2\u01ed")
buf.write("\u01ee\7k\2\2\u01ee\u01ef\7p\2\2\u01ef\u01f0\7w\2\2\u01f0")
buf.write("\u01f1\7g\2\2\u01f1*\3\2\2\2\u01f2\u01f3\7f\2\2\u01f3")
buf.write("\u01f4\7g\2\2\u01f4\u01f5\7h\2\2\u01f5\u01f6\7c\2\2\u01f6")
buf.write("\u01f7\7w\2\2\u01f7\u01f8\7n\2\2\u01f8\u01f9\7v\2\2\u01f9")
buf.write(",\3\2\2\2\u01fa\u01fb\7f\2\2\u01fb\u01fc\7q\2\2\u01fc")
buf.write(".\3\2\2\2\u01fd\u01fe\7f\2\2\u01fe\u01ff\7q\2\2\u01ff")
buf.write("\u0200\7w\2\2\u0200\u0201\7d\2\2\u0201\u0202\7n\2\2\u0202")
buf.write("\u0203\7g\2\2\u0203\60\3\2\2\2\u0204\u0205\7g\2\2\u0205")
buf.write("\u0206\7n\2\2\u0206\u0207\7u\2\2\u0207\u0208\7g\2\2\u0208")
buf.write("\62\3\2\2\2\u0209\u020a\7g\2\2\u020a\u020b\7p\2\2\u020b")
buf.write("\u020c\7w\2\2\u020c\u020d\7o\2\2\u020d\64\3\2\2\2\u020e")
buf.write("\u020f\7g\2\2\u020f\u0210\7z\2\2\u0210\u0211\7v\2\2\u0211")
buf.write("\u0212\7g\2\2\u0212\u0213\7t\2\2\u0213\u0214\7p\2\2\u0214")
buf.write("\66\3\2\2\2\u0215\u0216\7h\2\2\u0216\u0217\7n\2\2\u0217")
buf.write("\u0218\7q\2\2\u0218\u0219\7c\2\2\u0219\u021a\7v\2\2\u021a")
buf.write("8\3\2\2\2\u021b\u021c\7h\2\2\u021c\u021d\7q\2\2\u021d")
buf.write("\u021e\7t\2\2\u021e:\3\2\2\2\u021f\u0220\7i\2\2\u0220")
buf.write("\u0221\7q\2\2\u0221\u0222\7v\2\2\u0222\u0223\7q\2\2\u0223")
buf.write("<\3\2\2\2\u0224\u0225\7k\2\2\u0225\u0226\7h\2\2\u0226")
buf.write(">\3\2\2\2\u0227\u0228\7k\2\2\u0228\u0229\7p\2\2\u0229")
buf.write("\u022a\7n\2\2\u022a\u022b\7k\2\2\u022b\u022c\7p\2\2\u022c")
buf.write("\u022d\7g\2\2\u022d@\3\2\2\2\u022e\u022f\7k\2\2\u022f")
buf.write("\u0230\7p\2\2\u0230\u0231\7v\2\2\u0231B\3\2\2\2\u0232")
buf.write("\u0233\7n\2\2\u0233\u0234\7q\2\2\u0234\u0235\7p\2\2\u0235")
buf.write("\u0236\7i\2\2\u0236D\3\2\2\2\u0237\u0238\7t\2\2\u0238")
buf.write("\u0239\7g\2\2\u0239\u023a\7i\2\2\u023a\u023b\7k\2\2\u023b")
buf.write("\u023c\7u\2\2\u023c\u023d\7v\2\2\u023d\u023e\7g\2\2\u023e")
buf.write("\u023f\7t\2\2\u023fF\3\2\2\2\u0240\u0241\7t\2\2\u0241")
buf.write("\u0242\7g\2\2\u0242\u0243\7u\2\2\u0243\u0244\7v\2\2\u0244")
buf.write("\u0245\7t\2\2\u0245\u0246\7k\2\2\u0246\u0247\7e\2\2\u0247")
buf.write("\u0248\7v\2\2\u0248H\3\2\2\2\u0249\u024a\7t\2\2\u024a")
buf.write("\u024b\7g\2\2\u024b\u024c\7v\2\2\u024c\u024d\7w\2\2\u024d")
buf.write("\u024e\7t\2\2\u024e\u024f\7p\2\2\u024fJ\3\2\2\2\u0250")
buf.write("\u0251\7u\2\2\u0251\u0252\7j\2\2\u0252\u0253\7q\2\2\u0253")
buf.write("\u0254\7t\2\2\u0254\u0255\7v\2\2\u0255L\3\2\2\2\u0256")
buf.write("\u0257\7u\2\2\u0257\u0258\7k\2\2\u0258\u0259\7i\2\2\u0259")
buf.write("\u025a\7p\2\2\u025a\u025b\7g\2\2\u025b\u025c\7f\2\2\u025c")
buf.write("N\3\2\2\2\u025d\u025e\7u\2\2\u025e\u025f\7k\2\2\u025f")
buf.write("\u0260\7|\2\2\u0260\u0261\7g\2\2\u0261\u0262\7q\2\2\u0262")
buf.write("\u0263\7h\2\2\u0263P\3\2\2\2\u0264\u0265\7u\2\2\u0265")
buf.write("\u0266\7v\2\2\u0266\u0267\7c\2\2\u0267\u0268\7v\2\2\u0268")
buf.write("\u0269\7k\2\2\u0269\u026a\7e\2\2\u026aR\3\2\2\2\u026b")
buf.write("\u026c\7u\2\2\u026c\u026d\7v\2\2\u026d\u026e\7t\2\2\u026e")
buf.write("\u026f\7w\2\2\u026f\u0270\7e\2\2\u0270\u0271\7v\2\2\u0271")
buf.write("T\3\2\2\2\u0272\u0273\7u\2\2\u0273\u0274\7y\2\2\u0274")
buf.write("\u0275\7k\2\2\u0275\u0276\7v\2\2\u0276\u0277\7e\2\2\u0277")
buf.write("\u0278\7j\2\2\u0278V\3\2\2\2\u0279\u027a\7v\2\2\u027a")
buf.write("\u027b\7{\2\2\u027b\u027c\7r\2\2\u027c\u027d\7g\2\2\u027d")
buf.write("\u027e\7f\2\2\u027e\u027f\7g\2\2\u027f\u0280\7h\2\2\u0280")
buf.write("X\3\2\2\2\u0281\u0282\7w\2\2\u0282\u0283\7p\2\2\u0283")
buf.write("\u0284\7k\2\2\u0284\u0285\7q\2\2\u0285\u0286\7p\2\2\u0286")
buf.write("Z\3\2\2\2\u0287\u0288\7w\2\2\u0288\u0289\7p\2\2\u0289")
buf.write("\u028a\7u\2\2\u028a\u028b\7k\2\2\u028b\u028c\7i\2\2\u028c")
buf.write("\u028d\7p\2\2\u028d\u028e\7g\2\2\u028e\u028f\7f\2\2\u028f")
buf.write("\\\3\2\2\2\u0290\u0291\7x\2\2\u0291\u0292\7q\2\2\u0292")
buf.write("\u0293\7k\2\2\u0293\u0294\7f\2\2\u0294^\3\2\2\2\u0295")
buf.write("\u0296\7x\2\2\u0296\u0297\7q\2\2\u0297\u0298\7n\2\2\u0298")
buf.write("\u0299\7c\2\2\u0299\u029a\7v\2\2\u029a\u029b\7k\2\2\u029b")
buf.write("\u029c\7n\2\2\u029c\u029d\7g\2\2\u029d`\3\2\2\2\u029e")
buf.write("\u029f\7y\2\2\u029f\u02a0\7j\2\2\u02a0\u02a1\7k\2\2\u02a1")
buf.write("\u02a2\7n\2\2\u02a2\u02a3\7g\2\2\u02a3b\3\2\2\2\u02a4")
buf.write("\u02a5\7a\2\2\u02a5\u02a6\7C\2\2\u02a6\u02a7\7n\2\2\u02a7")
buf.write("\u02a8\7k\2\2\u02a8\u02a9\7i\2\2\u02a9\u02aa\7p\2\2\u02aa")
buf.write("\u02ab\7c\2\2\u02ab\u02ac\7u\2\2\u02acd\3\2\2\2\u02ad")
buf.write("\u02ae\7a\2\2\u02ae\u02af\7C\2\2\u02af\u02b0\7n\2\2\u02b0")
buf.write("\u02b1\7k\2\2\u02b1\u02b2\7i\2\2\u02b2\u02b3\7p\2\2\u02b3")
buf.write("\u02b4\7q\2\2\u02b4\u02b5\7h\2\2\u02b5f\3\2\2\2\u02b6")
buf.write("\u02b7\7a\2\2\u02b7\u02b8\7C\2\2\u02b8\u02b9\7v\2\2\u02b9")
buf.write("\u02ba\7q\2\2\u02ba\u02bb\7o\2\2\u02bb\u02bc\7k\2\2\u02bc")
buf.write("\u02bd\7e\2\2\u02bdh\3\2\2\2\u02be\u02bf\7a\2\2\u02bf")
buf.write("\u02c0\7D\2\2\u02c0\u02c1\7q\2\2\u02c1\u02c2\7q\2\2\u02c2")
buf.write("\u02c3\7n\2\2\u02c3j\3\2\2\2\u02c4\u02c5\7a\2\2\u02c5")
buf.write("\u02c6\7E\2\2\u02c6\u02c7\7q\2\2\u02c7\u02c8\7o\2\2\u02c8")
buf.write("\u02c9\7r\2\2\u02c9\u02ca\7n\2\2\u02ca\u02cb\7g\2\2\u02cb")
buf.write("\u02cc\7z\2\2\u02ccl\3\2\2\2\u02cd\u02ce\7a\2\2\u02ce")
buf.write("\u02cf\7I\2\2\u02cf\u02d0\7g\2\2\u02d0\u02d1\7p\2\2\u02d1")
buf.write("\u02d2\7g\2\2\u02d2\u02d3\7t\2\2\u02d3\u02d4\7k\2\2\u02d4")
buf.write("\u02d5\7e\2\2\u02d5n\3\2\2\2\u02d6\u02d7\7a\2\2\u02d7")
buf.write("\u02d8\7K\2\2\u02d8\u02d9\7o\2\2\u02d9\u02da\7c\2\2\u02da")
buf.write("\u02db\7i\2\2\u02db\u02dc\7k\2\2\u02dc\u02dd\7p\2\2\u02dd")
buf.write("\u02de\7c\2\2\u02de\u02df\7t\2\2\u02df\u02e0\7{\2\2\u02e0")
buf.write("p\3\2\2\2\u02e1\u02e2\7a\2\2\u02e2\u02e3\7P\2\2\u02e3")
buf.write("\u02e4\7q\2\2\u02e4\u02e5\7t\2\2\u02e5\u02e6\7g\2\2\u02e6")
buf.write("\u02e7\7v\2\2\u02e7\u02e8\7w\2\2\u02e8\u02e9\7t\2\2\u02e9")
buf.write("\u02ea\7p\2\2\u02ear\3\2\2\2\u02eb\u02ec\7a\2\2\u02ec")
buf.write("\u02ed\7U\2\2\u02ed\u02ee\7v\2\2\u02ee\u02ef\7c\2\2\u02ef")
buf.write("\u02f0\7v\2\2\u02f0\u02f1\7k\2\2\u02f1\u02f2\7e\2\2\u02f2")
buf.write("\u02f3\7a\2\2\u02f3\u02f4\7c\2\2\u02f4\u02f5\7u\2\2\u02f5")
buf.write("\u02f6\7u\2\2\u02f6\u02f7\7g\2\2\u02f7\u02f8\7t\2\2\u02f8")
buf.write("\u02f9\7v\2\2\u02f9t\3\2\2\2\u02fa\u02fb\7a\2\2\u02fb")
buf.write("\u02fc\7V\2\2\u02fc\u02fd\7j\2\2\u02fd\u02fe\7t\2\2\u02fe")
buf.write("\u02ff\7g\2\2\u02ff\u0300\7c\2\2\u0300\u0301\7f\2\2\u0301")
buf.write("\u0302\7a\2\2\u0302\u0303\7n\2\2\u0303\u0304\7q\2\2\u0304")
buf.write("\u0305\7e\2\2\u0305\u0306\7c\2\2\u0306\u0307\7n\2\2\u0307")
buf.write("v\3\2\2\2\u0308\u0309\7*\2\2\u0309x\3\2\2\2\u030a\u030b")
buf.write("\7+\2\2\u030bz\3\2\2\2\u030c\u030d\7]\2\2\u030d|\3\2\2")
buf.write("\2\u030e\u030f\7_\2\2\u030f~\3\2\2\2\u0310\u0311\7}\2")
buf.write("\2\u0311\u0080\3\2\2\2\u0312\u0313\7\177\2\2\u0313\u0082")
buf.write("\3\2\2\2\u0314\u0315\7>\2\2\u0315\u0084\3\2\2\2\u0316")
buf.write("\u0317\7>\2\2\u0317\u0318\7?\2\2\u0318\u0086\3\2\2\2\u0319")
buf.write("\u031a\7@\2\2\u031a\u0088\3\2\2\2\u031b\u031c\7@\2\2\u031c")
buf.write("\u031d\7?\2\2\u031d\u008a\3\2\2\2\u031e\u031f\7>\2\2\u031f")
buf.write("\u0320\7>\2\2\u0320\u008c\3\2\2\2\u0321\u0322\7@\2\2\u0322")
buf.write("\u0323\7@\2\2\u0323\u008e\3\2\2\2\u0324\u0325\7-\2\2\u0325")
buf.write("\u0090\3\2\2\2\u0326\u0327\7-\2\2\u0327\u0328\7-\2\2\u0328")
buf.write("\u0092\3\2\2\2\u0329\u032a\7/\2\2\u032a\u0094\3\2\2\2")
buf.write("\u032b\u032c\7/\2\2\u032c\u032d\7/\2\2\u032d\u0096\3\2")
buf.write("\2\2\u032e\u032f\7,\2\2\u032f\u0098\3\2\2\2\u0330\u0331")
buf.write("\7\61\2\2\u0331\u009a\3\2\2\2\u0332\u0333\7\'\2\2\u0333")
buf.write("\u009c\3\2\2\2\u0334\u0335\7(\2\2\u0335\u009e\3\2\2\2")
buf.write("\u0336\u0337\7~\2\2\u0337\u00a0\3\2\2\2\u0338\u0339\7")
buf.write("(\2\2\u0339\u033a\7(\2\2\u033a\u00a2\3\2\2\2\u033b\u033c")
buf.write("\7~\2\2\u033c\u033d\7~\2\2\u033d\u00a4\3\2\2\2\u033e\u033f")
buf.write("\7`\2\2\u033f\u00a6\3\2\2\2\u0340\u0341\7#\2\2\u0341\u00a8")
buf.write("\3\2\2\2\u0342\u0343\7\u0080\2\2\u0343\u00aa\3\2\2\2\u0344")
buf.write("\u0345\7A\2\2\u0345\u00ac\3\2\2\2\u0346\u0347\7<\2\2\u0347")
buf.write("\u00ae\3\2\2\2\u0348\u0349\7=\2\2\u0349\u00b0\3\2\2\2")
buf.write("\u034a\u034b\7.\2\2\u034b\u00b2\3\2\2\2\u034c\u034d\7")
buf.write("?\2\2\u034d\u00b4\3\2\2\2\u034e\u034f\7,\2\2\u034f\u0350")
buf.write("\7?\2\2\u0350\u00b6\3\2\2\2\u0351\u0352\7\61\2\2\u0352")
buf.write("\u0353\7?\2\2\u0353\u00b8\3\2\2\2\u0354\u0355\7\'\2\2")
buf.write("\u0355\u0356\7?\2\2\u0356\u00ba\3\2\2\2\u0357\u0358\7")
buf.write("-\2\2\u0358\u0359\7?\2\2\u0359\u00bc\3\2\2\2\u035a\u035b")
buf.write("\7/\2\2\u035b\u035c\7?\2\2\u035c\u00be\3\2\2\2\u035d\u035e")
buf.write("\7>\2\2\u035e\u035f\7>\2\2\u035f\u0360\7?\2\2\u0360\u00c0")
buf.write("\3\2\2\2\u0361\u0362\7@\2\2\u0362\u0363\7@\2\2\u0363\u0364")
buf.write("\7?\2\2\u0364\u00c2\3\2\2\2\u0365\u0366\7(\2\2\u0366\u0367")
buf.write("\7?\2\2\u0367\u00c4\3\2\2\2\u0368\u0369\7`\2\2\u0369\u036a")
buf.write("\7?\2\2\u036a\u00c6\3\2\2\2\u036b\u036c\7~\2\2\u036c\u036d")
buf.write("\7?\2\2\u036d\u00c8\3\2\2\2\u036e\u036f\7?\2\2\u036f\u0370")
buf.write("\7?\2\2\u0370\u00ca\3\2\2\2\u0371\u0372\7#\2\2\u0372\u0373")
buf.write("\7?\2\2\u0373\u00cc\3\2\2\2\u0374\u0375\7/\2\2\u0375\u0376")
buf.write("\7@\2\2\u0376\u00ce\3\2\2\2\u0377\u0378\7\60\2\2\u0378")
buf.write("\u00d0\3\2\2\2\u0379\u037a\7\60\2\2\u037a\u037b\7\60\2")
buf.write("\2\u037b\u037c\7\60\2\2\u037c\u00d2\3\2\2\2\u037d\u0382")
buf.write("\5\u00d5k\2\u037e\u0381\5\u00d5k\2\u037f\u0381\5\u00d9")
buf.write("m\2\u0380\u037e\3\2\2\2\u0380\u037f\3\2\2\2\u0381\u0384")
buf.write("\3\2\2\2\u0382\u0380\3\2\2\2\u0382\u0383\3\2\2\2\u0383")
buf.write("\u00d4\3\2\2\2\u0384\u0382\3\2\2\2\u0385\u0388\5\u00d7")
buf.write("l\2\u0386\u0388\5\u00dbn\2\u0387\u0385\3\2\2\2\u0387\u0386")
buf.write("\3\2\2\2\u0388\u00d6\3\2\2\2\u0389\u038a\t\2\2\2\u038a")
buf.write("\u00d8\3\2\2\2\u038b\u038c\t\3\2\2\u038c\u00da\3\2\2\2")
buf.write("\u038d\u038e\7^\2\2\u038e\u038f\7w\2\2\u038f\u0390\3\2")
buf.write("\2\2\u0390\u0398\5\u00ddo\2\u0391\u0392\7^\2\2\u0392\u0393")
buf.write("\7W\2\2\u0393\u0394\3\2\2\2\u0394\u0395\5\u00ddo\2\u0395")
buf.write("\u0396\5\u00ddo\2\u0396\u0398\3\2\2\2\u0397\u038d\3\2")
buf.write("\2\2\u0397\u0391\3\2\2\2\u0398\u00dc\3\2\2\2\u0399\u039a")
buf.write("\5\u00efx\2\u039a\u039b\5\u00efx\2\u039b\u039c\5\u00ef")
buf.write("x\2\u039c\u039d\5\u00efx\2\u039d\u00de\3\2\2\2\u039e\u03a2")
buf.write("\5\u00e1q\2\u039f\u03a2\5\u00f9}\2\u03a0\u03a2\5\u010f")
buf.write("\u0088\2\u03a1\u039e\3\2\2\2\u03a1\u039f\3\2\2\2\u03a1")
buf.write("\u03a0\3\2\2\2\u03a2\u00e0\3\2\2\2\u03a3\u03a5\5\u00e3")
buf.write("r\2\u03a4\u03a6\5\u00f1y\2\u03a5\u03a4\3\2\2\2\u03a5\u03a6")
buf.write("\3\2\2\2\u03a6\u03b0\3\2\2\2\u03a7\u03a9\5\u00e5s\2\u03a8")
buf.write("\u03aa\5\u00f1y\2\u03a9\u03a8\3\2\2\2\u03a9\u03aa\3\2")
buf.write("\2\2\u03aa\u03b0\3\2\2\2\u03ab\u03ad\5\u00e7t\2\u03ac")
buf.write("\u03ae\5\u00f1y\2\u03ad\u03ac\3\2\2\2\u03ad\u03ae\3\2")
buf.write("\2\2\u03ae\u03b0\3\2\2\2\u03af\u03a3\3\2\2\2\u03af\u03a7")
buf.write("\3\2\2\2\u03af\u03ab\3\2\2\2\u03b0\u00e2\3\2\2\2\u03b1")
buf.write("\u03b5\5\u00ebv\2\u03b2\u03b4\5\u00d9m\2\u03b3\u03b2\3")
buf.write("\2\2\2\u03b4\u03b7\3\2\2\2\u03b5\u03b3\3\2\2\2\u03b5\u03b6")
buf.write("\3\2\2\2\u03b6\u00e4\3\2\2\2\u03b7\u03b5\3\2\2\2\u03b8")
buf.write("\u03bc\7\62\2\2\u03b9\u03bb\5\u00edw\2\u03ba\u03b9\3\2")
buf.write("\2\2\u03bb\u03be\3\2\2\2\u03bc\u03ba\3\2\2\2\u03bc\u03bd")
buf.write("\3\2\2\2\u03bd\u00e6\3\2\2\2\u03be\u03bc\3\2\2\2\u03bf")
buf.write("\u03c1\5\u00e9u\2\u03c0\u03c2\5\u00efx\2\u03c1\u03c0\3")
buf.write("\2\2\2\u03c2\u03c3\3\2\2\2\u03c3\u03c1\3\2\2\2\u03c3\u03c4")
buf.write("\3\2\2\2\u03c4\u00e8\3\2\2\2\u03c5\u03c6\7\62\2\2\u03c6")
buf.write("\u03c7\t\4\2\2\u03c7\u00ea\3\2\2\2\u03c8\u03c9\t\5\2\2")
buf.write("\u03c9\u00ec\3\2\2\2\u03ca\u03cb\t\6\2\2\u03cb\u00ee\3")
buf.write("\2\2\2\u03cc\u03cd\t\7\2\2\u03cd\u00f0\3\2\2\2\u03ce\u03d0")
buf.write("\5\u00f3z\2\u03cf\u03d1\5\u00f5{\2\u03d0\u03cf\3\2\2\2")
buf.write("\u03d0\u03d1\3\2\2\2\u03d1\u03de\3\2\2\2\u03d2\u03d3\5")
buf.write("\u00f3z\2\u03d3\u03d4\5\u00f7|\2\u03d4\u03de\3\2\2\2\u03d5")
buf.write("\u03d7\5\u00f5{\2\u03d6\u03d8\5\u00f3z\2\u03d7\u03d6\3")
buf.write("\2\2\2\u03d7\u03d8\3\2\2\2\u03d8\u03de\3\2\2\2\u03d9\u03db")
buf.write("\5\u00f7|\2\u03da\u03dc\5\u00f3z\2\u03db\u03da\3\2\2\2")
buf.write("\u03db\u03dc\3\2\2\2\u03dc\u03de\3\2\2\2\u03dd\u03ce\3")
buf.write("\2\2\2\u03dd\u03d2\3\2\2\2\u03dd\u03d5\3\2\2\2\u03dd\u03d9")
buf.write("\3\2\2\2\u03de\u00f2\3\2\2\2\u03df\u03e0\t\b\2\2\u03e0")
buf.write("\u00f4\3\2\2\2\u03e1\u03e2\t\t\2\2\u03e2\u00f6\3\2\2\2")
buf.write("\u03e3\u03e4\7n\2\2\u03e4\u03e8\7n\2\2\u03e5\u03e6\7N")
buf.write("\2\2\u03e6\u03e8\7N\2\2\u03e7\u03e3\3\2\2\2\u03e7\u03e5")
buf.write("\3\2\2\2\u03e8\u00f8\3\2\2\2\u03e9\u03ec\5\u00fb~\2\u03ea")
buf.write("\u03ec\5\u00fd\177\2\u03eb\u03e9\3\2\2\2\u03eb\u03ea\3")
buf.write("\2\2\2\u03ec\u00fa\3\2\2\2\u03ed\u03ef\5\u00ff\u0080\2")
buf.write("\u03ee\u03f0\5\u0101\u0081\2\u03ef\u03ee\3\2\2\2\u03ef")
buf.write("\u03f0\3\2\2\2\u03f0\u03f2\3\2\2\2\u03f1\u03f3\5\u010d")
buf.write("\u0087\2\u03f2\u03f1\3\2\2\2\u03f2\u03f3\3\2\2\2\u03f3")
buf.write("\u03fa\3\2\2\2\u03f4\u03f5\5\u0105\u0083\2\u03f5\u03f7")
buf.write("\5\u0101\u0081\2\u03f6\u03f8\5\u010d\u0087\2\u03f7\u03f6")
buf.write("\3\2\2\2\u03f7\u03f8\3\2\2\2\u03f8\u03fa\3\2\2\2\u03f9")
buf.write("\u03ed\3\2\2\2\u03f9\u03f4\3\2\2\2\u03fa\u00fc\3\2\2\2")
buf.write("\u03fb\u03fc\5\u00e9u\2\u03fc\u03fd\5\u0107\u0084\2\u03fd")
buf.write("\u03ff\5\u0109\u0085\2\u03fe\u0400\5\u010d\u0087\2\u03ff")
buf.write("\u03fe\3\2\2\2\u03ff\u0400\3\2\2\2\u0400\u0408\3\2\2\2")
buf.write("\u0401\u0402\5\u00e9u\2\u0402\u0403\5\u010b\u0086\2\u0403")
buf.write("\u0405\5\u0109\u0085\2\u0404\u0406\5\u010d\u0087\2\u0405")
buf.write("\u0404\3\2\2\2\u0405\u0406\3\2\2\2\u0406\u0408\3\2\2\2")
buf.write("\u0407\u03fb\3\2\2\2\u0407\u0401\3\2\2\2\u0408\u00fe\3")
buf.write("\2\2\2\u0409\u040b\5\u0105\u0083\2\u040a\u0409\3\2\2\2")
buf.write("\u040a\u040b\3\2\2\2\u040b\u040c\3\2\2\2\u040c\u040d\7")
buf.write("\60\2\2\u040d\u0412\5\u0105\u0083\2\u040e\u040f\5\u0105")
buf.write("\u0083\2\u040f\u0410\7\60\2\2\u0410\u0412\3\2\2\2\u0411")
buf.write("\u040a\3\2\2\2\u0411\u040e\3\2\2\2\u0412\u0100\3\2\2\2")
buf.write("\u0413\u0415\7g\2\2\u0414\u0416\5\u0103\u0082\2\u0415")
buf.write("\u0414\3\2\2\2\u0415\u0416\3\2\2\2\u0416\u0417\3\2\2\2")
buf.write("\u0417\u041e\5\u0105\u0083\2\u0418\u041a\7G\2\2\u0419")
buf.write("\u041b\5\u0103\u0082\2\u041a\u0419\3\2\2\2\u041a\u041b")
buf.write("\3\2\2\2\u041b\u041c\3\2\2\2\u041c\u041e\5\u0105\u0083")
buf.write("\2\u041d\u0413\3\2\2\2\u041d\u0418\3\2\2\2\u041e\u0102")
buf.write("\3\2\2\2\u041f\u0420\t\n\2\2\u0420\u0104\3\2\2\2\u0421")
buf.write("\u0423\5\u00d9m\2\u0422\u0421\3\2\2\2\u0423\u0424\3\2")
buf.write("\2\2\u0424\u0422\3\2\2\2\u0424\u0425\3\2\2\2\u0425\u0106")
buf.write("\3\2\2\2\u0426\u0428\5\u010b\u0086\2\u0427\u0426\3\2\2")
buf.write("\2\u0427\u0428\3\2\2\2\u0428\u0429\3\2\2\2\u0429\u042a")
buf.write("\7\60\2\2\u042a\u042f\5\u010b\u0086\2\u042b\u042c\5\u010b")
buf.write("\u0086\2\u042c\u042d\7\60\2\2\u042d\u042f\3\2\2\2\u042e")
buf.write("\u0427\3\2\2\2\u042e\u042b\3\2\2\2\u042f\u0108\3\2\2\2")
buf.write("\u0430\u0432\7r\2\2\u0431\u0433\5\u0103\u0082\2\u0432")
buf.write("\u0431\3\2\2\2\u0432\u0433\3\2\2\2\u0433\u0434\3\2\2\2")
buf.write("\u0434\u043b\5\u0105\u0083\2\u0435\u0437\7R\2\2\u0436")
buf.write("\u0438\5\u0103\u0082\2\u0437\u0436\3\2\2\2\u0437\u0438")
buf.write("\3\2\2\2\u0438\u0439\3\2\2\2\u0439\u043b\5\u0105\u0083")
buf.write("\2\u043a\u0430\3\2\2\2\u043a\u0435\3\2\2\2\u043b\u010a")
buf.write("\3\2\2\2\u043c\u043e\5\u00efx\2\u043d\u043c\3\2\2\2\u043e")
buf.write("\u043f\3\2\2\2\u043f\u043d\3\2\2\2\u043f\u0440\3\2\2\2")
buf.write("\u0440\u010c\3\2\2\2\u0441\u0442\t\13\2\2\u0442\u010e")
buf.write("\3\2\2\2\u0443\u0444\7)\2\2\u0444\u0445\5\u0111\u0089")
buf.write("\2\u0445\u0446\7)\2\2\u0446\u045a\3\2\2\2\u0447\u0448")
buf.write("\7N\2\2\u0448\u0449\7)\2\2\u0449\u044a\3\2\2\2\u044a\u044b")
buf.write("\5\u0111\u0089\2\u044b\u044c\7)\2\2\u044c\u045a\3\2\2")
buf.write("\2\u044d\u044e\7w\2\2\u044e\u044f\7)\2\2\u044f\u0450\3")
buf.write("\2\2\2\u0450\u0451\5\u0111\u0089\2\u0451\u0452\7)\2\2")
buf.write("\u0452\u045a\3\2\2\2\u0453\u0454\7W\2\2\u0454\u0455\7")
buf.write(")\2\2\u0455\u0456\3\2\2\2\u0456\u0457\5\u0111\u0089\2")
buf.write("\u0457\u0458\7)\2\2\u0458\u045a\3\2\2\2\u0459\u0443\3")
buf.write("\2\2\2\u0459\u0447\3\2\2\2\u0459\u044d\3\2\2\2\u0459\u0453")
buf.write("\3\2\2\2\u045a\u0110\3\2\2\2\u045b\u045d\5\u0113\u008a")
buf.write("\2\u045c\u045b\3\2\2\2\u045d\u045e\3\2\2\2\u045e\u045c")
buf.write("\3\2\2\2\u045e\u045f\3\2\2\2\u045f\u0112\3\2\2\2\u0460")
buf.write("\u0463\n\f\2\2\u0461\u0463\5\u0115\u008b\2\u0462\u0460")
buf.write("\3\2\2\2\u0462\u0461\3\2\2\2\u0463\u0114\3\2\2\2\u0464")
buf.write("\u0469\5\u0117\u008c\2\u0465\u0469\5\u0119\u008d\2\u0466")
buf.write("\u0469\5\u011b\u008e\2\u0467\u0469\5\u00dbn\2\u0468\u0464")
buf.write("\3\2\2\2\u0468\u0465\3\2\2\2\u0468\u0466\3\2\2\2\u0468")
buf.write("\u0467\3\2\2\2\u0469\u0116\3\2\2\2\u046a\u046b\7^\2\2")
buf.write("\u046b\u046c\t\r\2\2\u046c\u0118\3\2\2\2\u046d\u046e\7")
buf.write("^\2\2\u046e\u0479\5\u00edw\2\u046f\u0470\7^\2\2\u0470")
buf.write("\u0471\5\u00edw\2\u0471\u0472\5\u00edw\2\u0472\u0479\3")
buf.write("\2\2\2\u0473\u0474\7^\2\2\u0474\u0475\5\u00edw\2\u0475")
buf.write("\u0476\5\u00edw\2\u0476\u0477\5\u00edw\2\u0477\u0479\3")
buf.write("\2\2\2\u0478\u046d\3\2\2\2\u0478\u046f\3\2\2\2\u0478\u0473")
buf.write("\3\2\2\2\u0479\u011a\3\2\2\2\u047a\u047b\7^\2\2\u047b")
buf.write("\u047c\7z\2\2\u047c\u047e\3\2\2\2\u047d\u047f\5\u00ef")
buf.write("x\2\u047e\u047d\3\2\2\2\u047f\u0480\3\2\2\2\u0480\u047e")
buf.write("\3\2\2\2\u0480\u0481\3\2\2\2\u0481\u011c\3\2\2\2\u0482")
buf.write("\u0484\5\u011f\u0090\2\u0483\u0482\3\2\2\2\u0483\u0484")
buf.write("\3\2\2\2\u0484\u0485\3\2\2\2\u0485\u0487\7$\2\2\u0486")
buf.write("\u0488\5\u0121\u0091\2\u0487\u0486\3\2\2\2\u0487\u0488")
buf.write("\3\2\2\2\u0488\u0489\3\2\2\2\u0489\u048a\7$\2\2\u048a")
buf.write("\u011e\3\2\2\2\u048b\u048c\7w\2\2\u048c\u048f\7:\2\2\u048d")
buf.write("\u048f\t\16\2\2\u048e\u048b\3\2\2\2\u048e\u048d\3\2\2")
buf.write("\2\u048f\u0120\3\2\2\2\u0490\u0492\5\u0123\u0092\2\u0491")
buf.write("\u0490\3\2\2\2\u0492\u0493\3\2\2\2\u0493\u0491\3\2\2\2")
buf.write("\u0493\u0494\3\2\2\2\u0494\u0122\3\2\2\2\u0495\u0498\n")
buf.write("\17\2\2\u0496\u0498\5\u0115\u008b\2\u0497\u0495\3\2\2")
buf.write("\2\u0497\u0496\3\2\2\2\u0498\u0124\3\2\2\2\u0499\u049b")
buf.write("\7%\2\2\u049a\u049c\5\u0129\u0095\2\u049b\u049a\3\2\2")
buf.write("\2\u049b\u049c\3\2\2\2\u049c\u049d\3\2\2\2\u049d\u049f")
buf.write("\5\u00e3r\2\u049e\u04a0\5\u0129\u0095\2\u049f\u049e\3")
buf.write("\2\2\2\u049f\u04a0\3\2\2\2\u04a0\u04a1\3\2\2\2\u04a1\u04a5")
buf.write("\5\u011d\u008f\2\u04a2\u04a4\n\20\2\2\u04a3\u04a2\3\2")
buf.write("\2\2\u04a4\u04a7\3\2\2\2\u04a5\u04a3\3\2\2\2\u04a5\u04a6")
buf.write("\3\2\2\2\u04a6\u04a8\3\2\2\2\u04a7\u04a5\3\2\2\2\u04a8")
buf.write("\u04a9\b\u0093\2\2\u04a9\u0126\3\2\2\2\u04aa\u04ac\7%")
buf.write("\2\2\u04ab\u04ad\5\u0129\u0095\2\u04ac\u04ab\3\2\2\2\u04ac")
buf.write("\u04ad\3\2\2\2\u04ad\u04ae\3\2\2\2\u04ae\u04af\7r\2\2")
buf.write("\u04af\u04b0\7t\2\2\u04b0\u04b1\7c\2\2\u04b1\u04b2\7i")
buf.write("\2\2\u04b2\u04b3\7o\2\2\u04b3\u04b4\7c\2\2\u04b4\u04b5")
buf.write("\3\2\2\2\u04b5\u04b9\5\u0129\u0095\2\u04b6\u04b8\n\20")
buf.write("\2\2\u04b7\u04b6\3\2\2\2\u04b8\u04bb\3\2\2\2\u04b9\u04b7")
buf.write("\3\2\2\2\u04b9\u04ba\3\2\2\2\u04ba\u04bc\3\2\2\2\u04bb")
buf.write("\u04b9\3\2\2\2\u04bc\u04bd\b\u0094\2\2\u04bd\u0128\3\2")
buf.write("\2\2\u04be\u04c0\t\21\2\2\u04bf\u04be\3\2\2\2\u04c0\u04c1")
buf.write("\3\2\2\2\u04c1\u04bf\3\2\2\2\u04c1\u04c2\3\2\2\2\u04c2")
buf.write("\u04c3\3\2\2\2\u04c3\u04c4\b\u0095\2\2\u04c4\u012a\3\2")
buf.write("\2\2\u04c5\u04c7\7\17\2\2\u04c6\u04c8\7\f\2\2\u04c7\u04c6")
buf.write("\3\2\2\2\u04c7\u04c8\3\2\2\2\u04c8\u04cb\3\2\2\2\u04c9")
buf.write("\u04cb\7\f\2\2\u04ca\u04c5\3\2\2\2\u04ca\u04c9\3\2\2\2")
buf.write("\u04cb\u04cc\3\2\2\2\u04cc\u04cd\b\u0096\2\2\u04cd\u012c")
buf.write("\3\2\2\2\u04ce\u04cf\7\61\2\2\u04cf\u04d0\7,\2\2\u04d0")
buf.write("\u04d4\3\2\2\2\u04d1\u04d3\13\2\2\2\u04d2\u04d1\3\2\2")
buf.write("\2\u04d3\u04d6\3\2\2\2\u04d4\u04d5\3\2\2\2\u04d4\u04d2")
buf.write("\3\2\2\2\u04d5\u04d7\3\2\2\2\u04d6\u04d4\3\2\2\2\u04d7")
buf.write("\u04d8\7,\2\2\u04d8\u04d9\7\61\2\2\u04d9\u04da\3\2\2\2")
buf.write("\u04da\u04db\b\u0097\2\2\u04db\u012e\3\2\2\2\u04dc\u04dd")
buf.write("\7\61\2\2\u04dd\u04de\7\61\2\2\u04de\u04e2\3\2\2\2\u04df")
buf.write("\u04e1\n\20\2\2\u04e0\u04df\3\2\2\2\u04e1\u04e4\3\2\2")
buf.write("\2\u04e2\u04e0\3\2\2\2\u04e2\u04e3\3\2\2\2\u04e3\u04e5")
buf.write("\3\2\2\2\u04e4\u04e2\3\2\2\2\u04e5\u04e6\b\u0098\2\2\u04e6")
buf.write("\u0130\3\2\2\2=\2\u0380\u0382\u0387\u0397\u03a1\u03a5")
buf.write("\u03a9\u03ad\u03af\u03b5\u03bc\u03c3\u03d0\u03d7\u03db")
buf.write("\u03dd\u03e7\u03eb\u03ef\u03f2\u03f7\u03f9\u03ff\u0405")
buf.write("\u0407\u040a\u0411\u0415\u041a\u041d\u0424\u0427\u042e")
buf.write("\u0432\u0437\u043a\u043f\u0459\u045e\u0462\u0468\u0478")
buf.write("\u0480\u0483\u0487\u048e\u0493\u0497\u049b\u049f\u04a5")
buf.write("\u04ac\u04b9\u04c1\u04c7\u04ca\u04d4\u04e2\3\b\2\2")
return buf.getvalue()
class CLexer(Lexer):
    """Lexer for the C grammar, generated by ANTLR 4.6 from "C.bnf".

    NOTE: this class is machine-generated -- do not edit it by hand;
    regenerate from the grammar instead.
    """

    # Deserialize the serialized ATN once at class-definition time; the
    # resulting automaton is shared by every lexer instance.
    atn = ATNDeserializer().deserialize(serializedATN())

    # One DFA per ATN decision point, shared across instances.
    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    # Token type constants; the values must match the serialized ATN.
    # T__0..T__13 are anonymous tokens for literals with no symbolic name
    # (the GCC/MSVC extension keywords listed first in literalNames).
    T__0 = 1
    T__1 = 2
    T__2 = 3
    T__3 = 4
    T__4 = 5
    T__5 = 6
    T__6 = 7
    T__7 = 8
    T__8 = 9
    T__9 = 10
    T__10 = 11
    T__11 = 12
    T__12 = 13
    T__13 = 14
    Auto = 15
    Break = 16
    Case = 17
    Char = 18
    Const = 19
    Continue = 20
    Default = 21
    Do = 22
    Double = 23
    Else = 24
    Enum = 25
    Extern = 26
    Float = 27
    For = 28
    Goto = 29
    If = 30
    Inline = 31
    Int = 32
    Long = 33
    Register = 34
    Restrict = 35
    Return = 36
    Short = 37
    Signed = 38
    Sizeof = 39
    Static = 40
    Struct = 41
    Switch = 42
    Typedef = 43
    Union = 44
    Unsigned = 45
    Void = 46
    Volatile = 47
    While = 48
    Alignas = 49
    Alignof = 50
    Atomic = 51
    Bool = 52
    Complex = 53
    Generic = 54
    Imaginary = 55
    Noreturn = 56
    StaticAssert = 57
    ThreadLocal = 58
    LeftParen = 59
    RightParen = 60
    LeftBracket = 61
    RightBracket = 62
    LeftBrace = 63
    RightBrace = 64
    Less = 65
    LessEqual = 66
    Greater = 67
    GreaterEqual = 68
    LeftShift = 69
    RightShift = 70
    Plus = 71
    PlusPlus = 72
    Minus = 73
    MinusMinus = 74
    Star = 75
    Div = 76
    Mod = 77
    And = 78
    Or = 79
    AndAnd = 80
    OrOr = 81
    Caret = 82
    Not = 83
    Tilde = 84
    Question = 85
    Colon = 86
    Semi = 87
    Comma = 88
    Assign = 89
    StarAssign = 90
    DivAssign = 91
    ModAssign = 92
    PlusAssign = 93
    MinusAssign = 94
    LeftShiftAssign = 95
    RightShiftAssign = 96
    AndAssign = 97
    XorAssign = 98
    OrAssign = 99
    Equal = 100
    NotEqual = 101
    Arrow = 102
    Dot = 103
    Ellipsis = 104
    Identifier = 105
    Constant = 106
    StringLiteral = 107
    LineDirective = 108
    PragmaDirective = 109
    Whitespace = 110
    Newline = 111
    BlockComment = 112
    LineComment = 113

    modeNames = [ "DEFAULT_MODE" ]

    # Display strings for tokens defined by a literal; indexed by token type.
    literalNames = [ "<INVALID>",
            "'__extension__'", "'__builtin_va_arg'", "'__builtin_offsetof'",
            "'__m128'", "'__m128d'", "'__m128i'", "'__typeof__'", "'__inline__'",
            "'__stdcall'", "'__declspec'", "'__asm'", "'__attribute__'",
            "'__asm__'", "'__volatile__'", "'auto'", "'break'", "'case'",
            "'char'", "'const'", "'continue'", "'default'", "'do'", "'double'",
            "'else'", "'enum'", "'extern'", "'float'", "'for'", "'goto'",
            "'if'", "'inline'", "'int'", "'long'", "'register'", "'restrict'",
            "'return'", "'short'", "'signed'", "'sizeof'", "'static'", "'struct'",
            "'switch'", "'typedef'", "'union'", "'unsigned'", "'void'",
            "'volatile'", "'while'", "'_Alignas'", "'_Alignof'", "'_Atomic'",
            "'_Bool'", "'_Complex'", "'_Generic'", "'_Imaginary'", "'_Noreturn'",
            "'_Static_assert'", "'_Thread_local'", "'('", "')'", "'['",
            "']'", "'{'", "'}'", "'<'", "'<='", "'>'", "'>='", "'<<'", "'>>'",
            "'+'", "'++'", "'-'", "'--'", "'*'", "'/'", "'%'", "'&'", "'|'",
            "'&&'", "'||'", "'^'", "'!'", "'~'", "'?'", "':'", "';'", "','",
            "'='", "'*='", "'/='", "'%='", "'+='", "'-='", "'<<='", "'>>='",
            "'&='", "'^='", "'|='", "'=='", "'!='", "'->'", "'.'", "'...'" ]

    # Symbolic token names; indexed by token type (offset by the anonymous
    # T__* tokens, which have no symbolic name).
    symbolicNames = [ "<INVALID>",
            "Auto", "Break", "Case", "Char", "Const", "Continue", "Default",
            "Do", "Double", "Else", "Enum", "Extern", "Float", "For", "Goto",
            "If", "Inline", "Int", "Long", "Register", "Restrict", "Return",
            "Short", "Signed", "Sizeof", "Static", "Struct", "Switch", "Typedef",
            "Union", "Unsigned", "Void", "Volatile", "While", "Alignas",
            "Alignof", "Atomic", "Bool", "Complex", "Generic", "Imaginary",
            "Noreturn", "StaticAssert", "ThreadLocal", "LeftParen", "RightParen",
            "LeftBracket", "RightBracket", "LeftBrace", "RightBrace", "Less",
            "LessEqual", "Greater", "GreaterEqual", "LeftShift", "RightShift",
            "Plus", "PlusPlus", "Minus", "MinusMinus", "Star", "Div", "Mod",
            "And", "Or", "AndAnd", "OrOr", "Caret", "Not", "Tilde", "Question",
            "Colon", "Semi", "Comma", "Assign", "StarAssign", "DivAssign",
            "ModAssign", "PlusAssign", "MinusAssign", "LeftShiftAssign",
            "RightShiftAssign", "AndAssign", "XorAssign", "OrAssign", "Equal",
            "NotEqual", "Arrow", "Dot", "Ellipsis", "Identifier", "Constant",
            "StringLiteral", "LineDirective", "PragmaDirective", "Whitespace",
            "Newline", "BlockComment", "LineComment" ]

    # Names of all lexer rules, including fragment rules (those after
    # "Ellipsis"/"Identifier" that do not produce tokens of their own).
    ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
                  "T__7", "T__8", "T__9", "T__10", "T__11", "T__12", "T__13",
                  "Auto", "Break", "Case", "Char", "Const", "Continue",
                  "Default", "Do", "Double", "Else", "Enum", "Extern", "Float",
                  "For", "Goto", "If", "Inline", "Int", "Long", "Register",
                  "Restrict", "Return", "Short", "Signed", "Sizeof", "Static",
                  "Struct", "Switch", "Typedef", "Union", "Unsigned", "Void",
                  "Volatile", "While", "Alignas", "Alignof", "Atomic", "Bool",
                  "Complex", "Generic", "Imaginary", "Noreturn", "StaticAssert",
                  "ThreadLocal", "LeftParen", "RightParen", "LeftBracket",
                  "RightBracket", "LeftBrace", "RightBrace", "Less", "LessEqual",
                  "Greater", "GreaterEqual", "LeftShift", "RightShift",
                  "Plus", "PlusPlus", "Minus", "MinusMinus", "Star", "Div",
                  "Mod", "And", "Or", "AndAnd", "OrOr", "Caret", "Not",
                  "Tilde", "Question", "Colon", "Semi", "Comma", "Assign",
                  "StarAssign", "DivAssign", "ModAssign", "PlusAssign",
                  "MinusAssign", "LeftShiftAssign", "RightShiftAssign",
                  "AndAssign", "XorAssign", "OrAssign", "Equal", "NotEqual",
                  "Arrow", "Dot", "Ellipsis", "Identifier", "IdentifierNondigit",
                  "Nondigit", "Digit", "UniversalCharacterName", "HexQuad",
                  "Constant", "IntegerConstant", "DecimalConstant", "OctalConstant",
                  "HexadecimalConstant", "HexadecimalPrefix", "NonzeroDigit",
                  "OctalDigit", "HexadecimalDigit", "IntegerSuffix", "UnsignedSuffix",
                  "LongSuffix", "LongLongSuffix", "FloatingConstant", "DecimalFloatingConstant",
                  "HexadecimalFloatingConstant", "FractionalConstant", "ExponentPart",
                  "Sign", "DigitSequence", "HexadecimalFractionalConstant",
                  "BinaryExponentPart", "HexadecimalDigitSequence", "FloatingSuffix",
                  "CharacterConstant", "CCharSequence", "CChar", "EscapeSequence",
                  "SimpleEscapeSequence", "OctalEscapeSequence", "HexadecimalEscapeSequence",
                  "StringLiteral", "EncodingPrefix", "SCharSequence", "SChar",
                  "LineDirective", "PragmaDirective", "Whitespace", "Newline",
                  "BlockComment", "LineComment" ]

    grammarFileName = "C.bnf"

    def __init__(self, input=None):
        """Create a lexer over *input* (an ANTLR InputStream or None)."""
        super().__init__(input)
        # Guard against running against an incompatible ANTLR runtime.
        self.checkVersion("4.6")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None
|
|
#!/usr/bin/python
import sys
import time
import math
import functions
import myGui
import SocketServer
import threading
import socket
import parameters
from PyQt4 import QtGui
from PyQt4 import QtCore
import math
from collections import deque
# Fail fast on network operations: 2-second default timeout for every
# socket created by this process.
socket.setdefaulttimeout(2)
import random
# Experiment-wide configuration, loaded once at import time.
params=parameters.getParams()
SERVERIP=params['serverIP']
MAINPORT=params['port']
class Client(QtGui.QWidget):
    """PyQt4 client window for the investment-game experiment.

    Receives messages from the game server through a background
    SocketServer thread and drives a page state machine
    (intro -> prePeriod -> period -> summary) via pageNavigator().
    """

    def __init__(self,server):
        """Start *server* (a SocketServer instance) on a daemon thread,
        set up the local message queues, and build the first page."""
        super(Client, self).__init__()
        # Activate the server; this will keep running until you
        # interrupt the program with Ctrl-C
        # NOTE(review): HOST and PORT are computed but never used below.
        HOST=socket.gethostbyname(socket.gethostname())
        PORT=MAINPORT
        self.server=server
        # Inbox list that the server thread appends incoming messages to;
        # drained by queueManager().
        self.server.queue=[]
        t = threading.Thread(target=self.server.serve_forever)
        t.setDaemon(True) # don't hang on exit
        t.start()
        self.makeQueues()
        self.initUI()
def makeQueues(self):
self.queues=['important','unimportant']
self.queue={}
for Q in self.queues:
self.queue[Q]=deque([])
    def initUI(self):
        """Initialise window size, game-state attributes, and the main
        grid layout, then navigate to the first page."""
        self.resize(1280,1024)
        #self.showFullScreen()
        self.elapsed = 0
        self.currentPage="intro"
        self.computerNumber=-1  # -1 until the server assigns a number
        self.getPlayerNumber()  # request our number from the server (async reply)
        self.period=0
        self.totalPeriods=20
        self.earnings=0
        # Single grid layout reused by every page; pageNavigator() empties
        # and repopulates it on each page change.
        self.grid = QtGui.QGridLayout()
        self.grid.setHorizontalSpacing(0)
        self.grid.setVerticalSpacing(0)
        self.grid.setContentsMargins(0, 0, 0, 0)
        self.grid.setSpacing(0)
        self.pageNavigator()
def pageNavigator(self):
print "page navigator"
while self.grid.count():
item = self.grid.takeAt(0)
widget = item.widget()
# if widget has some id attributes you need to
# save in a list to maintain order, you can do that here
# i.e.: aList.append(widget.someId)
widget.deleteLater()
if self.currentPage=="intro":
self.makePageIntro()
elif self.currentPage=="prePeriod":
self.makePagePrePeriod()
elif self.currentPage=="period":
self.makePageGame()
elif self.currentPage=="summary":
self.makePageSummary()
def resetGridStreches(self):
for k in range(100):
self.grid.setRowStretch(k,0)
self.grid.setColumnStretch(k,0)
self.grid.setRowMinimumHeight(k,0)
self.grid.setColumnMinimumWidth(k,0)
    def makePageIntro(self):
        """Build the waiting page shown before the game starts."""
        self.resetGridStreches()
        print "making intro page"
        # Rows 0 and 3 absorb extra space so the two labels sit centred.
        self.grid.setRowStretch(0,5)
        self.grid.setRowStretch(1,1)
        self.grid.setRowStretch(2,1)
        self.grid.setRowStretch(3,5)
        self.waitingTitle = QtGui.QLabel('Waiting For Game To Start')
        self.waitingTitle.setStyleSheet('QLabel {color: black;font-size: 24pt; font-family: Courier;}')
        self.waitingTitle.setAlignment(QtCore.Qt.AlignCenter)
        # Shows -1 until assignPlayerNumber() rebuilds this page.
        self.computerTitle = QtGui.QLabel("Computer Number: %s"%(self.computerNumber))
        self.computerTitle.setStyleSheet('QLabel {color: black;font-size: 18pt; font-family: Courier;}')
        self.computerTitle.setAlignment(QtCore.Qt.AlignCenter)
        #self.computerTitle.setText("Computer Number: %s"%(self.computerNumber))
        self.grid.addWidget(self.waitingTitle,1,1)
        self.grid.addWidget(self.computerTitle,2,1)
        self.setLayout(self.grid)
        self.show()
        # Start the polling loop.
        # NOTE(review): checkStatus() is also called from other page
        # builders and reschedules itself, so timer chains may stack --
        # confirm only one chain is ever active.
        self.checkStatus()
def makePageSummary(self):
print "making summary page"
self.resetGridStreches()
self.grid.setColumnStretch(0,2)
self.grid.setColumnStretch(1,1)
self.grid.setColumnStretch(2,2)
self.grid.setRowStretch(0,2)
self.grid.setRowStretch(1,1)
self.grid.setRowStretch(2,1)
self.grid.setRowStretch(3,1)
self.grid.setRowStretch(4,2)
self.waitingTitle = QtGui.QLabel('The Game Has Finished')
self.waitingTitle.setStyleSheet('QLabel {color: black;font-size: 24pt; font-family: Courier;}')
self.waitingTitle.setAlignment(QtCore.Qt.AlignCenter)
self.waitingTitle.setText('The Game Has Finished')
self.computerTitle = QtGui.QLabel('Total Points Earned:')
self.computerTitle.setStyleSheet('QLabel {color: black;font-size: 18pt; font-family: Courier;}')
self.computerTitle.setAlignment(QtCore.Qt.AlignCenter)
self.computerTitle.setText('Total Points Earned Today: %.02f'%(self.totalProfitNumber))
self.dollarPayoff = QtGui.QLabel('Total Points Earned:')
self.dollarPayoff.setStyleSheet('QLabel {color: black;font-size: 24pt; font-family: Courier;}')
self.dollarPayoff.setAlignment(QtCore.Qt.AlignCenter)
self.totalMonetaryPayoff=self.showUpFee+self.exchangeRate*self.totalProfitNumber
self.dollarPayoff.setText('Money Earned Today: %s + %.02f= $%.02f'%(self.showUpFee,self.exchangeRate*self.totalProfitNumber,self.totalMonetaryPayoff))
self.grid.addWidget(self.waitingTitle,1,1)
self.grid.addWidget(self.computerTitle,2,1)
self.grid.addWidget(self.dollarPayoff,3,1)
if self.payoffsSent==0:
#m=[SERVERIP,MAINPORT,"recordPayoffs",self.totalProfitNumber]
#this=myGui.sendMessage(m[0],m[1],m[2],m[3])
#if this=="fail":
# self.queue['resend'].append(m)
self.payoffsSent=1
def makePagePrePeriod(self):
print "making prePeriod page"
self.prePeriodButton=myGui.Button()
self.prePeriodButton.titleUnclicked="Start Next Period"
self.prePeriodButton.titleClicked="Starting!"
self.makeTopPanel()
self.makeBottomPanel()
self.grid.addWidget(self.topPanel,1,1,1,3)
self.grid.setRowMinimumHeight(2,20)
self.grid.addWidget(self.prePeriodButton,3,2,QtCore.Qt.AlignCenter)
if self.period>0:
self.summaryTitle1 = QtGui.QLabel('You were #%s out of %s members of your group to invest last period.'%(6,7))
self.summaryTitle1.setStyleSheet('QLabel {color: black;font-size: 24pt; font-family: Courier;}')
self.summaryTitle1.setAlignment(QtCore.Qt.AlignCenter)
self.summaryTitle2 = QtGui.QLabel('Out of the %s unrevealed projects, you invested in each one with probability %s%%.'%(self.periodSummary['unrevealed'],self.periodSummary['percentage']))
self.summaryTitle2.setStyleSheet('QLabel {color: black;font-size: 24pt; font-family: Courier;}')
self.summaryTitle2.setAlignment(QtCore.Qt.AlignCenter)
self.summaryTitle3 = QtGui.QLabel('You invested in %s Projects, %s of which were successful.'%(12,4))
self.summaryTitle3.setStyleSheet('QLabel {color: black;font-size: 24pt; font-family: Courier;}')
self.summaryTitle3.setAlignment(QtCore.Qt.AlignCenter)
self.summaryTitle4 = QtGui.QLabel('Your cost was %s for investing in %s projects.'%(234,12))
self.summaryTitle4.setStyleSheet('QLabel {color: black;font-size: 24pt; font-family: Courier;}')
self.summaryTitle4.setAlignment(QtCore.Qt.AlignCenter)
self.summaryTitle5 = QtGui.QLabel('Your benefit was %s for the %s succesfull projects.'%(23,4))
self.summaryTitle5.setStyleSheet('QLabel {color: black;font-size: 24pt; font-family: Courier;}')
self.summaryTitle5.setAlignment(QtCore.Qt.AlignCenter)
self.summaryTitle6 = QtGui.QLabel('You earned %s points for the period.'%(23))
self.summaryTitle6.setStyleSheet('QLabel {color: black;font-size: 24pt; font-family: Courier;}')
self.summaryTitle6.setAlignment(QtCore.Qt.AlignCenter)
width=700
self.summaryTitle1.setFixedWidth(width)
self.summaryTitle2.setFixedWidth(width)
self.summaryTitle3.setFixedWidth(width)
self.summaryTitle4.setFixedWidth(width)
self.summaryTitle5.setFixedWidth(width)
self.summaryTitle6.setFixedWidth(width)
self.summaryTitle1.setWordWrap(True)
self.summaryTitle2.setWordWrap(True)
self.summaryTitle3.setWordWrap(True)
self.summaryTitle4.setWordWrap(True)
self.summaryTitle5.setWordWrap(True)
self.summaryTitle6.setWordWrap(True)
self.grid.addWidget(self.summaryTitle1,4,2,QtCore.Qt.AlignCenter)
self.grid.addWidget(self.summaryTitle2,5,2,QtCore.Qt.AlignCenter)
self.grid.addWidget(self.summaryTitle3,6,2,QtCore.Qt.AlignCenter)
self.grid.addWidget(self.summaryTitle4,7,2,QtCore.Qt.AlignCenter)
self.grid.addWidget(self.summaryTitle5,8,2,QtCore.Qt.AlignCenter)
self.grid.addWidget(self.summaryTitle6,9,2,QtCore.Qt.AlignCenter)
self.grid.addWidget(self.bottomPanel,10,1,1,3,QtCore.Qt.AlignBottom)
self.resetGridStreches()
self.grid.setRowStretch(2,1)
self.grid.setRowStretch(4,1)
self.grid.setRowStretch(5,1)
self.grid.setRowStretch(6,1)
self.grid.setRowStretch(7,1)
self.grid.setRowStretch(8,1)
self.grid.setRowStretch(9,1)
# self.grid.setRowStretch(10,4)
self.grid.setColumnStretch(2,100000000000)
self.grid.setColumnStretch(1,100000000000)
self.grid.setColumnStretch(3,100000000000)
self.setLayout(self.grid)
self.show()
self.checkStatus()
    def makeTopPanel(self):
        """Build self.topPanel: a red-bordered strip showing the period
        counter and total points earned so far."""
        self.periodTitle = QtGui.QLabel('Period #%s out of %s'%(self.period,self.totalPeriods))
        self.periodTitle.setStyleSheet('QLabel {color: black;font-size: 20pt; font-family: Courier;}')
        self.periodTitle.setAlignment(QtCore.Qt.AlignCenter)
        self.totalPointsLabel = QtGui.QLabel('Today you have earned %s Points'%(self.earnings))
        self.totalPointsLabel.setStyleSheet('QLabel {color: black;font-size: 20pt; font-family: Courier;}')
        self.totalPointsLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.topPanel=QtGui.QWidget()
        self.topPanel.grid = QtGui.QGridLayout()
        self.topPanel.setLayout(self.topPanel.grid)
        self.topPanel.resize(1024,100)
        self.topPanel.grid.setHorizontalSpacing(0)
        self.topPanel.grid.setVerticalSpacing(0)
        # Object name lets the stylesheet target only this widget.
        self.topPanel.setObjectName("topPanel")
        self.topPanel.setStyleSheet("#topPanel{border: 2px solid red;background-color:transparent;}")
        self.topPanel.grid.addWidget(self.totalPointsLabel,1,1,1,1)
        self.topPanel.grid.addWidget(self.periodTitle,1,3,1,1)
        # Stretches push the two labels apart with a gap in the middle.
        self.topPanel.grid.setColumnStretch(0,30)
        self.topPanel.grid.setColumnStretch(1,5)
        self.topPanel.grid.setColumnStretch(2,15)
        self.topPanel.grid.setColumnMinimumWidth(2,100)
        self.topPanel.grid.setColumnStretch(3,5)
        self.topPanel.grid.setColumnStretch(4,30)
    def makeBottomPanel(self):
        """Build self.bottomPanel: a red-bordered strip holding the
        instruction text (self.statusLabel, updated by later pages)."""
        self.statusLabel = QtGui.QLabel('Please move mouse over the button when you would like to start the next period (no click needed).')
        self.statusLabel.setStyleSheet('QLabel {color: black;font-size: 24pt; font-family: Courier;line-height:190%;}')
        self.statusLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.statusLabel.setWordWrap(True)
        self.statusLabel.setFixedWidth(700)
        self.statusLabel.setFixedHeight(100)
        self.bottomPanel=QtGui.QWidget()
        #self.bottomPanel.resize(100,350)
        self.bottomPanel.grid = QtGui.QGridLayout()
        self.bottomPanel.setGeometry(QtCore.QRect(40, 10, 241, 251));
        self.bottomPanel.setLayout(self.bottomPanel.grid)
        self.bottomPanel.setObjectName("bottomPanel")
        self.bottomPanel.setStyleSheet("#bottomPanel{border: 2px solid red;}")
        self.bottomPanel.grid.addWidget(self.statusLabel,1,1,)
    def makePageGame(self):
        """Build the in-period page: the investment grid (squares), the
        payoff/summary tables, and the hover-to-invest button.

        Also resets the per-period state machine driven by
        updateGamePage() (stageStatus 0..5).
        """
        # Per-period state: revealed project count, reveal-schedule cursor,
        # real-time slowdown factor, and the stage-machine state.
        self.revealed=0
        self.timeVectorIndex=0
        self.timeSlowDown=3
        self.stageStatus=0
        print "making period page"
        self.localStartTime=time.time()
        self.resetGridStreches()
        self.setLayout(self.grid)
        self.show()
        inputs={}
        inputs['player']=self.computerNumber
        inputs['links']=[]
        inputs['serverIP']=SERVERIP
        inputs['port']=MAINPORT
        # The 10x10 investment grid widget.
        self.squares = myGui.Squares(inputs)
        self.squares.setAlignment(QtCore.Qt.AlignCenter)
        self.hoverButton=myGui.Button()
        self.hoverButton.titleUnclicked="INVEST!"
        self.hoverButton.titleClicked="YOU HAVE INVESTED!"
        self.payoffTableLabel = QtGui.QLabel('Payoff Table')
        self.payoffTableLabel.setStyleSheet('QLabel {color: black;font-size: 20pt; font-family: Courier;}')
        self.payoffTableLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.costLabel = QtGui.QLabel('Cost Per Investment: %.02f Points'%(2))
        self.costLabel.setStyleSheet('QLabel {color: green;font-size: 24pt; font-family: Courier;}')
        self.costLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.statisticsTableLabel = QtGui.QLabel('Summary Table')
        self.statisticsTableLabel.setStyleSheet('QLabel {color: black;font-size: 20pt; font-family: Courier;}')
        self.statisticsTableLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.investmentGridLabel = QtGui.QLabel('Investment Grid')
        self.investmentGridLabel.setStyleSheet('QLabel {color: black;font-size: 20pt; font-family: Courier;}')
        self.investmentGridLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.remainingSuccessLabel = QtGui.QLabel('Success Probability of Remaining Investments')
        self.remainingSuccessLabel.setStyleSheet('QLabel {color: red;font-size: 24pt; font-family: Courier;}')
        self.remainingSuccessLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.remainingSuccessLabel.setFixedWidth(325)
        self.remainingSuccessLabel.setWordWrap(True)
        # Live probability readout, updated by updateGamePage().
        self.remainingSuccessNumber = QtGui.QLabel('0%')
        self.remainingSuccessNumber.setStyleSheet('QLabel {color: red;font-size: 40pt; font-family: Courier;}')
        self.remainingSuccessNumber.setAlignment(QtCore.Qt.AlignCenter)
        # NOTE(review): game parameters hard-coded here rather than taken
        # from assignParameters() -- confirm these match the server's.
        self.parameterGroupSize=7
        self.parameterTheta=2
        self.parameterPi=9
        # Payoff table: payoff for investing k-th is pi - theta*(k-1).
        self.payoffTable = myGui.Table()
        self.payoffTable.columnHeaders=['Order','Payoff']
        self.payoffTable.data=[]
        for row in range(self.parameterGroupSize):
            self.payoffTable.data.append(["%s"%(row+1),"%.02f"%(self.parameterPi-self.parameterTheta*(row))])
        self.payoffTable.columnWidths=[100,100]
        self.payoffTable.rowHeight=30
        self.payoffTable.fontSize=14
        self.payoffTable.updateTable()
        self.summaryTable = myGui.Table()
        self.summaryTable.columnHeaders=['Statistic','Value']
        self.summaryTable.data=[["Revealed Unsuccesfull","%s"%(self.revealed)],["Unrevealed Investments","%s"%(100-self.revealed)],["Time",'%.02f'%(self.elapsed*self.timeSlowDown)]]
        self.summaryTable.columnWidths=[200,100]
        self.summaryTable.rowHeight=50
        self.summaryTable.fontSize=14
        self.summaryTable.updateTable()
        # Right panel: summary table, payoff table, and cost label.
        self.rightPanel=QtGui.QWidget()
        self.rightPanel.grid = QtGui.QGridLayout()
        self.rightPanel.setLayout(self.rightPanel.grid)
        self.rightPanel.grid.addWidget(self.statisticsTableLabel,1,1,QtCore.Qt.AlignCenter)
        self.rightPanel.grid.addWidget(self.summaryTable,2,1,QtCore.Qt.AlignCenter)
        self.rightPanel.grid.addWidget(self.payoffTableLabel,4,1,QtCore.Qt.AlignCenter)
        self.rightPanel.grid.addWidget(self.payoffTable,5,1,QtCore.Qt.AlignCenter)
        self.rightPanel.grid.addWidget(self.costLabel,7,1,QtCore.Qt.AlignCenter)
        #self.rightPanel.grid.addWidget(self.statusLabel,2,1,1,2,QtCore.Qt.AlignCenter)
        self.rightPanel.grid.setRowMinimumHeight(2,self.summaryTable.height)
        self.rightPanel.grid.setRowMinimumHeight(5,self.payoffTable.height)
        self.rightPanel.grid.setColumnStretch(0,3)
        self.rightPanel.grid.setRowStretch(0,3)
        self.rightPanel.grid.setRowStretch(3,3)
        self.rightPanel.grid.setRowStretch(6,3)
        self.rightPanel.grid.setRowStretch(8,3)
        # Probability readout: label and number side by side.
        self.remainingSuccessPanel=QtGui.QWidget()
        self.remainingSuccessPanel.grid = QtGui.QGridLayout()
        self.remainingSuccessPanel.setLayout(self.remainingSuccessPanel.grid)
        self.remainingSuccessPanel.grid.addWidget(self.remainingSuccessLabel,0,1,QtCore.Qt.AlignRight)
        self.remainingSuccessPanel.grid.addWidget(self.remainingSuccessNumber,0,2,QtCore.Qt.AlignLeft)
        # Left panel: grid label, the squares, and the probability readout.
        self.leftPanel=QtGui.QWidget()
        self.leftPanel.grid = QtGui.QGridLayout()
        self.leftPanel.setLayout(self.leftPanel.grid)
        self.leftPanel.grid.addWidget(self.investmentGridLabel,0,1,1,1)
        self.leftPanel.grid.addWidget(self.squares,1,1,1,1,QtCore.Qt.AlignCenter)
        self.leftPanel.grid.addWidget(self.remainingSuccessPanel,2,1,1,1,QtCore.Qt.AlignCenter)
        self.leftPanel.grid.setRowMinimumHeight(1,self.squares.height+4)
        self.leftPanel.grid.setColumnMinimumWidth(1,self.squares.height+4)
        self.leftPanel.grid.setRowStretch(3,3)
        self.buttonPanel=QtGui.QWidget()
        self.buttonPanel.grid = QtGui.QGridLayout()
        self.buttonPanel.setLayout(self.buttonPanel.grid)
        self.buttonPanel.grid.addWidget(self.hoverButton,0,1,QtCore.Qt.AlignCenter)
        # Column 2 reserves space for the slider added in stage 1.
        self.buttonPanel.grid.setColumnMinimumWidth(1,200)
        self.buttonPanel.grid.setColumnMinimumWidth(2,800)
        self.makeTopPanel()
        self.makeBottomPanel()
        self.statusLabel.setText('Please move mouse over "INVEST!" button when you would like to invest (no click needed).')
        self.grid.addWidget(self.topPanel,0,1,1,4,QtCore.Qt.AlignTop)
        self.grid.addWidget(self.leftPanel,2,2,1,1)
        self.grid.addWidget(self.rightPanel,2,3,1,1)
        self.grid.addWidget(self.buttonPanel,3,1,1,4,QtCore.Qt.AlignCenter)
        self.grid.addWidget(self.bottomPanel,5,1,1,4,QtCore.Qt.AlignBottom)
        self.grid.setRowMinimumHeight(2,600)
        self.grid.setRowMinimumHeight(3,180)
        self.grid.setRowMinimumHeight(5,100)
        self.grid.setRowStretch(1,1)
        self.grid.setRowStretch(4,1)
        self.grid.setColumnStretch(1,4)
        self.grid.setColumnStretch(4,4)
        self.start=time.time()
        self.show()
        self.checkStatus()
    def updateGamePage(self):
        """Advance the per-period stage machine; called every ~10 ms by
        checkStatus() while the period page is shown.

        Stages: 0 reveal failures over time until the player invests;
        1 build the percentage slider; 2 wait for the slider to be
        touched; 3 re-arm the invest button; 4 preview the selection and
        wait for confirmation; 5 locked, waiting for other players.
        """
        # Game time runs slower than wall time by factor timeSlowDown.
        self.elapsed = (time.time()-self.localStartTime)/self.timeSlowDown
        if self.stageStatus==0:
            # Update clock row and the remaining-success probability.
            self.summaryTable.data[2][1]='%.02f'%(self.elapsed*self.timeSlowDown)
            remainingSuccessNumber=100*functions.getProbRemaining(params['p0'],params['mu'],self.elapsed*self.timeSlowDown)
            if remainingSuccessNumber>99.99:
                remainingSuccessNumber=99.99
            self.remainingSuccessNumber.setText("%.02f%%"%(remainingSuccessNumber))
            self.summaryTable.updateTable()
            # Paint every project whose reveal time has passed black.
            # NOTE(review): no bounds check on timeVectorIndex -- if
            # elapsed exceeds the last entry of timeVectorTimes this
            # raises IndexError; confirm the schedule always outlasts
            # the period.
            while self.elapsed>self.timeVectorTimes[self.timeVectorIndex]:
                # rects is 1-based, the schedule is 0-based; hence +1.
                self.squares.rects[self.timeVectorNumbers[self.timeVectorIndex]+1]['color']=QtGui.QColor("black")
                self.timeVectorIndex=self.timeVectorIndex+1
                self.squares.update()
                self.revealed=self.revealed+1
                self.summaryTable.data[0][1]="%s"%(self.revealed)
                self.summaryTable.data[1][1]="%s"%(100-self.revealed)
                self.summaryTable.updateTable()
            if self.hoverButton.buttonStatus=="clicked":
                self.stageStatus=1
        elif self.stageStatus==1:
            # Player invested: freeze the clock and show the slider.
            self.slider = myGui.Slider()
            self.roundFinishedAt=self.elapsed*self.timeSlowDown
            # Clear any green (hover-preview) squares back to white.
            for k in range(1,101):
                if self.squares.rects[k]['color']==QtGui.QColor("green"):
                    self.squares.rects[k]['color']=QtGui.QColor("white")
            self.squares.update()
            self.statusLabel.setText('Please use slider to select percentage of projects to invest in (no click needed).')
            self.buttonPanel.grid.addWidget(self.slider,0,2,1,1,QtCore.Qt.AlignCenter)
            self.stageStatus=2
        elif self.stageStatus==2:
            # Wait until the mouse has entered the slider at least once.
            self.slider.update()
            if self.slider.beenInside==1:
                self.stageStatus=3
        elif self.stageStatus==3:
            # Re-arm the invest button so it can confirm the selection.
            self.slider.update()
            self.statusLabel.setText('Please move mouse over invest button when you have selected your percentage (no click needed).')
            self.hoverButton.buttonStatus="unclicked"
            self.hoverButton.update()
            self.hoverButton.hovering()
            self.stageStatus=4
        elif self.stageStatus==4:
            # Live-preview: paint the first slider-chosen fraction of the
            # unrevealed squares red.
            self.hoverButton.titleUnclicked="Invest in %s"%(self.slider.percentage)
            self.hoverButton.titleClicked="Invested in %s"%(self.slider.percentage)
            self.hoverButton.update()
            thisNumber=int(round((self.slider.value*(100-self.revealed))/100))
            j=0
            for k in range(1,len(self.squares.rects)+1):
                if j<thisNumber:
                    # Only white/red squares are selectable (black = revealed).
                    if self.squares.rects[k]['color']==QtGui.QColor("white") or self.squares.rects[k]['color']==QtGui.QColor("red"):
                        self.squares.rects[k]['color']=QtGui.QColor("red")
                        j=j+1
                else:
                    if self.squares.rects[k]['color']==QtGui.QColor("white") or self.squares.rects[k]['color']==QtGui.QColor("red"):
                        self.squares.rects[k]['color']=QtGui.QColor("white")
            self.squares.update()
            if self.hoverButton.buttonStatus=="clicked":
                # Confirmed: lock the slider and report the period summary
                # to the server.
                self.stageStatus=5
                self.statusLabel.setText('Please wait for others to finish investing.')
                self.slider.lock=1
                print self.revealed,self.roundFinishedAt,int(round(self.slider.value))
                self.periodSummary={}
                self.periodSummary['revealed']=self.revealed
                self.periodSummary['unrevealed']=100-self.revealed
                self.periodSummary['finishTime']=self.roundFinishedAt
                self.periodSummary['percentage']=int(round(self.slider.value))
                m=[SERVERIP,MAINPORT,"periodSummary",self.periodSummary]
                this=myGui.sendMessage(m[0],m[1],m[2],m[3])
                #self.currentPage="prePeriod"
                #self.pageNavigator()
        elif self.stageStatus==5:
            # Waiting for the server's periodStarted message; keep the
            # slider locked.
            self.slider.lock=1
def getPlayerNumber(self):
m=[SERVERIP,MAINPORT,"getPlayerNumber",100]
this=myGui.sendMessage(m[0],m[1],m[2],m[3])
    def queueManager(self):
        """Drain the server's raw message queue into local priority queues, then dispatch.

        Messages whose type is "getPlayerNumber" go to the 'important' queue;
        everything else goes to 'unimportant'.  Dispatch then walks the queues
        in the order given by self.queues.
        """
        # NOTE(review): pop() removes from the RIGHT end, so messages are moved
        # out of self.server.queue in LIFO order; the per-queue popleft() below
        # then dispatches each local queue in the order it was filled.
        while len(self.server.queue)>0:
            k=self.server.queue.pop()
            if k[1]=="getPlayerNumber":
                self.queue['important'].append(k)
            else:
                self.queue['unimportant'].append(k)
        # self.queues presumably lists the queue names in priority order
        # (e.g. ['important', 'unimportant']) — defined elsewhere, TODO confirm.
        for Q in self.queues:
            while len(self.queue[Q])>0:
                thisMessage=self.queue[Q].popleft()
                # Message layout: (ip, type, value).
                messageIp=thisMessage[0]
                messageType=thisMessage[1]
                messageValue=thisMessage[2]
                if messageType=="assignPlayerNumber":
                    self.assignPlayerNumber(messageIp,messageValue)
                elif messageType=="assignParameters":
                    self.assignParameters(messageIp,messageValue)
                elif messageType=="periodStarted":
                    self.startPeriod(messageIp,messageValue)
    def startPeriod(self,messageIp,messageValue):
        """Handle a 'periodStarted' message: record period data, show the pre-period page.

        messageValue is (period, timeVectorTimes-literal, timeVectorNumbers-literal).
        """
        self.period=messageValue[0]
        # SECURITY NOTE(review): eval() on data received over the network will
        # execute arbitrary code.  These look like Python list literals sent by
        # the trusted experiment server — ast.literal_eval would be safer.
        self.timeVectorTimes=eval(messageValue[1])
        self.timeVectorNumbers=eval(messageValue[2])
        self.currentPage="prePeriod"
        self.pageNavigator()
    def assignPlayerNumber(self,messageIp,messageValue):
        """Store the player number assigned by the server and refresh the page."""
        self.computerNumber=int(messageValue)
        print "player number assigned"
        self.pageNavigator()
def assignParameters(self,messageIp,messageValue):
params=messageValue
self.delta=params['delta']
self.exchangeRate=params['exchangeRate']
self.showUpFee=params['showUpFee']
self.wParam=params['w']
self.startDelay=float(params['startDelay'])
self.costFunctionType=params['costFunctionType']
    def checkStatus(self):
        """Main polling loop: process pending messages, advance page state, reschedule.

        Runs every ~10 ms via a Qt single-shot timer that re-arms itself at the end.
        """
        self.queueManager()
        if self.currentPage=="waitingForGame":
            # Start automatically once the scheduled local start time passes.
            if time.time()>=self.localStartTime:
                self.currentPage="prePeriod"
                self.pageNavigator()
        elif self.currentPage=="prePeriod":
            # Player clicked "ready": begin the next period.
            if self.prePeriodButton.buttonStatus=="clicked":
                self.localStartTime=time.time()
                self.currentPage="period"
                self.period=self.period+1
                self.pageNavigator()
        elif self.currentPage=="period":
            self.updateGamePage()
        # Re-arm: poll again in 10 ms.
        QtCore.QTimer.singleShot(10,self.checkStatus)
def main():
    """Create the TCP message server and the Qt client, then run the Qt event loop."""
    HOST, PORT = "", 9989
    # allow_reuse_address must be in effect *before* the listening socket is
    # bound.  The old code assigned it to the server instance (and, pointlessly,
    # to the Qt client) only after bind_and_activate=True had already bound the
    # port, so it never took effect; setting it on the class fixes restarts
    # that previously failed with "Address already in use".
    SocketServer.TCPServer.allow_reuse_address = True
    # Create the server, binding to all interfaces on port 9989.
    server = SocketServer.TCPServer((HOST, PORT), myGui.MyTCPHandler,
                                    bind_and_activate=True)
    app = QtGui.QApplication(sys.argv)
    client = Client(server)
    client.show()
    sys.exit(app.exec_())
if __name__ == '__main__':
    main()
|
|
# -*- coding: utf-8 -*-
"""
Tests parsers ability to read and parse non-local files
and hence require a network connection to be read.
"""
import logging
import numpy as np
import pytest
from pandas.compat import BytesIO, StringIO
import pandas.util._test_decorators as td
from pandas import DataFrame
import pandas.util.testing as tm
from pandas.io.parsers import read_csv
@pytest.mark.network
@pytest.mark.parametrize(
    "compress_type, extension", [
        ('gzip', '.gz'), ('bz2', '.bz2'), ('zip', '.zip'),
        pytest.param('xz', '.xz', marks=td.skip_if_no_lzma)
    ]
)
@pytest.mark.parametrize('mode', ['explicit', 'infer'])
@pytest.mark.parametrize('engine', ['python', 'c'])
def test_compressed_urls(salaries_table, compress_type, extension, mode,
                         engine):
    # Thin parametrized entry point: the actual work happens in the
    # @tm.network-wrapped helper so connection errors are handled there.
    check_compressed_urls(salaries_table, compress_type, extension, mode,
                          engine)
@tm.network
def check_compressed_urls(salaries_table, compression, extension, mode,
                          engine):
    # test reading compressed urls with various engines and
    # extension inference
    base_url = ('https://github.com/pandas-dev/pandas/raw/master/'
                'pandas/tests/io/parser/data/salaries.csv')
    url = base_url + extension
    if mode != 'explicit':
        # mode is 'infer': pass it through so read_csv deduces the
        # compression from the URL's extension instead of being told.
        compression = mode
    url_table = read_csv(url, sep='\t', compression=compression, engine=engine)
    tm.assert_frame_equal(url_table, salaries_table)
@pytest.fixture
def tips_df(datapath):
    """DataFrame with the tips dataset, read from the local test data directory."""
    return read_csv(datapath('io', 'parser', 'data', 'tips.csv'))
@pytest.mark.usefixtures("s3_resource")
@td.skip_if_not_us_locale()
class TestS3(object):
    """Integration tests for reading CSVs from S3 (s3://, s3n://, s3a:// URLs).

    Requires the s3fs package; the mocked buckets are provided by the
    s3_resource fixture.
    """
    def test_parse_public_s3_bucket(self, tips_df):
        pytest.importorskip('s3fs')
        # more of an integration test due to the not-public contents portion
        # can probably mock this though.
        for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
            df = read_csv('s3://pandas-test/tips.csv' +
                          ext, compression=comp)
            assert isinstance(df, DataFrame)
            assert not df.empty
            tm.assert_frame_equal(df, tips_df)
        # Read public file from bucket with not-public contents
        df = read_csv('s3://cant_get_it/tips.csv')
        assert isinstance(df, DataFrame)
        assert not df.empty
        tm.assert_frame_equal(df, tips_df)
    def test_parse_public_s3n_bucket(self, tips_df):
        # Read from AWS s3 as "s3n" URL
        df = read_csv('s3n://pandas-test/tips.csv', nrows=10)
        assert isinstance(df, DataFrame)
        assert not df.empty
        tm.assert_frame_equal(tips_df.iloc[:10], df)
    def test_parse_public_s3a_bucket(self, tips_df):
        # Read from AWS s3 as "s3a" URL
        df = read_csv('s3a://pandas-test/tips.csv', nrows=10)
        assert isinstance(df, DataFrame)
        assert not df.empty
        tm.assert_frame_equal(tips_df.iloc[:10], df)
    def test_parse_public_s3_bucket_nrows(self, tips_df):
        # nrows should limit the result to the first 10 rows for every codec.
        for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
            df = read_csv('s3://pandas-test/tips.csv' +
                          ext, nrows=10, compression=comp)
            assert isinstance(df, DataFrame)
            assert not df.empty
            tm.assert_frame_equal(tips_df.iloc[:10], df)
    def test_parse_public_s3_bucket_chunked(self, tips_df):
        # Read with a chunksize
        chunksize = 5
        for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
            df_reader = read_csv('s3://pandas-test/tips.csv' + ext,
                                 chunksize=chunksize, compression=comp)
            assert df_reader.chunksize == chunksize
            for i_chunk in [0, 1, 2]:
                # Read a couple of chunks and make sure we see them
                # properly.
                df = df_reader.get_chunk()
                assert isinstance(df, DataFrame)
                assert not df.empty
                true_df = tips_df.iloc[
                    chunksize * i_chunk: chunksize * (i_chunk + 1)]
                tm.assert_frame_equal(true_df, df)
    def test_parse_public_s3_bucket_chunked_python(self, tips_df):
        # Read with a chunksize using the Python parser
        chunksize = 5
        for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
            df_reader = read_csv('s3://pandas-test/tips.csv' + ext,
                                 chunksize=chunksize, compression=comp,
                                 engine='python')
            assert df_reader.chunksize == chunksize
            for i_chunk in [0, 1, 2]:
                # Read a couple of chunks and make sure we see them properly.
                df = df_reader.get_chunk()
                assert isinstance(df, DataFrame)
                assert not df.empty
                true_df = tips_df.iloc[
                    chunksize * i_chunk: chunksize * (i_chunk + 1)]
                tm.assert_frame_equal(true_df, df)
    def test_parse_public_s3_bucket_python(self, tips_df):
        # Same as the default-engine test, but forcing the Python parser.
        for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
            df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python',
                          compression=comp)
            assert isinstance(df, DataFrame)
            assert not df.empty
            tm.assert_frame_equal(df, tips_df)
    def test_infer_s3_compression(self, tips_df):
        # compression='infer' must deduce the codec from the URL extension.
        for ext in ['', '.gz', '.bz2']:
            df = read_csv('s3://pandas-test/tips.csv' + ext,
                          engine='python', compression='infer')
            assert isinstance(df, DataFrame)
            assert not df.empty
            tm.assert_frame_equal(df, tips_df)
    def test_parse_public_s3_bucket_nrows_python(self, tips_df):
        for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
            df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python',
                          nrows=10, compression=comp)
            assert isinstance(df, DataFrame)
            assert not df.empty
            tm.assert_frame_equal(tips_df.iloc[:10], df)
    def test_s3_fails(self):
        with pytest.raises(IOError):
            read_csv('s3://nyqpug/asdf.csv')
        # Receive a permission error when trying to read a private bucket.
        # It's irrelevant here that this isn't actually a table.
        with pytest.raises(IOError):
            read_csv('s3://cant_get_it/')
    def test_read_csv_handles_boto_s3_object(self,
                                             s3_resource,
                                             tips_file):
        # see gh-16135
        s3_object = s3_resource.meta.client.get_object(
            Bucket='pandas-test',
            Key='tips.csv')
        result = read_csv(BytesIO(s3_object["Body"].read()), encoding='utf8')
        assert isinstance(result, DataFrame)
        assert not result.empty
        expected = read_csv(tips_file)
        tm.assert_frame_equal(result, expected)
    def test_read_csv_chunked_download(self, s3_resource, caplog):
        # 8 MB, S3FS usees 5MB chunks
        df = DataFrame(np.random.randn(100000, 4), columns=list('abcd'))
        buf = BytesIO()
        str_buf = StringIO()
        df.to_csv(str_buf)
        buf = BytesIO(str_buf.getvalue().encode('utf-8'))
        s3_resource.Bucket("pandas-test").put_object(
            Key="large-file.csv",
            Body=buf)
        with caplog.at_level(logging.DEBUG, logger='s3fs.core'):
            read_csv("s3://pandas-test/large-file.csv", nrows=5)
            # log of fetch_range (start, stop): with nrows=5 only the first
            # ~5 MB chunk should have been fetched, not the whole 8 MB file.
            assert ((0, 5505024) in {x.args[-2:] for x in caplog.records})
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class KeystoneRoleAssignmentMixin(object):
    """Implements role assignments between user/groups and project/domain.
    heat_template_version: 2013-05-23
    parameters:
      ... Group or User parameters
      group_role:
        type: string
        description: role
      group_role_domain:
        type: string
        description: group role domain
      group_role_project:
        type: string
        description: group role project
    resources:
      admin_group:
        type: OS::Keystone::Group OR OS::Keystone::User
        properties:
          ... Group or User properties
          roles:
            - role: {get_param: group_role}
              domain: {get_param: group_role_domain}
            - role: {get_param: group_role}
              project: {get_param: group_role_project}
    """
    # NOTE(review): no trailing commas, so PROPERTIES and ROLES are both the
    # plain string 'roles' rather than one-element tuples as the layout
    # suggests.  Harmless for the schema lookup below, but confirm nothing
    # iterates PROPERTIES expecting a tuple.
    PROPERTIES = (
        ROLES
    ) = (
        'roles'
    )
    # Keys used inside each role-assignment map.
    _ROLES_MAPPING_PROPERTIES = (
        ROLE, DOMAIN, PROJECT
    ) = (
        'role', 'domain', 'project'
    )
    mixin_properties_schema = {
        ROLES: properties.Schema(
            properties.Schema.LIST,
            _('List of role assignments.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                _('Map between role with either project or domain.'),
                schema={
                    ROLE: properties.Schema(
                        properties.Schema.STRING,
                        _('Keystone role.'),
                        required=True,
                        constraints=([constraints.
                                     CustomConstraint('keystone.role')])
                    ),
                    PROJECT: properties.Schema(
                        properties.Schema.STRING,
                        _('Keystone project.'),
                        constraints=([constraints.
                                     CustomConstraint('keystone.project')])
                    ),
                    DOMAIN: properties.Schema(
                        properties.Schema.STRING,
                        _('Keystone domain.'),
                        constraints=([constraints.
                                     CustomConstraint('keystone.domain')])
                    ),
                }
            ),
            update_allowed=True
        )
    }
    def _add_role_assignments_to_group(self, group_id, role_assignments):
        """Grant each role (scoped to its project or domain) to the group."""
        for role_assignment in self._normalize_to_id(role_assignments):
            if role_assignment.get(self.PROJECT) is not None:
                self.client().roles.grant(
                    role=role_assignment.get(self.ROLE),
                    project=role_assignment.get(self.PROJECT),
                    group=group_id
                )
            elif role_assignment.get(self.DOMAIN) is not None:
                self.client().roles.grant(
                    role=role_assignment.get(self.ROLE),
                    domain=role_assignment.get(self.DOMAIN),
                    group=group_id
                )
    def _add_role_assignments_to_user(self, user_id, role_assignments):
        """Grant each role (scoped to its project or domain) to the user."""
        for role_assignment in self._normalize_to_id(role_assignments):
            if role_assignment.get(self.PROJECT) is not None:
                self.client().roles.grant(
                    role=role_assignment.get(self.ROLE),
                    project=role_assignment.get(self.PROJECT),
                    user=user_id
                )
            elif role_assignment.get(self.DOMAIN) is not None:
                self.client().roles.grant(
                    role=role_assignment.get(self.ROLE),
                    domain=role_assignment.get(self.DOMAIN),
                    user=user_id
                )
    def _remove_role_assignments_from_group(self, group_id, role_assignments):
        """Revoke each role (scoped to its project or domain) from the group."""
        for role_assignment in self._normalize_to_id(role_assignments):
            if role_assignment.get(self.PROJECT) is not None:
                self.client().roles.revoke(
                    role=role_assignment.get(self.ROLE),
                    project=role_assignment.get(self.PROJECT),
                    group=group_id
                )
            elif role_assignment.get(self.DOMAIN) is not None:
                self.client().roles.revoke(
                    role=role_assignment.get(self.ROLE),
                    domain=role_assignment.get(self.DOMAIN),
                    group=group_id
                )
    def _remove_role_assignments_from_user(self, user_id, role_assignments):
        """Revoke each role (scoped to its project or domain) from the user."""
        for role_assignment in self._normalize_to_id(role_assignments):
            if role_assignment.get(self.PROJECT) is not None:
                self.client().roles.revoke(
                    role=role_assignment.get(self.ROLE),
                    project=role_assignment.get(self.PROJECT),
                    user=user_id
                )
            elif role_assignment.get(self.DOMAIN) is not None:
                self.client().roles.revoke(
                    role=role_assignment.get(self.ROLE),
                    domain=role_assignment.get(self.DOMAIN),
                    user=user_id
                )
    def _normalize_to_id(self, role_assignment_prps):
        """Resolve role/project/domain names in each assignment to keystone ids.

        Returns a list of dicts with the same keys; project/domain are None
        when absent from the input assignment.
        """
        role_assignments = []
        if role_assignment_prps is None:
            return role_assignments
        for role_assignment in role_assignment_prps:
            role = role_assignment.get(self.ROLE)
            project = role_assignment.get(self.PROJECT)
            domain = role_assignment.get(self.DOMAIN)
            role_assignments.append({
                self.ROLE: self.client_plugin().get_role_id(role),
                self.PROJECT: (self.client_plugin().
                               get_project_id(project)) if project else None,
                self.DOMAIN: (self.client_plugin().
                              get_domain_id(domain)) if domain else None
            })
        return role_assignments
    def _find_differences(self, updated_prps, stored_prps):
        """Diff updated vs stored assignments.

        Returns (new_role_assignments, removed_role_assignments), each a list
        of {role, project-or-domain} dicts.  Assignments are encoded as
        'role:target' strings so plain set arithmetic computes the diff.
        NOTE(review): the encoding splits on the FIRST ':' when decoding, so a
        role name containing ':' would be mis-split — confirm role ids/names
        cannot contain ':'.
        """
        updated_role_project_assignments = []
        updated_role_domain_assignments = []
        # Split the properties into two set of role assignments
        # (project, domain) from updated properties
        for role_assignment in updated_prps or []:
            if role_assignment.get(self.PROJECT) is not None:
                updated_role_project_assignments.append(
                    '%s:%s' % (
                        role_assignment[self.ROLE],
                        role_assignment[self.PROJECT]))
            elif (role_assignment.get(self.DOMAIN)
                  is not None):
                updated_role_domain_assignments.append(
                    '%s:%s' % (role_assignment[self.ROLE],
                               role_assignment[self.DOMAIN]))
        stored_role_project_assignments = []
        stored_role_domain_assignments = []
        # Split the properties into two set of role assignments
        # (project, domain) from updated properties
        for role_assignment in (stored_prps or []):
            if role_assignment.get(self.PROJECT) is not None:
                stored_role_project_assignments.append(
                    '%s:%s' % (
                        role_assignment[self.ROLE],
                        role_assignment[self.PROJECT]))
            elif (role_assignment.get(self.DOMAIN)
                  is not None):
                stored_role_domain_assignments.append(
                    '%s:%s' % (role_assignment[self.ROLE],
                               role_assignment[self.DOMAIN]))
        new_role_assignments = []
        removed_role_assignments = []
        # NOTE: finding the diff of list of strings is easier by using 'set'
        # so properties are converted to string in above sections
        # New items
        for item in (set(updated_role_project_assignments) -
                     set(stored_role_project_assignments)):
            new_role_assignments.append(
                {self.ROLE: item[:item.find(':')],
                 self.PROJECT: item[item.find(':') + 1:]}
            )
        for item in (set(updated_role_domain_assignments) -
                     set(stored_role_domain_assignments)):
            new_role_assignments.append(
                {self.ROLE: item[:item.find(':')],
                 self.DOMAIN: item[item.find(':') + 1:]}
            )
        # Old items
        for item in (set(stored_role_project_assignments) -
                     set(updated_role_project_assignments)):
            removed_role_assignments.append(
                {self.ROLE: item[:item.find(':')],
                 self.PROJECT: item[item.find(':') + 1:]}
            )
        for item in (set(stored_role_domain_assignments) -
                     set(updated_role_domain_assignments)):
            removed_role_assignments.append(
                {self.ROLE: item[:item.find(':')],
                 self.DOMAIN: item[item.find(':') + 1:]}
            )
        return new_role_assignments, removed_role_assignments
    def create_assignment(self, user_id=None, group_id=None):
        """Grant all configured roles to the given user OR group."""
        if self.properties.get(self.ROLES) is not None:
            if user_id is not None:
                self._add_role_assignments_to_user(
                    user_id,
                    self.properties.get(self.ROLES))
            elif group_id is not None:
                self._add_role_assignments_to_group(
                    group_id,
                    self.properties.get(self.ROLES))
    def update_assignment(self, prop_diff, user_id=None, group_id=None):
        """Grant newly added assignments and revoke removed ones."""
        # if there is no change do not update
        if self.ROLES in prop_diff:
            (new_role_assignments,
             removed_role_assignments) = self._find_differences(
                prop_diff.get(self.ROLES),
                self._stored_properties_data.get(self.ROLES))
            if len(new_role_assignments) > 0:
                if user_id is not None:
                    self._add_role_assignments_to_user(
                        user_id,
                        new_role_assignments)
                elif group_id is not None:
                    self._add_role_assignments_to_group(
                        group_id,
                        new_role_assignments)
            if len(removed_role_assignments) > 0:
                if user_id is not None:
                    self._remove_role_assignments_from_user(
                        user_id,
                        removed_role_assignments)
                elif group_id is not None:
                    self._remove_role_assignments_from_group(
                        group_id,
                        removed_role_assignments)
    def delete_assignment(self, user_id=None, group_id=None):
        """Revoke all stored role assignments from the given user OR group."""
        if self._stored_properties_data.get(self.ROLES) is not None:
            if user_id is not None:
                self._remove_role_assignments_from_user(
                    user_id,
                    (self._stored_properties_data.
                     get(self.ROLES)))
            elif group_id is not None:
                self._remove_role_assignments_from_group(
                    group_id,
                    (self._stored_properties_data.
                     get(self.ROLES)))
    def validate_assignment_properties(self):
        """Each assignment must name exactly one of project or domain."""
        if self.properties.get(self.ROLES) is not None:
            for role_assignment in self.properties.get(self.ROLES):
                project = role_assignment.get(self.PROJECT)
                domain = role_assignment.get(self.DOMAIN)
                if project is not None and domain is not None:
                    raise exception.ResourcePropertyConflict(self.PROJECT,
                                                             self.DOMAIN)
                if project is None and domain is None:
                    msg = _('Either project or domain must be specified for'
                            ' role %s') % role_assignment.get(self.ROLE)
                    raise exception.StackValidationFailed(message=msg)
class KeystoneUserRoleAssignment(resource.Resource,
                                 KeystoneRoleAssignmentMixin):
    """Resource for granting roles to a user.
    Resource for specifying users and their's roles.
    """
    support_status = support.SupportStatus(
        version='5.0.0',
        message=_('Supported versions: keystone v3'))
    default_client_name = 'keystone'
    PROPERTIES = (
        USER,
    ) = (
        'user',
    )
    properties_schema = {
        USER: properties.Schema(
            properties.Schema.STRING,
            _('Name or id of keystone user.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.CustomConstraint('keystone.user')]
        )
    }
    # Merge in the shared 'roles' schema from the mixin.
    properties_schema.update(
        KeystoneRoleAssignmentMixin.mixin_properties_schema)
    @property
    def user_id(self):
        # Resolve the user property (name or id) to a keystone user id.
        return self.client_plugin().get_user_id(
            self.properties.get(self.USER))
    def handle_create(self):
        self.create_assignment(user_id=self.user_id)
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        self.update_assignment(prop_diff=prop_diff, user_id=self.user_id)
    def handle_delete(self):
        self.delete_assignment(user_id=self.user_id)
    def validate(self):
        super(KeystoneUserRoleAssignment, self).validate()
        self.validate_assignment_properties()
class KeystoneGroupRoleAssignment(resource.Resource,
                                  KeystoneRoleAssignmentMixin):
    """Resource for granting roles to a group.
    Resource for specifying groups and their's roles.
    """
    support_status = support.SupportStatus(
        version='5.0.0',
        message=_('Supported versions: keystone v3'))
    default_client_name = 'keystone'
    PROPERTIES = (
        GROUP,
    ) = (
        'group',
    )
    properties_schema = {
        GROUP: properties.Schema(
            properties.Schema.STRING,
            _('Name or id of keystone group.'),
            required=True,
            update_allowed=True,
            constraints=[constraints.CustomConstraint('keystone.group')]
        )
    }
    # Merge in the shared 'roles' schema from the mixin.
    properties_schema.update(
        KeystoneRoleAssignmentMixin.mixin_properties_schema)
    @property
    def group_id(self):
        # Resolve the group property (name or id) to a keystone group id.
        return self.client_plugin().get_group_id(
            self.properties.get(self.GROUP))
    def handle_create(self):
        self.create_assignment(group_id=self.group_id)
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        self.update_assignment(prop_diff=prop_diff, group_id=self.group_id)
    def handle_delete(self):
        self.delete_assignment(group_id=self.group_id)
    def validate(self):
        super(KeystoneGroupRoleAssignment, self).validate()
        self.validate_assignment_properties()
def resource_mapping():
    """Map Heat resource type names to their implementing classes."""
    return {
        'OS::Keystone::UserRoleAssignment': KeystoneUserRoleAssignment,
        'OS::Keystone::GroupRoleAssignment': KeystoneGroupRoleAssignment,
    }
|
|
# -*- coding: utf-8 -*-
"""
line.client
~~~~~~~~~~~
LineClient for sending and receiving message from LINE server.
:copyright: (c) 2014 by Taehoon Kim.
:license: BSD, see LICENSE for more details.
"""
import rsa
import requests
try:
import simplejson as json
except ImportError:
import json
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.transport import THttpClient
from thrift.protocol import TCompactProtocol
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
#from curve import CurveThrift
from curve import CurveThrift
from curve.ttypes import TalkException
from curve.ttypes import ToType, ContentType
class LineAPI(object):
    """This class is a wrapper of LINE API

    Python 2 only (py2 print statements; str/unicode handling).  Thin wrapper
    methods prefixed with '_' delegate directly to the Thrift-generated client.
    """
    LINE_DOMAIN = "http://gd2.line.naver.jp"
    LINE_HTTP_URL = LINE_DOMAIN + "/api/v4/TalkService.do"
    LINE_HTTP_IN_URL = LINE_DOMAIN + "/P4"
    LINE_CERTIFICATE_URL = LINE_DOMAIN + "/Q"
    LINE_SESSION_LINE_URL = LINE_DOMAIN + "/authct/v1/keys/line"
    LINE_SESSION_NAVER_URL = LINE_DOMAIN + "/authct/v1/keys/naver"
    ip = "127.0.0.1"
    version = "5.1.2"
    com_name = ""
    revision = 0
    certificate = ""
    # NOTE(review): class attributes — the requests session and header dict
    # are shared by all LineAPI instances.
    _session = requests.session()
    _headers = {}
    def ready(self):
        """Build the Thrift HTTP transports/clients used after login."""
        self.transport = THttpClient.THttpClient(self.LINE_HTTP_URL)
        self.transport_in = THttpClient.THttpClient(self.LINE_HTTP_IN_URL)
        self.transport.setCustomHeaders(self._headers)
        self.transport_in.setCustomHeaders(self._headers)
        self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
        self.protocol_in = TCompactProtocol.TCompactProtocol(self.transport_in)
        self._client = CurveThrift.Client(self.protocol)
        self._client_in = CurveThrift.Client(self.protocol_in)
        self.transport.open()
        self.transport_in.open()
        """
        After login, make `client` and `client_in` instance
        to communicate with LINE server
        """
        #raise Exception("Code is removed because of the request of LINE corporation")
    def updateAuthToken(self):
        """
        After login, update authToken to avoid expiration of
        authToken. This method skip the PinCode validation step.
        """
        if self.certificate:
            self.login()
            self.tokenLogin()
            return True
        else:
            self.raise_error("You need to login first. There is no valid certificate")
    def tokenLogin(self):
        """Rebuild the main transport/client after a token refresh."""
        self.transport = THttpClient.THttpClient(self.LINE_HTTP_URL)
        self.transport.setCustomHeaders(self._headers)
        self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
        self._client = CurveThrift.Client(self.protocol)
    def login(self):
        """Login to LINE server."""
        if self.provider == CurveThrift.Provider.LINE: # LINE
            j = self._get_json(self.LINE_SESSION_LINE_URL)
        else: # NAVER
            j = self._get_json(self.LINE_SESSION_NAVER_URL)
        session_key = j['session_key']
        # Credentials are packed as length-prefixed fields and RSA-encrypted
        # with the public key the server just handed us.
        message = (chr(len(session_key)) + session_key +
                   chr(len(self.id)) + self.id +
                   chr(len(self.password)) + self.password).encode('utf-8')
        keyname, n, e = j['rsa_key'].split(",")
        pub_key = rsa.PublicKey(int(n,16), int(e,16))
        crypto = rsa.encrypt(message, pub_key).encode('hex')
        self.transport = THttpClient.THttpClient(self.LINE_HTTP_URL)
        self.transport.setCustomHeaders(self._headers)
        self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
        self._client = CurveThrift.Client(self.protocol)
        msg = self._client.loginWithIdentityCredentialForCertificate(
                self.id, self.password, keyname, crypto, True, self.ip,
                self.com_name, self.provider, self.certificate)
        self._headers['X-Line-Access'] = msg.verifier
        self._pinCode = msg.pinCode
        # msg.type == 3 appears to mean device confirmation via PinCode is
        # required — TODO confirm against the Thrift definitions.
        if msg.type == 3:
            print "Enter PinCode '%s' to your mobile phone in 2 minutes"\
                % self._pinCode
            return True
            #raise Exception("Code is removed because of the request of LINE corporation")
        else:
            self.authToken =self._headers['X-Line-Access'] = msg.authToken
            return True
    def get_json(self, url):
        """Get json from given url with saved session and headers

        NOTE(review): duplicate of _get_json below.
        """
        return json.loads(self._session.get(url, headers=self._headers).text)
    def _getProfile(self):
        """Get profile information
        :returns: Profile object
                    - picturePath
                    - displayName
                    - phone (base64 encoded?)
                    - allowSearchByUserid
                    - pictureStatus
                    - userid
                    - mid   # used for unique id for account
                    - phoneticName
                    - regionCode
                    - allowSearchByEmail
                    - email
                    - statusMessage
        """
        return self._client.getProfile()
    def _getAllContactIds(self):
        """Get all contacts of your LINE account"""
        return self._client.getAllContactIds()
    def _getBlockedContactIds(self):
        """Get all blocked contacts of your LINE account"""
        return self._client.getBlockedContactIds()
    def _getContacts(self, ids):
        """Get contact information list from ids
        :returns: List of Contact list
                    - status
                    - capableVideoCall
                    - dispalyName
                    - settings
                    - pictureStatus
                    - capableVoiceCall
                    - capableBuddy
                    - mid
                    - displayNameOverridden
                    - relation
                    - thumbnailUrl
                    - createdTime
                    - facoriteTime
                    - capableMyhome
                    - attributes
                    - type
                    - phoneticName
                    - statusMessage
        """
        if type(ids) != list:
            msg = "argument should be list of contact ids"
            self.raise_error(msg)
        return self._client.getContacts(ids)
    def _findAndAddContactsByMid(self, mid, seq=0):
        """Find and add contacts by Mid"""
        return self._client.findAndAddContactsByMid(seq, mid)
    def _findContactByUserid(self, userid):
        """Find contacts by Userid"""
        return self._client.findContactByUserid(userid)
    def _findAndAddContactsByUserid(self, userid, seq=0):
        """Find and add contacts by Userid"""
        return self._client.findAndAddContactsByUserid(seq, userid)
    def _findContactsByPhone(self, phones):
        """Find contacts by phone"""
        return self._client.findContactsByPhone(phones)
    def _findAndAddContactsByPhone(self, phones, seq=0):
        """Find and add contacts by phone"""
        return self._client.findAndAddContactsByPhone(seq, phones)
    def _findContactsByEmail(self, emails):
        """Find contacts by email"""
        return self._client.findContactsByEmail(emails)
    def _findAndAddContactsByEmail(self, emails, seq=0):
        """Find and add contacts by email"""
        return self._client.findAndAddContactsByEmail(seq, emails)
    def _createRoom(self, ids, seq=0):
        """Create a chat room"""
        return self._client.createRoom(seq, ids)
    def _getRoom(self, id):
        """Get a chat room"""
        return self._client.getRoom(id)
    def _inviteIntoRoom(self, roomId, contactIds=[]):
        """Invite contacts into room"""
        return self._client.inviteIntoRoom(0, roomId, contactIds)
    def _leaveRoom(self, id):
        """Leave a chat room"""
        return self._client.leaveRoom(0, id)
    def _createGroup(self, name, ids, seq=0):
        """Create a group"""
        return self._client.createGroup(seq, name, ids)
    def _getGroups(self, ids):
        """Get a list of group with ids"""
        if type(ids) != list:
            msg = "argument should be list of group ids"
            self.raise_error(msg)
        return self._client.getGroups(ids)
    def _getGroupIdsJoined(self):
        """Get group id that you joined"""
        return self._client.getGroupIdsJoined()
    def _getGroupIdsInvited(self):
        """Get group id that you invited"""
        return self._client.getGroupIdsInvited()
    def _acceptGroupInvitation(self, groupId, seq=0):
        """Accept a group invitation"""
        return self._client.acceptGroupInvitation(seq, groupId)
    def _cancelGroupInvitation(self, groupId, contactIds=[], seq=0):
        """Cancel a group invitation"""
        return self._client.cancelGroupInvitation(seq, groupId, contactIds)
    def _inviteIntoGroup(self, groupId, contactIds=[], seq=0):
        """Invite contacts into group"""
        return self._client.inviteIntoGroup(seq, groupId, contactIds)
    def _leaveGroup(self, id):
        """Leave a group"""
        return self._client.leaveGroup(0, id)
    def _getRecentMessages(self, id, count=1):
        """Get recent messages from `id`"""
        return self._client.getRecentMessages(id, count)
    def _sendMessage(self, message, seq=0):
        """Send a message to `id`. `id` could be contact id or group id
        :param message: `message` instance
        """
        return self._client.sendMessage(seq, message)
    def _getLastOpRevision(self):
        """Return the latest operation revision number."""
        return self._client.getLastOpRevision()
    def _fetchOperations(self, revision, count=50):
        """Fetch up to `count` operations newer than `revision`."""
        return self._client.fetchOperations(revision, count)
    def _getMessageBoxCompactWrapUp(self, id):
        # NOTE(review): bare except silently swallows ALL errors (including
        # auth failures) and returns None — consider catching TalkException.
        try:
            return self._client.getMessageBoxCompactWrapUp(id)
        except:
            return None
    def _getMessageBoxCompactWrapUpList(self, start=1, count=50):
        try:
            return self._client.getMessageBoxCompactWrapUpList(start, count)
        except Exception as e:
            msg = e
            self.raise_error(msg)
    def raise_error(self, msg):
        """Error format"""
        raise Exception("Error: %s" % msg)
    def _get_json(self, url):
        """Get json from given url with saved session and headers"""
        return json.loads(self._session.get(url, headers=self._headers).text)
    def post_content(self, url, data=None, files=None):
        """POST to `url` with the saved session/headers; returns the raw response."""
        return self._session.post(url, headers=self._headers, data=data, files=files)
|
|
"""
Created on March 29, 2017
@author Miguel Contreras Morales
History:
4/12/2017 - Adding more queries
"""
from pymongo import MongoClient
import cherrypy as QueryServer
import json
import os
class QueryTool(object):
"""
QueryTool will contain a subset of functions used to parse
Mongo Database
"""
_db = None
client = None
def __init__(self, dbaddress="10.30.5.203:27017", path=None):
"""
Constructor/Intiliazer
+ dbaddress - MongoDB IP
"""
self.path = path
if (dbaddress == None):
self._dbaddress = "127.0.0.1:27017"
else:
self._dbaddress = dbaddress
address, port = self._dbaddress.split(':', 2)
self.client = MongoClient(address, int(port))
@QueryServer.expose
def index(self):
"""
This function initializes index.html
"""
print "This is the path -->"
print self.path
return open(os.path.join(self.path, "index.html"))
@QueryServer.expose
def dbdump(self):
"""
This function will dump out the DBs along with collection stored in DB.
The function will loop and store names of DB/Collection.
Secondary check to make sure the DB is up.
+ self - no input required
"""
print "Initializing DB Dump Query"
d = dict((db, [collection for collection in self.client[db].collection_names()])
for db in self.client.database_names())
return json.dumps(d)
@QueryServer.expose
def showdbs(self):
"""
This function will dump out the DBs in Mongo ONLY
+ self - no input required
"""
print "Initializing ShowDBs Query"
dbs = self.client.database_names()
return json.dumps(dbs)
@QueryServer.expose
def showcollections(self, sprocess):
"""
This function will dump out the collection of DB sprocess
+ sprocess - first level of DB also called 'Collection'
"""
print "Initializing ShowsCollection Query"
dbs = self.client.database_names()
self._db = self.client[sprocess]
collection = self._db.collection_names(include_system_collections=True)
return json.dumps(collection)
@QueryServer.expose
def showprocess(self, sprocess, pnnum):
"""
This function will get the number of instances of a specific pnnum in sprocess collection
+ sprocess - the db we will be using
+ pnnum - part number within the db, this is dynamic depending on DB structure
"""
print "Initializing ShowProcess Query"
dbquery = ("processid")
self._db = self.client[sprocess]
coll = self._db[pnnum]
return json.dumps(coll.distinct(dbquery))
@QueryServer.expose
def processlog(self, sprocess, pnnum, starttime, serialnum):
"""
This function will get the log for a serialnum at a specfic starttime
+ sprocess - the db we will be using
+ pnnum - part number within the db, this is dynamic depending on DB structure
+ starttime - start time of serialnum
+ serialnum - product indentifier
"""
print "Initializing Process Log Query"
dbquery = {'processid.fstarttime': str(starttime),
'processid.serialnum': serialnum}
self._db = self.client[sprocess]
coll = self._db[pnnum]
logs = []
print dbquery
for dbentry in coll.find(dbquery):
logs.append(str(dbentry))
return json.dumps(logs)
@QueryServer.expose
def processlogcmd(self, sprocess, pnnum, starttime, serialnum, cmd):
"""
This function will get the cmds for a serialnum at a specfic starttime
+ sprocess - the db we will be using
+ pnnum - part number within the db, this is dynamic depending on DB structure
+ starttime - start time of serialnum
+ serialnum - product indentifier
+ cmd - command being parsed
"""
print "Initializing Process Log CMD Query"
cmd = str(cmd) + "\r"
cmd = cmd.lower()
dbquery = {'processid.fstarttime': str(starttime),
'processid.serialnum': serialnum,
'content.cmdobject.cmd': cmd}
self._db = self.client[sprocess]
coll = self._db[pnnum]
logs = []
print dbquery
for dbentry in coll.find(dbquery):
logs.append(str(dbentry))
return json.dumps(logs)
@QueryServer.expose
def processlogbufferregex(self, sprocess, pnnum, starttime, serialnum, regex):
    """
    This function will get the objext for a serialnum at a specfic starttime that matches
    the regular expression provided
    + sprocess - the db we will be using
    + pnnum - part number within the db, this is dynamic depending on DB structure
    + starttime - start time of serialnum
    + serialnum - product indentifier
    + regex - regular expression
    """
    print("Initializing Process Log Buffer REGEX Query")
    # Server-side regex match against the response buffer field.
    query = {
        'processid.fstarttime': str(starttime),
        'processid.serialnum': serialnum,
        'content.response.buffer': {'$regex': regex},
    }
    self._db = self.client[sprocess]
    part_collection = self._db[pnnum]
    print(query)
    return json.dumps([str(entry) for entry in part_collection.find(query)])
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from functools import total_ordering
import itertools
import re
# Global registry of every Module instance; Module.__init__ appends to it,
# and the "root" module at the bottom of this file aggregates over it.
all_modules = []
@total_ordering
class Module(object):
    """
    A module is the basic abstraction in our test runner script. Each module consists of a set
    of source files, a set of test commands, and a set of dependencies on other modules. We use
    modules to define a dependency graph that let us determine which tests to run based on which
    files have changed.
    """

    def __init__(self, name, dependencies, source_file_regexes, build_profile_flags=(), environ=None,
                 sbt_test_goals=(), python_test_goals=(), blacklisted_python_implementations=(),
                 test_tags=(), should_run_r_tests=False, should_run_build_tests=False):
        """
        Define a new module.

        :param name: A short module name, for display in logging and error messages.
        :param dependencies: A set of dependencies for this module. This should only include direct
            dependencies; transitive dependencies are resolved automatically.
        :param source_file_regexes: a set of regexes that match source files belonging to this
            module. These regexes are applied by attempting to match at the beginning of the
            filename strings.
        :param build_profile_flags: A set of profile flags that should be passed to Maven or SBT in
            order to build and test this module (e.g. '-PprofileName').
        :param environ: A dict of environment variables that should be set when files in this
            module are changed.
        :param sbt_test_goals: A set of SBT test goals for testing this module.
        :param python_test_goals: A set of Python test goals for testing this module.
        :param blacklisted_python_implementations: A set of Python implementations that are not
            supported by this module's Python components. The values in this set should match
            strings returned by Python's `platform.python_implementation()`.
        :param test_tags A set of tags that will be excluded when running unit tests if the module
            is not explicitly changed.
        :param should_run_r_tests: If true, changes in this module will trigger all R tests.
        :param should_run_build_tests: If true, changes in this module will trigger build tests.
        """
        self.name = name
        self.dependencies = dependencies
        self.source_file_prefixes = source_file_regexes
        self.sbt_test_goals = sbt_test_goals
        self.build_profile_flags = build_profile_flags
        # BUG FIX: the parameter default used to be a shared mutable dict
        # (`environ={}`), so every module created without an explicit environ
        # aliased the *same* object and a mutation on one leaked into all of
        # them. Use None as the sentinel and allocate a fresh dict instead.
        self.environ = {} if environ is None else environ
        self.python_test_goals = python_test_goals
        self.blacklisted_python_implementations = blacklisted_python_implementations
        self.test_tags = test_tags
        self.should_run_r_tests = should_run_r_tests
        self.should_run_build_tests = should_run_build_tests

        # Reverse edges of the dependency graph: record ourselves as a
        # dependent on each of our direct dependencies.
        self.dependent_modules = set()
        for dep in dependencies:
            dep.dependent_modules.add(self)
        # Register in the module-global list used to build the "root" module.
        all_modules.append(self)

    def contains_file(self, filename):
        """Return True if `filename` belongs to this module (prefix regex match)."""
        return any(re.match(p, filename) for p in self.source_file_prefixes)

    def __repr__(self):
        return "Module<%s>" % self.name

    # Modules are identified, ordered and hashed solely by name;
    # @total_ordering derives the remaining rich comparisons from
    # __eq__ and __lt__.
    def __lt__(self, other):
        return self.name < other.name

    def __eq__(self, other):
        return self.name == other.name

    def __ne__(self, other):
        return not (self.name == other.name)

    def __hash__(self):
        return hash(self.name)
# ---------------------------------------------------------------------------
# JVM-side module definitions. Definition order matters: each Module must be
# created after the modules it depends on, since dependencies are passed by
# reference and Module.__init__ registers reverse edges immediately.
# ---------------------------------------------------------------------------

tags = Module(
    name="tags",
    dependencies=[],
    source_file_regexes=[
        "common/tags/",
    ]
)

catalyst = Module(
    name="catalyst",
    dependencies=[tags],
    source_file_regexes=[
        "sql/catalyst/",
    ],
    sbt_test_goals=[
        "catalyst/test",
    ],
)

sql = Module(
    name="sql",
    dependencies=[catalyst],
    source_file_regexes=[
        "sql/core/",
    ],
    sbt_test_goals=[
        "sql/test",
    ],
)

hive = Module(
    name="hive",
    dependencies=[sql],
    source_file_regexes=[
        "sql/hive/",
        "bin/spark-sql",
    ],
    build_profile_flags=[
        "-Phive",
    ],
    sbt_test_goals=[
        "hive/test",
    ],
    test_tags=[
        "org.apache.spark.tags.ExtendedHiveTest"
    ]
)

repl = Module(
    name="repl",
    dependencies=[hive],
    source_file_regexes=[
        "repl/",
    ],
    sbt_test_goals=[
        "repl/test",
    ],
)

hive_thriftserver = Module(
    name="hive-thriftserver",
    dependencies=[hive],
    source_file_regexes=[
        "sql/hive-thriftserver",
        "sbin/start-thriftserver.sh",
    ],
    build_profile_flags=[
        "-Phive-thriftserver",
    ],
    sbt_test_goals=[
        "hive-thriftserver/test",
    ]
)

avro = Module(
    name="avro",
    dependencies=[sql],
    source_file_regexes=[
        "external/avro",
    ],
    sbt_test_goals=[
        "avro/test",
    ]
)

sql_kafka = Module(
    name="sql-kafka-0-10",
    dependencies=[sql],
    source_file_regexes=[
        "external/kafka-0-10-sql",
    ],
    sbt_test_goals=[
        "sql-kafka-0-10/test",
    ]
)

sketch = Module(
    name="sketch",
    dependencies=[tags],
    source_file_regexes=[
        "common/sketch/",
    ],
    sbt_test_goals=[
        "sketch/test"
    ]
)

graphx = Module(
    name="graphx",
    dependencies=[tags],
    source_file_regexes=[
        "graphx/",
    ],
    sbt_test_goals=[
        "graphx/test"
    ]
)

streaming = Module(
    name="streaming",
    dependencies=[tags],
    source_file_regexes=[
        "streaming",
    ],
    sbt_test_goals=[
        "streaming/test",
    ]
)
# ---------------------------------------------------------------------------
# External streaming connectors, MLlib and the examples module.
# ---------------------------------------------------------------------------

# Don't set the dependencies because changes in other modules should not trigger Kinesis tests.
# Kinesis tests depends on external Amazon kinesis service. We should run these tests only when
# files in streaming_kinesis_asl are changed, so that if Kinesis experiences an outage, we don't
# fail other PRs.
streaming_kinesis_asl = Module(
    name="streaming-kinesis-asl",
    dependencies=[tags],
    source_file_regexes=[
        "external/kinesis-asl/",
        "external/kinesis-asl-assembly/",
    ],
    build_profile_flags=[
        "-Pkinesis-asl",
    ],
    environ={
        "ENABLE_KINESIS_TESTS": "1"
    },
    sbt_test_goals=[
        "streaming-kinesis-asl/test",
    ]
)

streaming_kafka_0_10 = Module(
    name="streaming-kafka-0-10",
    dependencies=[streaming],
    source_file_regexes=[
        # The ending "/" is necessary otherwise it will include "sql-kafka" codes
        "external/kafka-0-10/",
        "external/kafka-0-10-assembly",
    ],
    sbt_test_goals=[
        "streaming-kafka-0-10/test",
    ]
)

mllib_local = Module(
    name="mllib-local",
    dependencies=[tags],
    source_file_regexes=[
        "mllib-local",
    ],
    sbt_test_goals=[
        "mllib-local/test",
    ]
)

mllib = Module(
    name="mllib",
    dependencies=[mllib_local, streaming, sql],
    source_file_regexes=[
        "data/mllib/",
        "mllib/",
    ],
    sbt_test_goals=[
        "mllib/test",
    ]
)

examples = Module(
    name="examples",
    dependencies=[graphx, mllib, streaming, hive],
    source_file_regexes=[
        "examples/",
    ],
    sbt_test_goals=[
        "examples/test",
    ]
)
# ---------------------------------------------------------------------------
# PySpark modules. python_test_goals lists the doctest and unittest modules
# that the test runner executes when the corresponding sources change.
# ---------------------------------------------------------------------------

pyspark_core = Module(
    name="pyspark-core",
    dependencies=[],
    source_file_regexes=[
        "python/(?!pyspark/(ml|mllib|sql|streaming))"
    ],
    python_test_goals=[
        # doctests
        "pyspark.rdd",
        "pyspark.context",
        "pyspark.conf",
        "pyspark.broadcast",
        "pyspark.accumulators",
        "pyspark.serializers",
        "pyspark.profiler",
        "pyspark.shuffle",
        "pyspark.util",
        # unittests
        "pyspark.tests.test_appsubmit",
        "pyspark.tests.test_broadcast",
        "pyspark.tests.test_conf",
        "pyspark.tests.test_context",
        "pyspark.tests.test_daemon",
        "pyspark.tests.test_join",
        "pyspark.tests.test_profiler",
        "pyspark.tests.test_rdd",
        "pyspark.tests.test_readwrite",
        "pyspark.tests.test_serializers",
        "pyspark.tests.test_shuffle",
        "pyspark.tests.test_taskcontext",
        "pyspark.tests.test_util",
        "pyspark.tests.test_worker",
    ]
)

pyspark_sql = Module(
    name="pyspark-sql",
    dependencies=[pyspark_core, hive, avro],
    source_file_regexes=[
        "python/pyspark/sql"
    ],
    python_test_goals=[
        # doctests
        "pyspark.sql.types",
        "pyspark.sql.context",
        "pyspark.sql.session",
        "pyspark.sql.conf",
        "pyspark.sql.catalog",
        "pyspark.sql.column",
        "pyspark.sql.dataframe",
        "pyspark.sql.group",
        "pyspark.sql.functions",
        "pyspark.sql.readwriter",
        "pyspark.sql.streaming",
        "pyspark.sql.udf",
        "pyspark.sql.window",
        "pyspark.sql.avro.functions",
        # unittests
        "pyspark.sql.tests.test_arrow",
        "pyspark.sql.tests.test_catalog",
        "pyspark.sql.tests.test_column",
        "pyspark.sql.tests.test_conf",
        "pyspark.sql.tests.test_context",
        "pyspark.sql.tests.test_dataframe",
        "pyspark.sql.tests.test_datasources",
        "pyspark.sql.tests.test_functions",
        "pyspark.sql.tests.test_group",
        "pyspark.sql.tests.test_pandas_udf",
        "pyspark.sql.tests.test_pandas_udf_cogrouped_map",
        "pyspark.sql.tests.test_pandas_udf_grouped_agg",
        "pyspark.sql.tests.test_pandas_udf_grouped_map",
        "pyspark.sql.tests.test_pandas_udf_scalar",
        "pyspark.sql.tests.test_pandas_udf_window",
        "pyspark.sql.tests.test_readwriter",
        "pyspark.sql.tests.test_serde",
        "pyspark.sql.tests.test_session",
        "pyspark.sql.tests.test_streaming",
        "pyspark.sql.tests.test_types",
        "pyspark.sql.tests.test_udf",
        "pyspark.sql.tests.test_utils",
    ]
)

pyspark_streaming = Module(
    name="pyspark-streaming",
    dependencies=[
        pyspark_core,
        streaming,
        streaming_kinesis_asl
    ],
    source_file_regexes=[
        "python/pyspark/streaming"
    ],
    python_test_goals=[
        # doctests
        "pyspark.streaming.util",
        # unittests
        "pyspark.streaming.tests.test_context",
        "pyspark.streaming.tests.test_dstream",
        "pyspark.streaming.tests.test_kinesis",
        "pyspark.streaming.tests.test_listener",
    ]
)

pyspark_mllib = Module(
    name="pyspark-mllib",
    dependencies=[pyspark_core, pyspark_streaming, pyspark_sql, mllib],
    source_file_regexes=[
        "python/pyspark/mllib"
    ],
    python_test_goals=[
        # doctests
        "pyspark.mllib.classification",
        "pyspark.mllib.clustering",
        "pyspark.mllib.evaluation",
        "pyspark.mllib.feature",
        "pyspark.mllib.fpm",
        "pyspark.mllib.linalg.__init__",
        "pyspark.mllib.linalg.distributed",
        "pyspark.mllib.random",
        "pyspark.mllib.recommendation",
        "pyspark.mllib.regression",
        "pyspark.mllib.stat._statistics",
        "pyspark.mllib.stat.KernelDensity",
        "pyspark.mllib.tree",
        "pyspark.mllib.util",
        # unittests
        "pyspark.mllib.tests.test_algorithms",
        "pyspark.mllib.tests.test_feature",
        "pyspark.mllib.tests.test_linalg",
        "pyspark.mllib.tests.test_stat",
        "pyspark.mllib.tests.test_streaming_algorithms",
        "pyspark.mllib.tests.test_util",
    ],
    blacklisted_python_implementations=[
        "PyPy"  # Skip these tests under PyPy since they require numpy and it isn't available there
    ]
)

pyspark_ml = Module(
    name="pyspark-ml",
    dependencies=[pyspark_core, pyspark_mllib],
    source_file_regexes=[
        "python/pyspark/ml/"
    ],
    python_test_goals=[
        # doctests
        "pyspark.ml.classification",
        "pyspark.ml.clustering",
        "pyspark.ml.evaluation",
        "pyspark.ml.feature",
        "pyspark.ml.fpm",
        "pyspark.ml.image",
        "pyspark.ml.linalg.__init__",
        "pyspark.ml.recommendation",
        "pyspark.ml.regression",
        "pyspark.ml.stat",
        "pyspark.ml.tuning",
        # unittests
        "pyspark.ml.tests.test_algorithms",
        "pyspark.ml.tests.test_base",
        "pyspark.ml.tests.test_evaluation",
        "pyspark.ml.tests.test_feature",
        "pyspark.ml.tests.test_image",
        "pyspark.ml.tests.test_linalg",
        "pyspark.ml.tests.test_param",
        "pyspark.ml.tests.test_persistence",
        "pyspark.ml.tests.test_pipeline",
        "pyspark.ml.tests.test_stat",
        "pyspark.ml.tests.test_training_summary",
        "pyspark.ml.tests.test_tuning",
        "pyspark.ml.tests.test_wrapper",
    ],
    blacklisted_python_implementations=[
        "PyPy"  # Skip these tests under PyPy since they require numpy and it isn't available there
    ]
)
# ---------------------------------------------------------------------------
# SparkR, documentation, build and cluster-manager modules, plus the "root"
# aggregate module.
# ---------------------------------------------------------------------------

sparkr = Module(
    name="sparkr",
    dependencies=[hive, mllib],
    source_file_regexes=[
        "R/",
    ],
    should_run_r_tests=True
)

docs = Module(
    name="docs",
    dependencies=[],
    source_file_regexes=[
        "docs/",
    ]
)

build = Module(
    name="build",
    dependencies=[],
    source_file_regexes=[
        ".*pom.xml",
        "dev/test-dependencies.sh",
    ],
    should_run_build_tests=True
)

yarn = Module(
    name="yarn",
    dependencies=[],
    source_file_regexes=[
        "resource-managers/yarn/",
        "common/network-yarn/",
    ],
    build_profile_flags=["-Pyarn"],
    sbt_test_goals=[
        "yarn/test",
        "network-yarn/test",
    ],
    test_tags=[
        "org.apache.spark.tags.ExtendedYarnTest"
    ]
)

mesos = Module(
    name="mesos",
    dependencies=[],
    source_file_regexes=["resource-managers/mesos/"],
    build_profile_flags=["-Pmesos"],
    sbt_test_goals=["mesos/test"]
)

kubernetes = Module(
    name="kubernetes",
    dependencies=[],
    source_file_regexes=["resource-managers/kubernetes"],
    build_profile_flags=["-Pkubernetes"],
    sbt_test_goals=["kubernetes/test"]
)

hadoop_cloud = Module(
    name="hadoop-cloud",
    dependencies=[],
    source_file_regexes=["hadoop-cloud"],
    build_profile_flags=["-Phadoop-cloud"],
    sbt_test_goals=["hadoop-cloud/test"]
)

spark_ganglia_lgpl = Module(
    name="spark-ganglia-lgpl",
    dependencies=[],
    build_profile_flags=["-Pspark-ganglia-lgpl"],
    source_file_regexes=[
        "external/spark-ganglia-lgpl",
    ]
)

# The root module is a dummy module which is used to run all of the tests.
# No other modules should directly depend on this module.
root = Module(
    name="root",
    dependencies=[build],  # Changes to build should trigger all tests.
    source_file_regexes=[],
    # In order to run all of the tests, enable every test profile:
    build_profile_flags=list(set(
        itertools.chain.from_iterable(m.build_profile_flags for m in all_modules))),
    sbt_test_goals=[
        "test",
    ],
    python_test_goals=list(itertools.chain.from_iterable(m.python_test_goals for m in all_modules)),
    should_run_r_tests=True,
    should_run_build_tests=True
)
|
|
import arrow
import re
import feedparser
import json
import time
from hbconfig import Config
from sklearn import tree
from .pocket import Pocket
from ..slack.resource import MsgResource
from ..slack.slackbot import SlackerAdapter
from ..slack.template import MsgTemplate
from ..utils.data_handler import DataHandler
from ..utils.data_loader import FeedData
from ..utils.data_loader import FeedDataLoader
from ..utils.logger import Logger
from ..utils.logger import DataLogger
class FeedNotifier:
    """Polls the configured RSS/Atom feeds and pushes new entries to Slack.

    Feed definitions come from DataHandler.read_feeds() as
    {category: [(feed_name, feed_url, save_pocket), ...]} (shape inferred
    from how get_notify_list unpacks them). Already-notified state is kept
    in cache_feed.json: the last update time per feed URL plus the most
    recently notified entry links.
    """

    # Maximum number of already-seen entry links kept in the cache file.
    MAX_KEEP = 40

    def __init__(self, slackbot: SlackerAdapter = None) -> None:
        self.logger = Logger().get_logger()
        self.feed_logger = DataLogger("feed").get_logger()
        self.data_handler = DataHandler()
        self.feeds = self.data_handler.read_feeds()

        # Optional classifier-based filter, enabled by the FEED_CLASSIFIER flag.
        self.feed_classifier = None
        if Config.bot.get("FEED_CLASSIFIER", False):
            self.feed_classifier = FeedClassifier()

        if slackbot is None:
            self.slackbot = SlackerAdapter(
                channel=Config.slack.channel.get("FEED", "#general")
            )
        else:
            self.slackbot = slackbot

    def notify_all(self) -> None:
        """Check every registered feed and notify about its new entries.

        Errors are caught and logged per feed so one broken feed cannot
        abort the remaining ones.
        """
        self.logger.info("Check feed_list")
        for category, feeds in self.feeds.items():
            for feed in feeds:
                try:
                    results = self.get_notify_list(category, feed)
                    self.notify(category, feed, results)
                except Exception as e:
                    self.logger.error(f"FEED Error: {e}")
                    self.logger.exception("feed")

    def get_notify_list(self, category: str, feed: tuple) -> list:
        """Return [(entry_tuple, save_pocket), ...] for not-yet-notified entries.

        Side effects: updates cache_feed.json with this feed's latest update
        time and the rolling list of already-notified entry links.
        """
        CACHE_FILE_NAME = "cache_feed.json"
        cache_data = self.data_handler.read_cache(fname=CACHE_FILE_NAME)

        feed_name, feed_url, save_pocket = feed
        f = feedparser.parse(feed_url)

        def get_timestamp(x):
            # Entries may carry `updated_parsed` as a time.struct_time, as
            # None, or not at all; fall back to "now" so sorting never fails.
            update_time = x.get("updated_parsed", arrow.now().timestamp)
            if type(update_time) == time.struct_time:
                update_time = time.mktime(update_time)
            if update_time is None:
                return arrow.now().timestamp
            return update_time

        # Newest entries first.
        f.entries = sorted(
            f.entries, key=lambda x: get_timestamp(x), reverse=True
        )

        # get Latest Feed
        noti_list = []
        if feed_url in cache_data:
            previous_update_date = arrow.get(cache_data[feed_url])
            for e in f.entries:
                if getattr(e, "updated_parsed", None):
                    e_updated_date = arrow.get(e.updated_parsed)
                else:
                    e_updated_date = arrow.now()
                if e_updated_date > previous_update_date:
                    noti_list.append(self.__make_entry_tuple(category, e, feed_name))
        elif f.entries:
            # First time we see this feed: notify only about the newest entry.
            e = f.entries[0]
            noti_list.append(self.__make_entry_tuple(category, e, feed_name))
        else:
            pass

        # Remember the newest entry's update time for the next poll.
        if f.entries:
            last_e = f.entries[0]
            last_updated_date = arrow.get(last_e.get("updated_parsed", None))
            self.data_handler.edit_cache((feed_url, str(last_updated_date)), fname=CACHE_FILE_NAME)

        # filter feeded entry link
        cache_entry_links = set(cache_data.get("feed_links", []))
        noti_list = list(filter(lambda e: e[1] not in cache_entry_links, noti_list))

        # Cache entry link (keep only the most recent MAX_KEEP links).
        for entry in noti_list:
            _, entry_link, _ = entry
            cache_entry_links.add(entry_link)
        self.data_handler.edit_cache(
            ("feed_links", list(cache_entry_links)[-self.MAX_KEEP :]), fname=CACHE_FILE_NAME
        )

        if len(cache_data) == 0:  # cache_data is Empty. (Error)
            return []

        # Append 'save_pocket' flags
        noti_list = [(entry, save_pocket) for entry in noti_list]
        return noti_list

    def __make_entry_tuple(self, category: str, entry: dict, feed_name: str) -> tuple:
        """Build the (title, link, description) tuple consumed by notify()."""
        entry_title = f"[{category}] - {feed_name} \n" + entry.get("title", "")
        entry_link = entry.get("link", "")
        entry_description = f"Link : {entry_link} \n" + self.__remove_tag(
            entry.get("description", ""), entry_link
        )
        return (entry_title, entry_link, entry_description)

    def __remove_tag(self, text: str, entry_link: str) -> str:
        """Strip HTML tags, whitespace characters and the entry link from text."""
        # Drop HTML tags (non-greedy, case-insensitive, across newlines).
        text = re.sub("<.+?>", "", text, 0, re.I | re.S)
        # BUG FIX: the old pattern " |\t|\r|" ended with an empty alternative,
        # which matches at every position; a character class is the safe
        # equivalent (removes spaces, tabs and carriage returns).
        text = re.sub("[ \t\r]", "", text)
        # BUG FIX: the link used to be passed to re.sub unescaped; URLs
        # routinely contain regex metacharacters ('?', '+', '.') which could
        # raise re.error or match the wrong text. Plain string replacement
        # is what was intended.
        text = text.replace(entry_link, "")
        return text

    def notify(self, category: str, feed: tuple, results: list):
        """Send each pending entry to Slack (or hand it to the classifier)."""
        if len(results) == 0:
            feed_name = feed[0]
            self.slackbot.send_message(text=MsgResource.FEED_NO_NEW_POST(feed_name=feed_name))
            return

        for (parsed_feed, save_pocket) in results:
            # Title was built as "[category] - feed_name \n<entry title>".
            feed_header = parsed_feed[0].split("\n")
            category = feed_header[0]
            title = feed_header[1]
            link = parsed_feed[1]

            # Defensive check: skip entries without an absolute http(s) link.
            if not link.startswith("http"):
                continue

            self.feed_logger.info(json.dumps({"category": category, "title": title}))

            # If the classifier saved the entry to Pocket, announce that
            # instead of posting the full feed template.
            if self.feed_classifier is not None and self.feed_classifier.predict(
                link, category, force=save_pocket
            ):
                self.slackbot.send_message(
                    text=MsgResource.PREDICT_FEED_TRUE(title=category + ": " + title)
                )
                continue

            attachments = MsgTemplate.make_feed_template(parsed_feed)
            self.slackbot.send_message(attachments=attachments)
class FeedClassifier:
    """Decides whether a feed entry should be saved to Pocket.

    A decision tree is trained at construction time, but prediction is
    currently disabled: entries are only saved when `force` is requested
    and the category is known.
    """

    def __init__(self):
        self.logger = Logger().get_logger()
        train_X = FeedData().train_X
        train_y = FeedData().train_y
        self.category_ids = FeedData().category_ids
        # fit() returns the fitted estimator itself.
        self.clf = tree.DecisionTreeClassifier().fit(train_X, train_y)

    def predict(self, link, category, force=False):
        """Return True only when the entry was actually saved to Pocket."""
        if self.category_ids.get(category.strip(), None) is None:
            # Unknown category: nothing to do.
            return False
        if force is True:
            return self.save_to_pocket(category, link)
        return False
        # NOTE: tree-based prediction is intentionally disabled for now;
        # the previous implementation ran self.clf.predict(category_id)
        # and saved to Pocket on a TRUE_LABEL result.

    def save_to_pocket(self, category, link):
        """Best-effort save; returns False (and logs) on any failure."""
        try:
            Pocket().add(link, tags=self.extract_tags(category))
            return True
        except BaseException as e:
            self.logger.exception(e)
            return False

    def extract_tags(self, tags):
        """Turn a string like '[a - b]' into the tag list ['a', 'b']."""
        cleaned = tags.strip().replace("[", "").replace("]", "")
        return cleaned.split(" - ")
|
|
from django import forms
from django.template import loader
from django.utils.http import int_to_base36
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.contrib.auth.utils import UNUSABLE_PASSWORD
from django.contrib.auth import authenticate
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site
class UserCreationForm(forms.ModelForm):
    """
    A form that creates a user, with no privileges, from the given username and password.
    """
    username = forms.RegexField(label=_("Username"), max_length=30, regex=r'^[\w.@+-]+$',
        help_text = _("Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only."),
        error_messages = {'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")})
    password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password confirmation"), widget=forms.PasswordInput,
        help_text = _("Enter the same password as above, for verification."))

    class Meta:
        model = User
        fields = ("username",)

    def clean_username(self):
        # EAFP: the username is available iff the lookup raises DoesNotExist.
        username = self.cleaned_data["username"]
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            return username
        raise forms.ValidationError(_("A user with that username already exists."))

    def clean_password2(self):
        # clean_password1 may have failed, so password1 can be absent here;
        # .get() with a default avoids a KeyError in that case.
        password1 = self.cleaned_data.get("password1", "")
        password2 = self.cleaned_data["password2"]
        if password1 != password2:
            raise forms.ValidationError(_("The two password fields didn't match."))
        return password2

    def save(self, commit=True):
        # Hash the password via set_password rather than storing the raw value.
        user = super(UserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    """ModelForm for editing an existing User (all model fields exposed)."""
    username = forms.RegexField(label=_("Username"), max_length=30, regex=r'^[\w.@+-]+$',
        help_text = _("Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only."),
        error_messages = {'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")})

    class Meta:
        model = User

    def __init__(self, *args, **kwargs):
        super(UserChangeForm, self).__init__(*args, **kwargs)
        # Pre-select the related ContentType rows so rendering the
        # user_permissions widget doesn't issue one query per permission.
        f = self.fields.get('user_permissions', None)
        if f is not None:
            f.queryset = f.queryset.select_related('content_type')
class AuthenticationForm(forms.Form):
    """
    Base class for authenticating users. Extend this to get a form that accepts
    username/password logins.
    """
    username = forms.CharField(label=_("Username"), max_length=30)
    password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)

    def __init__(self, request=None, *args, **kwargs):
        """
        If request is passed in, the form will validate that cookies are
        enabled. Note that the request (a HttpRequest object) must have set a
        cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
        running this validation.
        """
        self.request = request
        # Filled in by clean(); holds the authenticated User on success.
        self.user_cache = None
        super(AuthenticationForm, self).__init__(*args, **kwargs)

    def clean(self):
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')

        if username and password:
            self.user_cache = authenticate(username=username, password=password)
            if self.user_cache is None:
                # Deliberately vague: don't reveal which of the two was wrong.
                raise forms.ValidationError(_("Please enter a correct username and password. Note that both fields are case-sensitive."))
            elif not self.user_cache.is_active:
                raise forms.ValidationError(_("This account is inactive."))
        self.check_for_test_cookie()
        return self.cleaned_data

    def check_for_test_cookie(self):
        # Only meaningful when a request was supplied to __init__.
        if self.request and not self.request.session.test_cookie_worked():
            raise forms.ValidationError(
                _("Your Web browser doesn't appear to have cookies enabled. "
                  "Cookies are required for logging in."))

    def get_user_id(self):
        if self.user_cache:
            return self.user_cache.id
        return None

    def get_user(self):
        return self.user_cache
class PasswordResetForm(forms.Form):
    """Requests a password reset e-mail for every active user with the address."""
    email = forms.EmailField(label=_("E-mail"), max_length=75)

    def clean_email(self):
        """
        Validates that an active user exists with the given email address.
        """
        email = self.cleaned_data["email"]
        # Cached for save(); several users may share one e-mail address.
        self.users_cache = User.objects.filter(
            email__iexact=email,
            is_active=True)
        if not len(self.users_cache):
            raise forms.ValidationError(_("That e-mail address doesn't have an associated user account. Are you sure you've registered?"))
        if any((user.password == UNUSABLE_PASSWORD) for user in self.users_cache):
            raise forms.ValidationError(_("The user account associated with this e-mail address cannot reset the password."))
        return email

    def save(self, domain_override=None,
             subject_template_name='registration/password_reset_subject.txt',
             email_template_name='registration/password_reset_email.html',
             use_https=False, token_generator=default_token_generator,
             from_email=None, request=None):
        """
        Generates a one-use only link for resetting password and sends to the user
        """
        from django.core.mail import send_mail
        for user in self.users_cache:
            if not domain_override:
                current_site = get_current_site(request)
                site_name = current_site.name
                domain = current_site.domain
            else:
                site_name = domain = domain_override
            # Template context; uid + token together form the one-use reset link.
            c = {
                'email': user.email,
                'domain': domain,
                'site_name': site_name,
                'uid': int_to_base36(user.id),
                'user': user,
                'token': token_generator.make_token(user),
                'protocol': use_https and 'https' or 'http',
            }
            subject = loader.render_to_string(subject_template_name, c)
            # Email subject *must not* contain newlines
            subject = ''.join(subject.splitlines())
            email = loader.render_to_string(email_template_name, c)
            send_mail(subject, email, from_email, [user.email])
class SetPasswordForm(forms.Form):
    """
    A form that lets a user set his/her password without
    entering the old password.
    """
    new_password1 = forms.CharField(label=_("New password"), widget=forms.PasswordInput)
    new_password2 = forms.CharField(label=_("New password confirmation"), widget=forms.PasswordInput)

    def __init__(self, user, *args, **kwargs):
        # The user whose password is being set; required first argument.
        self.user = user
        super(SetPasswordForm, self).__init__(*args, **kwargs)

    def clean_new_password2(self):
        # Both fields must be present and equal for the form to validate.
        password1 = self.cleaned_data.get('new_password1')
        password2 = self.cleaned_data.get('new_password2')
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(_("The two password fields didn't match."))
        return password2

    def save(self, commit=True):
        # set_password hashes; raw passwords are never stored.
        self.user.set_password(self.cleaned_data['new_password1'])
        if commit:
            self.user.save()
        return self.user
class PasswordChangeForm(SetPasswordForm):
    """
    A form that lets a user change his/her password by entering
    their old password.
    """
    old_password = forms.CharField(label=_("Old password"), widget=forms.PasswordInput)

    def clean_old_password(self):
        """
        Validates that the old_password field is correct.
        """
        old_password = self.cleaned_data["old_password"]
        if not self.user.check_password(old_password):
            raise forms.ValidationError(_("Your old password was entered incorrectly. Please enter it again."))
        return old_password

# Render the old password before the new-password fields; the default
# declared-field order would put the inherited fields first.
PasswordChangeForm.base_fields.keyOrder = ['old_password', 'new_password1', 'new_password2']
class AdminPasswordChangeForm(forms.Form):
    """
    A form used to change the password of a user in the admin interface.
    """
    password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password (again)"), widget=forms.PasswordInput)

    def __init__(self, user, *args, **kwargs):
        # The user whose password is being changed by the admin.
        self.user = user
        super(AdminPasswordChangeForm, self).__init__(*args, **kwargs)

    def clean_password2(self):
        # Both fields must be present and equal for the form to validate.
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(_("The two password fields didn't match."))
        return password2

    def save(self, commit=True):
        """
        Saves the new password.
        """
        self.user.set_password(self.cleaned_data["password1"])
        if commit:
            self.user.save()
        return self.user
|
|
# GPIO Zero: a library for controlling the Raspberry Pi's GPIO pins
# Copyright (c) 2016-2019 Dave Jones <dave@waveform.org.uk>
# Copyright (c) 2019 Andrew Scheller <github@loowis.durge.org>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
# Py2/Py3 compatibility shim: keep the native `str` available as `nstr`
# (needed e.g. for array() type codes) and rebind `str` to the text type
# (`type('')` is unicode on Py2 thanks to the unicode_literals import above,
# and plain str on Py3).
nstr = str
str = type('')
import io
import sys
import pytest
from array import array
from mock import patch
from collections import namedtuple
from gpiozero.pins.native import NativeFactory
from gpiozero.pins.local import (
LocalPiHardwareSPI,
LocalPiSoftwareSPI,
LocalPiHardwareSPIShared,
LocalPiSoftwareSPIShared,
)
from gpiozero.pins.mock import MockSPIDevice
from gpiozero import *
def test_spi_hardware_params(mock_factory):
    """spi() keyword validation: every pin combination that maps onto the
    hardware SPI0 bus yields a LocalPiHardwareSPI (shared=True yields the
    shared variant), while invalid ports/devices/pins/keywords raise
    ValueError."""
    with patch('os.open'), patch('mmap.mmap') as mmap_mmap, patch('io.open') as io_open:
        # Fake the mmap'd register block and the file NativeFactory reads at
        # startup (b'\x00\xa2\x10\x42' is presumably a Pi revision code --
        # TODO confirm against NativeFactory's implementation).
        mmap_mmap.return_value = array(nstr('B'), (0,) * 4096)
        io_open.return_value.__enter__.return_value = io.BytesIO(b'\x00\xa2\x10\x42')
        factory = NativeFactory()
        with patch('gpiozero.pins.local.SpiDev'):
            with factory.spi() as device:
                assert isinstance(device, LocalPiHardwareSPI)
                device.close()
                assert device.closed
            with factory.spi(port=0, device=0) as device:
                assert isinstance(device, LocalPiHardwareSPI)
            with factory.spi(port=0, device=1) as device:
                assert isinstance(device, LocalPiHardwareSPI)
            with factory.spi(clock_pin=11) as device:
                assert isinstance(device, LocalPiHardwareSPI)
            with factory.spi(clock_pin=11, mosi_pin=10, select_pin=8) as device:
                assert isinstance(device, LocalPiHardwareSPI)
            with factory.spi(clock_pin=11, mosi_pin=10, select_pin=7) as device:
                assert isinstance(device, LocalPiHardwareSPI)
            with factory.spi(shared=True) as device:
                assert isinstance(device, LocalPiHardwareSPIShared)
            # Requests that cannot be satisfied must be rejected.
            with pytest.raises(ValueError):
                factory.spi(port=1)
            with pytest.raises(ValueError):
                factory.spi(device=2)
            with pytest.raises(ValueError):
                factory.spi(port=0, clock_pin=12)
            with pytest.raises(ValueError):
                factory.spi(foo='bar')
def test_spi_software_params(mock_factory):
    """Non-default pin assignments must fall back to the software
    (bit-banged) SPI implementation; the same fallback must occur when the
    SpiDev module is unavailable entirely."""
    with patch('os.open'), patch('mmap.mmap') as mmap_mmap, patch('io.open') as io_open:
        # Same hardware fakes as test_spi_hardware_params above.
        mmap_mmap.return_value = array(nstr('B'), (0,) * 4096)
        io_open.return_value.__enter__.return_value = io.BytesIO(b'\x00\xa2\x10\x42')
        factory = NativeFactory()
        with patch('gpiozero.pins.local.SpiDev'):
            # A non-default select pin forces software SPI.
            with factory.spi(select_pin=6) as device:
                assert isinstance(device, LocalPiSoftwareSPI)
                device.close()
                assert device.closed
            with factory.spi(clock_pin=11, mosi_pin=9, miso_pin=10) as device:
                assert isinstance(device, LocalPiSoftwareSPI)
                # Closing the underlying bus must also close the device.
                device._bus.close()
                assert device._bus.closed
                device.close()
                assert device.closed
            with factory.spi(select_pin=6, shared=True) as device:
                assert isinstance(device, LocalPiSoftwareSPIShared)
        with patch('gpiozero.pins.local.SpiDev', None):
            # Clear out the old factory's caches (this is only necessary
            # because we're being naughty switching out patches)
            factory.pins.clear()
            factory._reservations.clear()
            # Ensure software fallback works when SpiDev isn't present
            with factory.spi() as device:
                assert isinstance(device, LocalPiSoftwareSPI)
def test_spi_hardware_conflict(mock_factory):
    """GPIO pin reservations must conflict in both directions: a device
    holding pin 11 blocks hardware SPI, and hardware SPI blocks the device.

    SpiDev is patched out so no real /dev/spidev access happens; the pin
    factory's reservation bookkeeping is what's under test.
    """
    # CLEANUP: the `as spidev` / `as led` / `as spi` bindings were never
    # used; the context managers are kept purely for their side effects.
    with patch('gpiozero.pins.local.SpiDev'):
        with LED(11):
            with pytest.raises(GPIOPinInUse):
                mock_factory.spi(port=0, device=0)
    with patch('gpiozero.pins.local.SpiDev'):
        with mock_factory.spi(port=0, device=0):
            with pytest.raises(GPIOPinInUse):
                LED(11)
def test_spi_hardware_read(mock_factory):
    """Hardware SPI reads return whatever the SpiDev transfer yields."""
    with patch('gpiozero.pins.local.SpiDev') as spidev:
        # Echo back as many bytes as were clocked out.
        spidev.return_value.xfer2.side_effect = lambda data: list(range(10))[:len(data)]
        with mock_factory.spi() as device:
            assert device.read(3) == [0, 1, 2]
            assert device.read(6) == list(range(6))
def test_spi_hardware_write(mock_factory):
    """Hardware SPI writes return the byte count and forward data to xfer2."""
    with patch('gpiozero.pins.local.SpiDev') as spidev:
        spidev.return_value.xfer2.side_effect = lambda data: list(range(10))[:len(data)]
        with mock_factory.spi() as device:
            assert device.write([0, 1, 2]) == 3
            # NOTE: Mock.called_with() is not an assertion method -- it just
            # creates a (truthy) child mock, so the original
            # `assert ...called_with(...)` checks always passed.  Use
            # assert_called_with, which actually verifies the arguments.
            spidev.return_value.xfer2.assert_called_with([0, 1, 2])
            assert device.write(list(range(6))) == 6
            spidev.return_value.xfer2.assert_called_with(list(range(6)))
def test_spi_hardware_modes(mock_factory):
    """Clock mode/polarity/phase and misc attrs map onto SpiDev fields."""
    with patch('gpiozero.pins.local.SpiDev') as spidev:
        spidev.return_value.mode = 0
        spidev.return_value.lsbfirst = False
        spidev.return_value.cshigh = True
        spidev.return_value.bits_per_word = 8
        with mock_factory.spi() as device:
            assert device.clock_mode == 0
            assert not device.clock_polarity
            assert not device.clock_phase
            device.clock_polarity = False
            assert device.clock_mode == 0
            # Polarity is the high bit of the SPI mode, phase the low bit.
            device.clock_polarity = True
            assert device.clock_mode == 2
            device.clock_phase = True
            assert device.clock_mode == 3
            assert not device.lsb_first
            assert device.select_high
            assert device.bits_per_word == 8
            # Attribute writes must be forwarded to the underlying SpiDev.
            device.select_high = False
            device.lsb_first = True
            device.bits_per_word = 12
            assert not spidev.return_value.cshigh
            assert spidev.return_value.lsbfirst
            assert spidev.return_value.bits_per_word == 12
def test_spi_software_read(mock_factory):
    """Software SPI master reads slave-queued words in both clock phases."""
    class SPISlave(MockSPIDevice):
        def on_start(self):
            super(SPISlave, self).on_start()
            # Queue up ten words for the master to shift in.
            for i in range(10):
                self.tx_word(i)
    with patch('gpiozero.pins.local.SpiDev', None), \
            SPISlave(11, 10, 9, 8) as slave, \
            mock_factory.spi() as master:
        assert master.read(3) == [0, 1, 2]
        assert master.read(6) == [0, 1, 2, 3, 4, 5]
        # Reads must still work when both ends switch clock phase (CPHA=1).
        slave.clock_phase = True
        master.clock_phase = True
        assert master.read(3) == [0, 1, 2]
        assert master.read(6) == [0, 1, 2, 3, 4, 5]
def test_spi_software_write(mock_factory):
    """Software SPI writes reach the slave MSB-first as one bit-string."""
    with patch('gpiozero.pins.local.SpiDev', None), \
            MockSPIDevice(11, 10, 9, 8) as test_device, \
            mock_factory.spi() as master:
        master.write([0])
        assert test_device.rx_word() == 0
        master.write([2, 0])
        # 0b 0000_0010 0000_0000
        assert test_device.rx_word() == 512
        master.write([0, 1, 1])
        # 0b 0000_0000 0000_0001 0000_0001
        assert test_device.rx_word() == 257
def test_spi_software_write_lsb_first(mock_factory):
    """With lsb_first, the slave receives each transfer bit-reversed."""
    with patch('gpiozero.pins.local.SpiDev', None), \
            MockSPIDevice(11, 10, 9, 8, lsb_first=True) as test_device, \
            mock_factory.spi() as master:
        # lsb_first means the bit-strings above get reversed
        master.write([0])
        assert test_device.rx_word() == 0
        master.write([2, 0])
        # 0b 0000_0000 0100_0000
        assert test_device.rx_word() == 64
        master.write([0, 1, 1])
        # 0b 1000_0000 1000_0000 0000_0000
        assert test_device.rx_word() == 8421376
def test_spi_software_clock_mode(mock_factory):
    """clock_mode packs polarity (bit 1) and phase (bit 0), and validates."""
    with patch('gpiozero.pins.local.SpiDev', None), \
            mock_factory.spi() as master:
        assert master.clock_mode == 0
        assert not master.clock_polarity
        assert not master.clock_phase
        master.clock_polarity = False
        assert master.clock_mode == 0
        master.clock_polarity = True
        assert master.clock_mode == 2
        master.clock_phase = True
        assert master.clock_mode == 3
        # Setting the combined mode resets both component flags.
        master.clock_mode = 0
        assert not master.clock_polarity
        assert not master.clock_phase
        # Only modes 0-3 are valid.
        with pytest.raises(ValueError):
            master.clock_mode = 5
def test_spi_software_attr(mock_factory):
    """Software SPI exposes mutable lsb_first/select_high/bits_per_word."""
    with patch('gpiozero.pins.local.SpiDev', None), \
            mock_factory.spi() as master:
        assert not master.lsb_first
        assert not master.select_high
        assert master.bits_per_word == 8
        master.bits_per_word = 12
        assert master.bits_per_word == 12
        master.lsb_first = True
        assert master.lsb_first
        master.select_high = True
        assert master.select_high
        # Zero-width words are rejected.
        with pytest.raises(ValueError):
            master.bits_per_word = 0
# XXX Test two simultaneous SPI devices sharing clock, MOSI, and MISO, with
# separate select pins (including threaded tests which attempt simultaneous
# reading/writing)
|
|
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2015 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import hashlib
import time
from oslo_log import log
from oslo_utils import strutils
import six
from manila import exception
from manila.i18n import _, _LE, _LW
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.client import client_base
from manila.share.drivers.netapp import utils as na_utils
LOG = log.getLogger(__name__)
# NOTE(review): not used in this portion of the file; presumably prefixes
# entities that are renamed rather than destroyed on delete -- confirm.
DELETED_PREFIX = 'deleted_manila_'
# Cluster-default IPspace name, and the broadcast domain used for ports
# that are not already assigned to one (see BROADCAST_DOMAINS feature).
DEFAULT_IPSPACE = 'Default'
DEFAULT_BROADCAST_DOMAIN = 'OpenStack'
class NetAppCmodeClient(client_base.NetAppBaseClient):
    def __init__(self, **kwargs):
        """Initialize the cDOT client and negotiate the ONTAPI version.

        :param kwargs: forwarded to NetAppBaseClient; 'vserver' (optional)
            scopes the connection to a single Vserver.
        """
        super(NetAppCmodeClient, self).__init__(**kwargs)
        self.vserver = kwargs.get('vserver')
        self.connection.set_vserver(self.vserver)
        # Default values to run first api.
        self.connection.set_api_version(1, 15)
        (major, minor) = self.get_ontapi_version(cached=False)
        self.connection.set_api_version(major, minor)
        self._init_features()
    def _init_features(self):
        """Initialize cDOT feature support map.

        Broadcast domains, IPspaces and subnets all require ONTAPI 1.30+.
        """
        super(NetAppCmodeClient, self)._init_features()
        ontapi_version = self.get_ontapi_version(cached=True)
        ontapi_1_30 = ontapi_version >= (1, 30)
        self.features.add_feature('BROADCAST_DOMAINS', supported=ontapi_1_30)
        self.features.add_feature('IPSPACES', supported=ontapi_1_30)
        self.features.add_feature('SUBNETS', supported=ontapi_1_30)
def _invoke_vserver_api(self, na_element, vserver):
server = copy.copy(self.connection)
server.set_vserver(vserver)
result = server.invoke_successfully(na_element, True)
return result
def _has_records(self, api_result_element):
if (not api_result_element.get_child_content('num-records') or
api_result_element.get_child_content('num-records') == '0'):
return False
else:
return True
    def set_vserver(self, vserver):
        # Re-scope both this client and its connection to the given vserver.
        self.vserver = vserver
        self.connection.set_vserver(vserver)
    @na_utils.trace
    def create_vserver(self, vserver_name, root_volume_aggregate_name,
                       root_volume_name, aggregate_names):
        """Creates new vserver and assigns aggregates.

        The vserver is created with a unix-style root volume, then modified
        to restrict it to the given aggregate list.
        """
        create_args = {
            'vserver-name': vserver_name,
            'root-volume-security-style': 'unix',
            'root-volume-aggregate': root_volume_aggregate_name,
            'root-volume': root_volume_name,
            'name-server-switch': {
                'nsswitch': 'file',
            },
        }
        self.send_request('vserver-create', create_args)
        aggr_list = [{'aggr-name': aggr_name} for aggr_name in aggregate_names]
        modify_args = {
            'aggr-list': aggr_list,
            'vserver-name': vserver_name,
        }
        self.send_request('vserver-modify', modify_args)
    @na_utils.trace
    def vserver_exists(self, vserver_name):
        """Checks if Vserver exists.

        :returns: True when a vserver-get-iter query by name has records.
        """
        LOG.debug('Checking if Vserver %s exists', vserver_name)
        api_args = {
            'query': {
                'vserver-info': {
                    'vserver-name': vserver_name,
                },
            },
            'desired-attributes': {
                'vserver-info': {
                    'vserver-name': None,
                },
            },
        }
        result = self.send_request('vserver-get-iter', api_args)
        return self._has_records(result)
    @na_utils.trace
    def get_vserver_root_volume_name(self, vserver_name):
        """Get the root volume name of the vserver.

        :raises NetAppException: if the vserver or its root volume
            cannot be found in the API response.
        """
        api_args = {
            'query': {
                'vserver-info': {
                    'vserver-name': vserver_name,
                },
            },
            'desired-attributes': {
                'vserver-info': {
                    'root-volume': None,
                },
            },
        }
        vserver_info = self.send_request('vserver-get-iter', api_args)
        try:
            # get_child_by_name returns None when absent, so a missing
            # vserver surfaces here as AttributeError.
            root_volume_name = vserver_info.get_child_by_name(
                'attributes-list').get_child_by_name(
                'vserver-info').get_child_content('root-volume')
        except AttributeError:
            msg = _('Could not determine root volume name '
                    'for Vserver %s.') % vserver_name
            raise exception.NetAppException(msg)
        return root_volume_name
    @na_utils.trace
    def list_vservers(self, vserver_type='data'):
        """Get the names of vservers present, optionally filtered by type.

        Pass a falsy vserver_type to list vservers of every type.
        """
        query = {
            'vserver-info': {
                'vserver-type': vserver_type,
            }
        } if vserver_type else None
        api_args = {
            'desired-attributes': {
                'vserver-info': {
                    'vserver-name': None,
                },
            },
        }
        if query:
            api_args['query'] = query
        result = self.send_request('vserver-get-iter', api_args)
        vserver_info_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        return [vserver_info.get_child_content('vserver-name')
                for vserver_info in vserver_info_list.get_children()]
    @na_utils.trace
    def get_vserver_volume_count(self, max_records=20):
        """Get the number of volumes present on a cluster or vserver.

        Call this on a vserver client to see how many volumes exist
        on that vserver.  The 'max-records' argument limits the iterator
        page size, so the returned count is capped at max_records.
        """
        api_args = {
            'max-records': max_records,
            'desired-attributes': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': None,
                    },
                },
            },
        }
        volumes_data = self.send_request('volume-get-iter', api_args)
        return int(volumes_data.get_child_content('num-records'))
    @na_utils.trace
    def delete_vserver(self, vserver_name, vserver_client,
                       security_services=None):
        """Delete Vserver.

        Checks if Vserver exists and does not have active shares.
        Offlines and destroys root volumes. Deletes Vserver.
        """
        if not self.vserver_exists(vserver_name):
            LOG.error(_LE("Vserver %s does not exist."), vserver_name)
            return
        root_volume_name = self.get_vserver_root_volume_name(vserver_name)
        # max_records=2 is enough to distinguish "root volume only" (1)
        # from "root volume plus shares" (>1).
        volumes_count = vserver_client.get_vserver_volume_count(max_records=2)
        if volumes_count == 1:
            try:
                vserver_client.offline_volume(root_volume_name)
            except netapp_api.NaApiError as e:
                if e.code == netapp_api.EVOLUMEOFFLINE:
                    LOG.error(_LE("Volume %s is already offline."),
                              root_volume_name)
                else:
                    raise e
            vserver_client.delete_volume(root_volume_name)
        elif volumes_count > 1:
            msg = _("Cannot delete Vserver. Vserver %s has shares.")
            raise exception.NetAppException(msg % vserver_name)
        if security_services:
            self._terminate_vserver_services(vserver_name, vserver_client,
                                             security_services)
        self.send_request('vserver-destroy', {'vserver-name': vserver_name})
    @na_utils.trace
    def _terminate_vserver_services(self, vserver_name, vserver_client,
                                    security_services):
        """Tear down security-service servers (CIFS) before vserver delete."""
        for service in security_services:
            if service['type'] == 'active_directory':
                api_args = {
                    'admin-password': service['password'],
                    'admin-username': service['user'],
                }
                try:
                    vserver_client.send_request('cifs-server-delete', api_args)
                except netapp_api.NaApiError as e:
                    if e.code == netapp_api.EOBJECTNOTFOUND:
                        LOG.error(_LE('CIFS server does not exist for '
                                      'Vserver %s.'), vserver_name)
                    else:
                        # Retry without credentials; deletes the CIFS server
                        # locally even when the AD domain is unreachable.
                        vserver_client.send_request('cifs-server-delete')
    @na_utils.trace
    def list_cluster_nodes(self):
        """Get all available cluster nodes.

        :returns: list of node name strings (may be empty).
        """
        api_args = {
            'desired-attributes': {
                'node-details-info': {
                    'node': None,
                },
            },
        }
        result = self.send_request('system-node-get-iter', api_args)
        nodes_info_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        return [node_info.get_child_content('node') for node_info
                in nodes_info_list.get_children()]
@na_utils.trace
def list_node_data_ports(self, node):
ports = self.get_node_data_ports(node)
return [port.get('port') for port in ports]
    @na_utils.trace
    def get_node_data_ports(self, node):
        """Get applicable data ports on the node.

        Only up, physical or ifgroup, data-role ports are considered.
        :returns: list of dicts with 'node', 'port' and 'speed' keys,
            sorted fastest-first.
        """
        api_args = {
            'query': {
                'net-port-info': {
                    'node': node,
                    'link-status': 'up',
                    'port-type': 'physical|if_group',
                    'role': 'data',
                },
            },
            'desired-attributes': {
                'net-port-info': {
                    'port': None,
                    'node': None,
                    'operational-speed': None,
                    'ifgrp-port': None,
                },
            },
        }
        result = self.send_request('net-port-get-iter', api_args)
        net_port_info_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        ports = []
        for port_info in net_port_info_list.get_children():
            # Skip physical ports that are part of interface groups.
            if port_info.get_child_content('ifgrp-port'):
                continue
            port = {
                'node': port_info.get_child_content('node'),
                'port': port_info.get_child_content('port'),
                'speed': port_info.get_child_content('operational-speed'),
            }
            ports.append(port)
        return self._sort_data_ports_by_speed(ports)
@na_utils.trace
def _sort_data_ports_by_speed(self, ports):
def sort_key(port):
value = port.get('speed')
if not (value and isinstance(value, six.string_types)):
return 0
elif value.isdigit():
return int(value)
elif value == 'auto':
return 3
elif value == 'undef':
return 2
else:
return 1
return sorted(ports, key=sort_key, reverse=True)
    @na_utils.trace
    def list_aggregates(self):
        """Get names of all aggregates.

        :raises NetAppException: when the cluster reports no aggregates.
        """
        try:
            api_args = {
                'desired-attributes': {
                    'aggr-attributes': {
                        'aggregate-name': None,
                    },
                },
            }
            result = self.send_request('aggr-get-iter', api_args)
            # get_child_by_name returns None when there are no aggregates,
            # so get_children() raises AttributeError in that case.
            aggr_list = result.get_child_by_name(
                'attributes-list').get_children()
        except AttributeError:
            msg = _("Could not list aggregates.")
            raise exception.NetAppException(msg)
        return [aggr.get_child_content('aggregate-name') for aggr
                in aggr_list]
    @na_utils.trace
    def list_vserver_aggregates(self):
        """Returns a list of aggregates available to a vserver.

        This must be called against a Vserver LIF.
        """
        return self.get_vserver_aggregate_capacities().keys()
    @na_utils.trace
    def create_network_interface(self, ip, netmask, vlan, node, port,
                                 vserver_name, allocation_id,
                                 lif_name_template):
        """Creates LIF on VLAN port.

        When vlan is given, a VLAN port '<port>-<vlan>' is created first and
        becomes the LIF's home port.  The LIF is created with both NFS and
        CIFS data protocols enabled.
        """
        home_port_name = port
        if vlan:
            self._create_vlan(node, port, vlan)
            home_port_name = '%(port)s-%(tag)s' % {'port': port, 'tag': vlan}
        if self.features.BROADCAST_DOMAINS:
            self._ensure_broadcast_domain_for_port(node, home_port_name)
        interface_name = (lif_name_template %
                          {'node': node, 'net_allocation_id': allocation_id})
        LOG.debug('Creating LIF %(lif)s for Vserver %(vserver)s ',
                  {'lif': interface_name, 'vserver': vserver_name})
        api_args = {
            'address': ip,
            'administrative-status': 'up',
            'data-protocols': [
                {'data-protocol': 'nfs'},
                {'data-protocol': 'cifs'},
            ],
            'home-node': node,
            'home-port': home_port_name,
            'netmask': netmask,
            'interface-name': interface_name,
            'role': 'data',
            'vserver': vserver_name,
        }
        self.send_request('net-interface-create', api_args)
    @na_utils.trace
    def _create_vlan(self, node, port, vlan):
        """Create a VLAN on a port; an already-existing VLAN is tolerated."""
        try:
            api_args = {
                'vlan-info': {
                    'parent-interface': port,
                    'node': node,
                    'vlanid': vlan,
                },
            }
            self.send_request('net-vlan-create', api_args)
        except netapp_api.NaApiError as e:
            # A duplicate VLAN is fine; anything else is a real failure.
            if e.code == netapp_api.EDUPLICATEENTRY:
                LOG.debug('VLAN %(vlan)s already exists on port %(port)s',
                          {'vlan': vlan, 'port': port})
            else:
                msg = _('Failed to create VLAN %(vlan)s on '
                        'port %(port)s. %(err_msg)s')
                msg_args = {'vlan': vlan, 'port': port, 'err_msg': e.message}
                raise exception.NetAppException(msg % msg_args)
    @na_utils.trace
    def _ensure_broadcast_domain_for_port(self, node, port,
                                          domain=DEFAULT_BROADCAST_DOMAIN,
                                          ipspace=DEFAULT_IPSPACE):
        """Ensure a port is in a broadcast domain.  Create one if necessary."""
        # A port already in any broadcast domain is left where it is.
        if self._get_broadcast_domain_for_port(node, port):
            return
        if not self._broadcast_domain_exists(domain, ipspace):
            self._create_broadcast_domain(domain, ipspace)
        self._add_port_to_broadcast_domain(node, port, domain, ipspace)
    @na_utils.trace
    def _get_broadcast_domain_for_port(self, node, port):
        """Get broadcast domain for a specific port.

        :returns: the domain name, or None when the port has none.
        :raises NetAppException: when the port itself is not found.
        """
        api_args = {
            'query': {
                'net-port-info': {
                    'node': node,
                    'port': port,
                },
            },
            'desired-attributes': {
                'net-port-info': {
                    'broadcast-domain': None,
                },
            },
        }
        result = self.send_request('net-port-get-iter', api_args)
        net_port_info_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        port_info = net_port_info_list.get_children()
        if not port_info:
            msg = _('Could not find port %(port)s on node %(node)s.')
            msg_args = {'port': port, 'node': node}
            raise exception.NetAppException(msg % msg_args)
        return port_info[0].get_child_content('broadcast-domain')
    @na_utils.trace
    def _broadcast_domain_exists(self, domain, ipspace):
        """Check if a broadcast domain exists within the given IPspace."""
        api_args = {
            'query': {
                'net-port-broadcast-domain-info': {
                    'ipspace': ipspace,
                    'broadcast-domain': domain,
                },
            },
            'desired-attributes': {
                'net-port-broadcast-domain-info': None,
            },
        }
        result = self.send_request('net-port-broadcast-domain-get-iter',
                                   api_args)
        return self._has_records(result)
    @na_utils.trace
    def _create_broadcast_domain(self, domain, ipspace, mtu=1500):
        """Create a broadcast domain with the given MTU (default 1500)."""
        api_args = {
            'ipspace': ipspace,
            'broadcast-domain': domain,
            'mtu': mtu,
        }
        self.send_request('net-port-broadcast-domain-create', api_args)
    @na_utils.trace
    def _add_port_to_broadcast_domain(self, node, port, domain, ipspace):
        """Add a 'node:port' to a broadcast domain; tolerate re-adds."""
        qualified_port_name = ':'.join([node, port])
        try:
            api_args = {
                'ipspace': ipspace,
                'broadcast-domain': domain,
                'ports': {
                    'net-qualified-port-name': qualified_port_name,
                }
            }
            self.send_request('net-port-broadcast-domain-add-ports', api_args)
        except netapp_api.NaApiError as e:
            # Already assigned is benign; anything else is a real failure.
            if e.code == (netapp_api.
                          E_VIFMGR_PORT_ALREADY_ASSIGNED_TO_BROADCAST_DOMAIN):
                LOG.debug('Port %(port)s already exists in broadcast domain '
                          '%(domain)s', {'port': port, 'domain': domain})
            else:
                msg = _('Failed to add port %(port)s to broadcast domain '
                        '%(domain)s. %(err_msg)s')
                msg_args = {
                    'port': qualified_port_name,
                    'domain': domain,
                    'err_msg': e.message,
                }
                raise exception.NetAppException(msg % msg_args)
    @na_utils.trace
    def network_interface_exists(self, vserver_name, node, port, ip, netmask,
                                 vlan):
        """Checks if LIF exists.

        The home port is derived the same way create_network_interface
        builds it: '<port>-<vlan>' when a VLAN tag is given.
        """
        home_port_name = (port if not vlan else
                          '%(port)s-%(tag)s' % {'port': port, 'tag': vlan})
        api_args = {
            'query': {
                'net-interface-info': {
                    'address': ip,
                    'home-node': node,
                    'home-port': home_port_name,
                    'netmask': netmask,
                    'vserver': vserver_name,
                },
            },
            'desired-attributes': {
                'net-interface-info': {
                    'interface-name': None,
                },
            },
        }
        result = self.send_request('net-interface-get-iter', api_args)
        return self._has_records(result)
    @na_utils.trace
    def list_network_interfaces(self):
        """Get the names of available LIFs.

        :returns: list of interface-name strings (may be empty).
        """
        api_args = {
            'desired-attributes': {
                'net-interface-info': {
                    'interface-name': None,
                },
            },
        }
        result = self.send_request('net-interface-get-iter', api_args)
        lif_info_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        return [lif_info.get_child_content('interface-name') for lif_info
                in lif_info_list.get_children()]
    @na_utils.trace
    def get_network_interfaces(self, protocols=None):
        """Get available LIFs.

        :param protocols: optional protocol name(s); when given, only LIFs
            supporting any of them are returned.
        :returns: list of dicts describing each LIF.
        """
        protocols = na_utils.convert_to_list(protocols)
        protocols = [protocol.lower() for protocol in protocols]
        api_args = {
            'query': {
                'net-interface-info': {
                    'data-protocols': {
                        'data-protocol': '|'.join(protocols),
                    }
                }
            }
        } if protocols else None
        result = self.send_request('net-interface-get-iter', api_args)
        lif_info_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        interfaces = []
        for lif_info in lif_info_list.get_children():
            lif = {
                'address': lif_info.get_child_content('address'),
                'home-node': lif_info.get_child_content('home-node'),
                'home-port': lif_info.get_child_content('home-port'),
                'interface-name': lif_info.get_child_content('interface-name'),
                'netmask': lif_info.get_child_content('netmask'),
                'role': lif_info.get_child_content('role'),
                'vserver': lif_info.get_child_content('vserver'),
            }
            interfaces.append(lif)
        return interfaces
    @na_utils.trace
    def delete_network_interface(self, interface_name):
        """Deletes LIF."""
        # 'vserver': None matches any vserver in the delete query.
        api_args = {'vserver': None, 'interface-name': interface_name}
        self.send_request('net-interface-delete', api_args)
    @na_utils.trace
    def get_cluster_aggregate_capacities(self, aggregate_names):
        """Calculates capacity of one or more aggregates.

        Returns dictionary of aggregate capacity metrics.
        'size-used' is the actual space consumed on the aggregate.
        'size-available' is the actual space remaining.
        'size-total' is the defined total aggregate size, such that
        used + available = total.

        Passing aggregate_names=None queries every aggregate; an empty
        list short-circuits to an empty dict.
        """
        if aggregate_names is not None and len(aggregate_names) == 0:
            return {}
        desired_attributes = {
            'aggr-attributes': {
                'aggregate-name': None,
                'aggr-space-attributes': {
                    'size-available': None,
                    'size-total': None,
                    'size-used': None,
                },
            },
        }
        aggrs = self._get_aggregates(aggregate_names=aggregate_names,
                                     desired_attributes=desired_attributes)
        aggr_space_dict = dict()
        for aggr in aggrs:
            aggr_name = aggr.get_child_content('aggregate-name')
            aggr_space_attrs = aggr.get_child_by_name('aggr-space-attributes')
            aggr_space_dict[aggr_name] = {
                'available':
                int(aggr_space_attrs.get_child_content('size-available')),
                'total':
                int(aggr_space_attrs.get_child_content('size-total')),
                'used':
                int(aggr_space_attrs.get_child_content('size-used')),
            }
        return aggr_space_dict
    @na_utils.trace
    def get_vserver_aggregate_capacities(self, aggregate_names=None):
        """Calculates capacity of one or more aggregates for a vserver.

        Returns dictionary of aggregate capacity metrics.  This must
        be called against a Vserver LIF.  Only 'available' capacity is
        visible from the vserver context.
        """
        if aggregate_names is not None and len(aggregate_names) == 0:
            return {}
        api_args = {
            'desired-attributes': {
                'vserver-info': {
                    'vserver-name': None,
                    'vserver-aggr-info-list': {
                        'vserver-aggr-info': {
                            'aggr-name': None,
                            'aggr-availsize': None,
                        },
                    },
                },
            },
        }
        result = self.send_request('vserver-get', api_args)
        attributes = result.get_child_by_name('attributes')
        if not attributes:
            raise exception.NetAppException('Failed to read Vserver info')
        vserver_info = attributes.get_child_by_name('vserver-info')
        vserver_name = vserver_info.get_child_content('vserver-name')
        vserver_aggr_info_element = vserver_info.get_child_by_name(
            'vserver-aggr-info-list') or netapp_api.NaElement('none')
        vserver_aggr_info_list = vserver_aggr_info_element.get_children()
        if not vserver_aggr_info_list:
            LOG.warning(_LW('No aggregates assigned to Vserver %s.'),
                        vserver_name)
        # Return dict of key-value pair of aggr_name:aggr_size_available.
        aggr_space_dict = {}
        for aggr_info in vserver_aggr_info_list:
            aggr_name = aggr_info.get_child_content('aggr-name')
            if aggregate_names is None or aggr_name in aggregate_names:
                aggr_size = int(aggr_info.get_child_content('aggr-availsize'))
                aggr_space_dict[aggr_name] = {'available': aggr_size}
        LOG.debug('Found available Vserver aggregates: %s', aggr_space_dict)
        return aggr_space_dict
    @na_utils.trace
    def _get_aggregates(self, aggregate_names=None, desired_attributes=None):
        """Return aggr-attributes elements, optionally filtered by name."""
        query = {
            'aggr-attributes': {
                'aggregate-name': '|'.join(aggregate_names),
            }
        } if aggregate_names else None
        api_args = {}
        if query:
            api_args['query'] = query
        if desired_attributes:
            api_args['desired-attributes'] = desired_attributes
        result = self.send_request('aggr-get-iter', api_args)
        if not self._has_records(result):
            return []
        else:
            return result.get_child_by_name('attributes-list').get_children()
    @na_utils.trace
    def setup_security_services(self, security_services, vserver_client,
                                vserver_name):
        """Configure LDAP/AD/Kerberos services on a vserver.

        :raises NetAppException: for an unrecognized service type.
        """
        # Name lookups consult LDAP first, then local files.
        api_args = {
            'name-mapping-switch': [
                {'nmswitch': 'ldap'},
                {'nmswitch': 'file'}
            ],
            'name-server-switch': [
                {'nsswitch': 'ldap'},
                {'nsswitch': 'file'}
            ],
            'vserver-name': vserver_name,
        }
        self.send_request('vserver-modify', api_args)
        for security_service in security_services:
            if security_service['type'].lower() == 'ldap':
                vserver_client.configure_ldap(security_service)
            elif security_service['type'].lower() == 'active_directory':
                vserver_client.configure_active_directory(security_service,
                                                          vserver_name)
            elif security_service['type'].lower() == 'kerberos':
                # Realm config is cluster-scoped; per-LIF config is not.
                self.create_kerberos_realm(security_service)
                vserver_client.configure_kerberos(security_service,
                                                  vserver_name)
            else:
                msg = _('Unsupported security service type %s for '
                        'Data ONTAP driver')
                raise exception.NetAppException(msg % security_service['type'])
    @na_utils.trace
    def enable_nfs(self):
        """Enables NFS on Vserver.

        Turns NFS on, enables NFSv4.0, and adds a catch-all rule to the
        default export policy: read-only for any client, writes denied.
        """
        self.send_request('nfs-enable')
        self.send_request('nfs-service-modify', {'is-nfsv40-enabled': 'true'})
        api_args = {
            'client-match': '0.0.0.0/0',
            'policy-name': 'default',
            'ro-rule': {
                'security-flavor': 'any',
            },
            'rw-rule': {
                'security-flavor': 'never',
            },
        }
        self.send_request('export-rule-create', api_args)
@na_utils.trace
def configure_ldap(self, security_service):
"""Configures LDAP on Vserver."""
config_name = hashlib.md5(security_service['id']).hexdigest()
api_args = {
'ldap-client-config': config_name,
'servers': {
'ip-address': security_service['server'],
},
'tcp-port': '389',
'schema': 'RFC-2307',
'bind-password': security_service['password'],
}
self.send_request('ldap-client-create', api_args)
api_args = {'client-config': config_name, 'client-enabled': 'true'}
self.send_request('ldap-config-create', api_args)
    @na_utils.trace
    def configure_active_directory(self, security_service, vserver_name):
        """Configures AD on Vserver.

        DNS is configured first so the domain controller can be resolved.
        """
        self.configure_dns(security_service)
        # 'cifs-server' is CIFS Server NetBIOS Name, max length is 15.
        # Should be unique within each domain (data['domain']).
        cifs_server = (vserver_name[0:7] + '..' + vserver_name[-6:]).upper()
        api_args = {
            'admin-username': security_service['user'],
            'admin-password': security_service['password'],
            'force-account-overwrite': 'true',
            'cifs-server': cifs_server,
            'domain': security_service['domain'],
        }
        try:
            LOG.debug("Trying to setup CIFS server with data: %s", api_args)
            self.send_request('cifs-server-create', api_args)
        except netapp_api.NaApiError as e:
            msg = _("Failed to create CIFS server entry. %s")
            raise exception.NetAppException(msg % e.message)
    @na_utils.trace
    def create_kerberos_realm(self, security_service):
        """Creates Kerberos realm on cluster.

        An already-existing realm config (EDUPLICATEENTRY) is tolerated.
        """
        api_args = {
            'admin-server-ip': security_service['server'],
            'admin-server-port': '749',
            'clock-skew': '5',
            'comment': '',
            'config-name': security_service['id'],
            'kdc-ip': security_service['server'],
            'kdc-port': '88',
            'kdc-vendor': 'other',
            'password-server-ip': security_service['server'],
            'password-server-port': '464',
            'realm': security_service['domain'].upper(),
        }
        try:
            self.send_request('kerberos-realm-create', api_args)
        except netapp_api.NaApiError as e:
            if e.code == netapp_api.EDUPLICATEENTRY:
                LOG.debug('Kerberos realm config already exists.')
            else:
                msg = _('Failed to create Kerberos realm. %s')
                raise exception.NetAppException(msg % e.message)
    @na_utils.trace
    def configure_kerberos(self, security_service, vserver_name):
        """Configures Kerberos for NFS on Vserver.

        Kerberos is enabled per-LIF, so every existing LIF is modified.
        :raises NetAppException: when no LIFs exist yet.
        """
        self.configure_dns(security_service)
        spn = self._get_kerberos_service_principal_name(
            security_service, vserver_name)
        lifs = self.list_network_interfaces()
        if not lifs:
            msg = _("Cannot set up Kerberos. There are no LIFs configured.")
            raise exception.NetAppException(msg)
        for lif_name in lifs:
            api_args = {
                'admin-password': security_service['password'],
                'admin-user-name': security_service['user'],
                'interface-name': lif_name,
                'is-kerberos-enabled': 'true',
                'service-principal-name': spn,
            }
            self.send_request('kerberos-config-modify', api_args)
@na_utils.trace
def _get_kerberos_service_principal_name(self, security_service,
vserver_name):
return 'nfs/' + vserver_name.replace('_', '-') + '.' + \
security_service['domain'] + '@' + \
security_service['domain'].upper()
    @na_utils.trace
    def configure_dns(self, security_service):
        """Configure DNS on the Vserver from the security service settings.

        Existing DNS config (EDUPLICATEENTRY) is logged and left untouched.
        """
        api_args = {
            'domains': {
                'string': security_service['domain'],
            },
            'name-servers': {
                'ip-address': security_service['dns_ip'],
            },
            'dns-state': 'enabled',
        }
        try:
            self.send_request('net-dns-create', api_args)
        except netapp_api.NaApiError as e:
            if e.code == netapp_api.EDUPLICATEENTRY:
                LOG.error(_LE("DNS exists for Vserver."))
            else:
                msg = _("Failed to configure DNS. %s")
                raise exception.NetAppException(msg % e.message)
    @na_utils.trace
    def create_volume(self, aggregate_name, volume_name, size_gb,
                      thin_provisioned=False, snapshot_policy=None,
                      language=None, dedup_enabled=False,
                      compression_enabled=False, max_files=None):
        """Creates a volume.

        The volume is junctioned at '/<volume_name>'.  Optional efficiency
        settings (dedup/compression) and a file-count cap are applied after
        creation.
        """
        api_args = {
            'containing-aggr-name': aggregate_name,
            'size': six.text_type(size_gb) + 'g',
            'volume': volume_name,
            'junction-path': '/%s' % volume_name,
        }
        if thin_provisioned:
            api_args['space-reserve'] = 'none'
        if snapshot_policy is not None:
            api_args['snapshot-policy'] = snapshot_policy
        if language is not None:
            api_args['language-code'] = language
        self.send_request('volume-create', api_args)
        # cDOT compression requires that deduplication be enabled.
        if dedup_enabled or compression_enabled:
            self.enable_dedup(volume_name)
        if compression_enabled:
            self.enable_compression(volume_name)
        if max_files is not None:
            self.set_volume_max_files(volume_name, max_files)
    @na_utils.trace
    def enable_dedup(self, volume_name):
        """Enable deduplication on volume."""
        api_args = {'path': '/vol/%s' % volume_name}
        self.send_request('sis-enable', api_args)
    @na_utils.trace
    def enable_compression(self, volume_name):
        """Enable compression on volume.

        Note: dedup must already be enabled (see create_volume).
        """
        api_args = {
            'path': '/vol/%s' % volume_name,
            'enable-compression': 'true'
        }
        self.send_request('sis-set-config', api_args)
    @na_utils.trace
    def set_volume_max_files(self, volume_name, max_files):
        """Set flexvol file limit.

        Uses volume-modify-iter with a name query so no volume handle
        is required.
        """
        api_args = {
            'query': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': volume_name,
                    },
                },
            },
            'attributes': {
                'volume-attributes': {
                    'volume-inode-attributes': {
                        'files-total': max_files,
                    },
                },
            },
        }
        self.send_request('volume-modify-iter', api_args)
    @na_utils.trace
    def volume_exists(self, volume_name):
        """Checks if volume exists.

        :returns: True when a volume-get-iter query by name has records.
        """
        LOG.debug('Checking if volume %s exists', volume_name)
        api_args = {
            'query': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': volume_name,
                    },
                },
            },
            'desired-attributes': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': None,
                    },
                },
            },
        }
        result = self.send_request('volume-get-iter', api_args)
        return self._has_records(result)
    @na_utils.trace
    def get_aggregate_for_volume(self, volume_name):
        """Get the name of the aggregate containing a volume.

        :raises NetAppException: if the volume or its aggregate cannot
            be determined.
        """
        api_args = {
            'query': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': volume_name,
                    },
                },
            },
            'desired-attributes': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'containing-aggregate-name': None,
                        'name': None,
                    },
                },
            },
        }
        result = self.send_request('volume-get-iter', api_args)
        # Each lookup falls back to an empty element so a missing volume
        # surfaces as an empty aggregate name rather than an exception.
        attributes_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        volume_attributes = attributes_list.get_child_by_name(
            'volume-attributes') or netapp_api.NaElement('none')
        volume_id_attributes = volume_attributes.get_child_by_name(
            'volume-id-attributes') or netapp_api.NaElement('none')
        aggregate = volume_id_attributes.get_child_content(
            'containing-aggregate-name')
        if not aggregate:
            msg = _('Could not find aggregate for volume %s.')
            raise exception.NetAppException(msg % volume_name)
        return aggregate
    @na_utils.trace
    def create_volume_clone(self, volume_name, parent_volume_name,
                            parent_snapshot_name=None):
        """Clones a volume.

        Without parent_snapshot_name, cDOT clones from a new temporary
        snapshot of the parent.
        """
        api_args = {
            'volume': volume_name,
            'parent-volume': parent_volume_name,
            'parent-snapshot': parent_snapshot_name,
            'junction-path': '/%s' % volume_name,
        }
        self.send_request('volume-clone-create', api_args)
    @na_utils.trace
    def split_volume_clone(self, volume_name):
        """Begins splitting a clone from its parent.

        The split continues asynchronously on the backend.
        """
        api_args = {'volume': volume_name}
        self.send_request('volume-clone-split-start', api_args)
    @na_utils.trace
    def get_volume_junction_path(self, volume_name, is_style_cifs=False):
        """Gets a volume junction path.

        :param is_style_cifs: request the CIFS-style path representation.
        """
        api_args = {
            'volume': volume_name,
            'is-style-cifs': six.text_type(is_style_cifs).lower(),
        }
        result = self.send_request('volume-get-volume-path', api_args)
        return result.get_child_content('junction')
    @na_utils.trace
    def offline_volume(self, volume_name):
        """Offlines a volume.  Already-offline volumes are tolerated."""
        try:
            self.send_request('volume-offline', {'name': volume_name})
        except netapp_api.NaApiError as e:
            if e.code == netapp_api.EVOLUMEOFFLINE:
                return
            raise
    @na_utils.trace
    def _unmount_volume(self, volume_name, force=False):
        """Unmounts a volume.  Already-unmounted volumes are tolerated."""
        api_args = {
            'volume-name': volume_name,
            'force': six.text_type(force).lower(),
        }
        try:
            self.send_request('volume-unmount', api_args)
        except netapp_api.NaApiError as e:
            if e.code == netapp_api.EVOL_NOT_MOUNTED:
                return
            raise
@na_utils.trace
def unmount_volume(self, volume_name, force=False, wait_seconds=30):
"""Unmounts a volume, retrying if a clone split is ongoing.
NOTE(cknight): While unlikely to happen in normal operation, any client
that tries to delete volumes immediately after creating volume clones
is likely to experience failures if cDOT isn't quite ready for the
delete. The volume unmount is the first operation in the delete
path that fails in this case, and there is no proactive check we can
use to reliably predict the failure. And there isn't a specific error
code from volume-unmount, so we have to check for a generic error code
plus certain language in the error code. It's ugly, but it works, and
it's better than hard-coding a fixed delay.
"""
# Do the unmount, handling split-related errors with retries.
retry_interval = 3 # seconds
for retry in range(wait_seconds / retry_interval):
try:
self._unmount_volume(volume_name, force=force)
LOG.debug('Volume %s unmounted.', volume_name)
return
except netapp_api.NaApiError as e:
if e.code == netapp_api.EAPIERROR and 'job ID' in e.message:
msg = _LW('Could not unmount volume %(volume)s due to '
'ongoing volume operation: %(exception)s')
msg_args = {'volume': volume_name, 'exception': e}
LOG.warning(msg, msg_args)
time.sleep(retry_interval)
continue
raise
msg = _('Failed to unmount volume %(volume)s after '
'waiting for %(wait_seconds)s seconds.')
msg_args = {'volume': volume_name, 'wait_seconds': wait_seconds}
LOG.error(msg, msg_args)
raise exception.NetAppException(msg % msg_args)
@na_utils.trace
def delete_volume(self, volume_name):
"""Deletes a volume."""
self.send_request('volume-destroy', {'name': volume_name})
@na_utils.trace
def create_snapshot(self, volume_name, snapshot_name):
"""Creates a volume snapshot."""
api_args = {'volume': volume_name, 'snapshot': snapshot_name}
self.send_request('snapshot-create', api_args)
    @na_utils.trace
    def get_snapshot(self, volume_name, snapshot_name):
        """Gets a single snapshot.

        Returns a dict with keys 'name', 'volume', 'busy', and 'owners'.
        Raises SnapshotUnavailable or NetAppException when the filer reports
        a per-volume error, SnapshotNotFound when no record matches, and
        NetAppException when the (volume, snapshot) pair is not unique.
        """
        api_args = {
            'query': {
                'snapshot-info': {
                    'name': snapshot_name,
                    'volume': volume_name,
                },
            },
            'desired-attributes': {
                'snapshot-info': {
                    'name': None,
                    'volume': None,
                    'busy': None,
                    'snapshot-owners-list': {
                        'snapshot-owner': None,
                    }
                },
            },
        }
        result = self.send_request('snapshot-get-iter', api_args)
        # Per-volume errors are reported in a separate element; check them
        # before looking at the (possibly empty) attributes list.
        error_record_list = result.get_child_by_name(
            'volume-errors') or netapp_api.NaElement('none')
        errors = error_record_list.get_children()
        if errors:
            # Only the first error is surfaced to the caller.
            error = errors[0]
            error_code = error.get_child_content('errno')
            error_reason = error.get_child_content('reason')
            msg = _('Could not read information for snapshot %(name)s. '
                    'Code: %(code)s. Reason: %(reason)s')
            msg_args = {
                'name': snapshot_name,
                'code': error_code,
                'reason': error_reason
            }
            if error_code == netapp_api.ESNAPSHOTNOTALLOWED:
                raise exception.SnapshotUnavailable(msg % msg_args)
            else:
                raise exception.NetAppException(msg % msg_args)
        attributes_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        snapshot_info_list = attributes_list.get_children()
        # No records at all means the snapshot does not exist; more than one
        # record means the query was ambiguous.
        if not self._has_records(result):
            raise exception.SnapshotNotFound(name=snapshot_name)
        elif len(snapshot_info_list) > 1:
            msg = _('Could not find unique snapshot %(snap)s on '
                    'volume %(vol)s.')
            msg_args = {'snap': snapshot_name, 'vol': volume_name}
            raise exception.NetAppException(msg % msg_args)
        snapshot_info = snapshot_info_list[0]
        snapshot = {
            'name': snapshot_info.get_child_content('name'),
            'volume': snapshot_info.get_child_content('volume'),
            'busy': strutils.bool_from_string(
                snapshot_info.get_child_content('busy')),
        }
        # Collect the set of owners keeping the snapshot busy (may be empty).
        snapshot_owners_list = snapshot_info.get_child_by_name(
            'snapshot-owners-list') or netapp_api.NaElement('none')
        snapshot_owners = set([
            snapshot_owner.get_child_content('owner')
            for snapshot_owner in snapshot_owners_list.get_children()])
        snapshot['owners'] = snapshot_owners
        return snapshot
@na_utils.trace
def delete_snapshot(self, volume_name, snapshot_name):
"""Deletes a volume snapshot."""
api_args = {'volume': volume_name, 'snapshot': snapshot_name}
self.send_request('snapshot-delete', api_args)
@na_utils.trace
def create_cifs_share(self, share_name):
share_path = '/%s' % share_name
api_args = {'path': share_path, 'share-name': share_name}
self.send_request('cifs-share-create', api_args)
@na_utils.trace
def add_cifs_share_access(self, share_name, user_name):
api_args = {
'permission': 'full_control',
'share': share_name,
'user-or-group': user_name,
}
self.send_request('cifs-share-access-control-create', api_args)
@na_utils.trace
def remove_cifs_share_access(self, share_name, user_name):
api_args = {'user-or-group': user_name, 'share': share_name}
self.send_request('cifs-share-access-control-delete', api_args)
@na_utils.trace
def remove_cifs_share(self, share_name):
self.send_request('cifs-share-delete', {'share-name': share_name})
@na_utils.trace
def add_nfs_export_rule(self, policy_name, rule, readonly):
rule_indices = self._get_nfs_export_rule_indices(policy_name, rule)
if not rule_indices:
self._add_nfs_export_rule(policy_name, rule, readonly)
else:
# Update first rule and delete the rest
self._update_nfs_export_rule(
policy_name, rule, readonly, rule_indices.pop(0))
self._remove_nfs_export_rules(policy_name, rule_indices)
@na_utils.trace
def _add_nfs_export_rule(self, policy_name, rule, readonly):
api_args = {
'policy-name': policy_name,
'client-match': rule,
'ro-rule': {
'security-flavor': 'sys',
},
'rw-rule': {
'security-flavor': 'sys' if not readonly else 'never',
},
'super-user-security': {
'security-flavor': 'sys',
},
}
self.send_request('export-rule-create', api_args)
@na_utils.trace
def _update_nfs_export_rule(self, policy_name, rule, readonly, rule_index):
api_args = {
'policy-name': policy_name,
'rule-index': rule_index,
'client-match': rule,
'ro-rule': {
'security-flavor': 'sys'
},
'rw-rule': {
'security-flavor': 'sys' if not readonly else 'never'
},
'super-user-security': {
'security-flavor': 'sys'
},
}
self.send_request('export-rule-modify', api_args)
@na_utils.trace
def _get_nfs_export_rule_indices(self, policy_name, rule):
api_args = {
'query': {
'export-rule-info': {
'policy-name': policy_name,
'client-match': rule,
},
},
'desired-attributes': {
'export-rule-info': {
'vserver-name': None,
'policy-name': None,
'client-match': None,
'rule-index': None,
},
},
}
result = self.send_request('export-rule-get-iter', api_args)
attributes_list = result.get_child_by_name(
'attributes-list') or netapp_api.NaElement('none')
export_rule_info_list = attributes_list.get_children()
rule_indices = [int(export_rule_info.get_child_content('rule-index'))
for export_rule_info in export_rule_info_list]
rule_indices.sort()
return [six.text_type(rule_index) for rule_index in rule_indices]
@na_utils.trace
def remove_nfs_export_rule(self, policy_name, rule):
rule_indices = self._get_nfs_export_rule_indices(policy_name, rule)
self._remove_nfs_export_rules(policy_name, rule_indices)
@na_utils.trace
def _remove_nfs_export_rules(self, policy_name, rule_indices):
for rule_index in rule_indices:
api_args = {
'policy-name': policy_name,
'rule-index': rule_index
}
try:
self.send_request('export-rule-destroy', api_args)
except netapp_api.NaApiError as e:
if e.code != netapp_api.EOBJECTNOTFOUND:
raise
@na_utils.trace
def clear_nfs_export_policy_for_volume(self, volume_name):
self.set_nfs_export_policy_for_volume(volume_name, 'default')
@na_utils.trace
def set_nfs_export_policy_for_volume(self, volume_name, policy_name):
api_args = {
'query': {
'volume-attributes': {
'volume-id-attributes': {
'name': volume_name,
},
},
},
'attributes': {
'volume-attributes': {
'volume-export-attributes': {
'policy': policy_name,
},
},
},
}
self.send_request('volume-modify-iter', api_args)
@na_utils.trace
def get_nfs_export_policy_for_volume(self, volume_name):
"""Get the name of the export policy for a volume."""
api_args = {
'query': {
'volume-attributes': {
'volume-id-attributes': {
'name': volume_name,
},
},
},
'desired-attributes': {
'volume-attributes': {
'volume-export-attributes': {
'policy': None,
},
},
},
}
result = self.send_request('volume-get-iter', api_args)
attributes_list = result.get_child_by_name(
'attributes-list') or netapp_api.NaElement('none')
volume_attributes = attributes_list.get_child_by_name(
'volume-attributes') or netapp_api.NaElement('none')
volume_export_attributes = volume_attributes.get_child_by_name(
'volume-export-attributes') or netapp_api.NaElement('none')
export_policy = volume_export_attributes.get_child_content('policy')
if not export_policy:
msg = _('Could not find export policy for volume %s.')
raise exception.NetAppException(msg % volume_name)
return export_policy
@na_utils.trace
def create_nfs_export_policy(self, policy_name):
api_args = {'policy-name': policy_name}
try:
self.send_request('export-policy-create', api_args)
except netapp_api.NaApiError as e:
if e.code != netapp_api.EDUPLICATEENTRY:
raise
@na_utils.trace
def soft_delete_nfs_export_policy(self, policy_name):
try:
self.delete_nfs_export_policy(policy_name)
except netapp_api.NaApiError:
# NOTE(cknight): Policy deletion can fail if called too soon after
# removing from a flexvol. So rename for later harvesting.
self.rename_nfs_export_policy(policy_name,
DELETED_PREFIX + policy_name)
@na_utils.trace
def delete_nfs_export_policy(self, policy_name):
api_args = {'policy-name': policy_name}
try:
self.send_request('export-policy-destroy', api_args)
except netapp_api.NaApiError as e:
if e.code == netapp_api.EOBJECTNOTFOUND:
return
raise
@na_utils.trace
def rename_nfs_export_policy(self, policy_name, new_policy_name):
api_args = {
'policy-name': policy_name,
'new-policy-name': new_policy_name
}
self.send_request('export-policy-rename', api_args)
@na_utils.trace
def prune_deleted_nfs_export_policies(self):
deleted_policy_map = self._get_deleted_nfs_export_policies()
for vserver in deleted_policy_map:
client = copy.deepcopy(self)
client.set_vserver(vserver)
for policy in deleted_policy_map[vserver]:
try:
client.delete_nfs_export_policy(policy)
except netapp_api.NaApiError:
LOG.debug('Could not delete export policy %s.' % policy)
@na_utils.trace
def _get_deleted_nfs_export_policies(self):
api_args = {
'query': {
'export-policy-info': {
'policy-name': DELETED_PREFIX + '*',
},
},
'desired-attributes': {
'export-policy-info': {
'policy-name': None,
'vserver': None,
},
},
}
result = self.send_request('export-policy-get-iter', api_args)
attributes_list = result.get_child_by_name(
'attributes-list') or netapp_api.NaElement('none')
policy_map = {}
for export_info in attributes_list.get_children():
vserver = export_info.get_child_content('vserver')
policies = policy_map.get(vserver, [])
policies.append(export_info.get_child_content('policy-name'))
policy_map[vserver] = policies
return policy_map
@na_utils.trace
def _get_ems_log_destination_vserver(self):
"""Returns the best vserver destination for EMS messages."""
major, minor = self.get_ontapi_version(cached=True)
if (major > 1) or (major == 1 and minor > 15):
# Prefer admin Vserver (requires cluster credentials).
admin_vservers = self.list_vservers(vserver_type='admin')
if admin_vservers:
return admin_vservers[0]
# Fall back to data Vserver.
data_vservers = self.list_vservers(vserver_type='data')
if data_vservers:
return data_vservers[0]
# If older API version, or no other Vservers found, use node Vserver.
node_vservers = self.list_vservers(vserver_type='node')
if node_vservers:
return node_vservers[0]
raise exception.NotFound("No Vserver found to receive EMS messages.")
@na_utils.trace
def send_ems_log_message(self, message_dict):
"""Sends a message to the Data ONTAP EMS log."""
node_client = copy.deepcopy(self)
node_client.connection.set_timeout(25)
try:
node_client.set_vserver(self._get_ems_log_destination_vserver())
node_client.send_request('ems-autosupport-log', message_dict)
LOG.debug('EMS executed successfully.')
except netapp_api.NaApiError as e:
LOG.warning(_LW('Failed to invoke EMS. %s') % e)
@na_utils.trace
def get_aggregate_raid_types(self, aggregate_names):
"""Get the RAID type of one or more aggregates."""
desired_attributes = {
'aggr-attributes': {
'aggregate-name': None,
'aggr-raid-attributes': {
'raid-type': None,
},
},
}
aggr_list = self._get_aggregates(aggregate_names=aggregate_names,
desired_attributes=desired_attributes)
aggr_raid_dict = {}
for aggr in aggr_list:
aggr_name = aggr.get_child_content('aggregate-name')
aggr_raid_attrs = aggr.get_child_by_name('aggr-raid-attributes')
aggr_raid_dict[aggr_name] = aggr_raid_attrs.get_child_content(
'raid-type')
return aggr_raid_dict
    @na_utils.trace
    def get_aggregate_disk_types(self, aggregate_names):
        """Get the disk type of one or more aggregates.

        Returns a dict mapping aggregate name to its effective disk type.
        Aggregates whose disk info could not be determined are omitted.
        """
        aggr_disk_type_dict = {}
        for aggregate_name in aggregate_names:
            # Only get 1 disk, since apart from hybrid aggregates all disks
            # must be the same type.
            api_args = {
                'max-records': 1,
                'query': {
                    'storage-disk-info': {
                        'disk-raid-info': {
                            'disk-aggregate-info': {
                                'aggregate-name': aggregate_name,
                            },
                        },
                    },
                },
                'desired-attributes': {
                    'storage-disk-info': {
                        'disk-raid-info': {
                            'effective-disk-type': None,
                        },
                    },
                },
            }
            result = self.send_request('storage-disk-get-iter', api_args)
            attributes_list = result.get_child_by_name(
                'attributes-list') or netapp_api.NaElement('none')
            storage_disk_info_list = attributes_list.get_children()
            # Guard each level: skip the aggregate entirely if the record,
            # the raid info, or the disk type is missing.
            if len(storage_disk_info_list) >= 1:
                storage_disk_info = storage_disk_info_list[0]
                disk_raid_info = storage_disk_info.get_child_by_name(
                    'disk-raid-info')
                if disk_raid_info:
                    disk_type = disk_raid_info.get_child_content(
                        'effective-disk-type')
                    if disk_type:
                        aggr_disk_type_dict[aggregate_name] = disk_type
        return aggr_disk_type_dict
@na_utils.trace
def check_for_cluster_credentials(self):
try:
self.list_cluster_nodes()
# API succeeded, so definitely a cluster management LIF
return True
except netapp_api.NaApiError as e:
if e.code == netapp_api.EAPINOTFOUND:
LOG.debug('Not connected to cluster management LIF.')
return False
else:
raise e
|
|
# Copyright 2016-2018 Brian Warner
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# Git repo maintenance
#
# This script is responsible for cloning new repos and keeping existing repos up
# to date. It can be run as often as you want (and will detect when it's
# already running, so as not to spawn parallel processes), but once or twice per
# day should be more than sufficient. Each time it runs, it updates the repo
# and checks for any parents of HEAD that aren't already accounted for in the
# repos. It also rebuilds analysis data, checks any changed affiliations and
# aliases, and caches data for display.
import sys
import platform
import imp
import time
import datetime
import html.parser
import subprocess
import os
import getopt
import xlsxwriter
import configparser
import psycopg2
import json
import logging
from workers.util import read_config
class Config:
    """Configuration, database connectivity, and logging for the facade
    worker.

    Holds the general-purpose and "people" database handles, reads worker
    options from the Augur config, and provides helpers for settings,
    status updates, and activity logging.
    """

    def __init__(self, logger):
        # Work counters and (initially unset) connection handles.
        self.repos_processed = 0
        self.upstream_db = 7
        self.cursor = None
        self.cursor_people = None
        self.logger = logger
        self.db = None
        self.db_people = None
        worker_options = read_config("Workers", "facade_worker", None, None)
        if 'repo_directory' in worker_options:
            self.repo_base_directory = worker_options['repo_directory']
        else:
            self.log_activity('Error',"Please specify a \'repo_directory\' parameter"
                " in your \'Workers\' -> \'facade_worker\' object in your config "
                "to the directory in which you want to clone repos. Exiting...")
            sys.exit(1)
        # Provenance values recorded alongside the data this worker writes.
        self.tool_source = '\'Facade Worker\''
        self.tool_version = '\'1.0.1\''
        self.data_source = '\'Git Log\''
        # Figure out how much we're going to log
        logging.basicConfig(filename='worker_{}.log'.format(worker_options['port']), filemode='w', level=logging.INFO)
        self.log_level = None  # set for real by database_connection()

    #### Database update functions ####

    def increment_db(self, version):
        # Helper function to increment the database number
        increment_db = ("INSERT INTO settings (setting,value) "
            "VALUES ('database_version',%s)")
        self.cursor.execute(increment_db, (version, ))
        # BUG FIX: this previously called the unbound name 'db' (NameError);
        # the connection lives on self.db.
        self.db.commit()
        print("Database updated to version: %s" % version)

    def update_db(self, version):
        # This function should incrementally step any version of the database up
        # to the current schema. After executing the database operations, call
        # increment_db to bring it up to the version with which it is now
        # compliant.
        print("Legacy Facade Block for DB UPDATE. No longer used. ")
        print("No further database updates.\n")

    def migrate_database_config(self):
        # Since we're changing the way we store database credentials, we need a
        # way to transparently migrate anybody who was using the old file.
        # Someday after a long while this can disappear.
        try:
            # If the old database config was found, write a new config
            imp.find_module('db')
            db_config = configparser.ConfigParser()
            from db import db_user,db_pass,db_name,db_host
            from db import db_user_people,db_pass_people,db_name_people,db_host_people
            db_config.add_section('main_database')
            db_config.set('main_database','user',db_user)
            db_config.set('main_database','pass',db_pass)
            db_config.set('main_database','name',db_name)
            db_config.set('main_database','host',db_host)
            db_config.add_section('people_database')
            db_config.set('people_database','user',db_user_people)
            db_config.set('people_database','pass',db_pass_people)
            db_config.set('people_database','name',db_name_people)
            db_config.set('people_database','host',db_host_people)
            with open('db.cfg','w') as db_file:
                db_config.write(db_file)
            print("Migrated old style config file to new.")
        except:
            # If nothing is found, the user probably hasn't run setup yet.
            sys.exit("Can't find database config. Have you run setup.py?")
        try:
            os.remove('db.py')
            os.remove('db.pyc')
            print("Removed unneeded config files")
        except:
            # Best effort only; the files may already be gone.
            print("Attempted to remove unneeded config files")
        return db_user,db_pass,db_name,db_host,db_user_people,db_pass_people,db_name_people,db_host_people

    #### Global helper functions ####

    def database_connection(self, db_host,db_user,db_pass,db_name, db_port, people, multi_threaded_connection):
        # Return a database connection based upon which interpreter we're using.
        # CPython can use any database connection, although MySQLdb is preferred
        # over pymysql for performance reasons. However, PyPy can't use MySQLdb
        # at this point, instead requiring a pure python MySQL client. This
        # function returns a database connection that should provide maximum
        # performance depending upon the interpreter in use.
        ##TODO: Postgres connections as we make them ARE threadsafe. We *could* refactor this accordingly: https://www.psycopg.org/docs/connection.html #noturgent
        db_schema = 'augur_data'
        db = psycopg2.connect(
            host = db_host,
            user = db_user,
            password = db_pass,
            database = db_name,
            port = db_port,
            options=f'-c search_path={db_schema}',
            connect_timeout = 31536000,)
        cursor = db.cursor()
        ## TODO: Does this need a block for if the database connection IS multithreaded? I think so, @gabe-heim
        # Cache the handles on self only for single-threaded use; callers of a
        # multi-threaded connection manage the returned handles themselves.
        if people and not multi_threaded_connection:
            self.cursor_people = cursor
            self.db_people = db
        elif not multi_threaded_connection:
            self.cursor = cursor
            self.db = db
        # Figure out how much we're going to log
        self.log_level = self.get_setting('log_level')
        # Not getting debug logging for some reason, so force it on.
        self.log_level = 'Debug'
        return db, cursor

    def get_setting(self, setting):
        # Get a setting from the database (most recently modified value wins).
        query = ("""SELECT value FROM settings WHERE setting=%s ORDER BY
            last_modified DESC LIMIT 1""")
        self.cursor.execute(query, (setting, ))
        return self.cursor.fetchone()[0]

    def update_status(self, status):
        # Update the status displayed in the UI
        query = ("UPDATE settings SET value=%s WHERE setting='utility_status'")
        self.cursor.execute(query, (status, ))
        self.db.commit()

    def log_activity(self, level, status):
        # Log an activity based upon urgency and user's preference. If the log
        # level is "Debug", then just print it and don't save it in the
        # database.
        log_options = ('Error','Quiet','Info','Verbose','Debug')
        self.logger.info("* %s\n" % status)
        if self.log_level == 'Debug' and level == 'Debug':
            return
        if self.cursor is None:
            # Lazily open the general-purpose connection on first use.
            # BUG FIX: this setup previously ran unconditionally on *every*
            # call (reconnecting each time) and then re-executed the INSERT a
            # second time outside the try block, duplicating every log row.
            db_user = read_config('Database', 'user', 'AUGUR_DB_USER', 'augur')
            db_pass = read_config('Database', 'password', 'AUGUR_DB_PASSWORD', 'augur')
            db_name = read_config('Database', 'name', 'AUGUR_DB_NAME', 'augur')
            db_host = read_config('Database', 'host', 'AUGUR_DB_HOST', 'localhost')
            db_port = read_config('Database', 'port', 'AUGUR_DB_PORT', 5432)
            self.database_connection(
                db_host, db_user, db_pass, db_name, db_port, False, False)
        query = ("INSERT INTO utility_log (level,status) VALUES (%s,%s)")
        try:
            self.cursor.execute(query, (level, status))
            self.db.commit()
        except Exception as e:
            self.logger.info('Error encountered: {}\n'.format(e))

    def inc_repos_processed(self):
        # Count one more repo as fully processed.
        self.repos_processed += 1
|
|
import logging
import re
import sys
import html5lib
from html5lib.sanitizer import HTMLSanitizer
from html5lib.serializer.htmlserializer import HTMLSerializer
from . import callbacks as linkify_callbacks
from .encoding import force_unicode
from .sanitizer import BleachSanitizer
VERSION = (1, 2, 'dev')
__version__ = '1.2-dev'
__all__ = ['clean', 'linkify']
log = logging.getLogger('bleach')
ALLOWED_TAGS = [
'a',
'abbr',
'acronym',
'b',
'blockquote',
'code',
'em',
'i',
'li',
'ol',
'strong',
'ul',
]
ALLOWED_ATTRIBUTES = {
'a': ['href', 'title'],
'abbr': ['title'],
'acronym': ['title'],
}
ALLOWED_STYLES = []
TLDS = """ac ad ae aero af ag ai al am an ao aq ar arpa as asia at au aw ax az
ba bb bd be bf bg bh bi biz bj bm bn bo br bs bt bv bw by bz ca cat
cc cd cf cg ch ci ck cl cm cn co com coop cr cu cv cx cy cz de dj dk
dm do dz ec edu ee eg er es et eu fi fj fk fm fo fr ga gb gd ge gf gg
gh gi gl gm gn gov gp gq gr gs gt gu gw gy hk hm hn hr ht hu id ie il
im in info int io iq ir is it je jm jo jobs jp ke kg kh ki km kn kp
kr kw ky kz la lb lc li lk lr ls lt lu lv ly ma mc md me mg mh mil mk
ml mm mn mo mobi mp mq mr ms mt mu museum mv mw mx my mz na name nc ne
net nf ng ni nl no np nr nu nz om org pa pe pf pg ph pk pl pm pn pr pro
ps pt pw py qa re ro rs ru rw sa sb sc sd se sg sh si sj sk sl sm sn so
sr st su sv sy sz tc td tel tf tg th tj tk tl tm tn to tp tr travel tt
tv tw tz ua ug uk us uy uz va vc ve vg vi vn vu wf ws xn ye yt yu za zm
zw""".split()
PROTOCOLS = HTMLSanitizer.acceptable_protocols
TLDS.reverse()
url_re = re.compile(
r"""\(* # Match any opening parentheses.
\b(?<![@.])(?:(?:%s):/{0,3}(?:(?:\w+:)?\w+@)?)? # http://
([\w-]+\.)+(?:%s)(?:\:\d+)?(?!\.\w)\b # xx.yy.tld(:##)?
(?:[/?][^\s\{\}\|\\\^\[\]`<>"]*)?
# /path/zz (excluding "unsafe" chars from RFC 1738,
# except for # and ~, which happen in practice)
""" % (u'|'.join(PROTOCOLS), u'|'.join(TLDS)),
re.IGNORECASE | re.VERBOSE | re.UNICODE)
proto_re = re.compile(r'^[\w-]+:/{0,3}', re.IGNORECASE)
punct_re = re.compile(r'([\.,]+)$')
email_re = re.compile(
r"""(?<!//)
(([-!#$%&'*+/=?^_`{}|~0-9A-Z]+
(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)* # dot-atom
|^"([\001-\010\013\014\016-\037!#-\[\]-\177]
|\\[\001-011\013\014\016-\177])*" # quoted-string
)@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6})\.? # domain
""",
re.IGNORECASE | re.MULTILINE | re.VERBOSE)
NODE_TEXT = 4 # The numeric ID of a text node in simpletree.
DEFAULT_CALLBACKS = [linkify_callbacks.nofollow]
PY_26 = (sys.version_info < (2, 7))
RECURSION_EXCEPTION = RuntimeError if not PY_26 else AttributeError
def clean(text, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRIBUTES,
          styles=ALLOWED_STYLES, strip=False, strip_comments=True):
    """Clean an HTML fragment and return it."""
    if not text:
        return u''
    text = force_unicode(text)

    class CleanerTokenizer(BleachSanitizer):
        # Configure the sanitizer with the caller's whitelists.
        allowed_elements = tags
        allowed_attributes = attributes
        allowed_css_properties = styles
        strip_disallowed_elements = strip
        strip_html_comments = strip_comments

    fragment = html5lib.HTMLParser(
        tokenizer=CleanerTokenizer).parseFragment(text)
    return _render(fragment)
def linkify(text, callbacks=DEFAULT_CALLBACKS, skip_pre=False,
            parse_email=False, tokenizer=HTMLSanitizer):
    """Convert URL-like strings in an HTML fragment to links.

    linkify() converts strings that look like URLs or domain names in a
    blob of text that may be an HTML fragment to links, while preserving
    (a) links already in the string, (b) urls found in attributes, and
    (c) email addresses.

    `callbacks` may filter/adjust each link's attributes (returning None
    drops the link); `skip_pre` leaves <pre> content untouched; and
    `parse_email` enables mailto: links for email addresses.
    """
    text = force_unicode(text)
    if not text:
        return u''
    parser = html5lib.HTMLParser(tokenizer=tokenizer)
    forest = parser.parseFragment(text)

    def replace_nodes(tree, new_frag, node):
        # Re-parse new_frag and splice its nodes in place of `node`.
        new_tree = parser.parseFragment(new_frag)
        for n in new_tree.childNodes:
            # Prevent us from re-parsing links new links as existing links.
            if n.name == 'a':
                n._seen = True
            tree.insertBefore(n, node)
        tree.removeChild(node)
        # Return the number of new nodes.
        return len(new_tree.childNodes) - 1

    def strip_wrapping_parentheses(fragment):
        """Strips wrapping parentheses.

        Returns a tuple of the following format::

            (string stripped from wrapping parentheses,
             count of stripped opening parentheses,
             count of stripped closing parentheses)
        """
        opening_parentheses = closing_parentheses = 0
        # Count consecutive opening parentheses
        # at the beginning of the fragment (string).
        for char in fragment:
            if char == '(':
                opening_parentheses += 1
            else:
                break
        if opening_parentheses:
            newer_frag = ''
            # Cut the consecutive opening brackets from the fragment.
            fragment = fragment[opening_parentheses:]
            # Reverse the fragment for easier detection of parentheses
            # inside the URL.
            reverse_fragment = fragment[::-1]
            skip = False
            for char in reverse_fragment:
                # Remove the closing parentheses if it has a matching
                # opening parentheses (they are balanced).
                if (char == ')' and
                    closing_parentheses < opening_parentheses and
                    not skip):
                    closing_parentheses += 1
                    continue
                # Do not remove ')' from the URL itself.
                elif char != ')':
                    skip = True
                newer_frag += char
            fragment = newer_frag[::-1]
        return fragment, opening_parentheses, closing_parentheses

    def apply_callbacks(attrs, new):
        # Run the attrs dict through each callback; None aborts the link.
        for cb in callbacks:
            attrs = cb(attrs, new)
            if attrs is None:
                return None
        return attrs

    def linkify_nodes(tree, parse_text=True):
        # I know this isn't Pythonic, but we're sometimes mutating
        # tree.childNodes, which ends up breaking the loop and causing us to
        # reparse code.
        children = len(tree.childNodes)
        current = 0  # A pointer to the "current" node.
        while current < children:
            node = tree.childNodes[current]
            if node.type == NODE_TEXT and parse_text:
                new_frag = _render(node)
                # Look for email addresses?
                if parse_email:
                    new_frag = re.sub(email_re, email_repl, new_frag)
                    if new_frag != _render(node):
                        adj = replace_nodes(tree, new_frag, node)
                        children += adj
                        current += adj
                        linkify_nodes(tree)
                        continue
                new_frag = re.sub(url_re, link_repl, new_frag)
                if new_frag != _render(node):
                    adj = replace_nodes(tree, new_frag, node)
                    children += adj
                    current += adj
            elif node.name == 'a' and not getattr(node, '_seen', False):
                # Existing link: run callbacks but do not re-linkify inside.
                if 'href' in node.attributes:
                    attrs = node.attributes
                    _text = attrs['_text'] = ''.join(c.toxml() for
                                                     c in node.childNodes)
                    attrs = apply_callbacks(attrs, False)
                    if attrs is not None:
                        text = force_unicode(attrs.pop('_text'))
                        node.attributes = attrs
                        for n in reversed(node.childNodes):
                            node.removeChild(n)
                        text = parser.parseFragment(text)
                        for n in text.childNodes:
                            node.appendChild(n)
                        node._seen = True
                    else:
                        # Callback vetoed the link; replace it with its text.
                        replace_nodes(tree, _text, node)
            elif skip_pre and node.name == 'pre':
                linkify_nodes(node, False)
            elif not getattr(node, '_seen', False):
                linkify_nodes(node)
            current += 1

    def email_repl(match):
        # BUG FIX: escape double quotes as &quot; (this was a no-op
        # replace('"', '"'), letting quoted-string addresses break the
        # href="..." attribute).
        addr = match.group(0).replace('"', '&quot;')
        link = {
            '_text': addr,
            'href': 'mailto:%s' % addr,
        }
        link = apply_callbacks(link, True)
        if link is None:
            return addr
        _href = link.pop('href')
        _text = link.pop('_text')
        repl = '<a href="%s" %s>%s</a>'
        attribs = ' '.join('%s="%s"' % (k, v) for k, v in link.items())
        return repl % (_href, attribs, _text)

    def link_repl(match):
        url = match.group(0)
        open_brackets = close_brackets = 0
        if url.startswith('('):
            url, open_brackets, close_brackets = (
                strip_wrapping_parentheses(url)
            )
        end = u''
        m = re.search(punct_re, url)
        if m:
            # Trailing punctuation belongs outside the link.
            end = m.group(0)
            url = url[0:m.start()]
        if re.search(proto_re, url):
            href = url
        else:
            # Bare domains get a default http:// scheme.
            href = u''.join([u'http://', url])
        link = {
            '_text': url,
            'href': href,
        }
        link = apply_callbacks(link, True)
        if link is None:
            return url
        _text = link.pop('_text')
        _href = link.pop('href')
        repl = u'%s<a href="%s" %s>%s</a>%s%s'
        attribs = ' '.join('%s="%s"' % (k, v) for k, v in link.items())
        return repl % ('(' * open_brackets,
                       _href, attribs, _text, end,
                       ')' * close_brackets)

    try:
        linkify_nodes(forest)
    except (RECURSION_EXCEPTION) as e:
        # If we hit the max recursion depth, just return what we've got.
        log.exception('Probable recursion error: %r' % e)
    return _render(forest)
def _render(tree):
    """Serialize *tree* as HTML, falling back to raw XML output."""
    try:
        rendered = _serialize(tree)
    except AssertionError:  # The treewalker throws this sometimes.
        rendered = tree.toxml()
    return force_unicode(rendered)
def _serialize(domtree):
    """Render *domtree* through html5lib's 'simpletree' tree walker."""
    serializer = HTMLSerializer(quote_attr_values=True,
                                omit_optional_tags=False)
    walker = html5lib.treewalkers.getTreeWalker('simpletree')
    return serializer.render(walker(domtree))
|
|
# -*- coding: utf-8 -*-
"""
Test script for reading of tsp data.
"""
import tsp_io as io
import euclidean as e
import objective as o
import init_solutions as init
from objective import SimpleTSPObjective
from bruteforce import BruteForceSolver, RandomSearch
from local_search import OrdinaryDecent, SteepestDecent
from random_restarts import MultiRunner, IteratedLocalSearch, HigherQualityHomeBase
from local_search_2opt import OrdinaryDecent2Opt, SteepestDecent2Opt, LocalSearchArgs, OrdinaryDecent2OptNew
from construction import NearestNeighbour, FurthestInsertion
from evolutionary import (EvolutionaryAlgorithm, MewLambdaEvolutionStrategy,
MewPlusLambdaEvolutionStrategy,
GeneticAlgorithmStrategy,
ElitistGeneticAlgorithmStrategy,
TwoOptMutator, TwoCityMutator,
TruncationSelector, TournamentSelector, PartiallyMappedCrossover)
import numpy as np
import random
def mark_optimal(optimal_cost, cost):
    """Return '*' when cost equals the known optimum, otherwise ''."""
    return '*' if cost == optimal_cost else ''
def print_output(solver):
    """Print the solver's best cost and each of its best tours."""
    print("\nbest solutions:\t{0}".format(len(solver.best_solutions)))
    print("best cost:\t{0}".format(solver.best_cost))
    print("best solutions:")
    for tour in solver.best_solutions:
        print(tour)
def print_multi_run(solver):
    """Print best cost and tours for a multi-run solver.

    Each entry of best_solutions is a tuple whose first element is the tour.
    """
    print("\nbest solutions:\t{0}".format(len(solver.best_solutions)))
    print("best cost:\t{0}".format(solver.best_cost))
    print("best solutions:")
    for entry in solver.best_solutions:
        print(entry[0])
# Reproducibility: uncomment to pin both RNGs used by the solvers.
#seed = 999
#np.random.seed(seed)
#random.seed(seed)
# Problem instance (TSPLIB format) and output path for the distance matrix.
file_path = "Data/st70.tsp"
file_out = "Data/matrix.csv"
md_rows = 6  # header/metadata rows to skip in the .tsp file - presumed; see read_coordinates
# NOTE(review): io, e and o are project helper modules (file I/O, euclidean
# maths, objective/tour utilities) imported earlier in this file.
cities = io.read_coordinates(file_path, md_rows)
meta = io.read_meta_data(file_path, md_rows)
print(cities)
print(meta)
# Example of calculating a single euclidean distance between two cities.
dist = e.euclidean_distance(cities[0], cities[1])
print(dist)
# Generate the full city-to-city distance matrix.
matrix = e.gen_matrix(cities)
# Output city matrix - to validate and use for manual calcs etc.
np.savetxt(file_out, matrix, delimiter=",")
# You can specify the start/end city index using
# the optional parameter start_index. Default index = 0.
tour = o.symmetric_tour_list(len(cities), 2)  # city at index 2 is start/end
# tour = o.symmetric_tour_list(len(cities)) # for city 0
print(tour)
# Randomise the cities apart from start/end.
tour = init.random_tour(tour)
print("\n", tour)
cost = o.tour_cost(tour, matrix)
print(cost)
# Brute force example for small TSP problems.
# Need something to produce a "short tour from large".
size_trim = 70  # note: bruteforce is slow beyond ~10 cities
base_city = tour[0]
tour = tour[0:size_trim]  # select a subset of the big problem
tour.append(base_city)  # close the tour back at the start city
results = []
print("\n\n**Short tour\n{0}".format(tour))
print("initial cost: {0}".format(o.tour_cost(tour, matrix)))
solver = BruteForceSolver(tour, matrix)
print("Enumerating...")
# For size_trim 10 this was ~2.2s per loop.
# NOTE(review): solve() is commented out, so cost1 below (used as the
# "optimal" reference in the summary section) is whatever BruteForceSolver
# initialises best_cost to - confirm before trusting the '*' markers.
#solver.solve()
print("\n** BRUTEFORCE OUTPUT ***")
print_output(solver)
cost1 = solver.best_cost
solver = RandomSearch(tour, matrix, max_iter=10000)
print("Searching...")
#for size_trim 10 = 2.2s per loop
solver.solve()
print("\n** RANDOMSEARCH OUTPUT ***")
print_output(solver)
cost1a = solver.best_cost
# Local Search - Single Run of Ordinary Decent.
solver = OrdinaryDecent(tour, matrix)
print("\nRunning Local Search using Ordinary Decent...")
#for trim_size = 10 = average 220ms
solver.solve()
print("\n** ORDINARY DECENT OUTPUT ***")
print_output(solver)
cost2 = solver.best_cost
# Local Search - Single Run of Steepest Decent.
solver = SteepestDecent(tour, matrix)
print("\nRunning Local Search using Steepest Decent...")
#for trim_size = 10 = average 222ms
solver.solve()
print("\n** STEEPEST DECENT OUTPUT ***")
print_output(solver)
cost3 = solver.best_cost
# Local Search - multiple runs of Ordinary Decent.
runner = MultiRunner(OrdinaryDecent(tour, matrix))
n = 10
print("\nRunning Local Search using Ordinary Decent (Best of {} runs)..."\
      .format(n))
runner.run(n)
print("\n** MULTIPLE RUNS OF ORDINARY DECENT OUTPUT ***")
# BUGFIX: previously printed the last single-run solver; the aggregated
# results live on the MultiRunner, whose best_solutions entries are tuples
# (hence print_multi_run rather than print_output).
print_multi_run(runner)
cost4, solutions = runner.get_best_solutions()
# Local Search - multiple runs of Steepest Decent.
runner = MultiRunner(SteepestDecent(tour, matrix))
print("\nRunning Local Search using Steepest Decent (Best of {} runs)..."\
      .format(n))
runner.run(n)
print("\n** MULTIPLE RUNS OF STEEPEST DECENT OUTPUT ***")
# BUGFIX: same as above - report the runner, not the stale solver.
print_multi_run(runner)
cost5, solutions = runner.get_best_solutions()
# Construction Heuristic - Nearest Neighbour.
solver = NearestNeighbour(tour, matrix)
print("\nRunning Nearest Neighbour alg...")
solver.solve()
print("\n** NEAREST NEIGHBOUR OUTPUT ***")
# NearestNeighbour constructs a single tour, so it is reported directly
# rather than via print_output().
print("\nbest solutions:\t{0}".format(1))
print("best cost:\t{0}".format(solver.best_cost))
cost6 = solver.best_cost
print("best solutions:")
print(solver.best_solution)
bs = solver.best_solution
# Local Search seeded with the Nearest Neighbour tour.
solver = OrdinaryDecent(bs, matrix)
print("\nRunning Local Search using Ordinary Decent...NN")
#for trim_size = 10 = average 220ms
solver.solve()
print("\n** ORDINARY DECENT OUTPUT ***")
print_output(solver)
cost7 = solver.best_cost
# Local Search - Single Run of Ordinary Decent with 2-Opt swap.
args = LocalSearchArgs()
args.init_solution = tour
args.matrix = matrix
solver = OrdinaryDecent2Opt(args)
print("\nRunning Local Search using Ordinary Decent 2-Opt...")
solver.solve()
print("\n** ORDINARY DECENT 2 Opt OUTPUT ***")
print_output(solver)
cost8 = solver.best_cost
# Local Search - Single Run of Steepest Decent with 2-Opt swap.
args = LocalSearchArgs()
args.init_solution = tour
args.matrix = matrix
solver = SteepestDecent2Opt(args)
print("\nRunning Local Search using Steepest Decent 2-Opt...")
solver.solve()
# BUGFIX: banner previously said "ORDINARY DECENT 2 Opt" for this
# steepest-decent run.
print("\n** STEEPEST DECENT 2 Opt OUTPUT ***")
print_output(solver)
cost9 = solver.best_cost
# Local Search - multiple runs of Ordinary Decent 2-Opt.
args = LocalSearchArgs()
args.init_solution = tour
args.matrix = matrix
runner = MultiRunner(OrdinaryDecent2Opt(args))
n = 100
print("\nRunning Local Search using Ordinary Decent 2-Opt (Best of {} runs)..."\
      .format(n))
runner.run(n)
print("\n** MULTIPLE RUNS OF ORDINARY DECENT OUTPUT ***")
# BUGFIX: previously printed the stale single-run solver from the section
# above; the aggregated results live on the MultiRunner.
print_multi_run(runner)
cost9a, solutions = runner.get_best_solutions()
# Construction Heuristic - Furthest Insertion.
solver = FurthestInsertion(tour, matrix)
print("\nRunning Furthest Insertion alg...")
solver.solve()
print("\n** FURTHEST INSERTION OUTPUT ***")
print("best cost:\t{0}".format(solver.best_cost))
cost10 = solver.best_cost
print("best solutions:")
print(solver.best_solution)
# Evolutionary Algorithm - (mu, lambda) evolution strategy.
# NOTE(review): "mew" is this codebase's spelling of the ES parameter mu
# (number of parents kept each generation); _lambda is the offspring count.
mew = 10
_lambda = 200
strategy = MewLambdaEvolutionStrategy(mew, _lambda, TwoCityMutator())
solver = EvolutionaryAlgorithm(tour, matrix, _lambda, strategy,
                               maximisation=False, generations=500)
print("\nRunning (mew, lambda) evolutionary alg...")
solver.solve()
print("\n** (MEW, LAMBDA) OUTPUT ***")
print("best cost:\t{0}".format(solver.best_fitness))
cost11 = solver.best_fitness
print("best solutions:")
print(solver.best_solution)
# Evolutionary Algorithm - (mu + lambda) strategy: parents survive and
# compete with their offspring.
mew = 10
_lambda = 200
strategy = MewPlusLambdaEvolutionStrategy(mew, _lambda, TwoCityMutator())
solver = EvolutionaryAlgorithm(tour, matrix, _lambda, strategy,
                               maximisation=False, generations=500)
print("\nRunning (mew + lambda) evolutionary alg...")
solver.solve()
print("\n** (MEW+LAMBDA) OUTPUT ***")
print("best cost:\t{0}".format(solver.best_fitness))
cost12 = solver.best_fitness
print("best solutions:")
print(solver.best_solution)
# Evolutionary Algorithm - (mu + lambda) strategy with 2-Opt mutation.
mew = 10
_lambda = 200
strategy = MewPlusLambdaEvolutionStrategy(mew, _lambda, TwoOptMutator())
solver = EvolutionaryAlgorithm(tour, matrix, _lambda, strategy,
                               maximisation=False, generations=500)
print("\nRunning (mew + lambda) evolutionary alg with 2-Opt...")
solver.solve()
print("\n** (MEW+LAMBDA) OUTPUT ***")
print("best cost:\t{0}".format(solver.best_fitness))
cost13 = solver.best_fitness
print("best solutions:")
print(solver.best_solution)
# Evolutionary Algorithm - standard Genetic Algorithm strategy
# (tournament selection + partially mapped crossover + two-city mutation).
_lambda = 200
strategy = GeneticAlgorithmStrategy(_lambda,
                                    selector=TournamentSelector(),
                                    xoperator=PartiallyMappedCrossover(),
                                    mutator=TwoCityMutator())
solver = EvolutionaryAlgorithm(tour, matrix, _lambda, strategy,
                               maximisation=False, generations=500)
print("\nRunning Genetic Algorithm")
solver.solve()
print("\n** GA OUTPUT ***")
print("best cost:\t{0}".format(solver.best_fitness))
cost14 = solver.best_fitness
print("best solutions:")
print(solver.best_solution)
# Evolutionary Algorithm - Elitist GA (the top "mew" individuals survive).
mew = 10
_lambda = 200
strategy = ElitistGeneticAlgorithmStrategy(mew, _lambda,
                                           selector=TournamentSelector(),
                                           xoperator=PartiallyMappedCrossover(),
                                           mutator=TwoCityMutator())
solver = EvolutionaryAlgorithm(tour, matrix, _lambda, strategy,
                               maximisation=False, generations=500)
print("\nRunning Elitist Genetic Algorithm")
solver.solve()
print("\n** GA OUTPUT ***")
print("best cost:\t{0}".format(solver.best_fitness))
cost15 = solver.best_fitness
print("best solutions:")
print(solver.best_solution)
# Evolutionary Algorithm - Elitist GA with 2-Opt mutation.
mew = 10
_lambda = 200
strategy = ElitistGeneticAlgorithmStrategy(mew, _lambda,
                                           selector=TournamentSelector(),
                                           xoperator=PartiallyMappedCrossover(),
                                           mutator=TwoOptMutator())
solver = EvolutionaryAlgorithm(tour, matrix, _lambda, strategy,
                               maximisation=False, generations=500)
print("\nRunning Elitist Genetic Algorithm - 2Opt")
solver.solve()
print("\n** GA OUTPUT ***")
print("best cost:\t{0}".format(solver.best_fitness))
cost16 = solver.best_fitness
print("best solutions:")
print(solver.best_solution)
# Iterated Local Search - repeated Ordinary Decent 2-Opt with the default
# (random walk) home-base acceptance.
objective = SimpleTSPObjective(matrix)
local_search = OrdinaryDecent2OptNew(objective, tour)
runner = IteratedLocalSearch(objective, local_search, maximisation=False)
n = 20
print("\nRunning Iterated Local Search using Ordinary Decent 2-Opt ({} runs)..."\
      .format(n))
runner.run(n)
print("\n** ILS OUTPUT ***")
# NOTE(review): reaches into the runner's private attributes (_best_cost,
# _solutions); a public accessor would be preferable.
print("best cost:\t{0}".format(runner._best_cost))
print("best solutions:")
print(runner._solutions)
cost17, solutions = runner.get_best_solutions()
# Iterated Local Search again, but only accept a new home base when it
# improves on the current one (HigherQualityHomeBase).
objective = SimpleTSPObjective(matrix)
local_search = OrdinaryDecent2OptNew(objective, tour)
runner = IteratedLocalSearch(objective, local_search, accept=HigherQualityHomeBase(),
                             maximisation=False)
n = 20
print("\nRunning ILS, HQHombase, OD 2-Opt ({} runs)..."\
      .format(n))
runner.run(n)
print("\n** ILS OUTPUT ***")
print("best cost:\t{0}".format(runner._best_cost))
print("best solutions:")
print(runner._solutions)
cost18, solutions = runner.get_best_solutions()
# Summary of all methods, marked '*' when matching the brute-force optimum.
# NOTE(review): n was re-assigned during the script (10 -> 100 -> 20), so
# the "({0} runs)" labels below show the final value of n, not the number
# of runs each method actually used.
print("\n** COST SUMMARY ***")
print("\nBrute Force:\t\t\t{0}".format(cost1))
print("Random Search:\t\t\t{0}\t{1}".format(cost1a, mark_optimal(cost1, cost1a)))
print("Ordinary Decent:\t\t{0}\t{1}".format(cost2, mark_optimal(cost1, cost2)))
print("Steepest Decent:\t\t{0}\t{1}".format(cost3, mark_optimal(cost1, cost3)))
print("Ordinary Decent ({0} runs):\t{1}\t{2}".format(n, cost4, mark_optimal(cost1, cost4)))
print("Steepest Decent ({0} runs):\t{1}\t{2}".format(n, cost5, mark_optimal(cost1, cost5)))
print("Nearest Neighbour:\t\t{0}\t{1}".format(cost6, mark_optimal(cost1, cost6)))
print("Ordinary Decent NN init:\t{0}\t{1}".format(cost7, mark_optimal(cost1, cost7)))
print("Ordinary Decent 2-Opt\t\t{0}\t{1}".format(cost8, mark_optimal(cost1, cost8)))
print("Steepest Decent 2-Opt\t\t{0}\t{1}".format(cost9, mark_optimal(cost1, cost9)))
print("Ord Decent 2-Opt ({0} runs):\t{1}\t{2}".format(n, cost9a, mark_optimal(cost1, cost9a)))
print("Furthest Insertion:\t\t{0}\t{1}".format(cost10, mark_optimal(cost1, cost10)))
print("EA: (Mew, Lambda) \t\t{0}\t{1}".format(cost11, mark_optimal(cost1, cost11)))
print("EA: (Mew+Lambda) \t\t{0}\t{1}".format(cost12, mark_optimal(cost1, cost12)))
print("EA: (Mew+Lambda)+2Opt \t\t{0}\t{1}".format(cost13, mark_optimal(cost1, cost13)))
print("Genetic Algorithm \t\t{0}\t{1}".format(cost14, mark_optimal(cost1, cost14)))
print("Elitist GA \t\t\t{0}\t{1}".format(cost15, mark_optimal(cost1, cost15)))
print("Elitist GA+2Opt \t\t{0}\t{1}".format(cost16, mark_optimal(cost1, cost16)))
print("ILS. Homebase=Rand Walk\t\t{0}\t{1}".format(cost17, mark_optimal(cost1, cost17)))
# BUGFIX: was mark_optimal(cost1, cost17) - marked this row with the wrong
# run's cost; the row reports cost18.
print("ILS. Homebase=best local optimum \t\t{0}\t{1}".format(cost18, mark_optimal(cost1, cost18)))
print("\n*Optimal")
|
|
import operator
from functools import reduce
from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured
from django.core.paginator import InvalidPage
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_unicode, smart_str
from django.utils.translation import ugettext, ugettext_lazy
from django.utils.http import urlencode
from django.contrib.admin import FieldListFilter
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.util import (quote, get_fields_from_path,
lookup_needs_distinct, prepare_lookup_value)
# Changelist settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
TO_FIELD_VAR = 't'
IS_POPUP_VAR = 'pop'
ERROR_FLAG = 'e'
IGNORED_PARAMS = (
ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, TO_FIELD_VAR)
# Text to display within change-list table cells if the value is blank.
EMPTY_CHANGELIST_VALUE = ugettext_lazy('(None)')
class ChangeList(object):
    """State container for the Django admin change-list view.

    Collects the request's query-string parameters, resolves list filters,
    search, ordering and pagination, and exposes the final result set to the
    changelist template.

    NOTE(review): this is Python 2-era Django code (force_unicode,
    SortedDict, ugettext). The mutate-while-iterating patterns in
    get_filters() and get_query_string() rely on dict.items()/keys()
    returning lists, as they do on Python 2.
    """

    def __init__(self, request, model, list_display, list_display_links,
            list_filter, date_hierarchy, search_fields, list_select_related,
            list_per_page, list_max_show_all, list_editable, model_admin):
        # Model/meta shortcuts.
        self.model = model
        self.opts = model._meta
        self.lookup_opts = self.opts
        # Base queryset from the ModelAdmin, before any changelist filtering.
        self.root_query_set = model_admin.queryset(request)
        self.list_display = list_display
        self.list_display_links = list_display_links
        self.list_filter = list_filter
        self.date_hierarchy = date_hierarchy
        self.search_fields = search_fields
        self.list_select_related = list_select_related
        self.list_per_page = list_per_page
        self.list_max_show_all = list_max_show_all
        self.model_admin = model_admin

        # Get search parameters from the query string.
        try:
            self.page_num = int(request.GET.get(PAGE_VAR, 0))
        except ValueError:
            # Non-numeric page parameter: fall back to the first page.
            self.page_num = 0
        self.show_all = ALL_VAR in request.GET
        self.is_popup = IS_POPUP_VAR in request.GET
        self.to_field = request.GET.get(TO_FIELD_VAR)
        # Remaining GET parameters drive filtering; the page number and the
        # error flag are view state, not lookups, so they are stripped.
        self.params = dict(request.GET.items())
        if PAGE_VAR in self.params:
            del self.params[PAGE_VAR]
        if ERROR_FLAG in self.params:
            del self.params[ERROR_FLAG]

        # Inline editing is disabled inside popup windows.
        if self.is_popup:
            self.list_editable = ()
        else:
            self.list_editable = list_editable
        self.query = request.GET.get(SEARCH_VAR, '')
        self.query_set = self.get_query_set(request)
        self.get_results(request)
        if self.is_popup:
            title = ugettext('Select %s')
        else:
            title = ugettext('Select %s to change')
        self.title = title % force_unicode(self.opts.verbose_name)
        self.pk_attname = self.lookup_opts.pk.attname

    def get_filters(self, request):
        """Build the list-filter specs for this request.

        Returns a 4-tuple: (filter_specs, has_filters,
        remaining_lookup_params, use_distinct). Raises SuspiciousOperation
        for disallowed lookups and IncorrectLookupParameters for lookups on
        nonexistent fields.
        """
        lookup_params = self.params.copy() # a dictionary of the query string
        use_distinct = False

        # Remove all the parameters that are globally and systematically
        # ignored.
        for ignored in IGNORED_PARAMS:
            if ignored in lookup_params:
                del lookup_params[ignored]

        # Normalize the types of keys
        for key, value in lookup_params.items():
            if not isinstance(key, str):
                # 'key' will be used as a keyword argument later, so Python
                # requires it to be a string.
                del lookup_params[key]
                lookup_params[smart_str(key)] = value

            if not self.model_admin.lookup_allowed(key, value):
                raise SuspiciousOperation("Filtering by %s not allowed" % key)

        filter_specs = []
        if self.list_filter:
            for list_filter in self.list_filter:
                if callable(list_filter):
                    # This is simply a custom list filter class.
                    spec = list_filter(request, lookup_params,
                        self.model, self.model_admin)
                else:
                    field_path = None
                    if isinstance(list_filter, (tuple, list)):
                        # This is a custom FieldListFilter class for a given field.
                        field, field_list_filter_class = list_filter
                    else:
                        # This is simply a field name, so use the default
                        # FieldListFilter class that has been registered for
                        # the type of the given field.
                        field, field_list_filter_class = list_filter, FieldListFilter.create
                    if not isinstance(field, models.Field):
                        field_path = field
                        # Resolve e.g. "author__name" to the final field.
                        field = get_fields_from_path(self.model, field_path)[-1]
                    spec = field_list_filter_class(field, request, lookup_params,
                        self.model, self.model_admin, field_path=field_path)
                    # Check if we need to use distinct()
                    use_distinct = (use_distinct or
                                    lookup_needs_distinct(self.lookup_opts,
                                                          field_path))
                if spec and spec.has_output():
                    filter_specs.append(spec)

        # At this point, all the parameters used by the various ListFilters
        # have been removed from lookup_params, which now only contains other
        # parameters passed via the query string. We now loop through the
        # remaining parameters both to ensure that all the parameters are valid
        # fields and to determine if at least one of them needs distinct(). If
        # the lookup parameters aren't real fields, then bail out.
        try:
            for key, value in lookup_params.items():
                lookup_params[key] = prepare_lookup_value(key, value)
                use_distinct = (use_distinct or
                                lookup_needs_distinct(self.lookup_opts, key))
            return filter_specs, bool(filter_specs), lookup_params, use_distinct
        except FieldDoesNotExist as e:
            raise IncorrectLookupParameters(e)

    def get_query_string(self, new_params=None, remove=None):
        """Return this view's query string with new_params merged in and any
        parameter whose name starts with an entry of ``remove`` dropped.
        A value of None in new_params deletes that parameter."""
        if new_params is None: new_params = {}
        if remove is None: remove = []
        p = self.params.copy()
        for r in remove:
            for k in p.keys():
                if k.startswith(r):
                    del p[k]
        for k, v in new_params.items():
            if v is None:
                if k in p:
                    del p[k]
            else:
                p[k] = v
        return '?%s' % urlencode(p)

    def get_results(self, request):
        """Paginate self.query_set and store the result counts and the page
        of objects to display on the instance."""
        paginator = self.model_admin.get_paginator(request, self.query_set, self.list_per_page)
        # Get the number of objects, with admin filters applied.
        result_count = paginator.count

        # Get the total number of objects, with no admin filters applied.
        # Perform a slight optimization: Check to see whether any filters were
        # given. If not, use paginator.hits to calculate the number of objects,
        # because we've already done paginator.hits and the value is cached.
        if not self.query_set.query.where:
            full_result_count = result_count
        else:
            full_result_count = self.root_query_set.count()

        can_show_all = result_count <= self.list_max_show_all
        multi_page = result_count > self.list_per_page

        # Get the list of objects to display on this page.
        if (self.show_all and can_show_all) or not multi_page:
            result_list = self.query_set._clone()
        else:
            try:
                # Paginator pages are 1-based; self.page_num is 0-based.
                result_list = paginator.page(self.page_num+1).object_list
            except InvalidPage:
                raise IncorrectLookupParameters

        self.result_count = result_count
        self.full_result_count = full_result_count
        self.result_list = result_list
        self.can_show_all = can_show_all
        self.multi_page = multi_page
        self.paginator = paginator

    def _get_default_ordering(self):
        """Return the ModelAdmin ordering if set, else the model Meta
        ordering, else an empty list."""
        ordering = []
        if self.model_admin.ordering:
            ordering = self.model_admin.ordering
        elif self.lookup_opts.ordering:
            ordering = self.lookup_opts.ordering
        return ordering

    def get_ordering_field(self, field_name):
        """
        Returns the proper model field name corresponding to the given
        field_name to use for ordering. field_name may either be the name of a
        proper model field or the name of a method (on the admin or model) or a
        callable with the 'admin_order_field' attribute. Returns None if no
        proper model field name can be matched.
        """
        try:
            field = self.lookup_opts.get_field(field_name)
            return field.name
        except models.FieldDoesNotExist:
            # See whether field_name is a name of a non-field
            # that allows sorting.
            if callable(field_name):
                attr = field_name
            elif hasattr(self.model_admin, field_name):
                attr = getattr(self.model_admin, field_name)
            else:
                attr = getattr(self.model, field_name)
            return getattr(attr, 'admin_order_field', None)

    def get_ordering(self, request, queryset):
        """
        Returns the list of ordering fields for the change list.
        First we check the get_ordering() method in model admin, then we check
        the object's default ordering. Then, any manually-specified ordering
        from the query string overrides anything. Finally, a deterministic
        order is guaranteed by ensuring the primary key is used as the last
        ordering field.
        """
        params = self.params
        ordering = list(self.model_admin.get_ordering(request)
                        or self._get_default_ordering())
        if ORDER_VAR in params:
            # Clear ordering and used params
            ordering = []
            order_params = params[ORDER_VAR].split('.')
            for p in order_params:
                try:
                    # p is e.g. "2" or "-2": a list_display column index with
                    # an optional leading '-' for descending order.
                    none, pfx, idx = p.rpartition('-')
                    field_name = self.list_display[int(idx)]
                    order_field = self.get_ordering_field(field_name)
                    if not order_field:
                        continue # No 'admin_order_field', skip it
                    ordering.append(pfx + order_field)
                except (IndexError, ValueError):
                    continue # Invalid ordering specified, skip it.

        # Add the given query's ordering fields, if any.
        ordering.extend(queryset.query.order_by)

        # Ensure that the primary key is systematically present in the list of
        # ordering fields so we can guarantee a deterministic order across all
        # database backends.
        pk_name = self.lookup_opts.pk.name
        if not (set(ordering) & set(['pk', '-pk', pk_name, '-' + pk_name])):
            # The two sets do not intersect, meaning the pk isn't present. So
            # we add it.
            ordering.append('-pk')

        return ordering

    def get_ordering_field_columns(self):
        """
        Returns a SortedDict of ordering field column numbers and asc/desc
        """

        # We must cope with more than one column having the same underlying sort
        # field, so we base things on column numbers.
        ordering = self._get_default_ordering()
        ordering_fields = SortedDict()
        if ORDER_VAR not in self.params:
            # for ordering specified on ModelAdmin or model Meta, we don't know
            # the right column numbers absolutely, because there might be more
            # than one column associated with that ordering, so we guess.
            for field in ordering:
                if field.startswith('-'):
                    field = field[1:]
                    order_type = 'desc'
                else:
                    order_type = 'asc'
                for index, attr in enumerate(self.list_display):
                    if self.get_ordering_field(attr) == field:
                        ordering_fields[index] = order_type
                        break
        else:
            for p in self.params[ORDER_VAR].split('.'):
                none, pfx, idx = p.rpartition('-')
                try:
                    idx = int(idx)
                except ValueError:
                    continue # skip it
                ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
        return ordering_fields

    def get_query_set(self, request):
        """Build the fully filtered, searched and ordered queryset for the
        changelist. Raises IncorrectLookupParameters on invalid lookups."""
        # First, we collect all the declared list filters.
        (self.filter_specs, self.has_filters, remaining_lookup_params,
         use_distinct) = self.get_filters(request)

        # Then, we let every list filter modify the queryset to its liking.
        qs = self.root_query_set
        for filter_spec in self.filter_specs:
            new_qs = filter_spec.queryset(request, qs)
            if new_qs is not None:
                qs = new_qs

        try:
            # Finally, we apply the remaining lookup parameters from the query
            # string (i.e. those that haven't already been processed by the
            # filters).
            qs = qs.filter(**remaining_lookup_params)
        except (SuspiciousOperation, ImproperlyConfigured):
            # Allow certain types of errors to be re-raised as-is so that the
            # caller can treat them in a special way.
            raise
        except Exception as e:
            # Every other error is caught with a naked except, because we don't
            # have any other way of validating lookup parameters. They might be
            # invalid if the keyword arguments are incorrect, or if the values
            # are not in the correct type, so we might get FieldError,
            # ValueError, ValidationError, or ?.
            raise IncorrectLookupParameters(e)

        # Use select_related() if one of the list_display options is a field
        # with a relationship and the provided queryset doesn't already have
        # select_related defined.
        if not qs.query.select_related:
            if self.list_select_related:
                qs = qs.select_related()
            else:
                for field_name in self.list_display:
                    try:
                        field = self.lookup_opts.get_field(field_name)
                    except models.FieldDoesNotExist:
                        pass
                    else:
                        if isinstance(field.rel, models.ManyToOneRel):
                            qs = qs.select_related()
                            break

        # Set ordering.
        ordering = self.get_ordering(request, qs)
        qs = qs.order_by(*ordering)

        # Apply keyword searches.
        def construct_search(field_name):
            # Leading ^/=/@ select startswith/iexact/full-text search.
            if field_name.startswith('^'):
                return "%s__istartswith" % field_name[1:]
            elif field_name.startswith('='):
                return "%s__iexact" % field_name[1:]
            elif field_name.startswith('@'):
                return "%s__search" % field_name[1:]
            else:
                return "%s__icontains" % field_name

        if self.search_fields and self.query:
            orm_lookups = [construct_search(str(search_field))
                           for search_field in self.search_fields]
            # Every whitespace-separated search term must match at least one
            # of the search fields (terms are ANDed, fields are ORed).
            for bit in self.query.split():
                or_queries = [models.Q(**{orm_lookup: bit})
                              for orm_lookup in orm_lookups]
                qs = qs.filter(reduce(operator.or_, or_queries))
            if not use_distinct:
                for search_spec in orm_lookups:
                    if lookup_needs_distinct(self.lookup_opts, search_spec):
                        use_distinct = True
                        break

        if use_distinct:
            return qs.distinct()
        else:
            return qs

    def url_for_result(self, result):
        """Return the relative change-form URL for a result row."""
        return "%s/" % quote(getattr(result, self.pk_attname))
|
|
# Copyright 2010 OpenStack Foundation
# Copyright 2013 NTT corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an image service that uses Glance as the backend"""
import copy
import itertools
import random
import shutil
import sys
import textwrap
import time
import typing
from typing import Any, Dict, Tuple # noqa: H301
import urllib
import urllib.parse
import glanceclient.exc
from keystoneauth1.loading import session as ks_session
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import service_auth
image_opts = [
cfg.ListOpt('allowed_direct_url_schemes',
default=[],
help='A list of url schemes that can be downloaded directly '
'via the direct_url. Currently supported schemes: '
'[file, cinder].'),
cfg.StrOpt('verify_glance_signatures',
choices=['disabled', 'enabled'],
default='enabled',
help=textwrap.dedent(
"""
Enable image signature verification.
Cinder uses the image signature metadata from Glance and
verifies the signature of a signed image while downloading
that image. There are two options here.
1. ``enabled``: verify when image has signature metadata.
2. ``disabled``: verification is turned off.
If the image signature cannot be verified or if the image
signature metadata is incomplete when required, then Cinder
will not create the volume and update it into an error
state. This provides end users with stronger assurances
of the integrity of the image data they are using to
create volumes.
""")),
cfg.StrOpt('glance_catalog_info',
default='image:glance:publicURL',
help='Info to match when looking for glance in the service '
'catalog. Format is: separated values of the form: '
'<service_type>:<service_name>:<endpoint_type> - '
'Only used if glance_api_servers are not provided.'),
]
glance_core_properties_opts = [
cfg.ListOpt('glance_core_properties',
default=['checksum', 'container_format',
'disk_format', 'image_name', 'image_id',
'min_disk', 'min_ram', 'name', 'size'],
help='Default core properties of image')
]
CONF = cfg.CONF
CONF.register_opts(image_opts)
CONF.register_opts(glance_core_properties_opts)
_SESSION = None
LOG = logging.getLogger(__name__)
def _parse_image_ref(image_href):
"""Parse an image href into composite parts.
:param image_href: href of an image
:returns: a tuple of the form (image_id, netloc, use_ssl)
:raises ValueError:
"""
url = urllib.parse.urlparse(image_href)
netloc = url.netloc
image_id = url.path.split('/')[-1]
use_ssl = (url.scheme == 'https')
return (image_id, netloc, use_ssl)
def _create_glance_client(context, netloc, use_ssl):
    """Instantiate a new glanceclient.Client object."""
    params = {'global_request_id': context.global_id}

    # NOTE(review): this branch replaces params wholesale (dropping
    # global_request_id) and only fires for SSL endpoints under the
    # 'noauth' strategy - confirm that is intentional.
    if use_ssl and CONF.auth_strategy == 'noauth':
        params = {'insecure': CONF.glance_api_insecure,
                  'cacert': CONF.glance_ca_certificates_file,
                  'timeout': CONF.glance_request_timeout,
                  'split_loggers': CONF.split_loggers
                  }
    if CONF.auth_strategy == 'keystone':
        # Lazily create the module-level keystone session shared by all
        # glance clients in this process.
        global _SESSION
        if not _SESSION:
            config_options = {'insecure': CONF.glance_api_insecure,
                              'cacert': CONF.glance_ca_certificates_file,
                              'timeout': CONF.glance_request_timeout,
                              'cert': CONF.glance_certfile,
                              'key': CONF.glance_keyfile,
                              'split_loggers': CONF.split_loggers
                              }
            _SESSION = ks_session.Session().load_from_options(**config_options)

        auth = service_auth.get_auth_plugin(context)
        params['auth'] = auth
        params['session'] = _SESSION

    scheme = 'https' if use_ssl else 'http'
    endpoint = '%s://%s' % (scheme, netloc)
    # Always speak the glance v2 API.
    return glanceclient.Client('2', endpoint, **params)
def get_api_servers(context):
    """Return Iterable over shuffled api servers.

    Shuffle a list of glance_api_servers and return an iterator
    that will cycle through the list, looping around to the beginning
    if necessary. If CONF.glance_api_servers is None then they will
    be retrieved from the catalog.
    """
    api_servers = []
    api_servers_info = []

    if CONF.glance_api_servers is None:
        # Discover glance endpoints from the service catalog using the
        # configured <service_type>:<service_name>:<endpoint_type> triple.
        info = CONF.glance_catalog_info
        try:
            service_type, service_name, endpoint_type = info.split(':')
        except ValueError:
            raise exception.InvalidConfigurationValue(_(
                "Failed to parse the configuration option "
                "'glance_catalog_info', must be in the form "
                "<service_type>:<service_name>:<endpoint_type>"))
        for entry in context.service_catalog:
            if entry.get('type') == service_type:
                # NOTE(review): only the first endpoint of each matching
                # service is used, and service_name is parsed but never
                # compared - confirm intended.
                api_servers.append(
                    entry.get('endpoints')[0].get(endpoint_type))
    else:
        for api_server in CONF.glance_api_servers:
            api_servers.append(api_server)

    for api_server in api_servers:
        # Default to plain http when no scheme was supplied.
        if '//' not in api_server:
            api_server = 'http://' + api_server
        url = urllib.parse.urlparse(api_server)
        # Keep any path component together with the host:port.
        netloc = url.netloc + url.path
        use_ssl = (url.scheme == 'https')
        api_servers_info.append((netloc, use_ssl))

    random.shuffle(api_servers_info)
    return itertools.cycle(api_servers_info)
class GlanceClientWrapper(object):
    """Glance client wrapper class that implements retries."""

    def __init__(self, context=None, netloc=None, use_ssl=False):
        # With an explicit netloc we pin one static client for every call;
        # otherwise a fresh client is created per call, rotating through the
        # shuffled server list.
        if netloc is not None:
            self.client = self._create_static_client(context,
                                                     netloc,
                                                     use_ssl)
        else:
            self.client = None
        self.api_servers = None

    def _create_static_client(self, context, netloc, use_ssl):
        """Create a client that we'll use for every call."""
        self.netloc = netloc
        self.use_ssl = use_ssl
        return _create_glance_client(context,
                                     self.netloc,
                                     self.use_ssl)

    def _create_onetime_client(self, context):
        """Create a client that will be used for one call."""
        if self.api_servers is None:
            # Infinite iterator cycling over the shuffled server list.
            self.api_servers = get_api_servers(context)
        self.netloc, self.use_ssl = next(self.api_servers)
        return _create_glance_client(context,
                                     self.netloc,
                                     self.use_ssl)

    def call(self, context, method, *args, **kwargs):
        """Call a glance client method.

        If we get a connection error,
        retry the request according to CONF.glance_num_retries.

        :param context: request context forwarded to the glance client
        :param method: method name on the selected glance controller
        :raises exception.GlanceConnectionFailed: when all retries fail
        :raises exception.ImageLimitExceeded: on HTTP over-limit errors
        """
        retry_excs = (glanceclient.exc.ServiceUnavailable,
                      glanceclient.exc.InvalidEndpoint,
                      glanceclient.exc.CommunicationError)
        num_attempts = 1 + CONF.glance_num_retries
        # Controller defaults to 'images'; callers may target e.g.
        # 'image_members' via the 'controller' kwarg.
        glance_controller = kwargs.pop('controller', 'images')
        store_id = kwargs.pop('store_id', None)
        base_image_ref = kwargs.pop('base_image_ref', None)

        for attempt in range(1, num_attempts + 1):
            client = self.client or self._create_onetime_client(context)

            # Optional glance headers for multi-store image handling.
            keys = ('x-image-meta-store', 'x-openstack-base-image-ref',)
            values = (store_id, base_image_ref,)
            headers = {k: v for (k, v) in zip(keys, values) if v is not None}
            if headers:
                client.http_client.additional_headers = headers

            try:
                controller = getattr(client, glance_controller)
                return getattr(controller, method)(*args, **kwargs)
            except retry_excs as e:
                netloc = self.netloc
                extra = "retrying"
                error_msg = _("Error contacting glance server "
                              "'%(netloc)s' for '%(method)s', "
                              "%(extra)s.")
                if attempt == num_attempts:
                    # Out of retries: log and convert to a cinder exception.
                    extra = 'done trying'
                    LOG.exception(error_msg, {'netloc': netloc,
                                              'method': method,
                                              'extra': extra})
                    raise exception.GlanceConnectionFailed(reason=e)

                LOG.exception(error_msg, {'netloc': netloc,
                                          'method': method,
                                          'extra': extra})
                # Brief pause before retrying (possibly a different server
                # when no static client is pinned).
                time.sleep(1)
            except glanceclient.exc.HTTPOverLimit as e:
                raise exception.ImageLimitExceeded(e)
class GlanceImageService(object):
"""Provides storage and retrieval of disk image objects within Glance."""
    def __init__(self, client=None):
        # Retry-capable glance client wrapper; a default one is created when
        # no explicit client is supplied.
        self._client = client or GlanceClientWrapper()
        # Lazily populated cache of the glance image schema.
        self._image_schema = None
        self.temp_images = None
def detail(self, context, **kwargs):
"""Calls out to Glance for a list of detailed image information."""
params = self._extract_query_params(kwargs)
try:
images = self._client.call(context, 'list', **params)
except Exception:
_reraise_translated_exception()
_images = []
for image in images:
if self._is_image_available(context, image):
_images.append(self._translate_from_glance(context, image))
return _images
def _extract_query_params(self, params):
_params = {}
accepted_params = ('filters', 'marker', 'limit',
'sort_key', 'sort_dir')
for param in accepted_params:
if param in params:
_params[param] = params.get(param)
return _params
def list_members(self, context, image_id):
"""Returns a list of dicts with image member data."""
try:
return self._client.call(context,
'list',
controller='image_members',
image_id=image_id)
except Exception:
_reraise_translated_image_exception(image_id)
def get_stores(self, context):
"""Returns a list of dicts with stores information."""
try:
return self._client.call(context,
'get_stores_info')
except Exception:
_reraise_translated_exception()
def show(self,
context: context.RequestContext,
image_id: str) -> Dict[str, Any]:
"""Returns a dict with image data for the given opaque image id."""
try:
image = self._client.call(context, 'get', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
if not self._is_image_available(context, image):
raise exception.ImageNotFound(image_id=image_id)
base_image_meta = self._translate_from_glance(context, image)
return base_image_meta
def get_location(self, context, image_id):
"""Get backend storage location url.
Returns a tuple containing the direct url and locations representing
the backend storage location, or (None, None) if these attributes are
not shown by Glance.
"""
try:
# direct_url is returned by v2 api
client = GlanceClientWrapper()
image_meta = client.call(context, 'get', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
if not self._is_image_available(context, image_meta):
raise exception.ImageNotFound(image_id=image_id)
# some glance stores like nfs only meta data
# is stored and returned as locations.
# so composite of two needs to be returned.
return (getattr(image_meta, 'direct_url', None),
getattr(image_meta, 'locations', None))
def add_location(self, context, image_id, url, metadata):
"""Add a backend location url to an image.
Returns a dict containing image metadata on success.
"""
client = GlanceClientWrapper()
try:
return client.call(context, 'add_location',
image_id, url, metadata)
except Exception:
_reraise_translated_image_exception(image_id)
@typing.no_type_check
def download(self, context, image_id, data=None):
"""Calls out to Glance for data and writes data."""
if data and 'file' in CONF.allowed_direct_url_schemes:
direct_url, locations = self.get_location(context, image_id)
urls = [direct_url] + [loc.get('url') for loc in locations or []]
for url in urls:
if url is None:
continue
parsed_url = urllib.parse.urlparse(url)
if parsed_url.scheme == "file":
# a system call to cp could have significant performance
# advantages, however we do not have the path to files at
# this point in the abstraction.
with open(parsed_url.path, "rb") as f:
shutil.copyfileobj(f, data)
return
try:
image_chunks = self._client.call(context, 'data', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
if image_chunks is None:
raise exception.ImageDownloadFailed(
image_href=image_id, reason=_('image contains no data.'))
if not data:
return image_chunks
else:
for chunk in image_chunks:
data.write(chunk)
def create(self, context, image_meta, data=None):
"""Store the image data and return the new image object."""
sent_service_image_meta = self._translate_to_glance(image_meta)
if data:
sent_service_image_meta['data'] = data
recv_service_image_meta = self._client.call(context, 'create',
**sent_service_image_meta)
return self._translate_from_glance(context, recv_service_image_meta)
def update(self, context, image_id,
image_meta, data=None, purge_props=True,
store_id=None, base_image_ref=None):
"""Modify the given image with the new data."""
# For v2, _translate_to_glance stores custom properties in image meta
# directly. We need the custom properties to identify properties to
# remove if purge_props is True. Save the custom properties before
# translate.
if purge_props:
props_to_update = image_meta.get('properties', {}).keys()
image_meta = self._translate_to_glance(image_meta)
# NOTE(bcwaldon): id is not an editable field, but it is likely to be
# passed in by calling code. Let's be nice and ignore it.
image_meta.pop('id', None)
kwargs = {}
if store_id:
kwargs['store_id'] = store_id
if base_image_ref:
kwargs['base_image_ref'] = base_image_ref
try:
if data:
self._client.call(context, 'upload', image_id, data, **kwargs)
if image_meta:
if purge_props:
# Properties to remove are those not specified in
# input properties.
cur_image_meta = self.show(context, image_id)
cur_props = cur_image_meta['properties'].keys()
remove_props = list(set(cur_props) -
set(props_to_update))
image_meta['remove_props'] = remove_props
image_meta = self._client.call(context, 'update', image_id,
**image_meta)
else:
image_meta = self._client.call(context, 'get', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
else:
return self._translate_from_glance(context, image_meta)
def delete(self, context, image_id):
"""Delete the given image.
:raises ImageNotFound: if the image does not exist.
:raises NotAuthorized: if the user is not an owner.
"""
try:
self._client.call(context, 'delete', image_id)
except glanceclient.exc.NotFound:
raise exception.ImageNotFound(image_id=image_id)
return True
def _translate_from_glance(self, context, image) -> dict:
"""Get image metadata from glance image.
Extract metadata from image and convert it's properties
to type cinder expected.
:param image: glance image object
:return: image metadata dictionary
"""
if self._image_schema is None:
self._image_schema = self._client.call(context, 'get',
controller='schemas',
schema_name='image')
# NOTE(aarefiev): get base image property, store image 'schema'
# is redundant, so ignore it.
image_meta = {key: getattr(image, key)
for key in image.keys()
if self._image_schema.is_base_property(key) is True and
key != 'schema'}
# Process 'cinder_encryption_key_id' as a metadata key
if 'cinder_encryption_key_id' in image.keys():
image_meta['cinder_encryption_key_id'] = \
image['cinder_encryption_key_id']
# NOTE(aarefiev): nova is expected that all image properties
# (custom or defined in schema-image.json) stores in
# 'properties' key.
image_meta['properties'] = {
key: getattr(image, key) for key in image.keys()
if self._image_schema.is_base_property(key) is False}
image_meta = _convert_timestamps_to_datetimes(image_meta)
image_meta = _convert_from_string(image_meta)
return image_meta
@staticmethod
def _translate_to_glance(image_meta):
image_meta = _convert_to_string(image_meta)
image_meta = _remove_read_only(image_meta)
# NOTE(tsekiyama): From the Image API v2, custom properties must
# be stored in image_meta directly, instead of the 'properties' key.
properties = image_meta.get('properties')
if properties:
image_meta.update(properties)
del image_meta['properties']
return image_meta
def _is_image_available(self, context, image):
"""Check image availability.
This check is needed in case Nova and Glance are deployed
without authentication turned on.
"""
# The presence of an auth token implies this is an authenticated
# request and we need not handle the noauth use-case.
if hasattr(context, 'auth_token') and context.auth_token:
return True
if context.is_admin:
return True
if (getattr(image, 'is_public', False) or
getattr(image, 'visibility', 'private') == 'public'):
return True
properties = image.properties
if context.project_id and ('owner_id' in properties):
return str(properties['owner_id']) == str(context.project_id)
if context.project_id and ('project_id' in properties):
return str(properties['project_id']) == str(context.project_id)
if image.visibility == 'shared':
for member in self.list_members(context, image.id):
if (context.project_id == member['member_id'] and
member['status'] == 'accepted'):
return True
try:
user_id = properties['user_id']
except KeyError:
return False
return str(user_id) == str(context.user_id)
def _convert_timestamps_to_datetimes(image_meta):
    """Parse ISO-8601 timestamp fields of *image_meta* into datetimes, in place."""
    timestamp_fields = ('created_at', 'updated_at', 'deleted_at')
    for field in timestamp_fields:
        raw_value = image_meta.get(field)
        if raw_value:
            image_meta[field] = timeutils.parse_isotime(raw_value)
    return image_meta
# NOTE(bcwaldon): used to store non-string data in glance metadata
def _json_loads(properties, attr):
    """Decode properties[attr] from JSON in place when it is a string."""
    raw_value = properties[attr]
    if isinstance(raw_value, str):
        properties[attr] = jsonutils.loads(raw_value)
def _json_dumps(properties, attr):
    """Encode properties[attr] to a JSON string in place when it is not one."""
    raw_value = properties[attr]
    if not isinstance(raw_value, str):
        properties[attr] = jsonutils.dumps(raw_value)
_CONVERT_PROPS = ('block_device_mapping', 'mappings')
def _convert(method, metadata):
metadata = copy.deepcopy(metadata)
properties = metadata.get('properties')
if properties:
for attr in _CONVERT_PROPS:
if attr in properties:
method(properties, attr)
return metadata
def _convert_from_string(metadata):
    """Deserialize JSON-encoded complex properties of *metadata*."""
    return _convert(_json_loads, metadata)
def _convert_to_string(metadata):
    """Serialize complex properties of *metadata* to JSON strings."""
    return _convert(_json_dumps, metadata)
def _extract_attributes(image):
# NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform
# a get(), resulting in a useless request back to glance. This list is
# therefore sorted, with dependent attributes as the end
# 'deleted_at' depends on 'deleted'
# 'checksum' depends on 'status' == 'active'
IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner',
'container_format', 'status', 'id',
'name', 'created_at', 'updated_at',
'deleted', 'deleted_at', 'checksum',
'min_disk', 'min_ram', 'protected',
'visibility',
'cinder_encryption_key_id']
output: Dict[str, Any] = {}
for attr in IMAGE_ATTRIBUTES:
if attr == 'deleted_at' and not output['deleted']:
output[attr] = None
elif attr == 'checksum' and output['status'] != 'active':
output[attr] = None
else:
output[attr] = getattr(image, attr, None)
output['properties'] = getattr(image, 'properties', {})
return output
def _remove_read_only(image_meta):
IMAGE_ATTRIBUTES = ['status', 'updated_at', 'created_at', 'deleted_at']
output = copy.deepcopy(image_meta)
for attr in IMAGE_ATTRIBUTES:
if attr in output:
del output[attr]
return output
def _reraise_translated_image_exception(image_id):
    """Transform the in-flight exception for *image_id*, keeping its traceback."""
    _exc_type, exc_value, exc_trace = sys.exc_info()
    translated = _translate_image_exception(image_id, exc_value)
    raise translated.with_traceback(exc_trace)
def _reraise_translated_exception():
    """Transform the in-flight exception, keeping its traceback intact."""
    _exc_type, exc_value, exc_trace = sys.exc_info()
    translated = _translate_plain_exception(exc_value)
    raise translated.with_traceback(exc_trace)
def _translate_image_exception(image_id, exc_value):
    """Map glanceclient errors onto cinder image exceptions.

    Unrecognized exceptions are returned unchanged.
    """
    translations = (
        ((glanceclient.exc.Forbidden, glanceclient.exc.Unauthorized),
         lambda: exception.ImageNotAuthorized(image_id=image_id)),
        (glanceclient.exc.NotFound,
         lambda: exception.ImageNotFound(image_id=image_id)),
        (glanceclient.exc.BadRequest,
         lambda: exception.Invalid(exc_value)),
    )
    for exc_types, make_exc in translations:
        if isinstance(exc_value, exc_types):
            return make_exc()
    return exc_value
def _translate_plain_exception(exc_value):
    """Map glanceclient errors onto generic cinder exceptions.

    Unrecognized exceptions are returned unchanged.
    """
    translations = (
        ((glanceclient.exc.Forbidden, glanceclient.exc.Unauthorized),
         lambda: exception.NotAuthorized(exc_value)),
        (glanceclient.exc.NotFound,
         lambda: exception.NotFound(exc_value)),
        (glanceclient.exc.BadRequest,
         lambda: exception.Invalid(exc_value)),
    )
    for exc_types, make_exc in translations:
        if isinstance(exc_value, exc_types):
            return make_exc()
    return exc_value
def get_remote_image_service(context: context.RequestContext,
                             image_href) -> Tuple[GlanceImageService, str]:
    """Create an image_service and parse the id from the given image_href.

    The image_href param can be an href of the form
    'http://example.com:9292/v1/images/b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3',
    or just an id such as 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'. If the
    image_href is a standalone id, then the default image service is returned.

    :param image_href: href that describes the location of an image
    :returns: a tuple of the form (image_service, image_id)
    :raises InvalidImageRef: if image_href looks like a URL but cannot be
        parsed into (id, netloc, use_ssl)
    """
    # NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a
    # standalone image ID and use the default, configured glance.
    if '/' not in str(image_href):
        return get_default_image_service(), image_href
    try:
        image_id, glance_netloc, use_ssl = _parse_image_ref(image_href)
        glance_client = GlanceClientWrapper(context=context,
                                            netloc=glance_netloc,
                                            use_ssl=use_ssl)
    except ValueError:
        raise exception.InvalidImageRef(image_href=image_href)
    return GlanceImageService(client=glance_client), image_id
def get_default_image_service():
    """Return a GlanceImageService talking to the default configured glance."""
    return GlanceImageService()
|
|
#!/usr/bin/env python
# Written by Dhiru Kholia <dhiru at openwall.com> in July 2012 for JtR project.
# Copyright (c) 2012-2013, Dhiru Kholia.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# Code borrowed from https://github.com/Roguelazer/onepasswordpy
#
# Copyright (c) 2013, James Brown
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# Code borrowed from https://bitbucket.org/gwik/agilekeychain
#
# Copyright (c) 2009 Antonin Amand <antonin.amand@gmail.com>
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR PROVIDES THIS SOFTWARE ``AS IS'' AND ANY EXPRESSED OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import struct
import base64
try:
import json
assert json
except ImportError:
try:
import simplejson as json
except ImportError:
sys.stderr.write("Please install json / simplejson module which is currently not installed.\n")
sys.exit(-1)
from base64 import b64decode
import binascii
# Smallest possible opdata01 blob: 8-byte magic + 8-byte length + 16-byte IV
# + 16-byte minimum ciphertext + 32-byte HMAC (presumably -- confirm against
# the opdata01 format description).
OPDATA1_MINIMUM_SIZE = 80
DEFAULT_PBKDF_ITERATIONS = 1000
MINIMUM_PBKDF_ITERATIONS = 1000
# AES key sizes (in bits) used by the Agile ("A") and Cloud ("C") keychains.
A_AES_SIZE = 128
C_AES_SIZE = 256
# Map AES key size in bits -> key length in bytes.
KEY_SIZE = {
    128: 16,
    192: 24,
    256: 32,
}
# Number of leading characters skipped when reading profile.js -- presumably
# the "var profile=" prefix before the JSON payload (TODO confirm).
INITIAL_KEY_OFFSET = 12
PY3 = sys.version_info[0] == 3
# NOTE(review): PMV only checks the *minor* version (>= x.6), regardless of
# the major version -- confirm this is the intended "modern version" test.
PMV = sys.version_info[1] >= 6
class Key(object):
    """A single encryption key from a 1Password keychain keyring.

    Understands the OpenSSL "Salted__" container: when the raw key data
    starts with the salted prefix, bytes 8..16 are the salt and the rest
    is the key material; otherwise a 16-byte zero salt is used.
    """
    # b'' literals are valid on Python 2.6+ and Python 3, so the previous
    # exec("SALTED_PREFIX=b'Salted__'") version-switching hack is unnecessary.
    SALTED_PREFIX = b'Salted__'
    # BUG FIX: this must be bytes, not str -- with a str value,
    # binascii.hexlify(self.salt) fails on Python 3 for unsalted keys
    # (see AgileKeychain.john_output). On Python 2, b"" == "" so behavior
    # is unchanged.
    ZERO_IV = b"\0" * 16
    ITERATIONS = 1000
    BLOCK_SIZE = 16
    # AES-256 parameters (rounds / block words / key words), kept for reference.
    Nr = 14
    Nb = 4
    Nk = 8
    def __init__(self, identifier, level, data, validation, iterations):
        """Split *data* into salt and key material and record key metadata.

        :param identifier: key id string from the keychain
        :param level: security level string from the keychain
        :param data: raw (base64-decoded) key bytes, possibly "Salted__"-framed
        :param validation: raw validation bytes for this key
        :param iterations: PBKDF2 iteration count
        """
        self.identifier = identifier
        self.level = level
        self.validation = validation
        bin_data = data
        if self.__is_salted(bin_data):
            self.salt = bin_data[8:16]
            self.data = bin_data[16:]
        else:
            self.salt = self.ZERO_IV
            self.data = bin_data
        self.iterations = iterations
    def __is_salted(self, data):
        # Prefix-slice comparison, so inputs shorter than 8 bytes never raise.
        return self.SALTED_PREFIX == data[:len(self.SALTED_PREFIX)]
def opdata1_unpack(data):
    """Unpack an opdata01 message into its components.

    :param data: raw opdata01 bytes, or their base64 encoding
    :return: (plaintext_length, iv, cryptext, expected_hmac, hmac_d_data)
    :raises TypeError: if the (decoded) data is not an opdata01 message
    """
    HEADER_LENGTH = 8
    TOTAL_HEADER_LENGTH = 32
    HMAC_LENGTH = 32
    # A b"" literal works on Python 2.6+ and 3; no exec() trick needed.
    MAGIC = b"opdata01"
    # BUG FIX: the original compared bytes against the *str* "opdata01",
    # which is never equal on Python 3, so raw binary input was always
    # (wrongly) run through base64.b64decode.
    if data[:HEADER_LENGTH] != MAGIC:
        data = base64.b64decode(data)
    if data[:HEADER_LENGTH] != MAGIC:
        raise TypeError("expected opdata1 format message")
    plaintext_length, iv = struct.unpack("<Q16s",
                                         data[HEADER_LENGTH:TOTAL_HEADER_LENGTH])
    cryptext = data[TOTAL_HEADER_LENGTH:-HMAC_LENGTH]
    expected_hmac = data[-HMAC_LENGTH:]
    # The HMAC is computed over everything preceding it.
    hmac_d_data = data[:-HMAC_LENGTH]
    return plaintext_length, iv, cryptext, expected_hmac, hmac_d_data
class CloudKeychain(object):
    """Emit a JtR '$cloudkeychain$...' line from a 1Password Cloud Keychain."""
    def __init__(self, path, name='default'):
        self.path = path
        self.keys = list()
        self.name = name
        self.entries = None
        # True once <path>/default/profile.js was found; callers use this to
        # decide whether to fall back to the Agile Keychain layout.
        self.processed = False
        self.__open_keys_file()
    def __repr__(self):
        return '<%s.CloudKeychain path="%s">' % (self.__module__, self.path)
    def __open_keys_file(self):
        # Parse profile.js and write the salt, iteration count, master key
        # and its opdata01 fields to stdout in JtR's expected format.
        try:
            keys_file_path = \
                os.path.join(self.path, 'default', 'profile.js')
            if os.path.exists(keys_file_path):
                self.processed = True
            else:
                return
            f = open(keys_file_path, 'r')
            # Skip the non-JSON prefix and the trailing character
            # (presumably "var profile=" ... ";" -- see INITIAL_KEY_OFFSET).
            ds = f.read()[INITIAL_KEY_OFFSET:-1]
            data = json.loads(ds)
            salt = base64.b64decode(data['salt'])
            masterKey = base64.b64decode(data['masterKey'])
            sys.stdout.write("$cloudkeychain$%s$%s$%s$%s$%s" % (len(salt),
                             binascii.hexlify(salt).decode("ascii"),
                             data["iterations"],
                             len(masterKey),
                             binascii.hexlify(masterKey).decode("ascii")))
            # The master key itself is an opdata01 blob; append its parts.
            plaintext_length, iv, cryptext, expected_hmac, hmac_d_data = \
                opdata1_unpack(data['masterKey'])
            sys.stdout.write("$%s$%s$%s$%s$%s$%s$%s$%s$%s\n" % \
                             (plaintext_length, len(iv),
                              binascii.hexlify(iv).decode("ascii"), len(cryptext),
                              binascii.hexlify(cryptext).decode("ascii"),
                              len(expected_hmac),
                              binascii.hexlify(expected_hmac).decode("ascii"),
                              len(hmac_d_data),
                              binascii.hexlify(hmac_d_data).decode("ascii")))
        except (IOError, KeyError, ValueError, TypeError):
            e = sys.exc_info()[1]
            sys.stderr.write('Error while opening the keychain, %s\n' % str(e))
class AgileKeychain(object):
    """Emit a JtR '$agilekeychain$...' line from a 1Password Agile Keychain."""
    def __init__(self, path, name='default'):
        self.path = path
        self.name = name
        self.entries = None
        self.keys = list()
        ret = self.__open_keys_file()
        if ret:
            self.john_output()
    def __repr__(self):
        return '<%s.AgileKeychain path="%s">' % (self.__module__, self.path)
    def __open_keys_file(self):
        """Open the json file containing the keys for decrypting the
        real keychain and parse it
        """
        try:
            keys_file_path = \
                os.path.join(self.path, 'data', self.name, 'encryptionKeys.js')
            keys_file = open(keys_file_path, 'r')
            try:
                keys = json.loads(keys_file.read())
                self.keys = []
                for kd in keys['list']:
                    try:
                        # First attempt strips the last character of the
                        # base64 payloads (presumably a trailing newline in
                        # some keychains -- confirm); a TypeError from
                        # b64decode falls back to decoding them unmodified.
                        key = Key(kd['identifier'],
                                  kd['level'],
                                  b64decode(kd['data'][:-1]),
                                  b64decode(kd['validation'][:-1]),
                                  kd.get('iterations', Key.ITERATIONS))
                        self.keys.append(key)
                    except TypeError:
                        key = Key(kd['identifier'],
                                  kd['level'],
                                  b64decode(kd['data']),
                                  b64decode(kd['validation']),
                                  kd.get('iterations', Key.ITERATIONS))
                        self.keys.append(key)
            finally:
                keys_file.close()
        except (IOError, KeyError, ValueError, TypeError):
            e = sys.exc_info()[1]
            sys.stderr.write('Error while opening the keychain, %s\n' % str(e))
            return False
        return True
    def john_output(self):
        # One line: "<path>:$agilekeychain$<nkeys>" followed by
        # "*iterations*saltlen*salthex*datalen*datahex" per key.
        sys.stdout.write("%s:$agilekeychain$%s" % (self.path, len(self.keys)))
        for i in range(0, len(self.keys)):
            sys.stdout.write("*%s*%s*%s*%s*%s" % (self.keys[i].iterations,
                             len(self.keys[i].salt),
                             binascii.hexlify(self.keys[i].salt).decode("ascii"),
                             len(self.keys[i].data),
                             binascii.hexlify(self.keys[i].data).decode("ascii")))
        sys.stdout.write("\n")
def process_file(keychain):
    """Try the Cloud Keychain layout first; fall back to Agile Keychain."""
    cloud = CloudKeychain(keychain)
    if not cloud.processed:
        AgileKeychain(keychain)
if __name__ == "__main__":
    # Require at least one keychain path on the command line.
    if len(sys.argv) < 2:
        sys.stderr.write("Usage: %s <1Password Agile Keychain(s)>\n" %
                         sys.argv[0])
        sys.exit(-1)
    for keychain_path in sys.argv[1:]:
        process_file(keychain_path)
|
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import argparse
import os
import re
import pkg_resources
import sys
from datetime import datetime
from dateutil import tz
from botocore.compat import six
from cement.utils.misc import minimal_logger
from subprocess import Popen, PIPE, STDOUT
urllib = six.moves.urllib
from ebcli.objects.exceptions import CommandError, InvalidOptionsError
from ebcli.core import io, fileoperations
LOG = minimal_logger(__name__)
def prompt_for_item_in_list(lst, default=1):
    """Prompt the user to pick an entry from *lst* and return that entry."""
    chosen_index = prompt_for_index_in_list(lst, default)
    return lst[chosen_index]
def prompt_for_index_in_list(lst, default=1):
    """Display *lst* as a 1-based numbered menu; return the chosen 0-based index.

    Re-prompts until the user enters an integer within range.
    """
    lst = list(lst)
    for number, item in enumerate(lst, start=1):
        io.echo(str(number) + ')', item)
    while True:
        try:
            choice = int(io.prompt('default is ' + str(default),
                                   default=default))
            if 0 < choice <= len(lst):
                break
            raise ValueError  # out of range: same handling as non-numeric input
        except ValueError:
            io.echo('Sorry, that is not a valid choice. '
                    'Please choose a number between 1 and ' +
                    str(len(lst)) + '.')
    return choice - 1
def get_unique_name(name, current_uniques):
    """Return *name*, or *name* plus the smallest integer suffix >= 2 that
    avoids a collision with the names in *current_uniques*.

    :param name: desired base name
    :param current_uniques: container of names already taken
    :return str: a name not present in current_uniques
    """
    # Dead commented-out Python-2 decode logic removed.
    base_name = name
    number = 2
    while base_name in current_uniques:
        base_name = name + str(number)
        number += 1
    return base_name
def mask_vars(key, value):
    """Mask the value of credential-like environment variables.

    Returns (key, value) with value replaced by "*****" when the key names
    a secret and the value is not None.
    """
    is_sensitive = (key == 'AWS_ACCESS_KEY_ID' or
                    key == 'AWS_SECRET_KEY' or
                    re.match('.*_CONNECTION_STRING', key) is not None)
    if is_sensitive and value is not None:
        value = "*****"
    return key, value
def print_list_in_columns(lst):
    """
    Print *lst* in three columns when stdout is a terminal, otherwise one
    item per line (so piped output stays machine-readable).

    This function is currently only intended for environment names,
    which are guaranteed to be 23 characters or less.
    :param lst: List of env names
    """
    if sys.stdout.isatty():
        # (removed an unused local `index` that the original left behind)
        columns = list_to_columns(lst)
        # Print row by row; later columns may be shorter than the first.
        for row in range(0, len(columns[0])):
            line = []
            for column in columns:
                try:
                    line.append(column[row])
                except IndexError:
                    pass
            io.echo_and_justify(42, *line)
    else:
        # Don't print in columns if output is being piped.
        for item in lst:
            io.echo(item)
def list_to_columns(lst):
    """Split *lst* into exactly 3 column lists, filled column by column.

    Trailing columns may be shorter (or empty) when the list does not divide
    evenly. Requires len(lst) > 3.
    """
    COLUMN_NUM = 3
    assert len(lst) > COLUMN_NUM, "List size must be greater than {0}".format(
        COLUMN_NUM)
    column_size, remainder = divmod(len(lst), COLUMN_NUM)
    if remainder:
        column_size += 1
    return [list(lst[i * column_size:(i + 1) * column_size])
            for i in range(COLUMN_NUM)]
def url_encode(data):
    """Percent-encode *data* for use in a URL (slashes are left intact,
    per urllib's quote() default)."""
    encoded = urllib.parse.quote(data)
    return encoded
def get_delta_from_now_and_datetime(date):
    """Return now - date as a timedelta (*date* is naive UTC, converted to local)."""
    return datetime.now(tz.tzlocal()) - get_local_time(date)
def get_local_time(utctime):
    """Interpret naive *utctime* as UTC and convert it to the local timezone."""
    as_utc = utctime.replace(tzinfo=tz.tzutc())
    return as_utc.astimezone(tz.tzlocal())
def get_local_time_as_string(utctime):
    """Format a naive UTC datetime as a local-time 'YYYY-MM-DD HH:MM:SS' string."""
    return get_local_time(utctime).strftime("%Y-%m-%d %H:%M:%S")
def is_ssh():
    """Return True when the current session appears to be running over SSH."""
    return any(var in os.environ for var in ("SSH_CLIENT", "SSH_TTY"))
def static_var(varname, value):
    """Decorator factory: attach attribute *varname* = *value* to the function."""
    def attach(func):
        setattr(func, varname, value)
        return func
    return attach
def exec_cmd(args, live_output=True):
    """
    Execute a child program (args) in a new process. Displays
    live output by default.
    :param args: list: describes the command to be run
    :param live_output: bool: whether to print live output
    :return str: child program output
    :raises CommandError: when the child exits with a nonzero return code
    """
    LOG.debug(' '.join(args))
    # stderr is merged into stdout so callers see one ordered stream.
    process = Popen(args, stdout=PIPE, stderr=STDOUT)
    output = []
    for line in iter(process.stdout.readline, b''):
        line = line.decode('utf-8')
        # Bare newline-only lines are neither echoed nor collected.
        if line != os.linesep:
            if live_output:
                sys.stdout.write(line)
                sys.stdout.flush()
            else:
                LOG.debug(line)
            output.append(line)
    process.stdout.close()
    process.wait()
    returncode = process.returncode
    error_msg = 'Exited with return code {}'.format(returncode)
    output_str = ''.join(output)
    if returncode:
        raise CommandError(error_msg, output_str, returncode)
    return output_str
# Backwards-compatible alias: live output is already exec_cmd's default.
exec_cmd_live_output = exec_cmd
def exec_cmd_quiet(args):
    """Run *args* like exec_cmd but without echoing output to stdout."""
    return exec_cmd(args, False)
def flatten(lists):
    """
    Return a new (shallow) flattened list.
    :param lists: list: a list of lists
    :return list
    """
    flat = []
    for sublist in lists:
        flat.extend(sublist)
    return flat
def anykey(d):
    """
    Return any key in dictionary.
    :param d: dict: dictionary
    :return object
    :raises StopIteration: if the dictionary is empty
    """
    # six.iterkeys was an unnecessary py2 shim here: next(iter(d)) yields
    # the same first key on both Python 2 and 3 without the dependency.
    return next(iter(d))
def last_modified_file(filepaths):
    """
    Return the most recently modified file.
    :param filepaths: list: paths to files
    :return str
    """
    newest = max(filepaths, key=os.path.getmtime)
    return newest
def get_data_from_url(url, timeout=20):
    """Fetch *url* and return the raw response body."""
    return urllib.request.urlopen(url, timeout=timeout).read()
def print_from_url(url):
    """Fetch *url* and echo its body to the console."""
    result = get_data_from_url(url)
    io.echo(result)
def parse_version(version_string):
    """
    Parse string as a version object for comparison
    Example: parse_version('1.9.2') > parse_version('1.9.alpha')
    See docs for pkg_resource.parse_version as this is just a wrapper
    """
    return pkg_resources.parse_version(version_string)
def save_file_from_url(url, location, filename):
    """Download *url* and hand the bytes to fileoperations.save_to_file;
    returns whatever save_to_file returns (presumably the saved path --
    confirm against fileoperations)."""
    result = get_data_from_url(url)
    return fileoperations.save_to_file(result, location, filename)
# http://stackoverflow.com/a/5164027
def prettydate(d):
    """
    Return a human readable str of how long d was compared to now.
    :param d: datetime/float: datetime or unix timestamp
    :return str
    """
    if isinstance(d, float):
        # Treat floats as epoch timestamps.
        d = datetime.utcfromtimestamp(d)
    diff = datetime.utcnow() - d
    s = diff.seconds
    # Guard-clause chain, most-distant cases first.
    if diff.days > 7 or diff.days < 0:
        return d.strftime('%d %b %y')
    if diff.days == 1:
        return '1 day ago'
    if diff.days > 1:
        return '{0} days ago'.format(diff.days)
    if s <= 1:
        return 'just now'
    if s < 60:
        return '{0} seconds ago'.format(s)
    if s < 120:
        return '1 minute ago'
    if s < 3600:
        return '{0} minutes ago'.format(s // 60)
    if s < 7200:
        return '1 hour ago'
    return '{0} hours ago'.format(s // 3600)
def merge_dicts(low_priority, high_priority):
    """
    Return a new dict that is a merge of low_priority and high_priority dicts.
    When keys collide, takes the value of higher_priority dict.
    :param low_priority: dict: shallow dictionary
    :param high_priority: dict: shallow dictionary
    :return dict
    """
    merged = dict(low_priority)
    merged.update(high_priority)
    return merged
def retract_string(string):
    """Mask all but the first four and last four characters of *string*
    with '*'; strings of length <= 8 are returned unchanged.

    Returns '' when the input cannot be processed (e.g. None).
    """
    try:
        string_len = len(string)
        # BUG FIX: the original called .extend() on a range object, which
        # raises AttributeError on Python 3; the bare `except` then silently
        # turned every result into ''. A set handles overlap for short
        # strings the same way the py2 list version did.
        keep_characters = set(range(0, 4))
        keep_characters.update(range(string_len - 4, string_len))
        retracted = [c if i in keep_characters else '*'
                     for i, c in enumerate(string)]
        return ''.join(retracted)
    except Exception:
        # Narrowed from a bare `except`: still best-effort, but no longer
        # swallows KeyboardInterrupt/SystemExit.
        return ''
def check_source(value):
    """argparse type-checker: require a 'location/repo/branch' source string."""
    has_three_parts = re.match(r"([^/]+/[^/]+/[^/]+)", value) is not None
    if not has_three_parts or len(value.split("/")) > 3:
        raise argparse.ArgumentTypeError(
            "%s is a invalid source. Example source would be something like: codecommit/repo/branch" % value)
    return value
def parse_source(source):
    """Split a 'location/repo/branch' string into its parts.

    Returns None for None input; raises via validate_source_location for
    unsupported locations. Source is already validated by check_source.
    """
    if source is None:
        return
    parts = source.split('/')
    location = parts[0].lower()
    validate_source_location(location)
    return location, parts[1], parts[2]
def validate_source_location(source_location):
    """Raise InvalidOptionsError unless *source_location* is a supported kind."""
    valid_source_locations = ['codecommit']
    if source_location not in valid_source_locations:
        raise InvalidOptionsError("Source location '{0}' is not in the list of valid locations: {1}".format(source_location, valid_source_locations))
def encode_to_ascii(unicode_value):
    """Encode to ASCII, silently dropping non-ASCII characters; '' for None."""
    if unicode_value is None:
        return ""
    return unicode_value.encode('ascii', 'ignore')
def decode_bytes(value):
    """On Python 3, decode bytes to a UTF-8 str; any other value passes through."""
    if sys.version_info[0] >= 3 and isinstance(value, bytes):
        value = value.decode('utf8')
    return value
|
|
from __future__ import absolute_import
from __future__ import print_function
from typing import cast, Any
import sys
import unittest
try:
from tools.lib.css_parser import (
CssParserException,
CssSection,
parse,
handle_prefluff,
handle_postfluff
)
except ImportError:
print('ERROR!!! You need to run this via tools/test-tools.')
sys.exit(1)
class ParserTestHappyPath(unittest.TestCase):
    """Tests that well-formed CSS parses, round-trips, and reformats as expected."""
    def test_basic_parse(self):
        # type: () -> None
        my_selector = 'li.foo'
        my_block = '''{
                color: red;
            }'''
        my_css = my_selector + ' ' + my_block
        res = parse(my_css)
        self.assertEqual(res.text(), 'li.foo {\n    color: red;\n}')
        section = cast(CssSection, res.sections[0])
        block = section.declaration_block
        self.assertEqual(block.text().strip(), '{\n    color: red;\n}')
        declaration = block.declarations[0]
        self.assertEqual(declaration.css_property, 'color')
        self.assertEqual(declaration.css_value.text().strip(), 'red')
    def test_same_line_comment(self):
        # type: () -> None
        my_css = '''
            li.hide {
                display: none; /* comment here */
                /* Not to be confused
                   with this comment */
                color: green;
            }'''
        res = parse(my_css)
        section = cast(CssSection, res.sections[0])
        block = section.declaration_block
        declaration = block.declarations[0]
        # Same-line comments stay attached to their declaration.
        self.assertIn('/* comment here */', declaration.text())
    def test_no_semicolon(self):
        # type: () -> None
        my_css = '''
            p { color: red }
            '''
        reformatted_css = '\np {\n    color: red;\n}\n'
        res = parse(my_css)
        self.assertEqual(res.text(), reformatted_css)
        section = cast(CssSection, res.sections[0])
        # The parser records that the source declaration had no semicolon.
        self.assertFalse(section.declaration_block.declarations[0].semicolon)
    def test_empty_block(self):
        # type: () -> None
        my_css = '''
            div {
            }'''
        error = 'Empty declaration'
        with self.assertRaisesRegex(CssParserException, error):
            parse(my_css)
    def test_multi_line_selector(self):
        # type: () -> None
        my_css = '''
            h1,
            h2,
            h3 {
                top: 0
            }'''
        res = parse(my_css)
        section = res.sections[0]
        selectors = section.selector_list.selectors
        self.assertEqual(len(selectors), 3)
    def test_comment_at_end(self):
        # type: () -> None
        '''
        This test verifies the current behavior, which is to
        attach comments to the preceding rule, but we should
        probably change it so the comments gets attached to
        the next block, if possible.
        '''
        my_css = '''
            p {
                color: black;
            }

            /* comment at the end of the text */
            '''
        res = parse(my_css)
        self.assertEqual(len(res.sections), 1)
        section = res.sections[0]
        self.assertIn('comment at the end', section.post_fluff)
    def test_media_block(self):
        # type: () -> None
        my_css = '''
            @media (max-width: 300px) {
                h5 {
                    margin: 0;
                }
            }'''
        res = parse(my_css)
        self.assertEqual(len(res.sections), 1)
        self.assertEqual(res.text(), '\n@media (max-width: 300px) {\n    h5 {\n        margin: 0;\n    }\n}')
    def test_handle_prefluff(self):
        # type: () -> None
        # Fixtures: plain whitespace, and comments with/without leading newline.
        PREFLUFF = '   \n    '
        PREFLUFF1 = '  '
        PREFLUFF2 = '  /* some comment \nhere */'
        PREFLUFF3 = '\n  /* some comment \nhere */'
        self.assertEqual(handle_prefluff(PREFLUFF), '\n')
        self.assertEqual(handle_prefluff(PREFLUFF, True), '\n    ')
        self.assertEqual(handle_prefluff(PREFLUFF1), '')
        self.assertEqual(handle_prefluff(PREFLUFF1, True), '\n    ')
        self.assertEqual(handle_prefluff(PREFLUFF2), '/* some comment\n   here */\n')
        self.assertEqual(handle_prefluff(PREFLUFF3, True), '\n    /* some comment\n       here */\n    ')
    def test_handle_postfluff(self):
        # type: () -> None
        POSTFLUFF = '/* Comment Here */'
        POSTFLUFF1 = '/* Comment \nHere */'
        POSTFLUFF2 = '  '
        POSTFLUFF3 = '\n  /* some comment \nhere */'
        self.assertEqual(handle_postfluff(POSTFLUFF), '/* Comment Here */\n')
        self.assertEqual(handle_postfluff(POSTFLUFF, space_after_first_line=True), ' /* Comment Here */\n')
        self.assertEqual(handle_postfluff(POSTFLUFF, indent=True, space_after_first_line=True), ' /* Comment Here */\n')
        self.assertEqual(handle_postfluff(POSTFLUFF1), '/* Comment\n   Here */')
        self.assertEqual(handle_postfluff(POSTFLUFF1, space_after_first_line=True), ' /* Comment\n   Here */\n')
        self.assertEqual(handle_postfluff(POSTFLUFF1, indent=True, space_after_first_line=True), ' /* Comment\n       Here */\n')
        self.assertEqual(handle_postfluff(POSTFLUFF2), '')
        self.assertEqual(handle_postfluff(POSTFLUFF2, space_after_first_line=True), '')
        self.assertEqual(handle_postfluff(POSTFLUFF2, indent=True, space_after_first_line=True), '\n')
        self.assertEqual(handle_postfluff(POSTFLUFF3), '\n/* some comment\n   here */')
        self.assertEqual(handle_postfluff(POSTFLUFF3, space_after_first_line=True), '\n/* some comment\n   here */\n')
        self.assertEqual(handle_postfluff(POSTFLUFF3, indent=True, space_after_first_line=True), '\n    /* some comment\n       here */\n')
class ParserTestSadPath(unittest.TestCase):
    '''
    Use this class for tests that verify the parser will
    appropriately choke on malformed CSS.

    We prevent some things that are technically legal
    in CSS, like having comments in the middle of list
    of selectors.  Some of this is just for expediency;
    some of this is to enforce consistent formatting.
    '''
    def _assert_error(self, my_css, error):
        # type: (str, str) -> None
        # Helper: parsing my_css must raise CssParserException matching error.
        with self.assertRaisesRegex(CssParserException, error):
            parse(my_css)
    def test_unexpected_end_brace(self):
        # type: () -> None
        my_css = '''
            @media (max-width: 975px) {
                body {
                    color: red;
                }
            }} /* whoops */'''
        error = 'unexpected }'
        self._assert_error(my_css, error)
    def test_empty_section(self):
        # type: () -> None
        my_css = '''

            /* nothing to see here, move along */
            '''
        error = 'unexpected empty section'
        self._assert_error(my_css, error)
    def test_missing_colon(self):
        # type: () -> None
        my_css = '''
            .hide
            {
                display none /* no colon here */
            }'''
        error = 'We expect a colon here'
        self._assert_error(my_css, error)
    def test_unclosed_comment(self):
        # type: () -> None
        my_css = ''' /* comment with no end'''
        error = 'unclosed comment'
        self._assert_error(my_css, error)
    def test_missing_selectors(self):
        # type: () -> None
        my_css = '''
            /* no selectors here */
            {
                bottom: 0
            }'''
        error = 'Missing selector'
        self._assert_error(my_css, error)
    def test_missing_value(self):
        # type: () -> None
        my_css = '''
            h1
            {
                bottom:
            }'''
        error = 'Missing value'
        self._assert_error(my_css, error)
    def test_disallow_comments_in_selectors(self):
        # type: () -> None
        my_css = '''
            h1,
            h2, /* comment here not allowed by Zulip */
            h3 {
                top: 0
            }'''
        error = 'Comments in selector section are not allowed'
        self._assert_error(my_css, error)
|
|
"""
Binary_model - binary regression model including MBR
Developer: (James) Sung-Jin Kim, jaemssungjin.kim@gmail.com
Creation Date: July 11, 2015
Update Date: July 11, 2015
Version: ver 0.1 rev 0
"""
from sklearn import linear_model
import numpy as np
import j3x.jpyx
class BIKE_A_Ridge(linear_model.Ridge):  # Later on, Viking will be built
    """
    BIKE - BInary Kernel Ensemble (BIKE) method

    Ridge regression on a precomputed similarity (kernel) matrix `A`, where
    training/testing samples are referenced by row indices into `A`.
    """

    def __init__(self, A, alpha=0.5):
        """
        A is precomputed similarity matrix of xM(all)
        Depending on k-fold indexing, the associated A[train] and A[test] matrices will be selected.
        """
        self.A = A
        super(BIKE_A_Ridge, self).__init__(alpha=alpha)

    def _fit(self, xM_train_idx, yV):
        # Alternate fit() based on an np.ix_ index grid.
        self.train_idx = xM_train_idx[:, 0]
        A_train = self.A[np.ix_(xM_train_idx[:, 0], self.train_idx)]
        # Fixed: the super() call previously named BIKE_Ridge (a different
        # class), which raised TypeError whenever this method was used.
        super(BIKE_A_Ridge, self).fit(A_train, yV)

    def fit(self, xM_train_idx, yV):
        # xM_train_idx is expected to be an (n, 1) column of row indices;
        # broadcasting it against its transpose selects the (n, n) train block.
        self.train_idx = xM_train_idx.T
        # Fixed: tuple-style indexing replaces the deprecated list-of-arrays
        # form `A[[rows, cols]]`, which modern NumPy rejects.
        A_train = self.A[xM_train_idx, self.train_idx]
        super(BIKE_A_Ridge, self).fit(A_train, yV)

    def predict(self, xM_test_idx):
        """
        The index vector of a train sequence will be used to pick up
        testing similarity matrix (or precomputed kernel output matrix).
        """
        A_test = self.A[xM_test_idx, self.train_idx]
        return super(BIKE_A_Ridge, self).predict(A_test)
class BIKE_Ridge(linear_model.Ridge):  # Later on, Viking will be built
    """
    BIKE - BInary Kernel Ensemble (BIKE) method

    Ridge regression on the horizontal concatenation of one or more
    precomputed kernel matrices (`A_list`) and, optionally, raw linear
    descriptors (`X`).
    """

    def __init__(self, A_list=[], X=None, alpha=0.5):
        """
        A is precomputed similarity matrix of xM(all)
        Depending on k-fold indexing, the associated A[train] and A[test] matrices will be selected.
        """
        self.A_list = A_list
        self.X = X
        super(BIKE_Ridge, self).__init__(alpha=alpha)

    def gen_AX(self, xM_idx):
        """Build the regression input matrix for the rows in `xM_idx`.

        Each kernel block is restricted to columns of the stored training
        indices; raw descriptors (if any) are appended after the kernels.
        """
        AX_list = list()
        for A in self.A_list:
            # Fixed: tuple-style indexing replaces the deprecated
            # list-of-arrays form `A[[rows, cols]]` rejected by modern NumPy.
            AX_list.append(A[xM_idx, self.xM_train_idx_T])
        # Now X will be added as well since it is also a part of descriptor set.
        if self.X is not None:
            xM_con = self.X[xM_idx[:, 0], :]
            AX_list.append(xM_con)
        # All kernel outputs and linear descriptors will be used as an input matrix.
        return np.concatenate(AX_list, axis=1)

    def fit(self, xM_train_idx, yV):
        """
        A common part between fit() and predict() are made be a function, gen_AX
        """
        self.xM_train_idx_T = xM_train_idx.T
        AX = self.gen_AX(xM_train_idx)
        super(BIKE_Ridge, self).fit(AX, yV)

    def predict(self, xM_test_idx):
        """
        The index vector of a train sequence will be used to pick up
        testing similarity matrix (or precomputed kernel output matrix).
        """
        AX = self.gen_AX(xM_test_idx)
        return super(BIKE_Ridge, self).predict(AX)
"""
MBR Ensemble
- Since 'direct' is a part of this resembling mode,
the performance of the direct (MLR) cases can be evaluated with this MBR-Ensemble method.
"""
class MBR_Ensemble_Ridge(linear_model.Ridge):
    """
    Ridge regression on an ensemble of per-descriptor-group kernels (MBR).

    The concatenated descriptor matrix is split into groups (at `fsp_l`),
    each group is transformed according to its preprocessing mode in
    `fpm_l`, and the transformed blocks are concatenated as input features.
    """

    def __init__(self, alpha=0.5, fsp_l=[], fpm_l=[]):
        """
        fsp_l: feature split points for splitting different descriptors
               - refer to np.split()
        fpm_l: feature preprocessing mode
               - 'tanimoto', 'direct' (supported), 'rbf', 'tm-rbf' (under development)
        Note: len( fsp_l) == len( fpm_l) - 1 to specify preprocessing modes for each feature group
        """
        self.fsp_l = fsp_l
        if len(fpm_l) == 0:
            # Fixed: the default mode list was previously bound to the local
            # name `fpm_l` only, leaving self.fpm_l undefined and breaking fit().
            self.fpm_l = ['tanimoto'] * (len(fsp_l) + 1)
        elif len(fsp_l) == len(fpm_l) - 1:
            self.fpm_l = fpm_l
        else:
            raise ValueError("Check to be: len( fsp_l) == len( fpm_l) - 1")
        super(MBR_Ensemble_Ridge, self).__init__(alpha=alpha)

    def fit(self, xM_train_con, yV):
        # Split the concatenated descriptor matrix back into its groups.
        self.xM_train_l = np.split(xM_train_con, self.fsp_l, axis=1)
        A_train_l = list()
        for xM_train, fpm in zip(self.xM_train_l, self.fpm_l):
            if fpm == 'tanimoto':
                # Since tanimoto is applied, xM must be binary but
                # it is non binary type because it is combined with other type (float)
                A_train_l.append(j3x.jpyx.calc_tm_sim_M(xM_train.astype(int)))
            elif fpm == 'direct':
                A_train_l.append(xM_train)
            else:
                raise ValueError("For fpm, the given mode is not supported:" + fpm)
        A_train_ensemble = np.concatenate(A_train_l, axis=1)
        super(MBR_Ensemble_Ridge, self).fit(A_train_ensemble, yV)

    def predict(self, xM_test_con):
        xM_test_l = np.split(xM_test_con, self.fsp_l, axis=1)
        A_test_l = list()
        for xM_train, xM_test, fpm in zip(self.xM_train_l, xM_test_l, self.fpm_l):
            if fpm == 'tanimoto':
                # Test-vs-train kernel, sliced from the kernel of the
                # stacked (train + test) matrix.
                xM_all = np.concatenate((xM_train, xM_test), axis=0)
                A_all = j3x.jpyx.calc_tm_sim_M(xM_all.astype(int))
                A_test = A_all[xM_train.shape[0]:, :xM_train.shape[0]]
                A_test_l.append(A_test)
            elif fpm == 'direct':
                A_test_l.append(xM_test)
            else:
                raise ValueError("For fpm, the given mode is not supported:" + fpm)
        A_test_ensemble = np.concatenate(A_test_l, axis=1)
        return super(MBR_Ensemble_Ridge, self).predict(A_test_ensemble)
"""
MBR EnsembleBin
- if MBR_Ensemble is meta class inherented from six.with_metaclass(ABCMeta, LinearModel),
MBR_Ensemble_Ridge and MBR_Ensemble_Lasso can be more compact such as
describing only __init__ by inhereting both MBR_Ensemble and either
linear_model.Ridge or linear_model.Lasso depending on the mode.
- Now it is implemnted more simply. Later, such deep implementation will be applied.
"""
class _MBR_EnsembleBin_Ridge_r0(linear_model.Ridge):
    """Earlier revision of MBR_EnsembleBin_Ridge taking pre-split input lists."""

    def __init__(self, alpha=0.5):
        # Fixed: every super() call in this class previously referenced
        # MBR_EnsembleBin_Ridge (a different class), so instantiation and
        # all method calls raised TypeError.
        super(_MBR_EnsembleBin_Ridge_r0, self).__init__(alpha=alpha)

    def fit(self, xM_train_l, yV):
        # xM_train_l: list of binary descriptor matrices, one per group.
        self.xM_train_l = xM_train_l
        A_train_l = list(map(j3x.jpyx.calc_tm_sim_M, xM_train_l))
        A_train_ensemble = np.concatenate(A_train_l, axis=1)
        super(_MBR_EnsembleBin_Ridge_r0, self).fit(A_train_ensemble, yV)

    def predict(self, xM_test_l):
        # Stack train+test per group so the Tanimoto kernel between the two
        # sets can be sliced from the full kernel matrix.
        xM_all_l = [np.concatenate((xM_train, xM_test), axis=0)
                    for xM_train, xM_test in zip(self.xM_train_l, xM_test_l)]
        A_all_l = list(map(j3x.jpyx.calc_tm_sim_M, xM_all_l))
        A_test_l = [A_all[xM_train.shape[0]:, :xM_train.shape[0]]
                    for A_all, xM_train in zip(A_all_l, self.xM_train_l)]
        A_test_ensemble = np.concatenate(A_test_l, axis=1)
        return super(_MBR_EnsembleBin_Ridge_r0, self).predict(A_test_ensemble)
class MBR_EnsembleBin_Ridge(linear_model.Ridge):
    """Ridge regression on concatenated Tanimoto kernels of binary descriptor groups."""

    def __init__(self, alpha=0.5, fsp_l=[]):
        """
        fsp_l: feature split points separating the descriptor groups
        (passed straight to np.split).
        """
        self.fsp_l = fsp_l
        super(MBR_EnsembleBin_Ridge, self).__init__(alpha=alpha)

    def fit(self, xM_train_con, yV):
        # Break the concatenated matrix into per-group blocks and keep them
        # for predict(), which must rebuild the kernels against new samples.
        self.xM_train_l = np.split(xM_train_con, self.fsp_l, axis=1)
        kernel_blocks = []
        for train_block in self.xM_train_l:
            kernel_blocks.append(j3x.jpyx.calc_tm_sim_M(train_block))
        super(MBR_EnsembleBin_Ridge, self).fit(
            np.concatenate(kernel_blocks, axis=1), yV)

    def predict(self, xM_test_con):
        test_blocks = np.split(xM_test_con, self.fsp_l, axis=1)
        kernel_blocks = []
        for train_block, test_block in zip(self.xM_train_l, test_blocks):
            # Kernel of the stacked matrix; the lower-left slab is the
            # test-vs-train similarity block we need.
            stacked = np.concatenate((train_block, test_block), axis=0)
            full_kernel = j3x.jpyx.calc_tm_sim_M(stacked)
            n_train = train_block.shape[0]
            kernel_blocks.append(full_kernel[n_train:, :n_train])
        return super(MBR_EnsembleBin_Ridge, self).predict(
            np.concatenate(kernel_blocks, axis=1))
"""
MBR TM
- Gamma is not considered.
"""
class MBR_TM_Ridge(linear_model.Ridge):
    """Ridge regression on a Tanimoto-similarity kernel (no gamma parameter)."""

    def __init__(self, alpha=0.5):
        super(MBR_TM_Ridge, self).__init__(alpha=alpha)

    def fit(self, xM_train, yV):
        # Keep the training matrix: predict() rebuilds the kernel between
        # new samples and these rows.
        self.xM_train = xM_train
        kernel_train = j3x.jpyx.calc_tm_sim_M(xM_train)
        super(MBR_TM_Ridge, self).fit(kernel_train, yV)

    def predict(self, xM_test):
        n_train = self.xM_train.shape[0]
        stacked = np.concatenate((self.xM_train, xM_test), axis=0)
        kernel_full = j3x.jpyx.calc_tm_sim_M(stacked)
        # Rows: test samples; columns: similarity to each training sample.
        return super(MBR_TM_Ridge, self).predict(kernel_full[n_train:, :n_train])
class MBR_TM_Lasso(linear_model.Lasso):
    """Lasso counterpart of MBR_TM_Ridge: regression on a Tanimoto kernel."""

    def __init__(self, alpha=1.0, gamma=1):
        # NOTE(review): `gamma` is accepted for signature parity with the
        # similarity-based models but is not used by the Tanimoto kernel.
        # Fixed: the super() call previously referenced MBR_TM_Ridge (a
        # different class), making instantiation raise TypeError.
        super(MBR_TM_Lasso, self).__init__(alpha=alpha)

    def fit(self, xM_train, yV):
        # Keep the training matrix so predict() can rebuild the kernel.
        self.xM_train = xM_train
        A_train = j3x.jpyx.calc_tm_sim_M(xM_train)
        super(MBR_TM_Lasso, self).fit(A_train, yV)

    def predict(self, xM_test):
        # Kernel between test and train rows, sliced from the kernel of the
        # stacked (train + test) matrix.
        xM_all = np.concatenate((self.xM_train, xM_test), axis=0)
        A_all = j3x.jpyx.calc_tm_sim_M(xM_all)
        A_test = A_all[self.xM_train.shape[0]:, :self.xM_train.shape[0]]
        return super(MBR_TM_Lasso, self).predict(A_test)
"""
MBR Sim
Similarityy control MBR
Original MBR does not have a functionality to change gamma,
although SVM has it. It will be considered later on.
"""
class MBR_Ridge(linear_model.Ridge):
    """Ridge regression on a binary-similarity kernel with adjustable gamma."""

    def __init__(self, alpha=0.5, gamma=1):
        self.alpha = alpha
        self.gamma = gamma
        super(MBR_Ridge, self).__init__(alpha=self.alpha)

    def fit(self, xM_train, yV):
        # Keep the training matrix: predict() rebuilds the kernel between
        # new samples and these rows.
        self.xM_train = xM_train
        kernel_train = j3x.jpyx.calc_bin_sim_M(xM_train, gamma=self.gamma)
        super(MBR_Ridge, self).fit(kernel_train, yV)

    def predict(self, xM_test):
        n_train = self.xM_train.shape[0]
        stacked = np.concatenate((self.xM_train, xM_test), axis=0)
        kernel_full = j3x.jpyx.calc_bin_sim_M(stacked, gamma=self.gamma)
        # Rows: test samples; columns: similarity to each training sample.
        return super(MBR_Ridge, self).predict(kernel_full[n_train:, :n_train])
class MBR_Lasso(linear_model.Lasso):
    """Lasso regression on a binary-similarity kernel with adjustable gamma."""

    def __init__(self, alpha=1.0, gamma=1):
        self.alpha = alpha
        self.gamma = gamma
        super(MBR_Lasso, self).__init__(alpha=self.alpha)

    def fit(self, xM_train, yV):
        # Keep the training matrix: predict() rebuilds the kernel between
        # new samples and these rows.
        self.xM_train = xM_train
        kernel_train = j3x.jpyx.calc_bin_sim_M(xM_train, gamma=self.gamma)
        super(MBR_Lasso, self).fit(kernel_train, yV)

    def predict(self, xM_test):
        n_train = self.xM_train.shape[0]
        stacked = np.concatenate((self.xM_train, xM_test), axis=0)
        kernel_full = j3x.jpyx.calc_bin_sim_M(stacked, gamma=self.gamma)
        # Rows: test samples; columns: similarity to each training sample.
        return super(MBR_Lasso, self).predict(kernel_full[n_train:, :n_train])
"""
MBR_Dist
"""
def sim_to_dist(A):
    """Transform a similarity matrix into exp((1 - A) ** 2), elementwise.

    Parameters
    ----------
    A : array_like
        Similarity values (typically in [0, 1], where 1 means identical).

    Returns
    -------
    numpy.ndarray
        exp((1 - A) ** 2), same shape as `A`.

    Fixed: the original started with `A *= -1`, mutating the caller's array
    in place as a side effect. This version leaves the input untouched and
    also accepts integer arrays (the float copy makes the in-place exp valid).
    """
    D = 1.0 - np.asarray(A, dtype=float)
    D = np.power(np.abs(D), 2)
    np.exp(D, D)  # in-place exp on our private copy
    return D
class MBR_Dist_Lasso(linear_model.Lasso):
    """Lasso regression on a distance transform of the binary-similarity kernel."""

    def __init__(self, alpha=1.0, gamma=1):
        self.alpha = alpha
        self.gamma = gamma
        super(MBR_Dist_Lasso, self).__init__(alpha=self.alpha)

    def fit(self, xM_train, yV):
        # Keep the training matrix: predict() rebuilds the kernel between
        # new samples and these rows.
        self.xM_train = xM_train
        kernel_train = j3x.jpyx.calc_bin_sim_M(xM_train, gamma=self.gamma)
        super(MBR_Dist_Lasso, self).fit(sim_to_dist(kernel_train), yV)

    def predict(self, xM_test):
        n_train = self.xM_train.shape[0]
        stacked = np.concatenate((self.xM_train, xM_test), axis=0)
        kernel_full = j3x.jpyx.calc_bin_sim_M(stacked, gamma=self.gamma)
        kernel_test = kernel_full[n_train:, :n_train]
        # Similarities are mapped to distances before regression.
        return super(MBR_Dist_Lasso, self).predict(sim_to_dist(kernel_test))
|
|
"""
TargetSelection class
"""
from itertools import chain
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle, SkyCoord
from astropy.table import Table, vstack
from astropy.time import Time
from easyquery import Query, QueryMaker
from ..hosts import HostCatalog
from ..objects import ObjectCatalog, get_unique_objids
from ..observing.aat import get_gaia_guidestars, write_fld_file
from ..utils import add_skycoord
from .assign_targeting_score import (COLUMNS_USED, assign_targeting_score_v1,
assign_targeting_score_v2plus,
assign_targeting_score_v3)
__all__ = ["TargetSelection", "prepare_mmt_catalog", "prepare_aat_catalog"]
class TargetSelection(object):
    """
    Build and cache per-host target catalogs with targeting scores.

    Parameters
    ----------
    database: SAGA.Database object

    Returns
    -------
    target_selection : SAGA.TargetSelection object

    Examples
    --------
    >>> import SAGA
    >>> from SAGA import ObjectCuts as C
    >>> saga_database = SAGA.Database('/path/to/SAGA/Dropbox')
    >>> saga_targets = SAGA.TargetSelection(saga_database, gmm_parameters='gmm_parameters_no_outlier')
    >>> hosts = [161174, 52773, 163956, 69028, 144953, 165082, 165707, 145729, 165980, 147606]
    >>> saga_targets.load_object_catalogs(hosts, (C.gri_cut & C.fibermag_r_cut & C.is_galaxy & C.is_clean))
    >>> score_bins = [150, 200, 300, 400, 500, 600, 700, 800, 900, 1000]
    >>> d = np.array([np.searchsorted(base['TARGETING_SCORE'], score_bins) for base in saga_targets.compile_target_list('iter')])
    """

    def __init__(
        self,
        database,
        host_catalog_class=HostCatalog,
        cuts=None,
        additional_columns=None,
        assign_targeting_score_func=None,
        gmm_parameters=None,
        manual_selected_objids=None,
        version=None,
        assign_targeting_score_kwargs=None,
        host_catalog_instance=None,
        object_catalog_instance=None,
    ):
        self._database = database
        self._version = version
        self._build_version, _ = self._database.resolve_base_version(self._version)

        # Reuse caller-provided catalog instances when supplied; otherwise
        # construct fresh ones from the database.
        if host_catalog_instance is not None:
            if not isinstance(host_catalog_instance, host_catalog_class):
                raise ValueError("`host_catalog_instance` must be an instance of `host_catalog_class`.")
            self._host_catalog = host_catalog_instance
        else:
            self._host_catalog = host_catalog_class(self._database)

        if object_catalog_instance is not None:
            if not isinstance(object_catalog_instance, ObjectCatalog):
                raise ValueError("`object_catalog_instance` must be an instance of `ObjectCatalog`.")
            self._object_catalog = object_catalog_instance
        else:
            self._object_catalog = ObjectCatalog(self._database, host_catalog_class, self._host_catalog)

        # Cache of per-host target catalogs, keyed by host id string.
        self.target_catalogs = dict()

        # Choose the scoring function for the catalog build version unless the
        # caller supplies an explicit one.
        if assign_targeting_score_func is None:
            if self._build_version == 1:
                self.assign_targeting_score = assign_targeting_score_v1
            elif self._build_version == 2:
                self.assign_targeting_score = assign_targeting_score_v2plus
            else:
                self.assign_targeting_score = assign_targeting_score_v3
        else:
            self.assign_targeting_score = assign_targeting_score_func
        if not callable(self.assign_targeting_score):
            raise TypeError("*assign_targeting_score_func* must be callable")

        if assign_targeting_score_kwargs is None:
            self.assign_targeting_score_kwargs = dict()
        else:
            self.assign_targeting_score_kwargs = dict(assign_targeting_score_kwargs)

        # GMM parameters are not loaded by default for v3+ builds.
        if gmm_parameters is None and self._build_version >= 3:
            self._gmm_parameters = None
        else:
            self._gmm_parameters = self._load_gmm_parameters(gmm_parameters)

        # Manually-selected targets: read OBJIDs from the database when the
        # key resolves; otherwise fall back to whatever was passed in
        # (e.g. an explicit objid array, or None).
        try:
            self._manual_selected_objids = get_unique_objids(
                self._database[manual_selected_objids or "manual_targets"].read()["OBJID"]
            )
        except (TypeError, KeyError):
            self._manual_selected_objids = manual_selected_objids

        if self._build_version >= 3:
            remove_list_keys = (("decals_dr9", "OBJID"),)
        else:
            remove_list_keys = (
                ("sdss", "SDSS ID"),
                ("des", "DES_OBJID"),
                ("decals", "decals_objid"),
                ("decals_dr8", "OBJID"),
                ("decals_dr9", "OBJID"),
            )

        # Merge remove lists per survey: several list names can map to the
        # same survey prefix (e.g. decals, decals_dr8, decals_dr9 -> decals).
        self._remove_lists = {}
        for list_name, col in remove_list_keys:
            survey = list_name.partition("_")[0]
            try:
                d = self._database["{}_remove".format(list_name)]
            except KeyError:
                continue
            objids = get_unique_objids(d.read()[col])
            if len(objids):
                if survey in self._remove_lists:
                    # Fixed: this previously overwrote the whole dict
                    # (`self._remove_lists = np.concatenate(...)`) instead of
                    # extending the per-survey entry.
                    self._remove_lists[survey] = np.concatenate((self._remove_lists[survey], objids))
                else:
                    self._remove_lists[survey] = objids

        self._cuts = cuts

        if self._build_version < 2:
            # v1 catalogs need an explicit column list for loading.
            self.columns = list(
                set(
                    chain(
                        ("OBJID", "RA", "DEC", "HOST_NSAID"),
                        ("PHOTPTYPE", "PSFMAG_U", "PSFMAG_G", "PSFMAG_R"),
                        COLUMNS_USED,
                        additional_columns or [],
                    )
                )
            )
        else:
            self.columns = None
            if additional_columns is not None:
                raise ValueError("`additional_columns` is not supported for version > 1")

    def _load_gmm_parameters(self, gmm_parameters):
        """Resolve `gmm_parameters` (None, str, dict, or preloaded object).

        None loads every ``gmm_parameters_*`` entry; a dict is resolved
        recursively; a string is looked up with or without the prefix.
        NOTE(review): an unknown string name falls through and returns None
        implicitly — confirm callers rely on that.
        """
        prefix = "gmm_parameters_"
        keys = [k[len(prefix):] for k in self._database.keys() if not isinstance(k, tuple) and k.startswith(prefix)]
        if gmm_parameters is None:
            return {k: self._database[prefix + k].read() for k in keys}
        if isinstance(gmm_parameters, dict):
            return {k: self._load_gmm_parameters(v) for k, v in gmm_parameters.items()}
        try:
            if gmm_parameters.startswith(prefix):
                gmm_parameters = gmm_parameters[len(prefix):]
        except AttributeError:
            # Not a string: assume it is already a loaded parameter object.
            return gmm_parameters
        if gmm_parameters in keys:
            return self._database[prefix + gmm_parameters].read()

    def build_target_catalogs(
        self,
        hosts=None,
        return_as=None,
        columns=None,
        reload_base=False,
        recalculate_score=False,
    ):
        """
        build target catalogs
        Parameters
        ----------
        hosts : int, str, list, None, optional
            host names/IDs or a list of host names/IDs or short-hand names like
            "paper1" or "paper1_complete"
        return_as : str, optional
            If set to None (default), no return
            If set to 'dict', return as a dict
            If set to 'list', return a list that contains all tables
            If set to 'stack', return a stacked table
            If set to 'iter', return an iterator for looping over hosts
            If set to 'items', return an iterator like dict.items()
        columns : list, optional
            If set, only return a subset of columns
        reload_base : bool, optional
            If set to True, reload the base catalog even when cached
        recalculate_score : bool, optional
            If set to True, re-run scoring even if TARGETING_SCORE exists
        """
        return_as = (return_as if return_as else "none").lower()
        # Only the first letter is significant: none/dict/stack/list/iter(items).
        if return_as[0] not in "ndsli":
            raise ValueError('`return_as` should be None, "dict", "list", "stacked", or "iter"')

        host_ids = self._host_catalog.resolve_id(hosts, "string")

        for host_id in host_ids:
            if reload_base or host_id not in self.target_catalogs:
                self.target_catalogs[host_id] = self._object_catalog.load_single(
                    host_id,
                    cuts=self._cuts,
                    columns=self.columns,
                    version=self._version,
                    add_skycoord=False,
                )
            # Score in place; skipped when a cached catalog already has scores.
            if recalculate_score or "TARGETING_SCORE" not in self.target_catalogs[host_id].colnames:
                self.assign_targeting_score(
                    self.target_catalogs[host_id],
                    manual_selected_objids=self._manual_selected_objids,
                    gmm_parameters=self._gmm_parameters,
                    remove_lists=self._remove_lists,
                    **self.assign_targeting_score_kwargs,
                )

        if return_as[0] == "n":
            return

        output_iter = (
            self.target_catalogs[host_id][columns] if columns else self.target_catalogs[host_id] for host_id in host_ids
        )
        if return_as.startswith("item"):
            return zip(host_ids, output_iter)
        if return_as[0] == "d":
            return dict(zip(host_ids, output_iter))
        if return_as[0] == "i":
            return output_iter
        if return_as[0] == "l":
            return list(output_iter)
        if return_as[0] == "s":
            out = vstack(list(output_iter), "outer")
            if out.masked:
                # Give masked int/bool columns sensible fill values before
                # converting to a plain (unmasked) table.
                for name, (dtype, _) in out.dtype.fields.items():
                    if dtype.kind == "i":
                        out[name].fill_value = -1
                    if dtype.kind == "b":
                        out[name].fill_value = False
            return out.filled()

    def clear_target_catalogs(self):
        """
        clear target catalog cache
        """
        self.target_catalogs = dict()
def prepare_mmt_catalog(
    target_catalog,
    write_to=None,
    verbose=True,
    remove_outskirts=1.1,
    targeting_score_threshold=900,
    flux_star_kwargs=None,
):
    """
    Prepare MMT target catalog.

    Parameters
    ----------
    target_catalog : astropy.table.Table
        Need to have `TARGETING_SCORE` column.
        You can use `TargetSelection.build_target_catalogs` to generate `target_catalog`
    write_to : str, optional
        If set, it will write the catalog in MMT format to `write_to`.
    verbose : bool, optional
        If set to True (default), print out useful information
    remove_outskirts : bool or float, optional (default: 1.1)
        If truthy, remove targets outside of both `300 * remove_outskirts` kpc
        AND `30 * remove_outskirts` arcmin (True is treated as 1.1).
    targeting_score_threshold : int, optional (default: 900)
        Targets with a score number higher than this value (i.e., priority lower than this value)
        will be excluded.
    flux_star_kwargs : dict or None, optional
        min_dist_to_target : 20 (arcsec)
        rank : 5
        limit_to : 100

    Returns
    -------
    mmt_target_catalog : astropy.table.Table

    Examples
    --------
    >>> import SAGA
    >>> from SAGA.targets import prepare_mmt_catalog
    >>> saga_database = SAGA.Database('/path/to/SAGA/Dropbox')
    >>> saga_targets = SAGA.TargetSelection(saga_database, gmm_parameters='gmm_parameters_no_outlier')
    >>> mmt18_hosts = [161174, 52773, 163956, 69028, 144953, 165082, 165707, 145729, 165980, 147606]
    >>> for host_id, target_catalog in saga_targets.build_target_catalogs(mmt18_hosts, return_as='dict').items():
    >>>     print('Working host NSA', host_id)
    >>>     SAGA.targets.prepare_mmt_catalog(target_catalog, '/home/yymao/Downloads/mmt_nsa{}.cat'.format(host_id))
    >>>     print()

    Notes
    -----
    See https://www.cfa.harvard.edu/mmti/hectospec/hecto_software_manual.htm#4.1.1 for required format
    """
    if "TARGETING_SCORE" not in target_catalog.colnames:
        # Fixed: this exception was previously *returned* instead of raised,
        # so callers silently received a KeyError instance as the result.
        raise KeyError(
            '`target_catalog` does not have column "TARGETING_SCORE".'
            "Have you run `compile_target_list` or `assign_targeting_score`?"
        )

    if remove_outskirts:
        remove_outskirts = 1.1 if remove_outskirts is True else float(remove_outskirts)
        # Keep objects within either the physical or the angular radius.
        target_catalog = (Query(f"RHOST_KPC < {300 * remove_outskirts}") | Query(f"RHOST_ARCM < {30 * remove_outskirts}")).filter(target_catalog)

    is_target = Query("TARGETING_SCORE >= 0", "TARGETING_SCORE < {}".format(targeting_score_threshold))

    # Star photometry columns differ by survey; derive uniform r/gr/ug columns.
    if "PHOTPTYPE" in target_catalog.colnames:
        is_star = Query("PHOTPTYPE == 6", "REMOVE == -1")
        target_catalog["r_star"] = target_catalog["PSFMAG_R"]
        target_catalog["gr_star"] = target_catalog["PSFMAG_G"] - target_catalog["PSFMAG_R"]
        target_catalog["ug_star"] = target_catalog["PSFMAG_U"] - target_catalog["PSFMAG_G"]
    elif "OBJID_sdss" in target_catalog.colnames:
        is_star = Query("OBJID_sdss != -1", "morphology_info_sdss == 6", "REMOVE_sdss == 0")
        target_catalog["r_star"] = target_catalog["r_mag_sdss"]
        target_catalog["gr_star"] = target_catalog["g_mag_sdss"] - target_catalog["r_mag_sdss"]
        target_catalog["ug_star"] = target_catalog["u_mag_sdss"] - target_catalog["g_mag_sdss"]
    elif "REF_CAT" in target_catalog.colnames:
        is_star = Query(
            "is_galaxy == 0",
            "REMOVE == 0",
            Query("morphology_info == 80") | Query("(morphology_info >> 1) % 2 == 1"),
            QueryMaker.startswith("REF_CAT", "G") | QueryMaker.startswith("REF_CAT", "T"),
            "g_mag < 19",
            "r_mag < 19",
            "z_mag < 19",
        )
        # Linear fits mapping (1, g, r, z) onto SDSS-like star magnitudes/colors.
        A = np.column_stack([np.ones(len(target_catalog)), *target_catalog[["g_mag", "r_mag", "z_mag"]].itercols()])
        target_catalog["r_star"] = A @ np.array([0.04942244, 0.10494481, 0.87591029, 0.01724201])
        target_catalog["gr_star"] = A @ np.array([0.09917215, 0.73569233, -0.38515815, -0.35320598])
        target_catalog["ug_star"] = A @ np.array([1.35222044, 1.18061772, -1.60904784, 0.38804513])
        del A
    else:
        is_star = Query("is_galaxy == 0", "REMOVE == 0")
        target_catalog["r_star"] = target_catalog["r_mag"]
        target_catalog["gr_star"] = target_catalog["g_mag"] - target_catalog["r_mag"]
        target_catalog["ug_star"] = target_catalog["u_mag"] - target_catalog["g_mag"]

    is_guide_star = is_star & Query("r_star >= 14", "r_star < 15")
    # Flux standards: brightness window plus a color-box cut.
    is_flux_star = is_star & Query("r_star >= 17", "r_star < 18")
    is_flux_star &= Query("ug_star >= 0.6", "ug_star < 1.2")
    is_flux_star &= Query("gr_star >= 0", "gr_star < 0.6")
    is_flux_star &= Query("gr_star >= 0.75 * ug_star - 0.45")

    target_catalog = (is_target | is_guide_star | is_flux_star).filter(target_catalog)
    target_catalog.sort(["TARGETING_SCORE", "r_mag"])

    # MMT ranks: 1 = flux star (initially), 2-8 = science targets, 99 = guide.
    target_catalog["rank"] = target_catalog["TARGETING_SCORE"] // 100
    target_catalog["rank"][Query("rank < 2").mask(target_catalog)] = 2
    target_catalog["rank"][Query("rank > 8").mask(target_catalog)] = 8
    target_catalog["rank"][is_flux_star.mask(target_catalog)] = 1
    # set guide star to rank 99 just for sorting
    target_catalog["rank"][is_guide_star.mask(target_catalog)] = 99

    is_guide_star = Query("rank == 99")
    is_flux_star = Query("rank == 1")
    is_target = Query("rank >= 2", "rank <= 8")

    if flux_star_kwargs is None:
        flux_star_kwargs = {}
    fs_min_dist_to_target = float(flux_star_kwargs.get("min_dist_to_target", 20))  # arcsec
    fs_max_dist_to_center = float(flux_star_kwargs.get("max_dist_to_center", 60))  # arcmin
    fs_rank = int(flux_star_kwargs.get("rank", 5))
    fs_limit = int(flux_star_kwargs.get("limit_to", 100))

    # move flux star rank
    if fs_rank < 1 or fs_rank > 8:
        raise ValueError("not a valid rank value for flux stars")
    if fs_rank > 1:
        # Shift science ranks down by one and slot flux stars in at fs_rank.
        target_catalog["rank"][is_flux_star.mask(target_catalog)] = 0
        target_catalog["rank"][Query("rank >= 2", "rank <= {}".format(fs_rank)).mask(target_catalog)] -= 1
        target_catalog["rank"][Query("rank == 0").mask(target_catalog)] = fs_rank
        is_flux_star = Query("rank == {}".format(fs_rank))
        is_target = Query("rank >= 1", "rank <= 8", "rank != {}".format(fs_rank))

    if fs_min_dist_to_target > 0:
        # Drop flux stars that sit too close to a science target (fiber clash).
        target_catalog = add_skycoord(target_catalog)
        flux_star_indices = np.flatnonzero(is_flux_star.mask(target_catalog))
        is_target_mask = is_target.mask(target_catalog)
        sep = (
            target_catalog["coord"][flux_star_indices]
            .match_to_catalog_sky(target_catalog["coord"][is_target_mask])[1]
            .arcsec
        )
        target_catalog["rank"][flux_star_indices[sep < fs_min_dist_to_target]] = 0
        target_catalog = Query("rank > 0").filter(target_catalog)
        del target_catalog["coord"]

    if fs_max_dist_to_center > 0:
        to_remove = Query(is_flux_star, "RHOST_ARCM > {}".format(fs_max_dist_to_center))
        target_catalog = (~to_remove).filter(target_catalog)

    if fs_limit > 0 and is_flux_star.count(target_catalog) > fs_limit:
        # Too many flux stars: keep a reproducible random subsample that
        # mildly prefers stars farther from the host center.
        fs_idx = np.flatnonzero(is_flux_star.mask(target_catalog))
        max_r = target_catalog["RHOST_ARCM"][fs_idx].max()
        s = np.random.RandomState(fs_limit).rand(len(fs_idx)) / ((target_catalog["RHOST_ARCM"][fs_idx] / max_r) ** 0.25)
        target_catalog["rank"][fs_idx[s.argsort()[fs_limit:]]] = 0
        target_catalog = Query("rank > 0").filter(target_catalog)

    if verbose:
        print(
            "host diameter in deg =",
            np.rad2deg(np.arcsin(0.3 / target_catalog["HOST_DIST"][0])) * 2,
        )
        print("flux star ranked at =", fs_rank)
        print("# of guide stars =", is_guide_star.count(target_catalog))
        print("# of total targets =", (is_flux_star | is_target).count(target_catalog))
        print("# of flux star targets =", is_flux_star.count(target_catalog))
        print("# of galaxy targets =", is_target.count(target_catalog))
        for rank in range(1, 9):
            print(
                "# of rank-{} targets =".format(rank),
                Query("rank == {}".format(rank)).count(target_catalog),
            )

    target_catalog["type"] = "TARGET"
    target_catalog["type"][is_guide_star.mask(target_catalog)] = "guide"

    target_catalog.rename_column("RA", "ra")
    target_catalog.rename_column("DEC", "dec")
    target_catalog.rename_column("OBJID", "object")
    target_catalog.rename_column("r_mag", "mag")

    target_catalog.sort(["rank", "TARGETING_SCORE", "mag"])
    target_catalog = target_catalog[["ra", "dec", "object", "rank", "type", "mag"]]

    if write_to:
        if verbose:
            print("Writing to {}".format(write_to))
        if not write_to.endswith(".cat"):
            print("Warning: filename should end with '.cat'")
        with open(write_to, "w") as fh:
            fh.write("\t".join(target_catalog.colnames) + "\n")
            # the MMT format is odd and *requires* "---"'s in the second header line
            fh.write("\t".join(("-" * len(s) for s in target_catalog.colnames)) + "\n")
            target_catalog.write(
                fh,
                delimiter="\t",
                format="ascii.fast_no_header",
                formats={
                    "ra": lambda x: Angle(x, "deg")
                    .wrap_at(360 * u.deg)  # pylint: disable=no-member
                    .to_string("hr", sep=":", precision=3),  # pylint: disable=E1101
                    "dec": lambda x: Angle(x, "deg").to_string("deg", sep=":", precision=3),
                    "mag": "%.2f",
                    # Rank 99 (guide stars) is written as an empty field.
                    "rank": lambda x: "" if int(x) == 99 else "{:d}".format(int(x)),
                },
            )
    return target_catalog
def prepare_aat_catalog(
target_catalog,
write_to=None,
verbose=True,
flux_star_removal_threshold=20.0 * u.arcsec,
flux_star_r_range=(17, 17.7),
flux_star_gr_range=(0.1, 0.4),
flux_star_max=5,
sky_fiber_void_radius=10.0 * u.arcsec,
sky_fiber_needed=100,
sky_fiber_max=1.1 * u.deg,
sky_fiber_host_rvir_threshold=0.7 * u.deg,
sky_fiber_radial_adjustment=2.0,
targeting_score_threshold=900,
offset_ra=None,
offset_dec=None,
seed=None,
obstime=None,
gaia_catalog=None,
guidestar_max=100,
):
"""
Prepare AAT target catalog.
If the host's radius is less than `sky_fiber_host_rvir_threshold`,
all sky fiber will be distributed between `sky_fiber_max` and host's radius.
Otherwise, first fill the annulus between `sky_fiber_max` and host's radius,
then distribute the rest within the host (but prefer outer region,
as controlled by `sky_fiber_radial_adjustment`)
Format needed:
# TargetName(unique for header) RA(h m s) Dec(d m s) TargetType(Program,Fiducial,Sky) Priority(9 is highest) Magnitude 0 Notes
1237648721248518305 14 42 17.79 -0 12 05.95 P 2 22.03 0 magcol=fiber2mag_r, model_r=20.69
1237648721786045341 14 48 37.16 +0 21 33.81 P 1 21.56 0 magcol=fiber2mag_r, model_r=20.55
"""
# pylint: disable=no-member
if "TARGETING_SCORE" not in target_catalog.colnames:
return KeyError(
'`target_catalog` does not have column "TARGETING_SCORE".'
"Have you run `compile_target_list` or `assign_targeting_score`?"
)
if seed is None:
seed = target_catalog["HOST_PGC"][0]
if not isinstance(flux_star_removal_threshold, u.Quantity):
flux_star_removal_threshold = flux_star_removal_threshold * u.arcsec
if not isinstance(sky_fiber_void_radius, u.Quantity):
sky_fiber_void_radius = sky_fiber_void_radius * u.arcsec
if not isinstance(sky_fiber_max, u.Quantity):
sky_fiber_max = sky_fiber_max * u.deg
if not isinstance(sky_fiber_host_rvir_threshold, u.Quantity):
sky_fiber_host_rvir_threshold = sky_fiber_host_rvir_threshold * u.deg
host_ra = target_catalog["HOST_RA"][0] * u.deg
host_dec = target_catalog["HOST_DEC"][0] * u.deg
host_dist = target_catalog["HOST_DIST"][0]
host_rvir = np.arcsin(0.3 / host_dist) * u.rad
# `host` will be used during file writing
host = dict(HOSTID=target_catalog["HOSTID"][0], coord=SkyCoord(host_ra, host_dec))
annulus_actual = sky_fiber_max ** 2.0 - host_rvir ** 2.0
annulus_wanted = sky_fiber_max ** 2.0 - sky_fiber_host_rvir_threshold ** 2.0
if annulus_actual < 0:
raise ValueError("`sky_fiber_max` too small, this host is larger than that!")
if annulus_wanted < 0:
raise ValueError("`sky_fiber_max` must be larger than `sky_fiber_host_rvir_threshold`!")
def _gen_dist_rand(seed_this, size):
U = np.random.RandomState(seed_this).rand(size)
return np.sqrt(U * annulus_actual + host_rvir ** 2.0)
if annulus_actual < annulus_wanted:
def gen_dist_rand(seed_this, size):
size_out = int(np.around(size * annulus_actual / annulus_wanted))
size_in = size - size_out
dist_rand_out = _gen_dist_rand(seed_this, size_out)
index = 1.0 / (sky_fiber_radial_adjustment + 2.0)
dist_rand_in = (np.random.RandomState(seed_this + 1).rand(size_in) ** index) * host_rvir
return np.concatenate([dist_rand_out.to_value("deg"), dist_rand_in.to_value("deg")]) * u.deg
else:
gen_dist_rand = _gen_dist_rand
n_needed = sky_fiber_needed
ra_sky = []
dec_sky = []
target_catalog = add_skycoord(target_catalog)
while n_needed > 0:
n_rand = int(np.ceil(n_needed * 1.1))
dist_rand = gen_dist_rand(seed, n_rand)
theta_rand = np.random.RandomState(seed + 1).rand(n_rand) * (2.0 * np.pi)
ra_rand = np.remainder(host_ra + dist_rand * np.cos(theta_rand), 360.0 * u.deg)
dec_rand = host_dec + dist_rand * np.sin(theta_rand)
ok_mask = (dec_rand >= -90.0 * u.deg) & (dec_rand <= 90.0 * u.deg)
ra_rand = ra_rand[ok_mask]
dec_rand = dec_rand[ok_mask]
sky_sc = SkyCoord(ra_rand, dec_rand)
sep = sky_sc.match_to_catalog_sky(target_catalog["coord"])[1]
ok_mask = sep > sky_fiber_void_radius
n_needed -= np.count_nonzero(ok_mask)
ra_sky.append(ra_rand[ok_mask].to_value("deg"))
dec_sky.append(dec_rand[ok_mask].to_value("deg"))
seed += np.random.RandomState(seed + 2).randint(100, 200)
del ra_rand, dec_rand, sky_sc, sep, ok_mask
ra_sky = np.concatenate(ra_sky)[:sky_fiber_needed]
dec_sky = np.concatenate(dec_sky)[:sky_fiber_needed]
gaia_guidestars = None
if gaia_catalog is not None:
gaia_guidestars = get_gaia_guidestars(object_catalog=target_catalog, gaia_catalog=gaia_catalog)
if len(gaia_guidestars) > guidestar_max:
idx = np.random.RandomState(seed + 3).choice(len(gaia_guidestars), guidestar_max, False)
gaia_guidestars = gaia_guidestars[idx]
is_target = Query("TARGETING_SCORE >= 0", "TARGETING_SCORE < {}".format(targeting_score_threshold))
is_star = Query(~Query("is_galaxy"), "REMOVE == 0")
if "morphology_info_sdss" in target_catalog.colnames:
is_star &= Query("morphology_info_sdss == 6")
if "morphology_info_des" in target_catalog.colnames:
is_star &= Query("morphology_info_des == 0")
is_flux_star = Query(
is_star,
"r_mag >= {}".format(flux_star_r_range[0]),
"r_mag < {}".format(flux_star_r_range[1]),
)
is_flux_star &= Query(
"gr >= {}".format(flux_star_gr_range[0]),
"gr < {}".format(flux_star_gr_range[1]),
)
target_catalog = (is_target | is_flux_star).filter(target_catalog)
target_catalog["Priority"] = target_catalog["TARGETING_SCORE"] // 100
target_catalog["Priority"][Query("Priority < 1").mask(target_catalog)] = 1
target_catalog["Priority"][Query("Priority > 8").mask(target_catalog)] = 8
target_catalog["Priority"] = 9 - target_catalog["Priority"]
target_catalog["Priority"][(~is_target).mask(target_catalog)] = 0
flux_star_sc = is_flux_star.filter(target_catalog, "coord")
target_sc = is_target.filter(target_catalog, "coord")
sep = flux_star_sc.match_to_catalog_sky(target_sc)[1]
flux_star_indices = np.flatnonzero(is_flux_star.mask(target_catalog))
flux_star_indices = flux_star_indices[sep > flux_star_removal_threshold]
if len(flux_star_indices) > flux_star_max:
flux_star_indices = np.random.RandomState(seed + 4).choice(flux_star_indices, flux_star_max, False)
target_catalog["Priority"][flux_star_indices] = 9
target_catalog = Query("Priority > 0").filter(target_catalog)
n_flux_star = Query("Priority == 9").count(target_catalog)
del flux_star_indices, flux_star_sc, target_sc, sep, target_catalog["coord"]
target_catalog["TargetType"] = "P"
target_catalog["0"] = 0
target_catalog["Notes"] = "targets"
target_catalog["Notes"][is_flux_star.mask(target_catalog)] = "flux"
target_catalog.rename_column("DEC", "Dec")
target_catalog.rename_column("OBJID", "TargetName")
target_catalog.rename_column("r_mag", "Magnitude")
target_catalog.sort(["TARGETING_SCORE", "Magnitude"])
target_catalog = target_catalog[["TargetName", "RA", "Dec", "TargetType", "Priority", "Magnitude", "0", "Notes"]]
sky_catalog = Table(
{
"TargetName": np.arange(len(ra_sky)),
"RA": ra_sky,
"Dec": dec_sky,
"TargetType": np.repeat("S", len(ra_sky)),
"Priority": np.repeat(9, len(ra_sky)),
"Magnitude": np.repeat(99.0, len(ra_sky)),
"0": np.repeat(0, len(ra_sky)),
"Notes": np.repeat("sky", len(ra_sky)),
}
)
to_stack = [target_catalog, sky_catalog]
if gaia_guidestars is not None:
to_stack.append(gaia_guidestars)
target_catalog = vstack(to_stack)
del to_stack
if offset_ra:
target_catalog["RA"] -= float(offset_ra)
if offset_dec:
target_catalog["Dec"] -= float(offset_dec)
if verbose:
print("# of flux stars =", n_flux_star)
print("# of sky fibers =", len(sky_catalog))
if gaia_guidestars is not None:
print("# of guide stars =", len(gaia_guidestars))
for rank in range(1, 10):
print(
"# of Priority={} targets =".format(rank),
Query("Priority == {}".format(rank)).count(target_catalog),
)
if write_to:
if verbose:
print("Writing to {}".format(write_to))
if obstime is None:
obstime = Time("2020-01-01")
write_fld_file(target_catalog, host, obstime, write_to)
return target_catalog
|
|
# Copyright (c) 2016, Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
from openstackclient.identity import common as identity_common
from openstackclient.network import common
LOG = logging.getLogger(__name__)
def _get_columns(item):
    """Return (display columns, attribute columns) for a QoS policy resource."""
    # Map SDK attribute names onto the names shown to the user.
    attr_map = {
        'is_shared': 'shared',
    }
    # Attributes that must never be displayed.
    invisible = ['location']
    return utils.get_osc_show_columns_for_sdk_resource(
        item, attr_map, invisible)
def _get_attrs(client_manager, parsed_args):
    """Translate parsed CLI arguments into QoS policy API attributes.

    Only attributes the user actually supplied are included, so the same
    helper serves the create, set and list commands.
    """
    attrs = {}

    name = getattr(parsed_args, 'name', None)
    if name is not None:
        attrs['name'] = name

    description = getattr(parsed_args, 'description', None)
    if description is not None:
        attrs['description'] = description

    if parsed_args.share:
        attrs['shared'] = True
    if parsed_args.no_share:
        attrs['shared'] = False

    # NOTE(ralonsoh): 'default' and 'no_default' parameters are defined only in
    # create and set commands context only.
    if getattr(parsed_args, 'default', False):
        attrs['is_default'] = True
    if getattr(parsed_args, 'no_default', False):
        attrs['is_default'] = False

    # NOTE(ralonsoh): 'project' parameter is defined only in create and list
    # commands context only.
    project = getattr(parsed_args, 'project', None)
    if project is not None:
        attrs['project_id'] = identity_common.find_project(
            client_manager.identity,
            project,
            parsed_args.project_domain,
        ).id

    return attrs
# TODO(abhiraut): Use the SDK resource mapped attribute names once the
# OSC minimum requirements include SDK 1.0.
class CreateNetworkQosPolicy(command.ShowOne,
                             common.NeutronCommandWithExtraArgs):
    """CLI command that creates a network QoS policy and shows the result."""

    _description = _("Create a QoS policy")

    def get_parser(self, prog_name):
        """Build the argument parser for the create command."""
        parser = super(CreateNetworkQosPolicy, self).get_parser(prog_name)
        parser.add_argument(
            'name',
            metavar='<name>',
            help=_("Name of QoS policy to create")
        )
        parser.add_argument(
            '--description',
            metavar='<description>',
            help=_("Description of the QoS policy")
        )
        # --share and --no-share are mutually exclusive; leaving both unset
        # (default=None on --share) lets the server apply its own default.
        share_group = parser.add_mutually_exclusive_group()
        share_group.add_argument(
            '--share',
            action='store_true',
            default=None,
            help=_("Make the QoS policy accessible by other projects")
        )
        share_group.add_argument(
            '--no-share',
            action='store_true',
            help=_("Make the QoS policy not accessible by other projects "
                   "(default)")
        )
        parser.add_argument(
            '--project',
            metavar='<project>',
            help=_("Owner's project (name or ID)")
        )
        identity_common.add_project_domain_option_to_parser(parser)
        # --default / --no-default control whether this becomes the
        # default QoS policy for the project.
        default_group = parser.add_mutually_exclusive_group()
        default_group.add_argument(
            '--default',
            action='store_true',
            help=_("Set this as a default network QoS policy"),
        )
        default_group.add_argument(
            '--no-default',
            action='store_true',
            help=_("Set this as a non-default network QoS policy"),
        )
        return parser

    def take_action(self, parsed_args):
        """Create the policy and return (display columns, values) for ShowOne."""
        client = self.app.client_manager.network
        attrs = _get_attrs(self.app.client_manager, parsed_args)
        # Merge in any free-form --extra-property values not modeled above.
        attrs.update(
            self._parse_extra_properties(parsed_args.extra_properties))
        obj = client.create_qos_policy(**attrs)
        display_columns, columns = _get_columns(obj)
        data = utils.get_item_properties(obj, columns, formatters={})
        return (display_columns, data)
class DeleteNetworkQosPolicy(command.Command):
    """CLI command that deletes one or more QoS policies by name or ID."""

    _description = _("Delete Qos Policy(s)")

    def get_parser(self, prog_name):
        parser = super(DeleteNetworkQosPolicy, self).get_parser(prog_name)
        parser.add_argument(
            'policy',
            metavar="<qos-policy>",
            nargs="+",
            help=_("QoS policy(s) to delete (name or ID)")
        )
        return parser

    def take_action(self, parsed_args):
        """Delete each policy, logging failures, then raise if any failed."""
        client = self.app.client_manager.network
        failures = 0
        for name_or_id in parsed_args.policy:
            try:
                policy_obj = client.find_qos_policy(name_or_id,
                                                    ignore_missing=False)
                client.delete_qos_policy(policy_obj)
            except Exception as e:
                failures += 1
                LOG.error(_("Failed to delete QoS policy "
                            "name or ID '%(qos_policy)s': %(e)s"),
                          {'qos_policy': name_or_id, 'e': e})
        if failures:
            msg = (_("%(result)s of %(total)s QoS policies failed "
                     "to delete.") % {'result': failures,
                                      'total': len(parsed_args.policy)})
            raise exceptions.CommandError(msg)
# TODO(abhiraut): Use only the SDK resource mapped attribute names once the
# OSC minimum requirements include SDK 1.0.
class ListNetworkQosPolicy(command.Lister):
    """CLI command that lists QoS policies, optionally filtered."""

    _description = _("List QoS policies")

    def get_parser(self, prog_name):
        parser = super(ListNetworkQosPolicy, self).get_parser(prog_name)
        parser.add_argument(
            '--project',
            metavar='<project>',
            help=_("List qos policies according to their project (name or ID)")
        )
        identity_common.add_project_domain_option_to_parser(parser)
        shared_group = parser.add_mutually_exclusive_group()
        shared_group.add_argument(
            '--share',
            action='store_true',
            help=_("List qos policies shared between projects")
        )
        shared_group.add_argument(
            '--no-share',
            action='store_true',
            help=_("List qos policies not shared between projects")
        )
        return parser

    def take_action(self, parsed_args):
        """Query matching policies and yield rows lazily for the Lister."""
        network_client = self.app.client_manager.network
        columns = ('id', 'name', 'is_shared', 'is_default', 'project_id')
        column_headers = ('ID', 'Name', 'Shared', 'Default', 'Project')
        filters = _get_attrs(self.app.client_manager, parsed_args)
        rows = (utils.get_item_properties(policy, columns, formatters={})
                for policy in network_client.qos_policies(**filters))
        return (column_headers, rows)
# TODO(abhiraut): Use the SDK resource mapped attribute names once the
# OSC minimum requirements include SDK 1.0.
class SetNetworkQosPolicy(common.NeutronCommandWithExtraArgs):
    """CLI command that updates properties of an existing QoS policy."""

    _description = _("Set QoS policy properties")

    def get_parser(self, prog_name):
        """Build the argument parser for the set command."""
        parser = super(SetNetworkQosPolicy, self).get_parser(prog_name)
        parser.add_argument(
            'policy',
            metavar="<qos-policy>",
            help=_("QoS policy to modify (name or ID)")
        )
        parser.add_argument(
            '--name',
            metavar="<name>",
            help=_('Set QoS policy name')
        )
        parser.add_argument(
            '--description',
            metavar='<description>',
            help=_("Description of the QoS policy")
        )
        # --share / --no-share toggle visibility to other projects.
        enable_group = parser.add_mutually_exclusive_group()
        enable_group.add_argument(
            '--share',
            action='store_true',
            help=_('Make the QoS policy accessible by other projects'),
        )
        enable_group.add_argument(
            '--no-share',
            action='store_true',
            help=_('Make the QoS policy not accessible by other projects'),
        )
        # --default / --no-default toggle the project-default flag.
        default_group = parser.add_mutually_exclusive_group()
        default_group.add_argument(
            '--default',
            action='store_true',
            help=_("Set this as a default network QoS policy"),
        )
        default_group.add_argument(
            '--no-default',
            action='store_true',
            help=_("Set this as a non-default network QoS policy"),
        )
        return parser

    def take_action(self, parsed_args):
        """Look up the policy and apply the requested attribute changes."""
        client = self.app.client_manager.network
        obj = client.find_qos_policy(
            parsed_args.policy,
            ignore_missing=False)
        attrs = _get_attrs(self.app.client_manager, parsed_args)
        # Merge in any free-form --extra-property values not modeled above.
        attrs.update(
            self._parse_extra_properties(parsed_args.extra_properties))
        client.update_qos_policy(obj, **attrs)
class ShowNetworkQosPolicy(command.ShowOne):
    """CLI command that displays the details of a single QoS policy."""

    _description = _("Display QoS policy details")

    def get_parser(self, prog_name):
        parser = super(ShowNetworkQosPolicy, self).get_parser(prog_name)
        parser.add_argument(
            'policy',
            metavar="<qos-policy>",
            help=_("QoS policy to display (name or ID)")
        )
        return parser

    def take_action(self, parsed_args):
        """Fetch the policy (erroring if absent) and format it for display."""
        network_client = self.app.client_manager.network
        policy = network_client.find_qos_policy(parsed_args.policy,
                                                ignore_missing=False)
        display_columns, columns = _get_columns(policy)
        return (display_columns,
                utils.get_item_properties(policy, columns))
|
|
# -*- coding: utf-8 -*-
import gsxws
import phonenumbers
from django import forms
from datetime import date
from django.conf import settings
from django_countries import countries
from django.core.validators import RegexValidator
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.forms import SelectDateWidget
from servo.validators import (apple_sn_validator,
phone_validator,
file_upload_validator,)
from servo.forms.base import SearchFieldInput
from servo.models import (Configuration, Device,
Attachment, Location,
Customer, Queue, User,)
# Generate list of years for purchase date picker: the last ten years,
# ending with the current one.
y = date.today().year
YEARS = list(range(y - 9, y + 1))
def get_checkin_locations(user):
    """Return the enabled check-in location choices for this user.

    Anonymous visitors fall back to the locations of the dedicated
    check-in user account.
    """
    account = user if user.is_authenticated else User.get_checkin_user()
    return account.locations.enabled()
class ConfirmationForm(forms.Form):
    """Simple confirmation form; the checkbox is optional (unchecked = no)."""
    confirm = forms.BooleanField(required=False)
class DeviceForm(forms.ModelForm):
    """
    Form for entering devices in the /checkin view
    """
    required_css_class = 'required'
    # Extra (non-model) fields shown alongside the Device model fields.
    accessories = forms.CharField(
        required=False,
        label=_('Accessories'),
        widget=forms.Textarea(attrs={'class': 'span12', 'rows': 3}),
        help_text=_("Please list here any accessories you'd like to check in with your device (cables, power adapters, bags, etc)")
    )
    pop = forms.FileField(
        required=False,
        label=_('Proof of Purchase'),
        validators=[file_upload_validator],
        help_text=_('Proof of Purchase is required when setting purchase date manually')
    )
    condition = forms.CharField(
        required=False,
        label=_('Condition of device'),
        widget=forms.Textarea(attrs={'class': 'span12', 'rows': 3}),
        help_text=_('Please describe the condition of the device. Will be shown on the print-out.')
    )
    queue = forms.ModelChoiceField(
        label=_('Queue'),
        required=False,
        queryset=Queue.objects.all(),
        help_text=_('Assign order to this queue')
    )

    class Meta:
        model = Device
        fields = (
            'description',
            'sn',
            'imei',
            'purchased_on',
            'purchase_country',
            'username',
            'password',
        )
        widgets = {
            'sn': SearchFieldInput(),
            'password': forms.PasswordInput(),
            'username': forms.TextInput(),
            'purchased_on': SelectDateWidget(years=YEARS),
            'warranty_status': forms.Select(attrs={'readonly': 'readonly'}),
        }

    def __init__(self, *args, **kwargs):
        """Tweak field requirements and visibility from site configuration."""
        super(DeviceForm, self).__init__(*args, **kwargs)
        # Password/condition requirements are site-configurable.
        if Configuration.false('checkin_require_password'):
            self.fields['password'].required = False
        if Configuration.true('checkin_require_condition'):
            self.fields['condition'].required = True
        if kwargs.get('instance'):
            # Use GSX product info (derived from the device description)
            # to decide which device-specific fields apply.
            prod = gsxws.Product('')
            prod.description = self.instance.description
            if prod.is_ios:
                self.fields['password'].label = _('Passcode')
            if not prod.is_ios:
                # IMEI only applies to iOS devices.
                del(self.fields['imei'])
            if not prod.is_mac:
                # Computer username only applies to Macs.
                del(self.fields['username'])
        if Configuration.true('checkin_password'):
            # Show the password in clear text when so configured.
            self.fields['password'].widget = forms.TextInput(attrs={'class': 'span12'})
class CustomerForm(forms.Form):
    """
    Form for entering customer info in /checkin
    Not using a ModelForm for a reason.
    """
    required_css_class = 'required'

    fname = forms.CharField(label=_('First name'))
    lname = forms.CharField(label=_('Last name'))
    company = forms.CharField(
        required=False,
        label=_('Company (optional)')
    )
    email = forms.EmailField(
        label=_('Email address'),
        widget=forms.TextInput(attrs={'class': 'span12'})
    )
    phone = forms.CharField(label=_('Phone number'))
    address = forms.CharField(label=_('Address'))
    country = forms.ChoiceField(
        label=_('Country'),
        choices=Customer.COUNTRY_CHOICES,
        initial=settings.INSTALL_COUNTRY.upper()
    )
    city = forms.CharField(label=_('City'))
    postal_code = forms.CharField(label=_('Postal Code'))
    checkin_location = forms.ModelChoiceField(
        empty_label=None,
        label=_(u'Check-in location'),
        queryset=Location.objects.enabled(),
        widget=forms.Select(attrs={'class': 'span12'}),
        help_text=_('Choose where you want to leave the device')
    )
    checkout_location = forms.ModelChoiceField(
        empty_label=None,
        label=_(u'Check-out location'),
        queryset=Location.objects.enabled(),
        widget=forms.Select(attrs={'class': 'span12'}),
        help_text=_('Choose where you want to pick up the device')
    )

    TERMS = _('I agree to the <a href="/checkin/terms/" target="_blank">terms of service.</a>')
    agree_to_terms = forms.BooleanField(initial=False, label=mark_safe(TERMS))

    notify_by_sms = forms.BooleanField(
        initial=True,
        required=False,
        label=_('Notify by SMS')
    )
    notify_by_email = forms.BooleanField(
        initial=True,
        required=False,
        label=_('Notify by Email')
    )

    def __init__(self, request, *args, **kwargs):
        """Initialize location choices from the session and the user's access."""
        super(CustomerForm, self).__init__(*args, **kwargs)
        location = request.session['checkin_location']
        locations = get_checkin_locations(request.user)

        # Only show the location pickers when there is a real choice.
        self.show_location_picker = len(locations) > 1
        self.fields['checkin_location'].initial = location
        self.fields['checkout_location'].initial = location

        if self.show_location_picker:
            self.fields['checkin_location'].queryset = locations
            self.fields['checkout_location'].queryset = locations
        else:
            self.fields['checkin_location'].widget = forms.HiddenInput()
            self.fields['checkout_location'].widget = forms.HiddenInput()

        if request.user.is_authenticated:
            # Staff performing the check-in don't need to accept the terms.
            del(self.fields['agree_to_terms'])
            self.fields['phone'].widget = SearchFieldInput()

    def clean(self):
        """Validate the phone number against the selected country."""
        cd = super(CustomerForm, self).clean()
        phone = cd.get('phone')
        country = cd.get('country')

        # Bug fix: when the phone field failed its own validation it is
        # absent from cleaned_data, so the previous `len(phone) < 1` check
        # raised TypeError on None. `not phone` covers both None and "".
        if not phone:
            return cd

        try:
            phonenumbers.parse(phone, country)
        except phonenumbers.NumberParseException:
            msg = _('Enter a valid phone number')
            self._errors["phone"] = self.error_class([msg])

        return cd

    def clean_fname(self):
        """Normalize the first name's capitalization."""
        v = self.cleaned_data.get('fname')
        return v.capitalize()

    def clean_lname(self):
        """Normalize the last name's capitalization."""
        lname = self.cleaned_data.get('lname')
        return lname.capitalize()
class AppleSerialNumberForm(forms.Form):
    """Form asking for an Apple serial number or IMEI."""
    sn = forms.CharField(
        min_length=8,
        validators=[apple_sn_validator],
        label=_(u'Serial number or IMEI')
    )

    def clean_sn(self):
        """Serial numbers are compared in upper case throughout."""
        return self.cleaned_data.get('sn').upper()
class SerialNumberForm(forms.Form):
    """Form asking for a plain serial number."""
    sn = forms.CharField(
        min_length=8,
        label=_(u'Serial number')
    )

    def clean_sn(self):
        """Serial numbers are compared in upper case throughout."""
        return self.cleaned_data.get('sn').upper()
class StatusCheckForm(forms.Form):
    """Form for looking up a Service Order by its 8-digit code."""
    code = forms.CharField(
        min_length=8,
        label=_('Service Order'),
        validators=[RegexValidator(regex=r'\d{8}', message=_('Invalid Service Order number'))]
    )
class IssueForm(forms.Form):
    """Form describing the problem with the checked-in device."""
    required_css_class = 'required'
    issue_description = forms.CharField(
        min_length=10,
        label=_(u'Problem description'),
        help_text=_('Will appear on the print-out'),
        widget=forms.Textarea(attrs={'class': 'span12'})
    )
    attachment = forms.FileField(
        required=False,
        label=_('Attachment'),
        validators=[file_upload_validator],
        help_text=_('Please use this to attach relevant documents')
    )
    # Internal notes — not shown on the customer print-out.
    notes = forms.CharField(
        required=False,
        label=_(u'Notes for technician'),
        widget=forms.Textarea(attrs={'class': 'span12'}),
        help_text=_('Will not appear on the print-out')
    )
class QuestionForm(forms.Form):
    """Form carrying a question/answer pair in hidden inputs."""
    question = forms.CharField(widget=forms.HiddenInput)
    answer = forms.CharField(widget=forms.HiddenInput)
class AttachmentForm(forms.ModelForm):
    """ModelForm exposing every field of the Attachment model."""
    class Meta:
        model = Attachment
        exclude = []
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: consul_session
short_description: "manipulate consul sessions"
description:
- allows the addition, modification and deletion of sessions in a consul
cluster. These sessions can then be used in conjunction with key value pairs
to implement distributed locks. In depth documentation for working with
sessions can be found here http://www.consul.io/docs/internals/sessions.html
requirements:
- "python >= 2.6"
- python-consul
- requests
version_added: "2.0"
author: "Steve Gargan @sgargan"
options:
state:
description:
- whether the session should be present i.e. created if it doesn't
exist, or absent, removed if present. If created, the ID for the
session is returned in the output. If absent, the name or ID is
required to remove the session. Info for a single session, all the
sessions for a node or all available sessions can be retrieved by
specifying info, node or list for the state; for node or info, the
node name or session id is required as parameter.
required: false
choices: ['present', 'absent', 'info', 'node', 'list']
default: present
name:
description:
- the name that should be associated with the session. This is opaque
to Consul and not required.
required: false
default: None
delay:
description:
- the optional lock delay that can be attached to the session when it
is created. Locks for invalidated sessions are blocked from being
acquired until this delay has expired. Durations are in seconds
default: 15
required: false
node:
description:
- the name of the node with which the session will be associated.
by default this is the name of the agent.
required: false
default: None
datacenter:
description:
- name of the datacenter in which the session exists or should be
created.
required: false
default: None
checks:
description:
- a list of checks that will be used to verify the session health. If
all the checks fail, the session will be invalidated and any locks
associated with the session will be released and can be acquired once
the associated lock delay has expired.
required: false
default: None
host:
description:
- host of the consul agent defaults to localhost
required: false
default: localhost
port:
description:
- the port on which the consul agent is running
required: false
default: 8500
scheme:
description:
- the protocol scheme on which the consul agent is running
required: false
default: http
version_added: "2.1"
validate_certs:
description:
- whether to verify the tls certificate of the consul agent
required: false
default: True
version_added: "2.1"
behavior:
description:
- the optional behavior that can be attached to the session when it
is created. This can be set to either 'release' or 'delete'. This
controls the behavior when a session is invalidated.
default: release
required: false
version_added: "2.2"
"""
EXAMPLES = '''
- name: register basic session with consul
consul_session:
name: session1
- name: register a session with an existing check
consul_session:
name: session_with_check
checks:
- existing_check_name
- name: register a session with lock_delay
consul_session:
name: session_with_delay
delay: 20s
- name: retrieve info about session by id
consul_session: id=session_id state=info
- name: retrieve active sessions
consul_session: state=list
'''
# python-consul (and its requests dependency) is optional at import time;
# record its availability so test_dependencies() can fail with a helpful
# message instead of an unhandled ImportError at module load.
try:
    import consul
    from requests.exceptions import ConnectionError
    python_consul_installed = True
except ImportError:
    python_consul_installed = False
from ansible.module_utils.basic import AnsibleModule
def execute(module):
    """Dispatch to the handler matching the requested state."""
    state = module.params.get('state')
    if state == 'present':
        update_session(module)
    elif state in ('info', 'list', 'node'):
        lookup_sessions(module)
    else:
        # 'absent'
        remove_session(module)
def lookup_sessions(module):
    """Report session info for a session id, a node, or the whole datacenter."""
    datacenter = module.params.get('datacenter')
    state = module.params.get('state')
    consul_client = get_consul_api(module)
    try:
        if state == 'list':
            result = consul_client.session.list(dc=datacenter)
            # ditch the index, this can be grabbed from the results
            if result and result[1]:
                result = result[1]
            module.exit_json(changed=True, sessions=result)
        elif state == 'node':
            node = module.params.get('node')
            if not node:
                module.fail_json(
                    msg="node name is required to retrieve sessions for node")
            node_sessions = consul_client.session.node(node, dc=datacenter)
            module.exit_json(changed=True,
                             node=node,
                             sessions=node_sessions)
        elif state == 'info':
            session_id = module.params.get('id')
            if not session_id:
                module.fail_json(
                    msg="session_id is required to retrieve indvidual session info")
            session_info = consul_client.session.info(session_id, dc=datacenter)
            module.exit_json(changed=True,
                             session_id=session_id,
                             sessions=session_info)
    except Exception as e:
        module.fail_json(msg="Could not retrieve session info %s" % e)
def update_session(module):
    """Create a session with the configured attributes and report it."""
    params = module.params
    consul_client = get_consul_api(module)
    try:
        session_id = consul_client.session.create(
            name=params.get('name'),
            behavior=params.get('behavior'),
            node=params.get('node'),
            lock_delay=params.get('delay'),
            dc=params.get('datacenter'),
            checks=params.get('checks')
        )
        module.exit_json(changed=True,
                         session_id=session_id,
                         name=params.get('name'),
                         behavior=params.get('behavior'),
                         delay=params.get('delay'),
                         checks=params.get('checks'),
                         node=params.get('node'))
    except Exception as e:
        module.fail_json(msg="Could not create/update session %s" % e)
def remove_session(module):
    """Destroy the session identified by the module's 'id' parameter."""
    session_id = module.params.get('id')
    if not session_id:
        # Fix: the original triple-quoted message embedded a raw newline and
        # source indentation in the user-visible error text.
        module.fail_json(msg="A session id must be supplied in order to "
                             "remove a session.")

    consul_client = get_consul_api(module)
    try:
        consul_client.session.destroy(session_id)
        module.exit_json(changed=True,
                         session_id=session_id)
    except Exception as e:
        module.fail_json(msg="Could not remove session with id '%s' %s" % (
            session_id, e))
def get_consul_api(module):
    """Build a consul.Consul client from the module's connection parameters.

    Fix: the 'scheme' and 'validate_certs' options are documented and
    accepted by this module but were previously ignored here, so https
    endpoints and certificate verification settings never took effect.
    """
    return consul.Consul(host=module.params.get('host'),
                         port=module.params.get('port'),
                         scheme=module.params.get('scheme'),
                         verify=module.params.get('validate_certs'))
def test_dependencies(module):
    """Fail fast when the optional python-consul library is missing."""
    if python_consul_installed:
        return
    module.fail_json(msg="python-consul required for this module. "
                         "see http://python-consul.readthedocs.org/en/latest/#installation")
def main():
    """Entry point: declare the argument spec, run, and map errors to failures."""
    argument_spec = dict(
        checks=dict(default=None, required=False, type='list'),
        # Fix: default must be an int to match type='int' (was the string '15').
        delay=dict(required=False, type='int', default=15),
        behavior=dict(required=False, type='str', default='release',
                      choices=['release', 'delete']),
        host=dict(default='localhost'),
        port=dict(default=8500, type='int'),
        scheme=dict(required=False, default='http'),
        # Fix: declare as a boolean so "yes"/"no" strings are coerced properly.
        validate_certs=dict(required=False, default=True, type='bool'),
        id=dict(required=False),
        name=dict(required=False),
        node=dict(required=False),
        state=dict(default='present',
                   choices=['present', 'absent', 'info', 'node', 'list']),
        datacenter=dict(required=False)
    )

    module = AnsibleModule(argument_spec, supports_check_mode=False)
    test_dependencies(module)

    try:
        execute(module)
    except ConnectionError as e:
        module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
            module.params.get('host'), module.params.get('port'), str(e)))
    except Exception as e:
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
|
|
#!/usr/bin/env python
import numpy as np
import os
import shutil
import wx
import matplotlib
matplotlib.use('WXAgg')
from datetime import datetime
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas
from utils import take_screenshot, XboxController
# Sampling interval passed to wx.Timer.Start (milliseconds between samples).
SAMPLE_RATE = 200
class MainWindow(wx.Frame):
""" Main frame of the application
"""
title = 'Data Acquisition'
def __init__(self):
wx.Frame.__init__(self, None, title=self.title, size=(660,330))
# Init controller
self.controller = XboxController()
# Create GUI
self.create_main_panel()
# Timer
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_timer, self.timer)
self.rate = SAMPLE_RATE
self.timer.Start(self.rate)
self.recording = False
self.t = 0
def create_main_panel(self):
# Panels
self.img_panel = wx.Panel(self)
self.joy_panel = wx.Panel(self)
self.record_panel = wx.Panel(self)
# Images
img = wx.Image(320,240)
self.image_widget = wx.StaticBitmap(self.img_panel, wx.ID_ANY, wx.Bitmap(img))
# Joystick
self.init_plot()
self.PlotCanvas = FigCanvas(self.joy_panel, wx.ID_ANY, self.fig)
# Recording
self.txt_outputDir = wx.TextCtrl(self.record_panel, wx.ID_ANY, pos=(5,0), size=(320,30))
uid = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
self.txt_outputDir.ChangeValue("samples/train/" + uid)
self.btn_record = wx.Button(self.record_panel, wx.ID_ANY, label="Record", pos=(335,0), size=(100,30))
self.Bind(wx.EVT_BUTTON, self.on_btn_record, self.btn_record)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_btn_record, self.btn_record)
# sizers
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(self.img_panel, 0, wx.ALL, 5)
sizer.Add(self.joy_panel, 0, wx.ALL, 5)
mainSizer_v = wx.BoxSizer(wx.VERTICAL)
mainSizer_v.Add(sizer, 0 , wx.ALL, 5)
mainSizer_v.Add(self.record_panel, 0 , wx.ALL, 5)
# finalize layout
self.SetAutoLayout(True)
self.SetSizer(mainSizer_v)
self.Layout()
def init_plot(self):
self.plotMem = 50 # how much data to keep on the plot
self.plotData = [[0] * (5)] * self.plotMem # mem storage for plot
self.fig = Figure((4,3))
self.axes = self.fig.add_subplot(111)
def on_timer(self, event):
self.poll()
# stop drawing if recording to avoid slow downs
if self.recording == False:
self.draw()
def poll(self):
self.bmp = take_screenshot()
self.controller_data = self.controller.read()
self.update_plot()
if self.recording == True:
self.save_data()
def update_plot(self):
self.plotData.append(self.controller_data) # adds to the end of the list
self.plotData.pop(0) # remove the first item in the list, ie the oldest
def save_data(self):
image_file = self.outputDir+'/'+'img_'+str(self.t)+'.png'
self.bmp.SaveFile(image_file, wx.BITMAP_TYPE_PNG)
# make / open outfile
outfile = open(self.outputDir+'/'+'data.csv', 'a')
# write line
outfile.write( image_file + ',' + ','.join(map(str, self.controller_data)) + '\n' )
outfile.close()
self.t += 1
def draw(self):
# Image
img = self.bmp.ConvertToImage()
img = img.Rescale(320,240)
self.image_widget.SetBitmap( img.ConvertToBitmap() )
# Joystick
x = np.asarray(self.plotData)
self.axes.plot(range(0,self.plotMem), x[:,0], 'r')
self.axes.hold(True)
self.axes.plot(range(0,self.plotMem), x[:,1], 'b')
self.axes.plot(range(0,self.plotMem), x[:,2], 'g')
self.axes.plot(range(0,self.plotMem), x[:,3], 'k')
self.axes.plot(range(0,self.plotMem), x[:,4], 'y')
self.axes.hold(False)
self.PlotCanvas.draw()
def on_update_btn_record(self, event):
label = "Stop" if self.recording else "Record"
self.btn_record.SetLabel(label)
def on_btn_record(self, event):
# pause timer
self.timer.Stop()
# switch state
self.recording = not self.recording
if self.recording:
self.start_recording()
# un pause timer
self.timer.Start(self.rate)
def start_recording(self):
# check that a dir has been specified
if self.txt_outputDir.IsEmpty():
msg = wx.MessageDialog(self, 'Specify the Output Directory', 'Error', wx.OK | wx.ICON_ERROR)
msg.ShowModal() == wx.ID_YES
msg.Destroy()
self.recording = False
else: # a directory was specified
self.outputDir = self.txt_outputDir.GetValue()
self.t = 0
# check if path exists - ie may be saving over data
if os.path.exists(self.outputDir):
msg = wx.MessageDialog(self, 'Output Directory Exists - Overwrite Data?', 'Yes or No', wx.YES_NO | wx.ICON_QUESTION)
result = msg.ShowModal() == wx.ID_YES
msg.Destroy()
# overwrite the data
if result == True:
# delete the dir
shutil.rmtree(self.outputDir)
# re-make dir
os.mkdir(self.outputDir)
# do not overwrite the data
else: # result == False
self.recording = False
self.txt_outputDir.SetFocus()
# no directory so make one
else:
os.mkdir(self.outputDir)
def on_exit(self, event):
    """Tear down the main window when the exit event fires."""
    self.Destroy()
if __name__ == '__main__':
    # Standard wx bootstrap: create the app, show the main frame, and
    # hand control over to the GUI event loop.
    app = wx.App()
    app.frame = MainWindow()
    app.frame.Show()
    app.MainLoop()
|
|
#!/usr/bin/python
import sys
from random import shuffle, seed
from itertools import product
class Card:
    """One playing card with Pedro-specific sort ordering.

    A rank-5 card ("Pedro") sorts specially: 100 when its suit rank is
    1 or 3 (Hearts/Spades), otherwise 0; all other ranks sort by rank.
    """

    FACES = {11: 'Jack', 12: 'Queen', 13: 'King', 14: 'Ace'}
    SUITS = {'Hearts': 1, 'Diamonds': 2, 'Spades': 3, 'Clubs': 4}
    COLORS = {'Hearts': 0, 'Diamonds': 0, 'Spades': 1, 'Clubs': 1}

    def __init__(self, rank, suit):
        self.suit = suit
        self.rank = rank
        self.suit_rank = self.SUITS[suit]
        if rank != 5:
            self.sort_rank = rank
        elif self.suit_rank in (1, 3):
            self.sort_rank = 100
        else:
            self.sort_rank = 0

    def __str__(self):
        face = self.FACES.get(self.rank, self.rank)
        return "{0} of {1}".format(face, self.suit)

    def __repr__(self):
        return str(self)

    def check_trump(self, trump_suit):
        """Return True when this card counts as trump for trump_suit."""
        if self.rank == 5:
            # A five is trump whenever it shares the trump suit's color.
            return self.COLORS[self.suit] == self.COLORS[trump_suit]
        return self.suit == trump_suit
class Deck:
    """A full 52-card deck built from Card instances."""

    def __init__(self):
        ranks = range(2, 15)
        suits = 'Spades Diamonds Clubs Hearts'.split()
        self.cards = [Card(r, s) for s, r in product(suits, ranks)]

    def __str__(self):
        s = ''
        for i in range(len(self.cards)):
            s = s + ' ' * i + str(self.cards[i]) + '\n'
        return s

    def __repr__(self):
        # Fix: previously a bare `pass`, which made __repr__ return None
        # and repr(deck) raise TypeError.  Delegate to __str__, matching
        # the convention used by the other classes in this module.
        return str(self)

    def shuffle(self):
        """Shuffle the deck in place (random.shuffle)."""
        shuffle(self.cards)

    def deal(self, hand, num_cards=1):
        """Pop num_cards off the end of the deck into `hand` via hand.add()."""
        for i in range(num_cards):
            hand.add(self.cards.pop())
class Hand:
    """An ordered collection of cards held by one player."""

    def __init__(self):
        self.cards = []

    def clear_hand(self):
        """Drop every card in the hand."""
        self.cards = []

    def discard(self, trump_suit):
        """Keep only the cards that count as trump for trump_suit."""
        self.cards = [card for card in self.cards if card.check_trump(trump_suit)]

    def sort_hand(self):
        """Order the hand by suit rank, then by Pedro sort rank."""
        self.cards = sorted(self.cards, key=lambda card: (card.suit_rank, card.sort_rank))

    def play(self, card):
        """Remove and return the card at 1-based position `card`."""
        return self.cards.pop(card - 1)

    def add(self, card):
        self.cards.append(card)

    def __str__(self):
        lines = []
        for position, card in enumerate(self.cards, start=1):
            lines.append(' ' + str(position) + ':' + ' ' * position + str(card) + '\n')
        return ''.join(lines)

    def __repr__(self):
        return str(self)
class Pedro_game:
def __init__(self, players):
self.players = players
self.trump_suit = None
def deal_round(self, first_bidder):
self.deck = Deck()
self.deck.shuffle()
order = [i for i in range(first_bidder, 4)] + \
[i for i in range(first_bidder)]
for player in self.players:
player.clear_hand()
for i in range(3):
for j in order:
self.deck.deal(self.players[j], 3)
for i in order:
self.players[i].sort_hand()
def bidding(self, first_bidder):
current_bid = 5
winning_bidder = -1
order = [i for i in range(first_bidder, 4)] + \
[i for i in range(first_bidder)]
for i, j in enumerate(order):
print self.players[j]
if current_bid < 14:
bid = int(raw_input('Bid?\n'))
if bid > current_bid:
current_bid = bid
winning_bidder = j
else:
bid = int(raw_input('Bid?\n'))
if bid == 14 and i == 3:
current_bid = bid
winning_bidder = j
print current_bid
print winning_bidder
self.winning_bidder = winning_bidder
print self.players[winning_bidder]
self.trump_suit = raw_input('Trump suit?\n')
def second_deal(self, first_bidder):
order = [i for i in range(first_bidder, 4)] + \
[i for i in range(first_bidder)]
for i, j in enumerate(order):
self.players[j].discard(self.trump_suit)
take = 6 - len(self.players[j].cards)
if take > 0:
self.deck.deal(self.players[j], take)
self.players[j].sort_hand()
def play_trick(self, lead):
trick = Trick(self.trump_suit)
order = [i for i in range(lead, 4)] + [i for i in range(lead)]
for i, j in enumerate(order):
print self.players[j]
card_number = int(raw_input('Play Card?\n'))
card = self.players[j].play(card_number)
trick.add(card)
print trick
class Trick:
    """The cards played to a single trick."""

    def __init__(self, trump_suit, lead_card=None):
        # lead_card is optional (backward-compatible) so a trick can be
        # created empty and then populated with add().
        self.cards = [] if lead_card is None else [lead_card]
        self.trump_suit = trump_suit

    def add(self, card):
        """Append the next card played to this trick."""
        self.cards.append(card)

    def __str__(self):
        s = ''
        for i in range(len(self.cards)):
            s = s + ' ' + str(i + 1) + ':' + ' ' * (i + 1) + \
                str(self.cards[i]) + '\n'
        return s

    def __repr__(self):
        return str(self)
class Pedro_Player(object):
def __init__(self, name):
self.name = name
self.hand = Hand()
def bid(self, min_bid):
if min_bid > 14:
return False
else:
if min_bid > 5:
ask = 'Current Bid: ' + min_bid - 1 + '\n'
else:
ask = 'Minimum Bid: 6\n'
ask += ' ' + self.name + ': Bid?\n'
invalid_bid = True
while invalid_bid:
try:
bid = int(raw_input(ask))
if bid > min_bid and bid < 14:
return bid
else:
msg = 'Must be greater than ' + str(min_bid)
msg += ' and less than 14'
print msg
except ValueError:
print 'Please insert integer'
def discard(self, trump):
pass
def play_card(self, card):
pass
class Dealer(Pedro_Player):
    def __init__(self, pedro_player):
        """
        Dealer class initialized
        by a Pedro_Player instance
        """
        self.name = pedro_player.name
        self.hand = pedro_player.hand

    def bid(self, current_bid):
        # NOTE(review): `bid` is undefined in this scope, so calling this
        # method raises NameError.  It probably should delegate to
        # Pedro_Player.bid or implement dealer-specific bidding --
        # confirm intended behavior before fixing.
        return bid
# Ad-hoc manual test driver: prompt one player for a bid, then exit.
jacob = Pedro_Player('Jacob')
jacob.bid(5)
sys.exit()
# Everything below sys.exit() is unreachable scratch code kept for reference.
# to do dealer rotation
players = ['a', 'b', 'c', 'd']
print players
dealer = players.pop()
players.insert(0, dealer)
print players
seed(1)
# initialize the players
# Jacob=Hand('Jacob')
# Brigette=Hand('Brigette')
# David=Hand('David')
# Richard=Hand('Richard')
#
# players=[Jacob,Brigette,David,Richard]
# game=Pedro_game(players)
# game.deal_round(0)
# game.bidding(0)
# game.second_deal(0)
# game.play_trick(game.winning_bidder)
#
|
|
"""Support for departure information for Rhein-Main public transport."""
import asyncio
from datetime import timedelta
import logging
from RMVtransport import RMVtransport
from RMVtransport.rmvtransport import RMVtransportApiConnectionError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME, TIME_MINUTES
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)

# Keys accepted in the platform configuration schema below.
CONF_NEXT_DEPARTURE = "next_departure"
CONF_STATION = "station"
CONF_DESTINATIONS = "destinations"
CONF_DIRECTION = "direction"
CONF_LINES = "lines"
CONF_PRODUCTS = "products"
CONF_TIME_OFFSET = "time_offset"
CONF_MAX_JOURNEYS = "max_journeys"
CONF_TIMEOUT = "timeout"

DEFAULT_NAME = "RMV Journey"

# Transport products the RMV API can filter on.
VALID_PRODUCTS = ["U-Bahn", "Tram", "Bus", "S", "RB", "RE", "EC", "IC", "ICE"]

# Frontend icon per product; the None entry is the fallback used before
# any departure data has arrived.
ICONS = {
    "U-Bahn": "mdi:subway",
    "Tram": "mdi:tram",
    "Bus": "mdi:bus",
    "S": "mdi:train",
    "RB": "mdi:train",
    "RE": "mdi:train",
    "EC": "mdi:train",
    "IC": "mdi:train",
    "ICE": "mdi:train",
    "SEV": "mdi:checkbox-blank-circle-outline",
    None: "mdi:clock",
}
ATTRIBUTION = "Data provided by opendata.rmv.de"

# Minimum interval between API refreshes (enforced via Throttle).
SCAN_INTERVAL = timedelta(seconds=60)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_NEXT_DEPARTURE): [
            {
                vol.Required(CONF_STATION): cv.string,
                vol.Optional(CONF_DESTINATIONS, default=[]): vol.All(
                    cv.ensure_list, [cv.string]
                ),
                vol.Optional(CONF_DIRECTION): cv.string,
                vol.Optional(CONF_LINES, default=[]): vol.All(
                    cv.ensure_list, [cv.positive_int, cv.string]
                ),
                vol.Optional(CONF_PRODUCTS, default=VALID_PRODUCTS): vol.All(
                    cv.ensure_list, [vol.In(VALID_PRODUCTS)]
                ),
                vol.Optional(CONF_TIME_OFFSET, default=0): cv.positive_int,
                vol.Optional(CONF_MAX_JOURNEYS, default=5): cv.positive_int,
                vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
            }
        ],
        vol.Optional(CONF_TIMEOUT, default=10): cv.positive_int,
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the RMV departure sensor.

    Builds one sensor per configured next_departure entry, primes each
    once, and raises PlatformNotReady when no sensor got any data.
    """
    timeout = config.get(CONF_TIMEOUT)
    session = async_get_clientsession(hass)

    sensors = [
        RMVDepartureSensor(
            session,
            next_departure[CONF_STATION],
            next_departure.get(CONF_DESTINATIONS),
            next_departure.get(CONF_DIRECTION),
            next_departure.get(CONF_LINES),
            next_departure.get(CONF_PRODUCTS),
            next_departure.get(CONF_TIME_OFFSET),
            next_departure.get(CONF_MAX_JOURNEYS),
            next_departure.get(CONF_NAME),
            timeout,
        )
        for next_departure in config.get(CONF_NEXT_DEPARTURE)
    ]

    # Fix: asyncio.wait() no longer accepts bare coroutines (deprecated
    # since 3.8, removed in 3.11); gather them instead.  gather() also
    # handles the empty case, so the old `if tasks:` guard is unneeded.
    await asyncio.gather(*(sensor.async_update() for sensor in sensors))

    if not any(sensor.data for sensor in sensors):
        raise PlatformNotReady

    async_add_entities(sensors)
class RMVDepartureSensor(Entity):
    """Implementation of an RMV departure sensor."""

    def __init__(
        self,
        session,
        station,
        destinations,
        direction,
        lines,
        products,
        time_offset,
        max_journeys,
        name,
        timeout,
    ):
        """Initialize the sensor."""
        self._station = station
        self._name = name
        self._state = None
        self.data = RMVDepartureData(
            session,
            station,
            destinations,
            direction,
            lines,
            products,
            time_offset,
            max_journeys,
            timeout,
        )
        self._icon = ICONS[None]

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def available(self):
        """Return True if entity is available."""
        return self._state is not None

    @property
    def state(self):
        """Return the next departure time."""
        return self._state

    @property
    def state_attributes(self):
        """Return the state attributes."""
        departures = self.data.departures
        try:
            upcoming = departures[0]
        except IndexError:
            # No departure data yet (or nothing matched the filters).
            return {}
        return {
            "next_departures": departures[1:],
            "direction": upcoming.get("direction"),
            "line": upcoming.get("line"),
            "minutes": upcoming.get("minutes"),
            "departure_time": upcoming.get("departure_time"),
            "product": upcoming.get("product"),
            ATTR_ATTRIBUTION: ATTRIBUTION,
        }

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return self._icon

    @property
    def unit_of_measurement(self):
        """Return the unit this state is expressed in."""
        return TIME_MINUTES

    async def async_update(self):
        """Get the latest data and update the state."""
        await self.data.async_update()

        if self._name == DEFAULT_NAME:
            # Adopt the station name resolved by the API.
            self._name = self.data.station
            self._station = self.data.station

        departures = self.data.departures
        if not departures:
            self._state = None
            self._icon = ICONS[None]
            return
        self._state = departures[0].get("minutes")
        self._icon = ICONS[departures[0].get("product")]
class RMVDepartureData:
    """Pull data from the opendata.rmv.de web page."""

    def __init__(
        self,
        session,
        station_id,
        destinations,
        direction,
        lines,
        products,
        time_offset,
        max_journeys,
        timeout,
    ):
        """Initialize the sensor."""
        # Resolved station name; filled in after the first update.
        self.station = None
        self._station_id = station_id
        self._destinations = destinations
        self._direction = direction
        self._lines = lines
        self._products = products
        # Minimum minutes-until-departure for a journey to be listed.
        self._time_offset = time_offset
        self._max_journeys = max_journeys
        self.rmv = RMVtransport(session, timeout)
        self.departures = []
        # Ensures the "destination not found" message is logged only once.
        self._error_notification = False

    @Throttle(SCAN_INTERVAL)
    async def async_update(self):
        """Update the connection data."""
        try:
            # Always fetch up to 50 journeys from the API; the configured
            # max_journeys limit is applied after filtering below.
            _data = await self.rmv.get_departures(
                self._station_id,
                products=self._products,
                direction_id=self._direction,
                max_journeys=50,
            )
        except RMVtransportApiConnectionError:
            self.departures = []
            _LOGGER.warning("Could not retrieve data from rmv.de")
            return
        self.station = _data.get("station")
        _deps = []
        _deps_not_found = set(self._destinations)
        for journey in _data["journeys"]:
            # find the first departure meeting the criteria
            _nextdep = {}
            if self._destinations:
                dest_found = False
                for dest in self._destinations:
                    if dest in journey["stops"]:
                        dest_found = True
                        if dest in _deps_not_found:
                            _deps_not_found.remove(dest)
                        # Records the last configured destination that
                        # matched this journey's stops.
                        _nextdep["destination"] = dest
                if not dest_found:
                    continue
            # NOTE: the line and time-offset filters below only apply when
            # no destination filter is configured (elif chain).
            elif self._lines and journey["number"] not in self._lines:
                continue
            elif journey["minutes"] < self._time_offset:
                continue
            for attr in ["direction", "departure_time", "product", "minutes"]:
                _nextdep[attr] = journey.get(attr, "")
            _nextdep["line"] = journey.get("number", "")
            _deps.append(_nextdep)
            # Stops once the list exceeds max_journeys (keeps up to
            # max_journeys + 1 entries: state + the "next" list).
            if len(_deps) > self._max_journeys:
                break
        if not self._error_notification and _deps_not_found:
            # Log configured destinations never seen in results, once.
            self._error_notification = True
            _LOGGER.info("Destination(s) %s not found", ", ".join(_deps_not_found))
        self.departures = _deps
|
|
"""Support for Nexia / Trane XL thermostats."""
import logging
from nexia.const import (
OPERATION_MODE_AUTO,
OPERATION_MODE_COOL,
OPERATION_MODE_HEAT,
OPERATION_MODE_OFF,
SYSTEM_STATUS_COOL,
SYSTEM_STATUS_HEAT,
SYSTEM_STATUS_IDLE,
UNIT_FAHRENHEIT,
)
import voluptuous as vol
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_HUMIDITY,
ATTR_MAX_HUMIDITY,
ATTR_MIN_HUMIDITY,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_HUMIDITY,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_TEMPERATURE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import dispatcher_send
from .const import (
ATTR_AIRCLEANER_MODE,
ATTR_DEHUMIDIFY_SETPOINT,
ATTR_DEHUMIDIFY_SUPPORTED,
ATTR_HUMIDIFY_SETPOINT,
ATTR_HUMIDIFY_SUPPORTED,
ATTR_ZONE_STATUS,
DOMAIN,
NEXIA_DEVICE,
SIGNAL_THERMOSTAT_UPDATE,
SIGNAL_ZONE_UPDATE,
UPDATE_COORDINATOR,
)
from .entity import NexiaThermostatZoneEntity
from .util import percent_conv
SERVICE_SET_AIRCLEANER_MODE = "set_aircleaner_mode"
SERVICE_SET_HUMIDIFY_SETPOINT = "set_humidify_setpoint"

# Entity-service schema: air cleaner mode is a free-form string.
SET_AIRCLEANER_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
        vol.Required(ATTR_AIRCLEANER_MODE): cv.string,
    }
)

# Entity-service schema: humidify setpoint is an int in 35-65 (percent).
SET_HUMIDITY_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
        vol.Required(ATTR_HUMIDITY): vol.All(
            vol.Coerce(int), vol.Range(min=35, max=65)
        ),
    }
)

_LOGGER = logging.getLogger(__name__)

#
# Nexia has two bits to determine hvac mode
# There are actually eight states so we map to
# the most significant state
#
# 1. Zone Mode : Auto / Cooling / Heating / Off
# 2. Run Mode : Hold / Run Schedule
#
#
HA_TO_NEXIA_HVAC_MODE_MAP = {
    HVAC_MODE_HEAT: OPERATION_MODE_HEAT,
    HVAC_MODE_COOL: OPERATION_MODE_COOL,
    HVAC_MODE_HEAT_COOL: OPERATION_MODE_AUTO,
    HVAC_MODE_AUTO: OPERATION_MODE_AUTO,
    HVAC_MODE_OFF: OPERATION_MODE_OFF,
}
# Reverse lookup.  Both HEAT_COOL and AUTO map to OPERATION_MODE_AUTO, so
# the reverse map keeps whichever appears last (HVAC_MODE_AUTO).
NEXIA_TO_HA_HVAC_MODE_MAP = {
    value: key for key, value in HA_TO_NEXIA_HVAC_MODE_MAP.items()
}
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up climate for a Nexia device."""
    nexia_data = hass.data[DOMAIN][config_entry.entry_id]
    nexia_home = nexia_data[NEXIA_DEVICE]
    coordinator = nexia_data[UPDATE_COORDINATOR]

    platform = entity_platform.current_platform.get()
    # The service name doubles as the name of the entity method invoked.
    platform.async_register_entity_service(
        SERVICE_SET_HUMIDIFY_SETPOINT,
        SET_HUMIDITY_SCHEMA,
        SERVICE_SET_HUMIDIFY_SETPOINT,
    )
    platform.async_register_entity_service(
        SERVICE_SET_AIRCLEANER_MODE, SET_AIRCLEANER_SCHEMA, SERVICE_SET_AIRCLEANER_MODE
    )

    # One climate entity per zone of every thermostat in the home.
    entities = []
    for thermostat_id in nexia_home.get_thermostat_ids():
        thermostat = nexia_home.get_thermostat_by_id(thermostat_id)
        entities.extend(
            NexiaZone(coordinator, thermostat.get_zone_by_id(zone_id))
            for zone_id in thermostat.get_zone_ids()
        )
    async_add_entities(entities, True)
class NexiaZone(NexiaThermostatZoneEntity, ClimateEntity):
    """Provides Nexia Climate support."""

    def __init__(self, coordinator, zone):
        """Initialize the thermostat."""
        super().__init__(
            coordinator, zone, name=zone.get_name(), unique_id=zone.zone_id
        )
        # NOTE(review): these two dispatcher handles are never assigned or
        # read anywhere in this class -- possibly leftovers.
        self._undo_humidfy_dispatcher = None
        self._undo_aircleaner_dispatcher = None
        # The has_* calls are stable for the life of the device
        # and do not do I/O
        self._has_relative_humidity = self._thermostat.has_relative_humidity()
        self._has_emergency_heat = self._thermostat.has_emergency_heat()
        self._has_humidify_support = self._thermostat.has_humidify_support()
        self._has_dehumidify_support = self._thermostat.has_dehumidify_support()

    @property
    def supported_features(self):
        """Return the list of supported features."""
        supported = (
            SUPPORT_TARGET_TEMPERATURE_RANGE
            | SUPPORT_TARGET_TEMPERATURE
            | SUPPORT_FAN_MODE
            | SUPPORT_PRESET_MODE
        )
        if self._has_humidify_support or self._has_dehumidify_support:
            supported |= SUPPORT_TARGET_HUMIDITY
        if self._has_emergency_heat:
            supported |= SUPPORT_AUX_HEAT
        return supported

    @property
    def is_fan_on(self):
        """Blower is on."""
        return self._thermostat.is_blower_active()

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS if self._thermostat.get_unit() == "C" else TEMP_FAHRENHEIT

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._zone.get_temperature()

    @property
    def fan_mode(self):
        """Return the fan setting."""
        return self._thermostat.get_fan_mode()

    @property
    def fan_modes(self):
        """Return the list of available fan modes."""
        return self._thermostat.get_fan_modes()

    @property
    def min_temp(self):
        """Minimum temp for the current setting."""
        return (self._thermostat.get_setpoint_limits())[0]

    @property
    def max_temp(self):
        """Maximum temp for the current setting."""
        return (self._thermostat.get_setpoint_limits())[1]

    def set_fan_mode(self, fan_mode):
        """Set new target fan mode."""
        self._thermostat.set_fan_mode(fan_mode)
        self._signal_thermostat_update()

    @property
    def preset_mode(self):
        """Preset that is active."""
        return self._zone.get_preset()

    @property
    def preset_modes(self):
        """All presets."""
        return self._zone.get_presets()

    def set_humidity(self, humidity):
        """Dehumidify target."""
        # The library works with fractions (0-1); HA passes a percentage.
        self._thermostat.set_dehumidify_setpoint(humidity / 100.0)
        self._signal_thermostat_update()

    @property
    def target_humidity(self):
        """Humidity indoors setpoint."""
        # Dehumidify takes precedence when both are supported.
        if self._has_dehumidify_support:
            return percent_conv(self._thermostat.get_dehumidify_setpoint())
        if self._has_humidify_support:
            return percent_conv(self._thermostat.get_humidify_setpoint())
        return None

    @property
    def current_humidity(self):
        """Humidity indoors."""
        if self._has_relative_humidity:
            return percent_conv(self._thermostat.get_relative_humidity())
        return None

    @property
    def target_temperature(self):
        """Temperature we try to reach."""
        # Only meaningful in single-setpoint (heat-only / cool-only) modes.
        current_mode = self._zone.get_current_mode()
        if current_mode == OPERATION_MODE_COOL:
            return self._zone.get_cooling_setpoint()
        if current_mode == OPERATION_MODE_HEAT:
            return self._zone.get_heating_setpoint()
        return None

    @property
    def target_temperature_step(self):
        """Step size of temperature units."""
        if self._thermostat.get_unit() == UNIT_FAHRENHEIT:
            return 1.0
        return 0.5

    @property
    def target_temperature_high(self):
        """Highest temperature we are trying to reach."""
        # Only meaningful in dual-setpoint (auto) mode.
        current_mode = self._zone.get_current_mode()
        if current_mode in (OPERATION_MODE_COOL, OPERATION_MODE_HEAT):
            return None
        return self._zone.get_cooling_setpoint()

    @property
    def target_temperature_low(self):
        """Lowest temperature we are trying to reach."""
        current_mode = self._zone.get_current_mode()
        if current_mode in (OPERATION_MODE_COOL, OPERATION_MODE_HEAT):
            return None
        return self._zone.get_heating_setpoint()

    @property
    def hvac_action(self) -> str:
        """Operation ie. heat, cool, idle."""
        system_status = self._thermostat.get_system_status()
        zone_called = self._zone.is_calling()

        if self._zone.get_requested_mode() == OPERATION_MODE_OFF:
            return CURRENT_HVAC_OFF
        if not zone_called:
            return CURRENT_HVAC_IDLE
        if system_status == SYSTEM_STATUS_COOL:
            return CURRENT_HVAC_COOL
        if system_status == SYSTEM_STATUS_HEAT:
            return CURRENT_HVAC_HEAT
        if system_status == SYSTEM_STATUS_IDLE:
            return CURRENT_HVAC_IDLE
        return CURRENT_HVAC_IDLE

    @property
    def hvac_mode(self):
        """Return current mode, as the user-visible name."""
        mode = self._zone.get_requested_mode()
        hold = self._zone.is_in_permanent_hold()

        # If the device is in hold mode with
        # OPERATION_MODE_AUTO
        # overriding the schedule by still
        # heating and cooling to the
        # temp range.
        if hold and mode == OPERATION_MODE_AUTO:
            return HVAC_MODE_HEAT_COOL
        return NEXIA_TO_HA_HVAC_MODE_MAP[mode]

    @property
    def hvac_modes(self):
        """List of HVAC available modes."""
        return [
            HVAC_MODE_OFF,
            HVAC_MODE_AUTO,
            HVAC_MODE_HEAT_COOL,
            HVAC_MODE_HEAT,
            HVAC_MODE_COOL,
        ]

    def set_temperature(self, **kwargs):
        """Set target temperature."""
        new_heat_temp = kwargs.get(ATTR_TARGET_TEMP_LOW)
        new_cool_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH)
        set_temp = kwargs.get(ATTR_TEMPERATURE)

        deadband = self._thermostat.get_deadband()
        cur_cool_temp = self._zone.get_cooling_setpoint()
        cur_heat_temp = self._zone.get_heating_setpoint()
        (min_temp, max_temp) = self._thermostat.get_setpoint_limits()

        # Check that we're not going to hit any minimum or maximum values
        if new_heat_temp and new_heat_temp + deadband > max_temp:
            new_heat_temp = max_temp - deadband
        if new_cool_temp and new_cool_temp - deadband < min_temp:
            new_cool_temp = min_temp + deadband

        # Check that we're within the deadband range, fix it if we're not
        # NOTE(review): when only one of low/high is supplied, the
        # subtraction below operates on None (e.g. new_cool_temp is None
        # while new_heat_temp is set) and raises TypeError -- this code
        # appears to assume both or neither are given; confirm callers.
        if new_heat_temp and new_heat_temp != cur_heat_temp:
            if new_cool_temp - new_heat_temp < deadband:
                new_cool_temp = new_heat_temp + deadband

        if new_cool_temp and new_cool_temp != cur_cool_temp:
            if new_cool_temp - new_heat_temp < deadband:
                new_heat_temp = new_cool_temp - deadband

        self._zone.set_heat_cool_temp(
            heat_temperature=new_heat_temp,
            cool_temperature=new_cool_temp,
            set_temperature=set_temp,
        )
        self._signal_zone_update()

    @property
    def is_aux_heat(self):
        """Emergency heat state."""
        return self._thermostat.is_emergency_heat_active()

    @property
    def device_state_attributes(self):
        """Return the device specific state attributes."""
        data = super().device_state_attributes

        data[ATTR_ZONE_STATUS] = self._zone.get_status()

        if not self._has_relative_humidity:
            return data

        min_humidity = percent_conv(self._thermostat.get_humidity_setpoint_limits()[0])
        max_humidity = percent_conv(self._thermostat.get_humidity_setpoint_limits()[1])
        data.update(
            {
                ATTR_MIN_HUMIDITY: min_humidity,
                ATTR_MAX_HUMIDITY: max_humidity,
                ATTR_DEHUMIDIFY_SUPPORTED: self._has_dehumidify_support,
                ATTR_HUMIDIFY_SUPPORTED: self._has_humidify_support,
            }
        )
        if self._has_dehumidify_support:
            dehumdify_setpoint = percent_conv(
                self._thermostat.get_dehumidify_setpoint()
            )
            data[ATTR_DEHUMIDIFY_SETPOINT] = dehumdify_setpoint

        if self._has_humidify_support:
            humdify_setpoint = percent_conv(self._thermostat.get_humidify_setpoint())
            data[ATTR_HUMIDIFY_SETPOINT] = humdify_setpoint
        return data

    def set_preset_mode(self, preset_mode: str):
        """Set the preset mode."""
        self._zone.set_preset(preset_mode)
        self._signal_zone_update()

    def turn_aux_heat_off(self):
        """Turn. Aux Heat off."""
        self._thermostat.set_emergency_heat(False)
        self._signal_thermostat_update()

    def turn_aux_heat_on(self):
        """Turn. Aux Heat on."""
        self._thermostat.set_emergency_heat(True)
        self._signal_thermostat_update()

    def turn_off(self):
        """Turn. off the zone."""
        # NOTE(review): passes a nexia OPERATION_MODE_* constant into
        # set_hvac_mode, which looks up HA_TO_NEXIA_HVAC_MODE_MAP with an
        # HA HVAC_MODE_* key -- only correct if the constants share
        # values; confirm against the nexia library.
        self.set_hvac_mode(OPERATION_MODE_OFF)
        self._signal_zone_update()

    def turn_on(self):
        """Turn. on the zone."""
        # NOTE(review): same constant-namespace concern as turn_off().
        self.set_hvac_mode(OPERATION_MODE_AUTO)
        self._signal_zone_update()

    def set_hvac_mode(self, hvac_mode: str) -> None:
        """Set the system mode (Auto, Heat_Cool, Cool, Heat, etc)."""
        if hvac_mode == HVAC_MODE_AUTO:
            # AUTO means "follow the schedule" -- drop any hold.
            self._zone.call_return_to_schedule()
            self._zone.set_mode(mode=OPERATION_MODE_AUTO)
        else:
            # Any explicit mode implies a permanent hold.
            self._zone.call_permanent_hold()
            self._zone.set_mode(mode=HA_TO_NEXIA_HVAC_MODE_MAP[hvac_mode])

        self.schedule_update_ha_state()

    def set_aircleaner_mode(self, aircleaner_mode):
        """Set the aircleaner mode."""
        self._thermostat.set_air_cleaner(aircleaner_mode)
        self._signal_thermostat_update()

    def set_humidify_setpoint(self, humidity):
        """Set the humidify setpoint."""
        # The library works with fractions (0-1); HA passes a percentage.
        self._thermostat.set_humidify_setpoint(humidity / 100.0)
        self._signal_thermostat_update()

    def _signal_thermostat_update(self):
        """Signal a thermostat update.

        Whenever the underlying library does an action against
        a thermostat, the data for the thermostat and all
        connected zone is updated.

        Update all the zones on the thermostat.
        """
        dispatcher_send(
            self.hass, f"{SIGNAL_THERMOSTAT_UPDATE}-{self._thermostat.thermostat_id}"
        )

    def _signal_zone_update(self):
        """Signal a zone update.

        Whenever the underlying library does an action against
        a zone, the data for the zone is updated.

        Update a single zone.
        """
        dispatcher_send(self.hass, f"{SIGNAL_ZONE_UPDATE}-{self._zone.zone_id}")

    async def async_update(self):
        """Update the entity.

        Only used by the generic entity update service.
        """
        await self._coordinator.async_request_refresh()
|
|
from __future__ import print_function

import copy
import os
import sys
import time

import pandas as pd
import requests
from googleapiclient.errors import HttpError
from googleapiclient.http import BatchHttpRequest

from .. base import ClientBase
class Client(ClientBase):
    """Google Compute Engine client built on the googleapiclient discovery API."""

    # OAuth scope and discovery parameters for the Compute Engine service.
    __ENDPOINT_GCE = "https://www.googleapis.com/auth/compute"
    __API_NAME = "compute"
    __API_VERSION = "beta"
    __API_URI = "https://www.googleapis.com/{0}/{1}/".format(__API_NAME, __API_VERSION)
def __init__(self, project_id, keyfile_path=None, account_email=None):
    """Create the client and build the authorized Compute Engine service."""
    super(Client, self).__init__(project_id, keyfile_path, account_email)
    credentials, service = super(Client, self)._build_service(
        Client.__API_NAME, Client.__API_VERSION, Client.__ENDPOINT_GCE
    )
    self._cecredentials = credentials
    self._ceservice = service
def get_ceservice(self):
    """Return the underlying googleapiclient Compute Engine service object."""
    return self._ceservice
def _try_batch_execute(self, batch, retry=3):
    """Execute a batch request, refreshing credentials between attempts.

    A TypeError from batch.execute() is treated as a stale-credentials
    symptom (NOTE(review): confirm this against the auth stack in use)
    and triggers a refresh plus a retry, up to `retry` attempts.

    Fix: previously the method returned silently when every attempt
    failed, so the batch was never executed and no error surfaced; the
    final failure is now re-raised.
    """
    for attempt in range(retry):
        try:
            batch.execute()
            return
        except TypeError:
            if attempt == retry - 1:
                raise  # out of retries -- surface the failure to the caller
            from httplib2 import Http
            self._cecredentials.refresh(Http())
@staticmethod
def _check_exception(request_id, response, exception):
    """Batch callback: re-raise any per-request exception."""
    if exception is None:
        return
    raise Exception(exception)
@staticmethod
def _clean_ipcontroller(profile):
    """Kill any running ipcontroller for `profile` and wipe its state files."""
    # Assembled command:
    #   kill -9 `ps -ef | grep 'ipcontroller start --profile X' | grep -v grep | awk '{print $2}'`
    # NOTE(review): `profile` is interpolated into shell commands without
    # quoting -- only safe for trusted profile names.  All return codes
    # are ignored (best-effort cleanup).
    c0 = "'{print $2}'`"
    c1 = "kill -9 `ps -ef | grep 'ipcontroller start --profile {0}' | grep -v 'grep' |awk ".format(profile)
    ret = os.system(c1 + c0)
    ret = os.system("rm -f ~/.ipython/profile_{0}/pid/*".format(profile))
    ret = os.system("rm -f ~/.ipython/profile_{0}/security/*".format(profile))
def get_current_instance_metadata(self, param):
    """Query the GCE metadata server for one instance attribute."""
    url = 'http://metadata.google.internal/computeMetadata/v1/instance/{0}'.format(param)
    response = requests.get(url, headers={"Metadata-Flavor": "Google"})
    return response.text
def get_current_instance_name(self):
    """Short hostname of the instance this code is running on."""
    return self.get_current_instance_metadata("hostname").split(".")[0]
def get_current_instance_zone(self):
    """Zone name (last path segment of the zone metadata) of this instance."""
    return self.get_current_instance_metadata("zone").split("/")[-1]
def get_current_instance_disk(self):
    """Device name of this instance's first attached disk."""
    return self.get_current_instance_metadata("disks/0/device-name")
def get_current_instance(self):
    """Fetch the full API resource for the instance this code runs on."""
    name = self.get_current_instance_name()
    zone = self.get_current_instance_zone()
    return self.get_instance(zone=zone, name=name)
def stop_current_instance(self):
    """Stop the instance this code is running on."""
    instance = self.get_current_instance_name()
    zone = self.get_current_instance_zone()
    self.stop_instance(zone, instance)
def create_current_snapshot(self, name):
    """Snapshot the current instance's first disk under `name`."""
    zone = self.get_current_instance_zone()
    source_disk = self.get_current_instance_disk()
    self.create_snapshot(name, zone, source_disk)
def get_instance(self, zone, name):
    """Return the API resource for instance `name` in `zone`."""
    request = self._ceservice.instances().get(
        project=self._project_id, zone=zone, instance=name
    )
    return self._try_execute(request)
def list_instance(self, zone, filter_=None):
    """List instances in `zone`, optionally restricted by an API filter string."""
    request = self._ceservice.instances().list(
        project=self._project_id, zone=zone, filter=filter_
    )
    return self._try_execute(request)
def create_instance(self, zone, names,
                    mtype, disks=None, image=None, sizegb=10, preemptible=False,
                    network=None, external_ip=False,
                    metadata=None, tags=None, config=None):
    """Create one or more instances and poll until none is PROVISIONING.

    Instances boot either from existing `disks` (one per name, paired in
    order) or from a source `image` of `sizegb` GB.  Extra API fields can
    be merged in through `config`.

    Fixes:
    - `config` had a mutable default ({}), shared across calls;
    - request bodies were built with dict.copy(), so the nested 'disks'
      list was shared and every request ended up with the *last* disk's
      'source' -- bodies are now deep-copied;
    - the status-poll loop removed items from the list it was iterating,
      skipping entries -- it now iterates a snapshot;
    - removed an unused inner check_exception() (the batch already uses
      Client._check_exception).
    """
    if config is None:
        config = {}
    current_instance = self.get_current_instance()
    if network is None:
        # Default to the network of the instance we are running on.
        network = current_instance["networkInterfaces"][0]["network"]
    init_config = {
        'machineType': "zones/{0}/machineTypes/{1}".format(zone, mtype),
        'disks': [
            {
                'boot': True,
                'autoDelete': True
            }
        ],
        'networkInterfaces': [
            {
                "network": network
            }
        ],
        'scheduling': {
            'preemptible': preemptible
        },
        'tags': {
            'items': []
        },
        'serviceAccounts': [{
            'email': 'default',
            'scopes': ["https://www.googleapis.com/auth/cloud-platform"]
        }]
    }
    if external_ip:
        access_configs = [{"type": "ONE_TO_ONE_NAT", "name": "External NAT"}]
        init_config["networkInterfaces"][0]["accessConfigs"] = access_configs
    if tags is not None:
        if isinstance(tags, str):
            tags = [tags]
        init_config["tags"]["items"] = tags
    if metadata is not None:
        init_config["metadata"] = metadata
    init_config.update(config)
    if image is None and disks is None:
        raise Exception("You must input image or disks!")
    if isinstance(names, str):
        names = [names]
    batch = self._ceservice.new_batch_http_request(callback=Client._check_exception)
    instances = self._ceservice.instances()
    if disks is not None:
        if isinstance(disks, str):
            disks = [disks]
        if len(names) != len(disks):
            raise Exception("instance num({0}) must be equal to disks num({1})!".format(len(names), len(disks)))
        for name, disk in zip(names, disks):
            # Deep copy: each request body must own its nested dicts.
            body = copy.deepcopy(init_config)
            body["name"] = name
            body["disks"][0]["source"] = "zones/{0}/disks/{1}".format(zone, disk)
            batch.add(instances.insert(project=self._project_id, zone=zone, body=body))
    elif image is not None:
        init_config["disks"][0]["initializeParams"] = {'diskSizeGb': sizegb, 'sourceImage': image}
        for name in names:
            body = copy.deepcopy(init_config)
            body["name"] = name
            batch.add(instances.insert(project=self._project_id, zone=zone, body=body))
    self._try_batch_execute(batch)
    # Poll until every requested instance has left the PROVISIONING state.
    check_names = list(names)
    failed_names = []
    nall = len(check_names)
    wait_second = 0
    while check_names:
        for check_name in list(check_names):  # snapshot: we mutate check_names
            resp = instances.get(project=self._project_id, zone=zone, instance=check_name).execute()
            if resp["status"] in ["RUNNING", "SUSPENDED", "TERMINATED"]:
                check_names.remove(check_name)
                if resp["status"] == "SUSPENDED":
                    failed_names.append(check_name)
            nfailed = len(failed_names)
            ndoing = len(check_names)
            ndone = nall - nfailed - ndoing
            print("\r[CREATE INSTANCE] RUNNING: {0}, SUSPENDED: {1}, PROVISIONING: {2} (waiting {3}s)".format(ndone, nfailed, ndoing, wait_second), end="")
            time.sleep(1)
            wait_second += 1
    print("\r[CREATE INSTANCE] RUNNING: {0}, SUSPENDED: {1} (waited {2}s) \n".format(ndone, nfailed, wait_second), end="")
def delete_instance(self, zone, names):
    """Batch-delete one or more instances in `zone`."""
    if isinstance(names, str):
        names = [names]
    batch = self._ceservice.new_batch_http_request(callback=Client._check_exception)
    instances = self._ceservice.instances()
    for instance_name in names:
        batch.add(
            instances.delete(project=self._project_id, zone=zone, instance=instance_name)
        )
    self._try_batch_execute(batch)
def stop_instance(self, zone, names):
    """Batch-stop one or more instances in `zone`."""
    if isinstance(names, str):
        names = [names]
    batch = self._ceservice.new_batch_http_request(callback=Client._check_exception)
    instances = self._ceservice.instances()
    for instance_name in names:
        batch.add(
            instances.stop(project=self._project_id, zone=zone, instance=instance_name)
        )
    self._try_batch_execute(batch)
def start_instance(self, zone, names):
    """Batch-start one or more stopped instances in `zone`.

    Fix: this previously called instances.insert() -- the *create* call --
    instead of instances.start(), so stopped instances were never started
    (and insert() does not accept an `instance` argument).
    """
    if isinstance(names, str):
        names = [names]
    batch = self._ceservice.new_batch_http_request(callback=Client._check_exception)
    instances = self._ceservice.instances()
    for name in names:
        req = instances.start(project=self._project_id,
                              zone=zone,
                              instance=name)
        batch.add(req)
    self._try_batch_execute(batch)
def create_disk(self, zone, names, snapshot=None, image=None):
    """Create one disk per name from a snapshot or image and poll to completion.

    Fixes:
    - the status-poll loop removed items from the list it was iterating,
      which skips entries -- it now iterates a snapshot of the list;
    - removed an unused inner check_exception() (the batch already uses
      Client._check_exception).
    """
    if snapshot is not None:
        # Normalize the snapshot reference to a fully-qualified path.
        if snapshot.startswith("global"):
            snapshot = "projects/{0}/{1}".format(self._project_id, snapshot)
        elif not snapshot.startswith("projects"):
            snapshot = "projects/{0}/global/snapshots/{1}".format(self._project_id, snapshot)
        config = {
            "sourceSnapshot": snapshot
        }
    elif image is not None:
        config = {
            "sourceImage": image
        }
    else:
        raise Exception("Both snapshot and image are None! Input at least one!")
    if isinstance(names, str):
        names = [names]
    batch = self._ceservice.new_batch_http_request(callback=Client._check_exception)
    disks = self._ceservice.disks()
    for name in names:
        body = config.copy()  # shallow copy is fine: config is a flat dict
        body.update({"name": name})
        job = disks.insert(project=self._project_id, zone=zone, body=body)
        batch.add(job)
    self._try_batch_execute(batch)
    # Poll until every requested disk has reached a terminal/ready state.
    check_names = list(names)
    nall = len(check_names)
    failed_names = []
    wait_second = 0
    while check_names:
        for check_name in list(check_names):  # snapshot: we mutate check_names
            resp = disks.get(project=self._project_id, zone=zone, disk=check_name).execute()
            if resp["status"] in ["DONE", "FAILED", "READY"]:
                check_names.remove(check_name)
                if resp["status"] == "FAILED":
                    failed_names.append(check_name)
            nfailed = len(failed_names)
            ndoing = len(check_names)
            ndone = nall - nfailed - ndoing
            print("\r[CREATE DISK] DONE: {0}, FAILED: {1}, DOING: {2} (waiting {3}s)".format(ndone, nfailed, ndoing, wait_second), end="")
            time.sleep(1)
            wait_second += 1
    print("\r[CREATE DISK] DONE: {0}, FAILED: {1} (waited {2}s) \n".format(ndone, nfailed, wait_second), end="")
def delete_disk(self, zone, disk):
disks = self._ceservice.disks()
req = disks.delete(project=self._project_id, zone=zone, disk=disk)
resp = self._try_execute(req)
def resize_disk(self, zone, disk, sizegb):
config = {
"sizeGb": sizegb
}
disks = self._ceservice.disks()
req = disks.resize(project=self._project_id, zone=zone, disk=disk, body=config)
resp = self._try_execute(req)
def create_image(self, name, disk, snapshot=None):
config = {
"name": name,
"rawDisk": {
"source": "https://www.googleapis.com/compute/v1/projects/{0}/global/snapshots/{1}".format(self._project_id, snapshot_name)
}
}
images = self._ceservice.images()
req = images.insert(project=self._project_id, body=config)
resp = self._try_execute(req)
wait_second = 0
status = resp["status"]
while "DONE" != status:
print("\r[CREATE IMAGE] {0} (waiting second: {1}s, {2}%)".format(status, wait_second, resp["progress"]), end="")
time.sleep(5)
wait_second += 5
resp = images.get(project=self._project_id, image=name).execute()
status = resp["status"]
print("\r[CREATE IMAGE] DONE (waited second: {0}s)\n".format(wait_second))
if 'error' in resp:
raise Exception(resp['error'])
return resp
def delete_image(self, name):
images = self._ceservice.images()
req = images.delete(project=self._project_id, image=name)
resp = self._try_execute(req)
    def create_snapshot(self, name, zone, disk):
        """Snapshot *disk* (in *zone*) under the name *name*.

        Blocks, polling the snapshot resource once per second and printing
        progress, until it reaches a terminal status.  Returns the final
        snapshot resource dict; raises if the snapshot ends up FAILED.
        """
        config = {
            "name": name
        }
        disks = self._ceservice.disks()
        req = disks.createSnapshot(project=self._project_id,
                                   zone=zone, disk=disk,
                                   body=config)
        resp = self._try_execute(req)
        snapshots = self._ceservice.snapshots()
        wait_second = 0
        # initial status comes from the createSnapshot operation response
        status = resp["status"]
        while status not in ["DONE", "FAILED", "READY"]:
            print("\r[CREATE SNAPSHOT] {0} (waiting {1}s)".format(status, wait_second), end="")
            time.sleep(1)
            wait_second += 1
            # re-read status from the snapshot resource itself
            resp = snapshots.get(project=self._project_id, snapshot=name).execute()
            status = resp["status"]
        print("\r[CREATE SNAPSHOT] {0} (waited {1}s)\n".format(status, wait_second), end="")
        if status == "FAILED":
            raise Exception("Failed to create snapshot from disk: {0}".format(disk))
        return resp
def delete_snapshot(self, name):
snapshots = service.snapshots()
req = snapshots.delete(project=self._project_id, snapshot=name)
resp = self._try_execute(req)
    def _check_params(self, zone, network, mtype, itype, core, pnum):
        """Fill in defaulted cluster parameters from the current instance.

        Returns the tuple ``(zone, network, mtype, pnum, network_ip)``.

        NOTE(review): when *mtype* is passed explicitly, *core* is not
        validated and *pnum* may be returned as None — confirm that callers
        tolerate that.
        """
        current_instance = self.get_current_instance()
        if zone is None:
            # default to the zone this code is running in
            zone = current_instance["zone"].split("/")[-1]
        if network is None:
            # default to the current instance's first network interface
            network = current_instance["networkInterfaces"][0]["network"]
        if mtype is None:
            # derive the GCE machine-type name from itype/core
            if itype == "micro" or itype == "small":
                prefix = "f1" if itype == "micro" else "g1"
                mtype = "{0}-{1}".format(prefix, itype)
                core = 1
            elif itype in ["standard","highmem","highcpu"]:
                if core not in [1,2,4,8,16,32]:
                    raise Exception("core must be 1,2,4,8,16,32!")
                mtype = "n1-{0}-{1}".format(itype, core)
            else:
                raise Exception("itype must be standard,highmem,highcpu,small,micro!")
            # default the engine-process count to the core count
            pnum = core if pnum is None else pnum
        network_ip = current_instance["networkInterfaces"][0]["networkIP"]
        return zone, network, mtype, pnum, network_ip
def _create_disks_from_snapshot(self, zone, names, snapshot):
create_temp_snapshot = False
if snapshot is None:
disk = self.get_current_instance_disk()
snapshot = "{0}-{1}-{2}".format(os.uname()[1], os.getpid(), int(time.time()))
self.create_snapshot(snapshot, zone, disk)
create_temp_snapshot = True
self.create_disk(zone, names, snapshot=snapshot)
if create_temp_snapshot:
self.delete_snapshot(snapshot)
return snapshot
def _start_wait_ipcontroller(self, profile, network_ip, engine_file_path):
command = "ipcontroller start --profile {0} --ip {1} &".format(profile, network_ip)
ret = os.system(command)
if ret != 0:
raise Exception("Failed to start ipcontroller on this host!")
retry = 30
while not os.path.isfile(engine_file_path):
time.sleep(1)
retry -= 1
if retry < 0:
raise Exception("Failed to create engine file: {0}".format(engine_file_path))
return engine_file_path
def _create_startup_script(self, profile, engine_file_path, pnum):
with open(engine_file_path, "r") as engine_file:
engine_file_content = engine_file.read()
startup_script = """
#! /bin/bash
ls -l /usr/local/bin
ipython profile create --parallel --profile={0}
rm -f ~/.ipython/profile_{0}/pid/*
rm -f ~/.ipython/profile_{0}/security/*
cat <<EOF > ~/.ipython/profile_{0}/security/ipcontroller-engine.json
{1}
EOF
ipcluster engines --profile {0} -n {2} --daemonize
""".format(profile, engine_file_content, core if pnum is None else pnum)
return startup_script
    def create_ipcluster(self, profile, itype="standard", core=1, num=1, pnum=None,
                         image=None, sizegb=10, snapshot=None, preemptible=False,
                         zone=None, network=None, mtype=None, external_ip=True, config=None):
        """Create an IPython parallel cluster: a local ipcontroller plus *num*
        GCE instances each running *pnum* ipengines for *profile*.

        At most one of *snapshot*/*image* may be given as the boot-disk
        source; with neither, a temporary snapshot of this instance's own
        disk is used.  Unset zone/network/mtype default from the current
        instance via _check_params.

        NOTE(review): *config* is accepted but never used in this method —
        confirm whether it was meant to be forwarded to create_instance.
        """
        if snapshot is not None and image is not None:
            raise Exception("Both snapshot and image filled! chose one!")
        # check existing profile
        profile_dir = os.path.expanduser('~/.ipython/profile_{0}'.format(profile))
        if os.path.isdir(profile_dir):
            # reuse the profile but stop any stale controller state
            Client._clean_ipcontroller(profile)
        else:
            ret = os.system('ipython profile create --parallel --profile={0}'.format(profile))
            if ret != 0:
                raise Exception("Failed to create profile {0}".format(profile))
        zone, network, mtype, pnum, network_ip = self._check_params(zone, network, mtype, itype, core, pnum)
        names = ["ipcluster-{0}-{1}".format(profile, no) for no in range(num)]
        if image is None:
            # boot disks are cloned from a (possibly temporary) snapshot
            snapshot = self._create_disks_from_snapshot(zone, names, snapshot)
        engine_file_path = profile_dir + "/security/ipcontroller-engine.json"
        self._start_wait_ipcontroller(profile, network_ip, engine_file_path)
        startup_script = self._create_startup_script(profile, engine_file_path, pnum)
        # create instances for ipengines.
        metadata = {"items": [{"key": "startup-script", "value": startup_script}]}
        if snapshot is not None:
            # attach the per-instance disks created above
            self.create_instance(zone=zone, names=names, mtype=mtype, disks=names,
                                 external_ip=external_ip, network=network, preemptible=preemptible,
                                 metadata=metadata)
        else:
            # boot straight from the image instead
            self.create_instance(zone=zone, names=names, mtype=mtype, image=image, sizegb=sizegb,
                                 external_ip=external_ip, network=network, preemptible=preemptible,
                                 metadata=metadata)
    def add_ipengine(self, profile, itype="standard", core=1, num=1, pnum=None,
                     image=None, sizegb=10, snapshot=None, preemptible=False,
                     mtype=None, external_ip=True, config=None):
        """Add *num* more engine instances to an already-running cluster.

        New instances continue numbering after the highest existing
        ``ipcluster-<profile>-N`` instance.  Requires the ipcontroller
        profile to exist locally (i.e. create_ipcluster already ran).

        NOTE(review): *config* is accepted but never used in this method.
        """
        if snapshot is not None and image is not None:
            raise Exception("Both snapshot and image filled! chose one!")
        # check existing profile
        profile_dir = os.path.expanduser('~/.ipython/profile_{0}'.format(profile))
        if not os.path.isdir(profile_dir):
            raise Exception("No profile {0}".format(profile))
        zone, network, mtype, pnum, network_ip = self._check_params(None, None, mtype, itype, core, pnum)
        current_names = self.get_ipcluster_instance(profile)
        # next free index: one past the highest existing instance suffix
        sindex = max([int(name.split("-")[-1]) for name in current_names]) + 1 if current_names else 0
        names = ["ipcluster-{0}-{1}".format(profile, no) for no in range(sindex,num+sindex)]
        if image is None:
            # boot disks are cloned from a (possibly temporary) snapshot
            snapshot = self._create_disks_from_snapshot(zone, names, snapshot)
        engine_file_path = profile_dir + "/security/ipcontroller-engine.json"
        startup_script = self._create_startup_script(profile, engine_file_path, pnum)
        # create instances for ipengines.
        metadata = {"items": [{"key": "startup-script", "value": startup_script}]}
        if snapshot is not None:
            # attach the per-instance disks created above
            self.create_instance(zone=zone, names=names, mtype=mtype, disks=names,
                                 external_ip=external_ip, network=network, preemptible=preemptible,
                                 metadata=metadata)
        else:
            # boot straight from the image instead
            self.create_instance(zone=zone, names=names, mtype=mtype, image=image, sizegb=sizegb,
                                 external_ip=external_ip, network=network, preemptible=preemptible,
                                 metadata=metadata)
def delete_ipcluster(self, profile):
zone = self.get_current_instance_zone()
names = self.get_ipcluster_instance(profile, zone)
self.delete_instance(zone, names)
Client._clean_ipcontroller(profile)
def get_ipcluster_instance(self, profile, zone=None):
if zone is None:
zone = self.get_current_instance_zone()
filter_str = "name eq ipcluster-{0}-[0-9]+".format(profile)
instances = self.list_instance(zone, filter_str)
if len(instances["items"]) == 0:
return []
names = [instance["name"] for instance in instances["items"]]
return names
|
|
#!/usr/bin/env python
from __future__ import print_function
# GTK Interactive Console
# (C) 2003, Jon Anderson
# See www.python.org/2.2/license.html for
# license details.
#
import gtk
import gtk.gdk
import code
import os
import sys
import pango
import __builtin__
import __main__
# Greeting printed when the console starts: the interpreter version plus
# matplotlib usage hints.
banner = """GTK Interactive Python Console
Thanks to Jon Anderson
%s
""" % sys.version
banner += """
Welcome to matplotlib.
help(matplotlib) -- some general information about matplotlib
help(plotting) -- shows a list of plot specific commands
"""
class Completer(object):
    """
    Tab-completion helper adapted from rlcompleter, with the readline
    dependency removed and an extra local dictionary to search.
    """
    def __init__(self, locals):
        self.locals = locals

    def complete(self, text, state):
        """Return completion number *state* for *text*, or None when exhausted.

        Called successively with state == 0, 1, 2, ...; the candidate list is
        (re)computed only on state == 0.  Each completion begins with *text*.
        """
        if state == 0:
            if "." in text:
                self.matches = self.attr_matches(text)
            else:
                self.matches = self.global_matches(text)
        try:
            return self.matches[state]
        except IndexError:
            return None

    def global_matches(self, text):
        """Match a bare name against keywords, builtins, __main__ and the
        local namespace."""
        import keyword
        n = len(text)
        matches = []
        namespaces = [keyword.kwlist, __builtin__.__dict__.keys(),
                      __main__.__dict__.keys(), self.locals.keys()]
        for namespace in namespaces:
            for word in namespace:
                if word[:n] == text and word != "__builtins__":
                    matches.append(word)
        return matches

    def attr_matches(self, text):
        """Complete ``NAME.NAME....attr`` by evaluating the leading expression
        in __main__'s globals and listing its attributes (including inherited
        class members).

        WARNING: this can still invoke arbitrary C code, if an object
        with a __getattr__ hook is evaluated.
        """
        import re
        m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
        if not m:
            return
        expr, attr = m.group(1, 3)
        obj = eval(expr, __main__.__dict__, self.locals)
        words = dir(obj)
        if hasattr(obj, '__class__'):
            words.append('__class__')
            words = words + get_class_members(obj.__class__)
        n = len(attr)
        return ["%s.%s" % (expr, word)
                for word in words
                if word[:n] == attr and word != "__builtins__"]
def get_class_members(klass):
    """Return dir(klass) plus the members of every base class, recursively."""
    members = list(dir(klass))
    for base in getattr(klass, '__bases__', ()):
        members += get_class_members(base)
    return members
class OutputStream(object):
    """
    A multiplexing (tee-style) output stream.

    It can stand in for another stream: every write is forwarded to the
    original stream (while ``tee`` is truthy) and also appended, styled,
    to a GTK text view's buffer.
    """
    def __init__(self, view, old_out, style):
        self.view = view
        self.buffer = view.get_buffer()
        # mark kept at the end of the buffer so the view can auto-scroll
        self.mark = self.buffer.create_mark("End", self.buffer.get_end_iter(), False)
        self.out = old_out
        self.style = style
        self.tee = 1

    def write(self, text):
        """Forward *text* to the wrapped stream, then append it to the view."""
        if self.tee:
            self.out.write(text)
        end_iter = self.buffer.get_end_iter()
        if self.view is not None:
            self.view.scroll_to_mark(self.mark, 0, True, 1, 1)
        self.buffer.insert_with_tags(end_iter, text, self.style)
class GTKInterpreterConsole(gtk.ScrolledWindow):
    """
    An InteractiveConsole for GTK. It's an actual widget,
    so it can be dropped in just about anywhere.

    Keyboard handling: Return executes, Up/Down walk the history, Home jumps
    past the prompt, Ctrl-Space triggers tab-completion.
    NOTE(review): constructing this widget redirects the process-wide
    sys.stdout/sys.stderr into the view — confirm that is intended for
    embedders.
    """
    def __init__(self):
        gtk.ScrolledWindow.__init__(self)
        self.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self.text = gtk.TextView()
        self.text.set_wrap_mode(True)
        self.interpreter = code.InteractiveInterpreter()
        # completion searches the interpreter's own namespace
        self.completer = Completer(self.interpreter.locals)
        self.buffer = []  # lines of the statement currently being entered
        self.history = []  # every non-empty line ever submitted
        self.banner = banner
        self.ps1 = ">>> "
        self.ps2 = "... "
        self.text.add_events(gtk.gdk.KEY_PRESS_MASK)
        self.text.connect("key_press_event", self.key_pressed)
        # history cursor: 0 = live line, negative = offset back into history
        self.current_history = -1
        self.mark = self.text.get_buffer().create_mark("End", self.text.get_buffer().get_end_iter(), False)
        # setup colors
        self.style_banner = gtk.TextTag("banner")
        self.style_banner.set_property("foreground", "saddle brown")
        self.style_ps1 = gtk.TextTag("ps1")
        self.style_ps1.set_property("foreground", "DarkOrchid4")
        self.style_ps1.set_property("editable", False)
        self.style_ps1.set_property("font", "courier")
        self.style_ps2 = gtk.TextTag("ps2")
        self.style_ps2.set_property("foreground", "DarkOliveGreen")
        self.style_ps2.set_property("editable", False)
        self.style_ps2.set_property("font", "courier")
        self.style_out = gtk.TextTag("stdout")
        self.style_out.set_property("foreground", "midnight blue")
        self.style_err = gtk.TextTag("stderr")
        self.style_err.set_property("style", pango.STYLE_ITALIC)
        self.style_err.set_property("foreground", "red")
        self.text.get_buffer().get_tag_table().add(self.style_banner)
        self.text.get_buffer().get_tag_table().add(self.style_ps1)
        self.text.get_buffer().get_tag_table().add(self.style_ps2)
        self.text.get_buffer().get_tag_table().add(self.style_out)
        self.text.get_buffer().get_tag_table().add(self.style_err)
        # tee stdout/stderr into the text view (and the original streams)
        self.stdout = OutputStream(self.text, sys.stdout, self.style_out)
        self.stderr = OutputStream(self.text, sys.stderr, self.style_err)
        sys.stderr = self.stderr
        sys.stdout = self.stdout
        self.current_prompt = None
        self.write_line(self.banner, self.style_banner)
        self.prompt_ps1()
        self.add(self.text)
        self.text.show()
    def reset_history(self):
        """Forget all previously entered lines."""
        self.history = []
    def reset_buffer(self):
        """Drop the partially entered multi-line statement."""
        self.buffer = []
    def prompt_ps1(self):
        """Write the primary '>>> ' prompt."""
        self.current_prompt = self.prompt_ps1
        self.write_line(self.ps1, self.style_ps1)
    def prompt_ps2(self):
        """Write the continuation '... ' prompt."""
        self.current_prompt = self.prompt_ps2
        self.write_line(self.ps2, self.style_ps2)
    def write_line(self, text, style=None):
        """Append *text* to the buffer end, optionally styled, and scroll."""
        start, end = self.text.get_buffer().get_bounds()
        if style is None:
            self.text.get_buffer().insert(end, text)
        else:
            self.text.get_buffer().insert_with_tags(end, text, style)
        self.text.scroll_to_mark(self.mark, 0, True, 1, 1)
    def push(self, line):
        """Feed one line to the interpreter; return True if more input is needed."""
        self.buffer.append(line)
        if len(line) > 0:
            self.history.append(line)
        source = "\n".join(self.buffer)
        more = self.interpreter.runsource(source, "<<console>>")
        if not more:
            self.reset_buffer()
        return more
    def key_pressed(self, widget, event):
        """Intercept Return/Up/Down/Home/Ctrl-space before default editing."""
        if event.keyval == gtk.gdk.keyval_from_name('Return'):
            return self.execute_line()
        if event.keyval == gtk.gdk.keyval_from_name('Up'):
            # walk backwards through history, clamped at the oldest entry
            self.current_history = self.current_history - 1
            if self.current_history < - len(self.history):
                self.current_history = - len(self.history)
            return self.show_history()
        elif event.keyval == gtk.gdk.keyval_from_name('Down'):
            # walk forwards, clamped at the live (empty) line
            self.current_history = self.current_history + 1
            if self.current_history > 0:
                self.current_history = 0
            return self.show_history()
        elif event.keyval == gtk.gdk.keyval_from_name('Home'):
            # jump to just after the 4-character prompt, not column 0
            l = self.text.get_buffer().get_line_count() - 1
            start = self.text.get_buffer().get_iter_at_line_offset(l, 4)
            self.text.get_buffer().place_cursor(start)
            return True
        elif event.keyval == gtk.gdk.keyval_from_name('space') and event.state & gtk.gdk.CONTROL_MASK:
            return self.complete_line()
        return False
    def show_history(self):
        """Replace the edited line with the currently selected history entry."""
        if self.current_history == 0:
            return True
        else:
            self.replace_line(self.history[self.current_history])
            return True
    def current_line(self):
        """Return the text of the line being edited (prompt excluded)."""
        start, end = self.current_line_bounds()
        return self.text.get_buffer().get_text(start, end, True)
    def current_line_bounds(self):
        """Return (start, end) iters of the last line, skipping the prompt."""
        txt_buffer = self.text.get_buffer()
        l = txt_buffer.get_line_count() - 1
        start = txt_buffer.get_iter_at_line(l)
        if start.get_chars_in_line() >= 4:
            # skip the 4-character ps1/ps2 prompt
            start.forward_chars(4)
        end = txt_buffer.get_end_iter()
        return start, end
    def replace_line(self, txt):
        """Overwrite the line being edited with *txt*."""
        start, end = self.current_line_bounds()
        self.text.get_buffer().delete(start, end)
        self.write_line(txt)
    def execute_line(self, line=None):
        """Run the current (or given) line, then print the next prompt."""
        if line is None:
            line = self.current_line()
            self.write_line("\n")
        else:
            self.write_line(line + "\n")
        more = self.push(line)
        self.text.get_buffer().place_cursor(self.text.get_buffer().get_end_iter())
        if more:
            self.prompt_ps2()
        else:
            self.prompt_ps1()
        self.current_history = 0
        self.window.raise_()
        return True
    def complete_line(self):
        """Complete the last whitespace-separated token of the current line.

        A unique match replaces the token in place; multiple matches are
        listed and the prompt plus the original line are re-printed.
        """
        line = self.current_line()
        tokens = line.split()
        token = tokens[-1]
        completions = []
        p = self.completer.complete(token, len(completions))
        while p is not None:
            completions.append(p)
            p = self.completer.complete(token, len(completions))
        if len(completions) != 1:
            self.write_line("\n")
            self.write_line("\n".join(completions), self.style_ps1)
            self.write_line("\n")
            self.current_prompt()
            self.write_line(line)
        else:
            i = line.rfind(token)
            line = line[0:i] + completions[0]
            self.replace_line(line)
        return True
def main():
    """Open a window hosting the console, wire Ctrl-D / destroy handlers,
    bootstrap matplotlib/pylab, optionally replay a script named on the
    command line, then enter the GTK main loop."""
    w = gtk.Window()
    console = GTKInterpreterConsole()
    console.set_size_request(640, 480)
    w.add(console)

    def destroy(arg=None):
        gtk.main_quit()

    def key_event(widget, event):
        # Ctrl-D quits, mirroring terminal EOF behaviour.
        if gtk.gdk.keyval_name(event.keyval) == 'd' and \
               event.state & gtk.gdk.CONTROL_MASK:
            destroy()
        return False
    w.connect("destroy", destroy)
    w.add_events(gtk.gdk.KEY_PRESS_MASK)
    w.connect('key_press_event', key_event)
    w.show_all()
    console.execute_line('import matplotlib')
    console.execute_line("matplotlib.use('GTKAgg')")
    console.execute_line('matplotlib.interactive(1)')
    console.execute_line('from pylab import *')
    if len(sys.argv) > 1:
        fname = sys.argv[1]
        if not os.path.exists(fname):
            # BUG FIX: the original printed the warning and then read the
            # missing file anyway, crashing with IOError.
            print('%s does not exist' % fname)
        else:
            # BUG FIX: file() is a Python-2-only builtin; open() works in
            # both Python 2 and 3, and the with-block closes the handle.
            with open(fname) as script:
                for line in script:
                    console.execute_line(line.strip())
    gtk.main()
|
|
# ######################################################################
# Copyright (c) 2014-2015, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import absolute_import, division, print_function
import six
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mticks
from .utils import multiline
import pandas as pd
import logging
logger = logging.getLogger(__name__)
"""
Plotting tools for X-Ray Speckle Visibility Spectroscopy(XSVS)
Corresponding analysis tools can be found in the `skxray.core.speckle` module
"""
def mean_intensity_plotter(ax, dataframe,
                           title="Mean intensities", xlabel="Frames",
                           ylabel="Mean Intensity", cmap=None):
    """
    This will plot mean intensities for ROIs of the labeled array
    for different image sets.

    Parameters
    ----------
    ax : list of Axes
        List of `Axes` objects. Should have length == len(dataframe.index)
    dataframe : pd.DataFrame
        The dataframe that has columns as different datasets (probably for
        different locations in the same sample) and rows as ROIs
    title : str, optional
        title of the plot
    xlabel : str, optional
        x axis label
    ylabel : str, optional
        y axis label
    cmap : str, optional
        Matplotlib string name of colormap (see matplotlib.pyplot.colormaps()
        for a list of valid colormaps on your machine)

    Returns
    -------
    artists : pd.DataFrame
        Pandas DataFrame whose column/row names match the input `dataframe`
    """
    if cmap is None:
        # TODO don't use viridis in production, yet...
        if 'viridis' in plt.colormaps():
            cmap = 'viridis'
        else:
            cmap = 'rainbow'
    cmap = plt.get_cmap(cmap)
    ax[-1].set_xlabel(xlabel)
    # capture the artists in a nested dictionary
    artists = {col_name: {} for col_name in dataframe}
    # compute the x-range occupied by each column's data so datasets are
    # laid end-to-end on the frame axis
    offsets = []
    cur = 0
    prev = 0
    # BUG FIX: the .ix indexer was deprecated and then removed from pandas;
    # positional access is .iloc, label access is .loc.
    for data in dataframe.iloc[0]:
        cur = prev + len(data)
        offsets.append((prev, cur))
        prev = cur
    # loop over the rows of the dataframe
    for idx, row_label in enumerate(dataframe.index):
        # do some axes housekeeping
        ax[idx].set_ylabel(ylabel)
        ax[idx].set_title(title + ' for %s' % row_label)
        row = dataframe.loc[row_label]
        # loop over the columns of the dataframe, creating each line plot
        # one at a time
        for idx2, (column_name, color_idx) in enumerate(zip(
                dataframe, np.arange(0, 1, 1/len(row)))):
            x = range(*offsets[idx2])
            y = row.loc[column_name]
            art, = ax[idx].plot(x, y, label=column_name, color=cmap(color_idx))
            # store the artists in a nested dictionary
            artists[column_name][row_label] = art
        # enable the legend for each plot after all data has been added
        ax[idx].legend()
    return pd.DataFrame(artists)
def combine_intensity_plotter(ax, combine_intensity,
                              title="Mean Intensities - All Image Sets",
                              xlabel="Frames", ylabel="Mean Intensity",
                              labels=None):
    """
    Plot the combined intensities for all image sets on a single axes.

    Parameters
    ----------
    ax : Axes
        The matplotlib.axes.Axes object to add the roi data to
    combine_intensity : list
        List of intensities for each ROI. Each element in the list should be
        a 1-D array where the x-axis is understood to be frame number
    title : str, optional
        title of the plot
    xlabel : str, optional
        x axis label
    ylabel : str, optional
        y axis label
    labels : list, optional
        Names for each ROI data set; defaults to 'ROI 1', 'ROI 2', ...
        Should be the same length as `combine_intensity` when given.
    """
    num_rois = len(combine_intensity)
    if labels is None:
        labels = ['ROI ' + str(i + 1) for i in range(num_rois)]
    # multiline() expects one axes object per curve; hand it the same axes
    # repeatedly so every ROI lands on a single plot
    arts = multiline([ax] * num_rois, combine_intensity, labels)
    # do some housekeeping
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    ax.set_title(title)
    ax.legend()
    # return the artists
    return arts
def circular_average(ax, image_data, ring_averages, bin_centers,
                     im_title="Image Data", line_title="Circular Average",
                     line_xlabel="Bin Centers", line_ylabel="Ring Average",
                     im_kw=None, line_kw=None):
    """Plot image data alongside its circular average.

    Specific plot that was asked for by 11id at NSLS2.

    Parameters
    ----------
    ax : tuple, list, etc.
        Two axes. First is for displaying the image with imshow. Second is
        for plotting the circular average with semilogy
    image_data : array
    ring_averages : array
    bin_centers : array
    im_title : str, optional
        title for the image data
    line_title : str, optional
        title for the circular average of image data
    line_xlabel : str, optional
        x axis label for circular average plot
    line_ylabel : str, optional
        y axis label for circular average plot
    im_kw : dict, optional
        kwargs for the imshow axes
    line_kw : dict, optional
        kwargs for the semilogy axes

    Returns
    -------
    im : matplotlib.image.AxesImage
        The return value from imshow
    line : matplotlib.lines.Line2D
        The return value from semilogy
    """
    if im_kw is None:
        im_kw = {}
    if line_kw is None:
        line_kw = {}
    image_axes = ax[0]
    line_axes = ax[1]
    # image panel
    im = image_axes.imshow(image_data, **im_kw)
    image_axes.set_title(im_title)
    image_axes.figure.colorbar(im, ax=image_axes)
    # circular-average panel (log y)
    line, = line_axes.semilogy(bin_centers, ring_averages, **line_kw)
    line_axes.set_title(line_title)
    line_axes.set_xlabel(line_xlabel)
    line_axes.set_ylabel(line_ylabel)
    return (im, line)
def kymograph(ax, data, title="Kymograph", xlabel="Pixel",
              ylabel="Frame", fps=None, frame_offset=0, **im_kw):
    """Plot pixels (x, columns) versus frame number (y, rows) as an image.

    Note that the pixels in the resulting plot will not necessarily be
    square: the x- and y-axes have different units, and a free aspect
    maximizes the viewable space.

    Parameters
    ----------
    ax : Axes
        The matplotlib `Axes` object that the kymograph should be added to
    data : array
        2-D array; rows are frames, columns are pixels
    title : str, optional
        title of the plot
    xlabel : str, optional
        x axis label of the plot
    ylabel : str, optional
        y axis label
    fps : float, optional
        Convert frame number to seconds and display time on the y-axis
    frame_offset : int, optional
        The frame number to start counting from
    im_kw : dict
        kwargs to be passed to matplotlib's imshow function

    Returns
    -------
    im : matplotlib.image.AxesImage
        The return value from imshow
    cb : matplotlib.colorbar.Colorbar
        The colorbar for the image
    """
    extent = [0, data.shape[1], 0, data.shape[0]]
    if fps is not None:
        # re-label the y-axis in seconds instead of frames
        ylabel = 'Time (s)'
        start_time = frame_offset / fps
        extent[2] = start_time
        extent[3] = start_time + data.shape[0] / fps
    im_kw['extent'] = extent
    im = ax.imshow(data, **im_kw)
    # do the housekeeping
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_title(title)
    ax.set_aspect('auto')
    # integer-typed data gets integer colorbar ticks
    is_integer = 'int' in data.dtype.name
    cb = ax.figure.colorbar(im, ticks=mticks.MaxNLocator(integer=is_integer))
    return im, cb
def rois_as_lines(ax, data, title='Intensities - ROI ', xlabel='pixels',
                  ylabel='Intensity', labels=None):
    """Plot each entry in *data* in its own matplotlib line plot.

    Parameters
    ----------
    ax : list of Axes
        The matplotlib.axes.Axes objects in which to plot `data`
    data : list
        List of intensities. Each entry should be a 1-D numpy array; any
        other array is `ravel`ed into 1-D first
    title : str, optional
        Will be added above the top axes
    xlabel : str, optional
        x axis label. Will be added to the bottom axes
    ylabel : str, optional
        y axis label. Will be added to all axes
    labels : list, optional
        labels for the legend. Should be the same length as `data`;
        defaults to 'ROI_1', 'ROI_2', ...

    Returns
    -------
    arts : list
        List of matplotlib.lines.Line2D objects for further manipulation
    """
    # unused local ``num_rois`` removed — len(data) was used directly below
    # set the title on the first axes
    ax[0].set_title(title)
    if labels is None:
        labels = ['ROI_' + str(i+1) for i in range(len(data))]
    # flatten every dataset so multiline always receives 1-D arrays
    data = [d.ravel() for d in data]
    arts = multiline(ax, data, labels, ylabels=[ylabel] * len(data))
    # set the x axis label on the last axes
    ax[-1].set_xlabel(xlabel)
    return arts
|
|
# encoding: utf-8
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    def forwards(self, orm):
        # Widen auth_user.email from Django's historical 75-char default to
        # 254 chars (RFC 5321 maximum) so long addresses are accepted.
        db.alter_column('auth_user', 'email', models.CharField(max_length=254, blank=True))
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'canvas.apiapp': {
'Meta': {'object_name': 'APIApp'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'canvas.apiauthtoken': {
'Meta': {'unique_together': "(('user', 'app'),)", 'object_name': 'APIAuthToken'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.APIApp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.bestof': {
'Meta': {'object_name': 'BestOf'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'best_of'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'chosen_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'best_of'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {})
},
'canvas.canvasanonymoususer': {
'Meta': {'object_name': 'CanvasAnonymousUser', '_ormbases': ['canvas.CanvasUser']},
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'canvas.canvasuser': {
'Meta': {'object_name': 'CanvasUser', 'db_table': "'auth_user'", '_ormbases': ['auth.User'], 'proxy': 'True'}
},
'canvas.category': {
'Meta': {'object_name': 'Category'},
'allow_textonlyop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'disable_remix': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.comment': {
'Meta': {'object_name': 'Comment'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Comment']"}),
'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.commentflag': {
'Meta': {'object_name': 'CommentFlag'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'undone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['auth.User']"})
},
'canvas.commentmoderationlog': {
'Meta': {'object_name': 'CommentModerationLog'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderated_comments_log'", 'to': "orm['auth.User']"}),
'visibility': ('django.db.models.fields.IntegerField', [], {})
},
'canvas.commentpin': {
'Meta': {'object_name': 'CommentPin'},
'auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.commentsticker': {
'Meta': {'object_name': 'CommentSticker'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'canvas.content': {
'Meta': {'object_name': 'Content'},
'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': "orm['canvas.Content']"}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.contenturlmapping': {
'Meta': {'object_name': 'ContentUrlMapping'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'canvas.emailunsubscribe': {
'Meta': {'object_name': 'EmailUnsubscribe'},
'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'canvas.facebookinvite': {
'Meta': {'object_name': 'FacebookInvite'},
'fb_message_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invited_fbid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
},
'canvas.facebookuser': {
'Meta': {'object_name': 'FacebookUser'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fb_uid': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_invited': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'canvas.followcategory': {
'Meta': {'object_name': 'FollowCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followers'", 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'following'", 'to': "orm['auth.User']"})
},
'canvas.invitecode': {
'Meta': {'object_name': 'InviteCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
},
'canvas.remixplugin': {
'Meta': {'object_name': 'RemixPlugin'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
's3md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
'canvas.stashcontent': {
'Meta': {'object_name': 'StashContent'},
'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.userinfo': {
'Meta': {'object_name': 'UserInfo'},
'free_invites': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite_bypass': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'is_qa': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post_anonymously': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'power_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'canvas.usermoderationlog': {
'Meta': {'object_name': 'UserModerationLog'},
'action': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderation_log'", 'to': "orm['auth.User']"})
},
'canvas.userwarning': {
'Meta': {'object_name': 'UserWarning'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'confirmed': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'custom_message': ('django.db.models.fields.TextField', [], {}),
'disable_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued': ('canvas.util.UnixTimestampField', [], {}),
'stock_message': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_warnings'", 'to': "orm['auth.User']"}),
'viewed': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['canvas']
|
|
# -*- coding: utf-8 -*-
"""
tests.testing
~~~~~~~~~~~~~
Test client and more.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import flask
from flask._compat import text_type
def test_environ_defaults_from_config():
    """SERVER_NAME and APPLICATION_ROOT config values shape default request URLs."""
    app = flask.Flask(__name__)
    app.testing = True
    app.config['SERVER_NAME'] = 'example.com:1234'
    app.config['APPLICATION_ROOT'] = '/foo'

    @app.route('/')
    def index():
        return flask.request.url

    expected = 'http://example.com:1234/foo/'
    ctx = app.test_request_context()
    assert ctx.request.url == expected
    with app.test_client() as client:
        response = client.get('/')
        assert response.data == expected.encode('ascii')
def test_environ_defaults():
    """Without SERVER_NAME, the default request URL is http://localhost/."""
    app = flask.Flask(__name__)
    app.testing = True

    @app.route('/')
    def index():
        return flask.request.url

    expected = 'http://localhost/'
    ctx = app.test_request_context()
    assert ctx.request.url == expected
    with app.test_client() as client:
        response = client.get('/')
        assert response.data == expected.encode('ascii')
def test_redirect_keep_session():
    """The test client must keep session cookies across followed redirects."""
    app = flask.Flask(__name__)
    app.secret_key = 'testing'

    @app.route('/', methods=['GET', 'POST'])
    def index():
        # POST redirects without touching the session; GET seeds it.
        if flask.request.method == 'POST':
            return flask.redirect('/getsession')
        flask.session['data'] = 'foo'
        return 'index'

    @app.route('/getsession')
    def get_session():
        return flask.session.get('data', '<missing>')

    with app.test_client() as c:
        # no session has been established yet
        rv = c.get('/getsession')
        assert rv.data == b'<missing>'

        # seed the session via GET /
        rv = c.get('/')
        assert rv.data == b'index'
        assert flask.session.get('data') == 'foo'

        # the redirected-to request must still see the seeded session
        rv = c.post('/', data={}, follow_redirects=True)
        assert rv.data == b'foo'

        # This support requires a new Werkzeug version
        if not hasattr(c, 'redirect_client'):
            assert flask.session.get('data') == 'foo'

        rv = c.get('/getsession')
        assert rv.data == b'foo'
def test_session_transactions():
    """session_transaction() allows reading and writing the client's session."""
    app = flask.Flask(__name__)
    app.testing = True
    app.secret_key = 'testing'

    @app.route('/')
    def index():
        return text_type(flask.session['foo'])

    with app.test_client() as client:
        # seed the session before making any request
        with client.session_transaction() as session:
            assert len(session) == 0
            session['foo'] = [42]
            assert len(session) == 1

        response = client.get('/')
        assert response.data == b'[42]'

        # the stored value round-trips back through the cookie
        with client.session_transaction() as session:
            assert len(session) == 1
            assert session['foo'] == [42]
def test_session_transactions_no_null_sessions():
    """session_transaction() must raise when the backend cannot open a
    session (no secret key is configured here)."""
    app = flask.Flask(__name__)
    app.testing = True

    with app.test_client() as c:
        # pytest.raises replaces the hand-rolled try/except/else scaffold
        with pytest.raises(RuntimeError) as excinfo:
            with c.session_transaction():
                pass
        assert 'Session backend did not open a session' in str(excinfo.value)
def test_session_transactions_keep_context():
    """The active request context must survive a session_transaction()."""
    app = flask.Flask(__name__)
    app.testing = True
    app.secret_key = 'testing'

    with app.test_client() as client:
        rv = client.get('/')
        current_request = flask.request._get_current_object()
        assert current_request is not None
        with client.session_transaction():
            # still the same request object inside the transaction
            assert flask.request._get_current_object() is current_request
def test_session_transaction_needs_cookies():
    """session_transaction() requires cookie support on the test client."""
    app = flask.Flask(__name__)
    app.testing = True
    c = app.test_client(use_cookies=False)

    # pytest.raises replaces the hand-rolled try/except/else scaffold
    with pytest.raises(RuntimeError) as excinfo:
        with c.session_transaction():
            pass
    assert 'cookies' in str(excinfo.value)
def test_test_client_context_binding():
    """Requests made inside a ``with test_client()`` block leave their
    context inspectable; the context is torn down when the block exits."""
    app = flask.Flask(__name__)
    # suppress error logging for the intentional 500 below
    app.config['LOGGER_HANDLER_POLICY'] = 'never'

    @app.route('/')
    def index():
        flask.g.value = 42
        return 'Hello World!'

    @app.route('/other')
    def other():
        # deliberately raise ZeroDivisionError -> 500 response
        1 // 0

    with app.test_client() as c:
        resp = c.get('/')
        # g from the last request is still accessible inside the block
        assert flask.g.value == 42
        assert resp.data == b'Hello World!'
        assert resp.status_code == 200

        resp = c.get('/other')
        # the failing request got a fresh g; 'value' was never set on it
        assert not hasattr(flask.g, 'value')
        assert b'Internal Server Error' in resp.data
        assert resp.status_code == 500
        flask.g.value = 23

    # outside the block the context is gone; accessing g must fail
    try:
        flask.g.value
    except (AttributeError, RuntimeError):
        pass
    else:
        raise AssertionError('some kind of exception expected')
def test_reuse_client():
    """A single test client can be entered as a context manager repeatedly."""
    app = flask.Flask(__name__)
    client = app.test_client()

    for _ in range(2):
        with client:
            assert client.get('/').status_code == 404
def test_test_client_calls_teardown_handlers():
    """Teardown handlers fire when the client block exits or when the next
    request replaces the kept-open context -- not right after each request."""
    app = flask.Flask(__name__)
    called = []

    @app.teardown_request
    def remember(error):
        called.append(error)

    with app.test_client() as c:
        assert called == []
        c.get('/')
        # teardown is deferred while the context stays open inside the block
        assert called == []
    # leaving the block flushes the pending teardown
    assert called == [None]

    del called[:]
    with app.test_client() as c:
        assert called == []
        c.get('/')
        assert called == []
        # the second request tears down the first request's context
        c.get('/')
        assert called == [None]
    assert called == [None, None]
def test_full_url_request():
    """Absolute URLs in client requests populate both form data and query args."""
    app = flask.Flask(__name__)
    app.testing = True

    @app.route('/action', methods=['POST'])
    def action():
        return 'x'

    with app.test_client() as client:
        response = client.post('http://domain.com/action?vodka=42',
                               data={'gin': 43})
        assert response.status_code == 200
        assert 'gin' in flask.request.form
        assert 'vodka' in flask.request.args
def test_subdomain():
    """Routing honors a dynamic subdomain when SERVER_NAME is configured."""
    app = flask.Flask(__name__)
    app.config['SERVER_NAME'] = 'example.com'

    @app.route('/', subdomain='<company_id>')
    def view(company_id):
        return company_id

    with app.test_request_context():
        url = flask.url_for('view', company_id='xxx')

    with app.test_client() as client:
        response = client.get(url)
        assert response.status_code == 200
        assert response.data == b'xxx'
def test_nosubdomain():
    """A plain path rule routes normally even with SERVER_NAME configured."""
    app = flask.Flask(__name__)
    app.config['SERVER_NAME'] = 'example.com'

    @app.route('/<company_id>')
    def view(company_id):
        return company_id

    with app.test_request_context():
        url = flask.url_for('view', company_id='xxx')

    with app.test_client() as client:
        response = client.get(url)
        assert response.status_code == 200
        assert response.data == b'xxx'
|
|
#!/usr/bin/env python3
#
# Sniffles2
# A fast structural variant caller for long-read sequencing data
#
# Created: 18.10.2021
# Author: Moritz Smolka
# Contact: moritz.g.smolka@gmail.com
#
import os
import sys
import datetime
import argparse
from sniffles import util
# Tool name reported in output headers.
VERSION="Sniffles2"
# Release build number shown by --version.
BUILD="2.0.2"
# On-disk .snf serialization version; presumably bumped when the SNF
# format changes -- confirm against the snf reader/writer.
SNF_VERSION="S2_rc3"
class ArgFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
    """Help formatter combining default-value display with raw (unwrapped)
    description/epilog text."""
    pass
def tobool(v):
    """Parse a boolean command-line argument.

    Accepts real bools (returned unchanged) and the strings
    "true"/"false" (case-insensitive) or "1"/"0".

    Raises:
        argparse.ArgumentTypeError: for any other value.  (The original
        raised AttributeError for non-string, non-bool inputs such as
        None, because it called .lower() unconditionally.)
    """
    if isinstance(v, bool):
        return v
    text = str(v).strip().lower()
    if text in ("true", "1"):
        return True
    if text in ("false", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value (True | False) required for argument")
def from_cmdline():
header=f"Sniffles2: A fast structural variant (SV) caller for long-read sequencing data\n Version {BUILD}\n Contact: moritz.g.smolka@gmail.com"
example=""" Usage example A - Call SVs for a single sample:
sniffles --input sorted_indexed_alignments.bam --vcf output.vcf
... OR, with CRAM input and bgzipped+tabix indexed VCF output:
sniffles --input sample.cram --vcf output.vcf.gz
... OR, producing only a SNF file with SV candidates for later multi-sample calling:
sniffles --input sample1.bam --snf sample1.snf
... OR, simultaneously producing a single-sample VCF and SNF file for later multi-sample calling:
sniffles --input sample1.bam --vcf sample1.vcf.gz --snf sample1.snf
... OR, with additional options to specify tandem repeat annotations (for improved call accuracy), reference (for DEL sequences) and non-germline mode for detecting rare SVs:
sniffles --input sample1.bam --vcf sample1.vcf.gz --tandem-repeats tandem_repeats.bed --reference genome.fa --non-germline
Usage example B - Multi-sample calling:
Step 1. Create .snf for each sample: sniffles --input sample1.bam --snf sample1.snf
Step 2. Combined calling: sniffles --input sample1.snf sample2.snf ... sampleN.snf --vcf multisample.vcf
... OR, using a .tsv file containing a list of .snf files, and custom sample ids in an optional second column (one sample per line):
Step 2. Combined calling: sniffles --input snf_files_list.tsv --vcf multisample.vcf
Usage example C - Determine genotypes for a set of known SVs (force calling):
sniffles --input sample.bam --genotype-vcf input_known_svs.vcf --vcf output_genotypes.vcf
"""
usage="sniffles --input SORTED_INPUT.bam [--vcf OUTPUT.vcf] [--snf MERGEABLE_OUTPUT.snf] [--threads 4] [--non-germline]\n\n" + header + "\n\n" + example + "\n\n Use --help for full parameter/usage information\n \n"
parser = argparse.ArgumentParser(description="", epilog=example, formatter_class=lambda prog: ArgFormatter(prog,max_help_position=100,width=150), usage=usage)
parser.add_argument("--version", action="version", version=f"Sniffles2, Version {BUILD}")
main_args = parser.add_argument_group("Common parameters")
main_args.add_argument("-i","--input", metavar="IN", type=str, help="For single-sample calling: A coordinate-sorted and indexed .bam/.cram (BAM/CRAM format) file containing aligned reads. - OR - For multi-sample calling: Multiple .snf files (generated before by running Sniffles2 for individual samples with --snf)", required=True, nargs="+")
main_args.add_argument("-v","--vcf", metavar="OUT.vcf", type=str, help="VCF output filename to write the called and refined SVs to. If the given filename ends with .gz, the VCF file will be automatically bgzipped and a .tbi index built for it.", required=False)
main_args.add_argument("--snf", metavar="OUT.snf", type=str, help="Sniffles2 file (.snf) output filename to store candidates for later multi-sample calling", required=False)
main_args.add_argument("--reference", metavar="reference.fasta", type=str, help="(Optional) Reference sequence the reads were aligned against. To enable output of deletion SV sequences, this parameter must be set.", default=None)
main_args.add_argument("--tandem-repeats", metavar="IN.bed", type=str, help="(Optional) Input .bed file containing tandem repeat annotations for the reference genome.", default=None)
main_args.add_argument("--non-germline", help="Call non-germline SVs (rare, somatic or mosaic SVs)", default=False, action="store_true")
main_args.add_argument("--phase", help="Determine phase for SV calls (requires the input alignments to be phased)", default=False, action="store_true")
main_args.add_argument("-t","--threads", metavar="N", type=int, help="Number of parallel threads to use (speed-up for multi-core CPUs)", default=4)
filter_args = parser.add_argument_group("SV Filtering parameters")
filter_args.add_argument("--minsupport", metavar="auto", type=str, help="Minimum number of supporting reads for a SV to be reported (default: automatically choose based on coverage)", default="auto")
filter_args.add_argument("--minsupport-auto-mult", metavar="0.1/0.025", type=float, help="Coverage based minimum support multiplier for germline/non-germline modes (only for auto minsupport) ", default=None)
filter_args.add_argument("--minsvlen", metavar="N", type=int, help="Minimum SV length (in bp)", default=35)
filter_args.add_argument("--minsvlen-screen-ratio", metavar="N", type=float, help="Minimum length for SV candidates (as fraction of --minsvlen)", default=0.95)
filter_args.add_argument("--mapq", metavar="N", type=int, help="Alignments with mapping quality lower than this value will be ignored", default=25)
filter_args.add_argument("--no-qc", help="Output all SV candidates, disregarding quality control steps.", default=False, action="store_true")
filter_args.add_argument("--qc-stdev", help="Apply filtering based on SV start position and length standard deviation", metavar="True", type=tobool, default=True)
filter_args.add_argument("--qc-stdev-abs-max", help="Maximum standard deviation for SV length and size (in bp)", metavar="N", type=int, default=500)
filter_args.add_argument("--qc-strand", help="Apply filtering based on strand support of SV calls", metavar="False", type=tobool, default=False)
filter_args.add_argument("--qc-coverage", help="Minimum surrounding region coverage of SV calls", metavar="N", type=int, default=1)
filter_args.add_argument("--long-ins-length", help="Insertion SVs longer than this value are considered as hard to detect based on the aligner and read length and subjected to more sensitive filtering.", metavar="2500", type=int, default=2500)
filter_args.add_argument("--long-del-length", help="Deletion SVs longer than this value are subjected to central coverage drop-based filtering (Not applicable for --non-germline)", metavar="50000", type=int, default=50000)
filter_args.add_argument("--long-del-coverage", help="Long deletions with central coverage (in relation to upstream/downstream coverage) higher than this value will be filtered (Not applicable for --non-germline)", metavar="0.66", type=float, default=0.66)
filter_args.add_argument("--long-dup-length", help="Duplication SVs longer than this value are subjected to central coverage increase-based filtering (Not applicable for --non-germline)", metavar="50000", type=int, default=50000)
filter_args.add_argument("--long-dup-coverage", help="Long duplications with central coverage (in relation to upstream/downstream coverage) lower than this value will be filtered (Not applicable for --non-germline)", metavar="1.33", type=float, default=1.33)
filter_args.add_argument("--max-splits-kb", metavar="N", type=int, help="Additional number of splits per kilobase read sequence allowed before reads are ignored", default=0.1)
filter_args.add_argument("--max-splits-base", metavar="N", type=int, help="Base number of splits allowed before reads are ignored (in addition to --max-splits-kb)", default=3)
filter_args.add_argument("--min-alignment-length", metavar="N", type=int, help="Reads with alignments shorter than this length (in bp) will be ignored", default=1000)
filter_args.add_argument("--phase-conflict-threshold", metavar="F", type=float, help="Maximum fraction of conflicting reads permitted for SV phase information to be labelled as PASS (only for --phase)", default=0.1)
filter_args.add_argument("--detect-large-ins", help="Infer insertions that are longer than most reads and therefore are spanned by few alignments only.", metavar="True", type=tobool, default=True)
#filter_args.add_argument("--large-ins-threshold", metavar="N", type=int, help="Minimum clipping at read ends to be considered a potential large insertion (only with --detect-large-ins)", default=5000)
cluster_args = parser.add_argument_group("SV Clustering parameters")
cluster_args.add_argument("--cluster-binsize", metavar="N", type=int, help="Initial screening bin size in bp", default=100)
cluster_args.add_argument("--cluster-r", metavar="R", type=float, help="Multiplier for SV start position standard deviation criterion in cluster merging", default=2.5)
cluster_args.add_argument("--cluster-repeat-h", metavar="H", type=float, help="Multiplier for mean SV length criterion for tandem repeat cluster merging", default=1.5)
cluster_args.add_argument("--cluster-repeat-h-max", metavar="N", type=float, help="Max. merging distance based on SV length criterion for tandem repeat cluster merging", default=1000)
cluster_args.add_argument("--cluster-merge-pos", metavar="N", type=int, help="Max. merging distance for insertions and deletions on the same read and cluster in non-repeat regions", default=150)
cluster_args.add_argument("--cluster-merge-len", metavar="F", type=float, help="Max. size difference for merging SVs as fraction of SV length", default=0.33)
cluster_args.add_argument("--cluster-merge-bnd", metavar="N", type=int, help="Max. merging distance for breakend SV candidates.", default=1500)
genotype_args = parser.add_argument_group("SV Genotyping parameters")
genotype_args.add_argument("--genotype-ploidy", metavar="N", type=int, help="Sample ploidy (currently fixed at value 2)", default=2)
genotype_args.add_argument("--genotype-error", metavar="N", type=float, help="Estimated false positve rate for leads (relating to total coverage)", default=0.05)
genotype_args.add_argument("--sample-id", type=str, help="Custom ID for this sample, used for later multi-sample calling (stored in .snf)", default=None)
genotype_args.add_argument("--genotype-vcf", metavar="IN.vcf", type=str, help="Determine the genotypes for all SVs in the given input .vcf file (forced calling). Re-genotyped .vcf will be written to the output file specified with --vcf.", default=None)
multi_args = parser.add_argument_group("Multi-Sample Calling / Combine parameters")
multi_args.add_argument("--combine-high-confidence", metavar="F", type=float, help="Minimum fraction of samples in which a SV needs to have individually passed QC for it to be reported in combined output (a value of zero will report all SVs that pass QC in at least one of the input samples)", default=0.0)
multi_args.add_argument("--combine-low-confidence", metavar="F", type=float, help="Minimum fraction of samples in which a SV needs to be present (failed QC) for it to be reported in combined output", default=0.2)
multi_args.add_argument("--combine-low-confidence-abs", metavar="N", type=int, help="Minimum absolute number of samples in which a SV needs to be present (failed QC) for it to be reported in combined output", default=3)
multi_args.add_argument("--combine-null-min-coverage", metavar="N", type=int, help="Minimum coverage for a sample genotype to be reported as 0/0 (sample genotypes with coverage below this threshold at the SV location will be output as ./.)", default=5)
multi_args.add_argument("--combine-match", metavar="N", type=int, help="Maximum deviation of multiple SV's start/end position for them to be combined across samples. Given by max_dev=M*sqrt(min(SV_length_a,SV_length_b)), where M is this parameter.", default=500)
multi_args.add_argument("--combine-consensus", help="Output the consensus genotype of all samples", default=False, action="store_true")
multi_args.add_argument("--combine-separate-intra", help="Disable combination of SVs within the same sample", default=False, action="store_true")
multi_args.add_argument("--combine-output-filtered", help="Include low-confidence / putative non-germline SVs in multi-calling", default=False, action="store_true")
#multi_args.add_argument("--combine-exhaustive", help="(DEV) Disable performance optimization in multi-calling", default=False, action="store_true")
#multi_args.add_argument("--combine-relabel-rare", help="(DEV)", default=False, action="store_true")
#multi_args.add_argument("--combine-with-missing", help="(DEV)", default=False, action="store_true")
postprocess_args = parser.add_argument_group("SV Postprocessing, QC and output parameters")
postprocess_args.add_argument("--output-rnames", help="Output names of all supporting reads for each SV in the RNAMEs info field", default=False, action="store_true")
postprocess_args.add_argument("--no-consensus", help="Disable consensus sequence generation for insertion SV calls (may improve performance)", default=False, action="store_true")
postprocess_args.add_argument("--no-sort", help="Do not sort output VCF by genomic coordinates (may slightly improve performance)", default=False, action="store_true")
postprocess_args.add_argument("--no-progress", help="Disable progress display", default=False, action="store_true")
postprocess_args.add_argument("--quiet", help="Disable all logging, except errors", default=False, action="store_true")
postprocess_args.add_argument("--max-del-seq-len", metavar="N", type=int, help="Maximum deletion sequence length to be output. Deletion SVs longer than this value will be written to the output as symbolic SVs.", default=50000)
postprocess_args.add_argument("--symbolic", help="Output all SVs as symbolic, including insertions and deletions, instead of reporting nucleotide sequences.", default=False, action="store_true")
developer_args = parser.add_argument_group("Developer parameters")
developer_args.add_argument("--dev-cache", default=False, action="store_true", help=argparse.SUPPRESS)
developer_args.add_argument("--dev-cache-dir", metavar="PATH", type=str, default=None, help=argparse.SUPPRESS)
developer_args.add_argument("--dev-debug-svtyping", default=False, action="store_true", help=argparse.SUPPRESS)
developer_args.add_argument("--dev-keep-lowqual-splits", default=False, action="store_true", help=argparse.SUPPRESS)
developer_args.add_argument("--dev-call-region", metavar="REGION", type=str, default=None, help=argparse.SUPPRESS)
developer_args.add_argument("--dev-dump-clusters", default=False, action="store_true", help=argparse.SUPPRESS)
developer_args.add_argument("--dev-merge-inline", default=False, action="store_true", help=argparse.SUPPRESS)
developer_args.add_argument("--dev-seq-cache-maxlen", metavar="N", type=int, default=50000, help=argparse.SUPPRESS)
developer_args.add_argument("--consensus-max-reads", metavar="N", type=int, default=20, help=argparse.SUPPRESS)
developer_args.add_argument("--consensus-max-reads-bin", metavar="N", type=int, default=10, help=argparse.SUPPRESS)
developer_args.add_argument("--dev-dump-coverage", default=False, action="store_true", help=argparse.SUPPRESS)
developer_args.add_argument("--dev-no-resplit", default=False, action="store_true", help=argparse.SUPPRESS)
developer_args.add_argument("--dev-skip-snf-validation", default=False, action="store_true", help=argparse.SUPPRESS)
developer_args.add_argument("--low-memory", default=False, action="store_true", help=argparse.SUPPRESS)
developer_args.add_argument("--repeat", default=False, action="store_true", help=argparse.SUPPRESS)
developer_args.add_argument("--qc-nm", default=False, action="store_true", help=argparse.SUPPRESS)
developer_args.add_argument("--qc-nm-max", metavar="F", type=float, default=0.2, help=argparse.SUPPRESS)
#developer_args.add_argument("--qc-strand", help="(DEV)", default=False, action="store_true")
config=parser.parse_args()
if config.quiet:
sys.stdout=open(os.devnull,"w")
config.start_date=datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
config.sort=not config.no_sort
#if config.low_memory:
# config.task_count_multiplier=64
#else:
# config.task_count_multiplier=1
config.task_count_multiplier=0
config.version=VERSION
config.build=BUILD
config.snf_format_version=SNF_VERSION
config.command=" ".join(sys.argv)
if config.dev_call_region != None:
region_contig,region_startend=config.dev_call_region.replace(",","").split(":")
start,end=region_startend.split("-")
config.dev_call_region=dict(contig=region_contig,start=int(start),end=int(end))
#"--minsvlen" parameter is for final output filtering
#for intermediate steps, a lower threshold is used to account for sequencing, mapping imprecision
config.minsvlen_screen=int(config.minsvlen_screen_ratio*config.minsvlen)
#config.minsupport_screen=max(1,int(0.333*config.minsupport*(config.cluster_binsize/100.0)))
if config.minsupport!="auto":
config.minsupport=int(config.minsupport)
#--minsupport auto defaults
config.minsupport_auto_base=1.5
config.minsupport_auto_regional_coverage_weight=0.75
if config.minsupport_auto_mult==None:
if config.non_germline:
config.minsupport_auto_mult=0.025
else:
config.minsupport_auto_mult=0.1
if config.non_germline:
config.qc_nm=True
config.coverage_binsize=config.cluster_binsize
config.coverage_binsize_combine=config.cluster_binsize*5
config.coverage_updown_bins=5
config.coverage_shift_bins=3
#INS Consensus parameters
#config.consensus_max_reads=20
#config.consensus_max_reads_bin=10
config.consensus_min_reads=4
config.consensus_kmer_len=6
config.consensus_kmer_skip_base=3
config.consensus_kmer_skip_seqlen_mult=1.0/500.0
config.consensus_low_threshold=0.0 #0.15
#Large INS
config.long_ins_rescale_base=1.66
config.long_ins_rescale_mult=0.33
#BND
config.bnd_cluster_length=1000
config.bnd_cluster_resplit=0
#Genotyping
config.genotype_format="GT:GQ:DR:DV"
config.genotype_none=(".",".",0,0,0)
config.genotype_null=(0,0,0,0,0)
config.genotype_min_z_score=5
if config.genotype_ploidy!=2:
util.fatal_error("Currently only --genotype-ploidy 2 is supported")
#SNF
config.snf_block_size=10**5
config.snf_combine_keep_open=True #Keep file handles open during .snf combining (might be an issue if the number of .snf files to merge is very large)
#Combine
config.combine_exhaustive=False
config.combine_relabel_rare=False
config.combine_overlap_abs=2500
config.combine_min_size=100
#Misc
config.precise=25 #Max. sum of pos and length stdev for SVs to be labelled PRECISE
config.resplit_binsize=20
config.tandem_repeat_region_pad=500
config.id_prefix="Sniffles2."
config.dev_profile=False
config.workdir=os.getcwd()
return config
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import mock
from oslo_utils import units
from nova.compute import arch
from nova.compute import claims
from nova.compute import hv_type
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import exception as exc
from nova import objects
from nova.objects import base as obj_base
from nova import test
# Canonical resource report of the fake virt driver: setup_rt() wires this
# dict up as the return value of the mocked get_available_resource().
_VIRT_DRIVER_AVAIL_RESOURCES = {
    'vcpus': 4,
    'memory_mb': 512,
    'local_gb': 6,
    'vcpus_used': 0,
    'memory_mb_used': 0,
    'local_gb_used': 0,
    'hypervisor_type': 'fake',
    'hypervisor_version': 0,
    'hypervisor_hostname': 'fakehost',
    'cpu_info': '',
    'numa_topology': None,
}
# ComputeNode record mirroring _VIRT_DRIVER_AVAIL_RESOURCES; the tests below
# return element [0] from the mocked ComputeNode.get_by_host_and_nodename().
_COMPUTE_NODE_FIXTURES = [
    objects.ComputeNode(
        id=1,
        host='fake-host',
        vcpus=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus'],
        memory_mb=_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'],
        local_gb=_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'],
        vcpus_used=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus_used'],
        memory_mb_used=_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used'],
        local_gb_used=_VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used'],
        hypervisor_type='fake',
        hypervisor_version=0,
        hypervisor_hostname='fake-host',
        # free_* values are derived from the totals above so the fixture
        # stays internally consistent with the driver resources.
        free_ram_mb=(_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'] -
                     _VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used']),
        free_disk_gb=(_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'] -
                      _VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used']),
        current_workload=0,
        running_vms=0,
        cpu_info='{}',
        disk_available_least=0,
        host_ip='1.1.1.1',
        supported_hv_specs=[
            objects.HVSpec.from_list([arch.I686, hv_type.KVM, vm_mode.HVM])
        ],
        metrics=None,
        pci_device_pools=None,
        extra_resources=None,
        stats={},
        numa_topology=None,
        cpu_allocation_ratio=16.0,
        ram_allocation_ratio=1.5,
    ),
]
# Flavor (instance type) fixtures keyed by id: 1 is the small flavor,
# 2 the medium one. The dict form is used where raw values are needed.
_INSTANCE_TYPE_FIXTURES = {
    1: {
        'id': 1,
        'flavorid': 'fakeid-1',
        'name': 'fake1.small',
        'memory_mb': 128,
        'vcpus': 1,
        'root_gb': 1,
        'ephemeral_gb': 0,
        'swap': 0,
        'rxtx_factor': 0,
        'vcpu_weight': 1,
        'extra_specs': {},
    },
    2: {
        'id': 2,
        'flavorid': 'fakeid-2',
        'name': 'fake1.medium',
        'memory_mb': 256,
        'vcpus': 2,
        'root_gb': 5,
        'ephemeral_gb': 0,
        'swap': 0,
        'rxtx_factor': 0,
        'vcpu_weight': 1,
        'extra_specs': {},
    },
}
# The same flavor data as objects.Flavor instances, for fields (flavor,
# old_flavor, new_flavor) that expect objects rather than dicts.
_INSTANCE_TYPE_OBJ_FIXTURES = {
    1: objects.Flavor(id=1, flavorid='fakeid-1', name='fake1.small',
                      memory_mb=128, vcpus=1, root_gb=1,
                      ephemeral_gb=0, swap=0, rxtx_factor=0,
                      vcpu_weight=1, extra_specs={}),
    2: objects.Flavor(id=2, flavorid='fakeid-2', name='fake1.medium',
                      memory_mb=256, vcpus=2, root_gb=5,
                      ephemeral_gb=0, swap=0, rxtx_factor=0,
                      vcpu_weight=1, extra_specs={}),
}
_2MB = 2 * units.Mi / units.Ki  # 2 MiB expressed in KiB; memory size for the NUMA fixtures
# Instance-side NUMA topology: two cells of _2MB each.
_INSTANCE_NUMA_TOPOLOGIES = {
    '2mb': objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(
            id=0, cpuset=set([1]), memory=_2MB, pagesize=0),
        objects.InstanceNUMACell(
            id=1, cpuset=set([3]), memory=_2MB, pagesize=0)]),
}
# NUMA claim limits matching the '2mb' topology.
_NUMA_LIMIT_TOPOLOGIES = {
    '2mb': objects.NUMATopologyLimits(id=0,
                                      cpu_allocation_ratio=1.0,
                                      ram_allocation_ratio=1.0),
}
# Hugepage layout shared by both host cells below.
_NUMA_PAGE_TOPOLOGIES = {
    '2kb*8': objects.NUMAPagesTopology(size_kb=2, total=8, used=0)
}
# Host-side NUMA topology: two cells, two CPUs each, nothing in use.
_NUMA_HOST_TOPOLOGIES = {
    '2mb': objects.NUMATopology(cells=[
        objects.NUMACell(id=0, cpuset=set([1, 2]), memory=_2MB,
                         cpu_usage=0, memory_usage=0,
                         mempages=[_NUMA_PAGE_TOPOLOGIES['2kb*8']],
                         siblings=[], pinned_cpus=set([])),
        objects.NUMACell(id=1, cpuset=set([3, 4]), memory=_2MB,
                         cpu_usage=0, memory_usage=0,
                         mempages=[_NUMA_PAGE_TOPOLOGIES['2kb*8']],
                         siblings=[], pinned_cpus=set([]))]),
}
# Instances assigned to the tracked host: id=1 is ACTIVE with the small
# flavor; id=2 is DELETED with the medium flavor (and so should not count
# toward running_vms in the tests below).
_INSTANCE_FIXTURES = [
    objects.Instance(
        id=1,
        host=None,  # prevent RT trying to lazy-load this
        node=None,
        uuid='c17741a5-6f3d-44a8-ade8-773dc8c29124',
        memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'],
        numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
        instance_type_id=1,
        vm_state=vm_states.ACTIVE,
        power_state=power_state.RUNNING,
        task_state=None,
        os_type='fake-os',  # Used by the stats collector.
        project_id='fake-project',  # Used by the stats collector.
        flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1],
        old_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1],
        new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1],
    ),
    objects.Instance(
        id=2,
        host=None,
        node=None,
        uuid='33805b54-dea6-47b8-acb2-22aeb1b57919',
        memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
        numa_topology=None,
        instance_type_id=2,
        vm_state=vm_states.DELETED,
        power_state=power_state.SHUTDOWN,
        task_state=None,
        os_type='fake-os',
        project_id='fake-project-2',
        flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
        old_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
        new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
    ),
]
# In-progress migration records keyed by scenario name; every migration
# resizes from flavor 1 (small) to flavor 2 (medium).
_MIGRATION_FIXTURES = {
    # A migration that has only this compute node as the source host
    'source-only': objects.Migration(
        id=1,
        instance_uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
        source_compute='fake-host',
        dest_compute='other-host',
        source_node='fake-node',
        dest_node='other-node',
        old_instance_type_id=1,
        new_instance_type_id=2,
        status='migrating'
    ),
    # A migration that has only this compute node as the dest host
    'dest-only': objects.Migration(
        id=2,
        instance_uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
        source_compute='other-host',
        dest_compute='fake-host',
        source_node='other-node',
        dest_node='fake-node',
        old_instance_type_id=1,
        new_instance_type_id=2,
        status='migrating'
    ),
    # A migration that has this compute node as both the source and dest host
    'source-and-dest': objects.Migration(
        id=3,
        instance_uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
        source_compute='fake-host',
        dest_compute='fake-host',
        source_node='fake-node',
        dest_node='fake-node',
        old_instance_type_id=1,
        new_instance_type_id=2,
        status='migrating'
    ),
}
# Instances belonging to the migrations above, keyed by the instance_uuid of
# the corresponding _MIGRATION_FIXTURES entry. The tests return these from
# the mocked objects.Instance.get_by_uuid, which the Migration.instance
# property calls during migration processing.
_MIGRATION_INSTANCE_FIXTURES = {
    # source-only
    'f15ecfb0-9bf6-42db-9837-706eb2c4bf08': objects.Instance(
        id=101,
        host=None,  # prevent RT trying to lazy-load this
        node=None,
        uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
        memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'],
        numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
        instance_type_id=1,
        vm_state=vm_states.ACTIVE,
        power_state=power_state.RUNNING,
        task_state=task_states.RESIZE_MIGRATING,
        system_metadata={},
        os_type='fake-os',
        project_id='fake-project',
        flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
    ),
    # dest-only
    'f6ed631a-8645-4b12-8e1e-2fff55795765': objects.Instance(
        id=102,
        host=None,  # prevent RT trying to lazy-load this
        node=None,
        uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
        memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
        numa_topology=None,
        instance_type_id=2,
        vm_state=vm_states.ACTIVE,
        power_state=power_state.RUNNING,
        task_state=task_states.RESIZE_MIGRATING,
        system_metadata={},
        os_type='fake-os',
        project_id='fake-project',
        flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
        old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
    ),
    # source-and-dest
    'f4f0bfea-fe7e-4264-b598-01cb13ef1997': objects.Instance(
        id=3,
        host=None,  # prevent RT trying to lazy-load this
        node=None,
        uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
        memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
        vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
        root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
        ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
        numa_topology=None,
        instance_type_id=2,
        vm_state=vm_states.ACTIVE,
        power_state=power_state.RUNNING,
        task_state=task_states.RESIZE_MIGRATING,
        system_metadata={},
        os_type='fake-os',
        project_id='fake-project',
        flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
        old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
        new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
    ),
}
# MigrationContext objects keyed by instance uuid; attached to the instance
# fixtures above (instance.migration_context) before resource accounting.
_MIGRATION_CONTEXT_FIXTURES = {
    'f4f0bfea-fe7e-4264-b598-01cb13ef1997': objects.MigrationContext(
        instance_uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
        migration_id=3,
        new_numa_topology=None,
        old_numa_topology=None),
    'c17741a5-6f3d-44a8-ade8-773dc8c29124': objects.MigrationContext(
        instance_uuid='c17741a5-6f3d-44a8-ade8-773dc8c29124',
        migration_id=3,
        new_numa_topology=None,
        old_numa_topology=None),
    'f15ecfb0-9bf6-42db-9837-706eb2c4bf08': objects.MigrationContext(
        instance_uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
        migration_id=1,
        new_numa_topology=None,
        old_numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb']),
    'f6ed631a-8645-4b12-8e1e-2fff55795765': objects.MigrationContext(
        instance_uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
        migration_id=2,
        new_numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
        old_numa_topology=None),
}
def overhead_zero(instance):
    """Overhead estimator stub reporting zero memory overhead.

    Used as the default ``estimate_overhead`` callback in setup_rt() so the
    driver appears to add no per-instance memory overhead.
    """
    return {'memory_mb': 0}
def setup_rt(hostname, nodename, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES,
             estimate_overhead=overhead_zero):
    """Sets up the resource tracker instance with mock fixtures.

    :param hostname: host name reported by the tracker.
    :param nodename: hypervisor node name reported by the tracker.
    :param virt_resources: Optional override of the resource representation
                           returned by the virt driver's
                           `get_available_resource()` method.
    :param estimate_overhead: Optional override of a function that should
                              return overhead of memory given an instance
                              object. Defaults to returning zero overhead.
    :returns: tuple of (ResourceTracker, scheduler client mock, virt driver
              mock).
    """
    sched_client_mock = mock.MagicMock()
    notifier_mock = mock.MagicMock()
    vd = mock.MagicMock()
    # Make sure we don't change any global fixtures during tests
    virt_resources = copy.deepcopy(virt_resources)
    vd.get_available_resource.return_value = virt_resources
    vd.estimate_instance_overhead.side_effect = estimate_overhead
    # NOTE: contextlib.nested() is deprecated since Python 2.7 and removed
    # in Python 3; a single with-statement taking multiple context managers
    # is the supported equivalent.
    with mock.patch('nova.scheduler.client.SchedulerClient',
                    return_value=sched_client_mock), \
            mock.patch('nova.rpc.get_notifier', return_value=notifier_mock):
        rt = resource_tracker.ResourceTracker(hostname, vd, nodename)
    return (rt, sched_client_mock, vd)
class BaseTestCase(test.NoDBTestCase):
    """Common scaffolding for the resource tracker tests."""
    def setUp(self):
        super(BaseTestCase, self).setUp()
        self.rt = None
        self.flags(my_ip='1.1.1.1')
    def _setup_rt(self, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES,
                  estimate_overhead=overhead_zero):
        # Build the tracker plus its collaborating mocks and stash them on
        # the test case for individual tests to inspect.
        tracker, sched_client, driver = setup_rt(
            'fake-host', 'fake-node', virt_resources, estimate_overhead)
        self.rt = tracker
        self.sched_client_mock = sched_client
        self.driver_mock = driver
class TestUpdateAvailableResources(BaseTestCase):
    """Tests for update_available_resource() under various combinations of
    running instances and in-progress migrations, checking the resource
    values the tracker computes before handing off to _update().
    """
    def _update_available_resources(self):
        # We test RT._update separately, since the complexity
        # of the update_available_resource() function is high enough as
        # it is, we just want to focus here on testing the resources
        # parameter that update_available_resource() eventually passes
        # to _update().
        with mock.patch.object(self.rt, '_update') as update_mock:
            self.rt.update_available_resource(mock.sentinel.ctx)
        return update_mock
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_no_instances_no_migrations_no_reserved(self, get_mock, migr_mock,
                                                    get_cn_mock):
        """With no instances, migrations or reservations, the tracker must
        report the virt driver's resources unchanged.
        """
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        self._setup_rt()
        get_mock.return_value = []
        migr_mock.return_value = []
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        update_mock = self._update_available_resources()
        vd = self.driver_mock
        vd.get_available_resource.assert_called_once_with('fake-node')
        get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                         'fake-node',
                                         expected_attrs=[
                                             'system_metadata',
                                             'numa_topology'])
        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        migr_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                          'fake-node')
        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 6,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 512,
            'memory_mb_used': 0,
            'pci_device_pools': objects.PciDevicePoolList(),
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 0,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_no_instances_no_migrations_reserved_disk_and_ram(
            self, get_mock, migr_mock, get_cn_mock):
        """Reserved host disk and RAM must be accounted as used resources."""
        self.flags(reserved_host_disk_mb=1024,
                   reserved_host_memory_mb=512)
        self._setup_rt()
        get_mock.return_value = []
        migr_mock.return_value = []
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        update_mock = self._update_available_resources()
        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 5,  # 6GB avail - 1 GB reserved
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 0,  # 512MB avail - 512MB reserved
            'memory_mb_used': 512,  # 0MB used + 512MB reserved
            'pci_device_pools': objects.PciDevicePoolList(),
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 1,  # 0GB used + 1 GB reserved
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_some_instances_no_migrations(self, get_mock, migr_mock,
                                          get_cn_mock):
        """Usage of the one ACTIVE instance fixture (small flavor) must be
        subtracted from the free resources; the DELETED one is ignored.
        """
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        self._setup_rt()
        get_mock.return_value = _INSTANCE_FIXTURES
        migr_mock.return_value = []
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        update_mock = self._update_available_resources()
        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 5,  # 6 - 1 used
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 384,  # 512 - 128 used
            'memory_mb_used': 128,
            'pci_device_pools': objects.PciDevicePoolList(),
            # NOTE(jaypipes): Due to the design of the ERT, which now is used
            #                 track VCPUs, the actual used VCPUs isn't
            #                 "written" to the resources dictionary that is
            #                 passed to _update() like all the other
            #                 resources are. Instead, _update()
            #                 calls the ERT's write_resources() method, which
            #                 then queries each resource handler plugin for the
            #                 changes in its resource usage and the plugin
            #                 writes changes to the supplied "values" dict. For
            #                 this reason, all other resources except VCPUs
            #                 are accurate here. :(
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 1,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 1  # One active instance
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_orphaned_instances_no_migrations(self, get_mock, migr_mock,
                                              get_cn_mock):
        """Memory used by driver-reported orphaned instances (unknown to the
        Nova DB) must be subtracted from free RAM.
        """
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        self._setup_rt()
        get_mock.return_value = []
        migr_mock.return_value = []
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        # Orphaned instances are those that the virt driver has on
        # record as consuming resources on the compute node, but the
        # Nova database has no record of the instance being active
        # on the host. For some reason, the resource tracker only
        # considers orphaned instance's memory usage in its calculations
        # of free resources...
        orphaned_usages = {
            '71ed7ef6-9d2e-4c65-9f4e-90bb6b76261d': {
                # Yes, the return result format of get_per_instance_usage
                # is indeed this stupid and redundant. Also note that the
                # libvirt driver just returns an empty dict always for this
                # method and so who the heck knows whether this stuff
                # actually works.
                'uuid': '71ed7ef6-9d2e-4c65-9f4e-90bb6b76261d',
                'memory_mb': 64
            }
        }
        vd = self.driver_mock
        vd.get_per_instance_usage.return_value = orphaned_usages
        update_mock = self._update_available_resources()
        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 6,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 448,  # 512 - 64 orphaned usage
            'memory_mb_used': 64,
            'pci_device_pools': objects.PciDevicePoolList(),
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 0,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            # Yep, for some reason, orphaned instances are not counted
            # as running VMs...
            'running_vms': 0
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.Instance.get_by_uuid')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_no_instances_source_migration(self, get_mock, get_inst_mock,
                                           migr_mock, get_cn_mock):
        """An outbound (source-only) migration must keep the old flavor's
        resources reserved in case the resize is reverted.
        """
        # We test the behavior of update_available_resource() when
        # there is an active migration that involves this compute node
        # as the source host not the destination host, and the resource
        # tracker does not have any instances assigned to it. This is
        # the case when a migration from this compute host to another
        # has been completed, but the user has not confirmed the resize
        # yet, so the resource tracker must continue to keep the resources
        # for the original instance type available on the source compute
        # node in case of a revert of the resize.
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        self._setup_rt()
        get_mock.return_value = []
        migr_obj = _MIGRATION_FIXTURES['source-only']
        migr_mock.return_value = [migr_obj]
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        # Migration.instance property is accessed in the migration
        # processing code, and this property calls
        # objects.Instance.get_by_uuid, so we have the migration return
        inst_uuid = migr_obj.instance_uuid
        instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
        get_inst_mock.return_value = instance
        instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid]
        update_mock = self._update_available_resources()
        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 5,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 384,  # 512 total - 128 for possible revert of orig
            'memory_mb_used': 128,  # 128 possible revert amount
            'pci_device_pools': objects.PciDevicePoolList(),
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 1,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.Instance.get_by_uuid')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_no_instances_dest_migration(self, get_mock, get_inst_mock,
                                         migr_mock, get_cn_mock):
        """An inbound (dest-only) migration must reserve the new flavor's
        resources in case the resize is confirmed.
        """
        # We test the behavior of update_available_resource() when
        # there is an active migration that involves this compute node
        # as the destination host not the source host, and the resource
        # tracker does not yet have any instances assigned to it. This is
        # the case when a migration to this compute host from another host
        # is in progress, but the user has not confirmed the resize
        # yet, so the resource tracker must reserve the resources
        # for the possibly-to-be-confirmed instance's instance type
        # node in case of a confirm of the resize.
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        self._setup_rt()
        get_mock.return_value = []
        migr_obj = _MIGRATION_FIXTURES['dest-only']
        migr_mock.return_value = [migr_obj]
        inst_uuid = migr_obj.instance_uuid
        instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
        get_inst_mock.return_value = instance
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid]
        update_mock = self._update_available_resources()
        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 1,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 256,  # 512 total - 256 for possible confirm of new
            'memory_mb_used': 256,  # 256 possible confirmed amount
            'pci_device_pools': objects.PciDevicePoolList(),
            'vcpus_used': 0,  # See NOTE(jaypipes) above about why this is 0
            'hypervisor_type': 'fake',
            'local_gb_used': 5,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))
    @mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
                return_value=None)
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    @mock.patch('nova.objects.Instance.get_by_uuid')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_some_instances_source_and_dest_migration(self, get_mock,
                                                      get_inst_mock, migr_mock,
                                                      get_cn_mock,
                                                      get_mig_ctxt_mock):
        """A same-host resize must count both the old and new flavors of the
        resizing instance on top of the existing instances' usage.
        """
        # We test the behavior of update_available_resource() when
        # there is an active migration that involves this compute node
        # as the destination host AND the source host, and the resource
        # tracker has a few instances assigned to it, including the
        # instance that is resizing to this same compute node. The tracking
        # of resource amounts takes into account both the old and new
        # resize instance types as taking up space on the node.
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        self._setup_rt()
        migr_obj = _MIGRATION_FIXTURES['source-and-dest']
        migr_mock.return_value = [migr_obj]
        inst_uuid = migr_obj.instance_uuid
        # The resizing instance has already had its instance type
        # changed to the *new* instance type (the bigger one, instance type 2)
        resizing_instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
        resizing_instance.migration_context = (
            _MIGRATION_CONTEXT_FIXTURES[resizing_instance.uuid])
        all_instances = _INSTANCE_FIXTURES + [resizing_instance]
        get_mock.return_value = all_instances
        get_inst_mock.return_value = resizing_instance
        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
        update_mock = self._update_available_resources()
        get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                            'fake-node')
        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected_resources.update({
            # host is added in update_available_resources()
            # before calling _update()
            'host': 'fake-host',
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            # 6 total - 1G existing - 5G new flav - 1G old flav
            'free_disk_gb': -1,
            'hypervisor_version': 0,
            'local_gb': 6,
            # 512 total - 128 existing - 256 new flav - 128 old flav
            'free_ram_mb': 0,
            'memory_mb_used': 512,  # 128 exist + 256 new flav + 128 old flav
            'pci_device_pools': objects.PciDevicePoolList(),
            # See NOTE(jaypipes) above for reason why this isn't accurate until
            # _update() is called.
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 7,  # 1G existing, 5G new flav + 1 old flav
            'memory_mb': 512,
            'current_workload': 1,  # One migrating instance...
            'vcpus': 4,
            'running_vms': 2
        })
        update_mock.assert_called_once_with(mock.sentinel.ctx)
        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                                 self.rt.compute_node))
class TestInitComputeNode(BaseTestCase):
    """Tests for ResourceTracker._init_compute_node().

    _init_compute_node() must (a) do nothing when a compute node is already
    cached on the tracker, (b) load an existing ComputeNode record from the
    DB when one exists, and (c) create a brand-new ComputeNode record when
    the lookup raises NotFound.
    """
    @mock.patch('nova.objects.ComputeNode.create')
    @mock.patch('nova.objects.Service.get_by_compute_host')
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    def test_no_op_init_compute_node(self, get_mock, service_mock,
                                     create_mock):
        # A compute node is already set on the tracker, so neither the
        # DB lookup nor the create path may be exercised.
        self._setup_rt()
        resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
        compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        self.rt.compute_node = compute_node
        self.rt._init_compute_node(mock.sentinel.ctx, resources)
        self.assertFalse(service_mock.called)
        self.assertFalse(get_mock.called)
        self.assertFalse(create_mock.called)
        self.assertFalse(self.rt.disabled)
    @mock.patch('nova.objects.ComputeNode.create')
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    def test_compute_node_loaded(self, get_mock, create_mock):
        # The DB lookup succeeds, so no new ComputeNode may be created.
        self._setup_rt()
        def fake_get_node(_ctx, host, node):
            # Return a fresh copy so the fixture itself is never mutated.
            res = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
            return res
        get_mock.side_effect = fake_get_node
        resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
        self.rt._init_compute_node(mock.sentinel.ctx, resources)
        get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                         'fake-node')
        self.assertFalse(create_mock.called)
        self.assertFalse(self.rt.disabled)
    @mock.patch('nova.objects.ComputeNode.create')
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    def test_compute_node_created_on_empty(self, get_mock, create_mock):
        # The DB lookup raises NotFound, so a ComputeNode must be created
        # from the virt driver resources plus tracker-supplied fields.
        self._setup_rt()
        get_mock.side_effect = exc.NotFound
        cpu_alloc_ratio = 1.0
        ram_alloc_ratio = 1.0
        resources = {
            'host_ip': '1.1.1.1',
            'numa_topology': None,
            'metrics': '[]',
            'cpu_info': '',
            'hypervisor_hostname': 'fakehost',
            'free_disk_gb': 6,
            'hypervisor_version': 0,
            'local_gb': 6,
            'free_ram_mb': 512,
            'memory_mb_used': 0,
            'pci_device_pools': [],
            'vcpus_used': 0,
            'hypervisor_type': 'fake',
            'local_gb_used': 0,
            'memory_mb': 512,
            'current_workload': 0,
            'vcpus': 4,
            'running_vms': 0,
            'pci_passthrough_devices': '[]'
        }
        # The expected compute represents the initial values used
        # when creating a compute node.
        expected_compute = objects.ComputeNode(
            host_ip=resources['host_ip'],
            vcpus=resources['vcpus'],
            memory_mb=resources['memory_mb'],
            local_gb=resources['local_gb'],
            cpu_info=resources['cpu_info'],
            vcpus_used=resources['vcpus_used'],
            memory_mb_used=resources['memory_mb_used'],
            local_gb_used=resources['local_gb_used'],
            numa_topology=resources['numa_topology'],
            hypervisor_type=resources['hypervisor_type'],
            hypervisor_version=resources['hypervisor_version'],
            hypervisor_hostname=resources['hypervisor_hostname'],
            # NOTE(sbauza): ResourceTracker adds host field
            host='fake-host',
            # NOTE(sbauza): ResourceTracker adds CONF allocation ratios
            ram_allocation_ratio=ram_alloc_ratio,
            cpu_allocation_ratio=cpu_alloc_ratio,
        )
        # Forcing the flags to the values we know
        self.rt.ram_allocation_ratio = ram_alloc_ratio
        self.rt.cpu_allocation_ratio = cpu_alloc_ratio
        self.rt._init_compute_node(mock.sentinel.ctx, resources)
        self.assertFalse(self.rt.disabled)
        get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                         'fake-node')
        create_mock.assert_called_once_with()
        self.assertTrue(obj_base.obj_equal_prims(expected_compute,
                                                 self.rt.compute_node))
    def test_copy_resources_adds_allocation_ratios(self):
        # _copy_resources() must stamp the CONF allocation ratios onto the
        # tracker's compute node.
        self.flags(cpu_allocation_ratio=4.0, ram_allocation_ratio=3.0)
        self._setup_rt()
        resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
        compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        self.rt.compute_node = compute_node
        self.rt._copy_resources(resources)
        self.assertEqual(4.0, self.rt.compute_node.cpu_allocation_ratio)
        self.assertEqual(3.0, self.rt.compute_node.ram_allocation_ratio)
class TestUpdateComputeNode(BaseTestCase):
    """Tests for ResourceTracker._update().

    _update() must push resource stats to the scheduler client only when
    the resources actually changed since the previous call.
    """
    @mock.patch('nova.objects.Service.get_by_compute_host')
    def test_existing_compute_node_updated_same_resources(self, service_mock):
        self._setup_rt()
        # This is the same set of resources as the fixture, deliberately. We
        # are checking below to see that update_resource_stats() is not
        # needlessly called when the resources don't actually change.
        compute = objects.ComputeNode(
            host_ip='1.1.1.1',
            numa_topology=None,
            metrics='[]',
            cpu_info='',
            hypervisor_hostname='fakehost',
            free_disk_gb=6,
            hypervisor_version=0,
            local_gb=6,
            free_ram_mb=512,
            memory_mb_used=0,
            pci_device_pools=objects.PciDevicePoolList(),
            vcpus_used=0,
            hypervisor_type='fake',
            local_gb_used=0,
            memory_mb=512,
            current_workload=0,
            vcpus=4,
            running_vms=0,
            cpu_allocation_ratio=16.0,
            ram_allocation_ratio=1.5,
        )
        self.rt.compute_node = compute
        self.rt._update(mock.sentinel.ctx)
        self.assertFalse(self.rt.disabled)
        self.assertFalse(service_mock.called)
        # The above call to _update() will populate the
        # RT.old_resources collection with the resources. Here, we check that
        # if we call _update() again with the same resources, that
        # the scheduler client won't be called again to update those
        # (unchanged) resources for the compute node
        self.sched_client_mock.reset_mock()
        urs_mock = self.sched_client_mock.update_resource_stats
        self.rt._update(mock.sentinel.ctx)
        self.assertFalse(urs_mock.called)
    @mock.patch('nova.objects.Service.get_by_compute_host')
    def test_existing_compute_node_updated_new_resources(self, service_mock):
        self._setup_rt()
        # Deliberately changing local_gb_used, vcpus_used, and memory_mb_used
        # below to be different from the compute node fixture's base usages.
        # We want to check that the code paths update the stored compute node
        # usage records with what is supplied to _update().
        compute = objects.ComputeNode(
            host='fake-host',
            host_ip='1.1.1.1',
            numa_topology=None,
            metrics='[]',
            cpu_info='',
            hypervisor_hostname='fakehost',
            free_disk_gb=2,
            hypervisor_version=0,
            local_gb=6,
            free_ram_mb=384,
            memory_mb_used=128,
            pci_device_pools=objects.PciDevicePoolList(),
            vcpus_used=2,
            hypervisor_type='fake',
            local_gb_used=4,
            memory_mb=512,
            current_workload=0,
            vcpus=4,
            running_vms=0,
            cpu_allocation_ratio=16.0,
            ram_allocation_ratio=1.5,
        )
        expected_resources = copy.deepcopy(compute)
        expected_resources.stats = {}
        expected_resources.vcpus = 4
        expected_resources.vcpus_used = 2
        self.rt.compute_node = compute
        self.rt.ext_resources_handler.reset_resources(self.rt.compute_node,
                                                      self.rt.driver)
        # This emulates the behavior that occurs in the
        # RT.update_available_resource() method, which updates resource
        # information in the ERT differently than all other resources.
        self.rt.ext_resources_handler.update_from_instance(dict(vcpus=2))
        self.rt._update(mock.sentinel.ctx)
        self.assertFalse(self.rt.disabled)
        self.assertFalse(service_mock.called)
        # Changed resources must be pushed to the scheduler exactly once.
        urs_mock = self.sched_client_mock.update_resource_stats
        urs_mock.assert_called_once_with(self.rt.compute_node)
class TestInstanceClaim(BaseTestCase):
    """Tests for ResourceTracker.instance_claim() and update_usage()."""
    def setUp(self):
        super(TestInstanceClaim, self).setUp()
        self._setup_rt()
        self.rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        # not using mock.sentinel.ctx because instance_claim calls #elevated
        self.ctx = mock.MagicMock()
        self.elevated = mock.MagicMock()
        self.ctx.elevated.return_value = self.elevated
        self.instance = _INSTANCE_FIXTURES[0].obj_clone()
    def assertEqualNUMAHostTopology(self, expected, got):
        """Compare two NUMA host topologies cell-by-cell on the attributes
        that matter for usage accounting; raise AssertionError on mismatch.
        """
        attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
        if None in (expected, got):
            if expected != got:
                raise AssertionError("Topologies don't match. Expected: "
                                     "%(expected)s, but got: %(got)s" %
                                     {'expected': expected, 'got': got})
            else:
                return
        if len(expected) != len(got):
            raise AssertionError("Topologies don't match due to different "
                                 "number of cells. Expected: "
                                 "%(expected)s, but got: %(got)s" %
                                 {'expected': expected, 'got': got})
        for exp_cell, got_cell in zip(expected.cells, got.cells):
            for attr in attrs:
                if getattr(exp_cell, attr) != getattr(got_cell, attr):
                    raise AssertionError("Topologies don't match. Expected: "
                                         "%(expected)s, but got: %(got)s" %
                                         {'expected': expected, 'got': got})
    def test_claim_disabled(self):
        # With no compute node the tracker is disabled; a claim must still
        # assign host/node to the instance and return a no-op claim.
        self.rt.compute_node = None
        self.assertTrue(self.rt.disabled)
        with mock.patch.object(self.instance, 'save'):
            claim = self.rt.instance_claim(mock.sentinel.ctx, self.instance,
                                           None)
        self.assertEqual(self.rt.host, self.instance.host)
        self.assertEqual(self.rt.host, self.instance.launched_on)
        self.assertEqual(self.rt.nodename, self.instance.node)
        self.assertIsInstance(claim, claims.NopClaim)
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    def test_update_usage_with_claim(self, migr_mock, pci_mock):
        # Test that RT.update_usage() only changes the compute node
        # resources if there has been a claim first.
        pci_mock.return_value = objects.InstancePCIRequests(requests=[])
        expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        self.rt.update_usage(self.ctx, self.instance)
        self.assertTrue(obj_base.obj_equal_prims(expected,
                                                 self.rt.compute_node))
        disk_used = self.instance.root_gb + self.instance.ephemeral_gb
        expected.update({
            'local_gb_used': disk_used,
            'memory_mb_used': self.instance.memory_mb,
            'free_disk_gb': expected['local_gb'] - disk_used,
            "free_ram_mb": expected['memory_mb'] - self.instance.memory_mb,
            'running_vms': 1,
            # vcpus are claimed by the ERT in RT._update(), which is mocked
            # out below...
            'vcpus_used': 0,
            'pci_device_pools': objects.PciDevicePoolList(),
        })
        with mock.patch.object(self.rt, '_update') as update_mock:
            with mock.patch.object(self.instance, 'save'):
                self.rt.instance_claim(self.ctx, self.instance, None)
            update_mock.assert_called_once_with(self.elevated)
        self.assertTrue(obj_base.obj_equal_prims(expected,
                                                 self.rt.compute_node))
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    def test_claim(self, migr_mock, pci_mock):
        # A successful claim must consume disk/RAM and bump running_vms.
        self.assertFalse(self.rt.disabled)
        pci_mock.return_value = objects.InstancePCIRequests(requests=[])
        disk_used = self.instance.root_gb + self.instance.ephemeral_gb
        expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        expected.update({
            'local_gb_used': disk_used,
            'memory_mb_used': self.instance.memory_mb,
            'free_disk_gb': expected['local_gb'] - disk_used,
            "free_ram_mb": expected['memory_mb'] - self.instance.memory_mb,
            'running_vms': 1,
            # vcpus are claimed by the ERT in RT._update(), which is mocked
            # out below...
            'vcpus_used': 0,
            'pci_device_pools': objects.PciDevicePoolList(),
        })
        with mock.patch.object(self.rt, '_update') as update_mock:
            with mock.patch.object(self.instance, 'save'):
                self.rt.instance_claim(self.ctx, self.instance, None)
            update_mock.assert_called_once_with(self.elevated)
        self.assertTrue(obj_base.obj_equal_prims(expected,
                                                 self.rt.compute_node))
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    def test_claim_abort(self, migr_mock, pci_mock):
        # Aborting a claim must release every resource it consumed.
        pci_mock.return_value = objects.InstancePCIRequests(requests=[])
        disk_used = self.instance.root_gb + self.instance.ephemeral_gb
        with mock.patch.object(self.instance, 'save'):
            claim = self.rt.instance_claim(self.ctx, self.instance, None)
            self.assertEqual(disk_used, self.rt.compute_node.local_gb_used)
            self.assertEqual(self.instance.memory_mb,
                             self.rt.compute_node.memory_mb_used)
            self.assertEqual(1, self.rt.compute_node.running_vms)
            claim.abort()
        self.assertEqual(0, self.rt.compute_node.local_gb_used)
        self.assertEqual(0, self.rt.compute_node.memory_mb_used)
        self.assertEqual(0, self.rt.compute_node.running_vms)
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    def test_claim_limits(self, migr_mock, pci_mock):
        # Zeroing any single limit must make the claim fail.
        self.assertFalse(self.rt.disabled)
        pci_mock.return_value = objects.InstancePCIRequests(requests=[])
        good_limits = {
            'memory_mb': _COMPUTE_NODE_FIXTURES[0]['memory_mb'],
            'disk_gb': _COMPUTE_NODE_FIXTURES[0]['local_gb'],
            'vcpu': _COMPUTE_NODE_FIXTURES[0]['vcpus'],
        }
        for key in good_limits.keys():
            bad_limits = copy.deepcopy(good_limits)
            bad_limits[key] = 0
            self.assertRaises(exc.ComputeResourcesUnavailable,
                              self.rt.instance_claim,
                              self.ctx, self.instance, bad_limits)
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    def test_claim_numa(self, migr_mock, pci_mock):
        # A claim with a NUMA topology must bump per-cell cpu/memory usage.
        self.assertFalse(self.rt.disabled)
        pci_mock.return_value = objects.InstancePCIRequests(requests=[])
        self.instance.numa_topology = _INSTANCE_NUMA_TOPOLOGIES['2mb']
        host_topology = _NUMA_HOST_TOPOLOGIES['2mb']
        self.rt.compute_node['numa_topology'] = host_topology._to_json()
        limits = {'numa_topology': _NUMA_LIMIT_TOPOLOGIES['2mb']}
        expected_numa = copy.deepcopy(host_topology)
        for cell in expected_numa.cells:
            cell.memory_usage += _2MB
            cell.cpu_usage += 1
        with mock.patch.object(self.rt, '_update') as update_mock:
            with mock.patch.object(self.instance, 'save'):
                self.rt.instance_claim(self.ctx, self.instance, limits)
            update_mock.assert_called_once_with(self.ctx.elevated())
            updated_compute_node = self.rt.compute_node
            new_numa = updated_compute_node['numa_topology']
            new_numa = objects.NUMATopology.obj_from_db_obj(new_numa)
            self.assertEqualNUMAHostTopology(expected_numa, new_numa)
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
class TestMoveClaim(BaseTestCase):
    """Tests for resize/migration claims (ResourceTracker.resize_claim()).

    The class-level patch decorators above inject the five mocks into every
    test method, in reverse decorator order.
    """
    def setUp(self):
        super(TestMoveClaim, self).setUp()
        self._setup_rt()
        self.rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        self.instance = _INSTANCE_FIXTURES[0].obj_clone()
        self.flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1]
        self.limits = {}
        # not using mock.sentinel.ctx because resize_claim calls #elevated
        self.ctx = mock.MagicMock()
        self.elevated = mock.MagicMock()
        self.ctx.elevated.return_value = self.elevated
        # Initialise extensible resource trackers
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        with contextlib.nested(
            mock.patch('nova.objects.InstanceList.get_by_host_and_node'),
            mock.patch('nova.objects.MigrationList.'
                       'get_in_progress_by_host_and_node')
        ) as (inst_list_mock, migr_mock):
            inst_list_mock.return_value = objects.InstanceList(objects=[])
            migr_mock.return_value = objects.MigrationList(objects=[])
            self.rt.update_available_resource(self.ctx)
    def register_mocks(self, pci_mock, inst_list_mock, inst_by_uuid,
                       migr_mock, inst_save_mock):
        # Stash the decorator-injected mocks on self so audit() can use them.
        pci_mock.return_value = objects.InstancePCIRequests(requests=[])
        self.inst_list_mock = inst_list_mock
        self.inst_by_uuid = inst_by_uuid
        self.migr_mock = migr_mock
        self.inst_save_mock = inst_save_mock
    def audit(self, rt, instances, migrations, migr_inst):
        # Run a resource audit with the given instances/migrations wired
        # into the stashed mocks.
        self.inst_list_mock.return_value = \
            objects.InstanceList(objects=instances)
        self.migr_mock.return_value = \
            objects.MigrationList(objects=migrations)
        self.inst_by_uuid.return_value = migr_inst
        rt.update_available_resource(self.ctx)
    def assertEqual(self, expected, actual):
        # For dicts, print every differing key before failing, which makes
        # resource-accounting mismatches much easier to diagnose.
        if type(expected) != dict or type(actual) != dict:
            super(TestMoveClaim, self).assertEqual(expected, actual)
            return
        fail = False
        for k, e in expected.items():
            a = actual[k]
            if e != a:
                print("%s: %s != %s" % (k, e, a))
                fail = True
        if fail:
            self.fail()
    def adjust_expected(self, expected, flavor):
        # Fold one flavor's worth of disk/RAM/vcpu usage into the expected
        # compute node.
        disk_used = flavor['root_gb'] + flavor['ephemeral_gb']
        expected.free_disk_gb -= disk_used
        expected.local_gb_used += disk_used
        expected.free_ram_mb -= flavor['memory_mb']
        expected.memory_mb_used += flavor['memory_mb']
        expected.vcpus_used += flavor['vcpus']
    @mock.patch('nova.objects.Flavor.get_by_id')
    def test_claim(self, flavor_mock, pci_mock, inst_list_mock, inst_by_uuid,
                   migr_mock, inst_save_mock):
        """Resize self.instance and check that the expected quantities of each
        resource have been consumed.
        """
        self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
                            inst_save_mock)
        self.driver_mock.get_host_ip_addr.return_value = "fake-ip"
        flavor_mock.return_value = objects.Flavor(**self.flavor)
        mig_context_obj = _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid]
        self.instance.migration_context = mig_context_obj
        expected = copy.deepcopy(self.rt.compute_node)
        self.adjust_expected(expected, self.flavor)
        create_mig_mock = mock.patch.object(self.rt, '_create_migration')
        mig_ctxt_mock = mock.patch('nova.objects.MigrationContext',
                                   return_value=mig_context_obj)
        with create_mig_mock as migr_mock, mig_ctxt_mock as ctxt_mock:
            migr_mock.return_value = _MIGRATION_FIXTURES['source-only']
            claim = self.rt.resize_claim(
                self.ctx, self.instance, self.flavor, None)
            self.assertEqual(1, ctxt_mock.call_count)
        self.assertIsInstance(claim, claims.MoveClaim)
        inst_save_mock.assert_called_once_with()
        self.assertTrue(obj_base.obj_equal_prims(expected,
                                                 self.rt.compute_node))
    def test_same_host(self, pci_mock, inst_list_mock, inst_by_uuid,
                       migr_mock, inst_save_mock):
        """Resize self.instance to the same host but with a different flavor.
        Then abort the claim. Check that the same amount of resources are
        available afterwards as we started with.
        """
        self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
                            inst_save_mock)
        migr_obj = _MIGRATION_FIXTURES['source-and-dest']
        self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']]
        self.instance._context = self.ctx
        mig_context_obj = _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid]
        self.instance.migration_context = mig_context_obj
        with mock.patch.object(self.instance, 'save'):
            self.rt.instance_claim(self.ctx, self.instance, None)
        expected = copy.deepcopy(self.rt.compute_node)
        create_mig_mock = mock.patch.object(self.rt, '_create_migration')
        mig_ctxt_mock = mock.patch('nova.objects.MigrationContext',
                                   return_value=mig_context_obj)
        with create_mig_mock as migr_mock, mig_ctxt_mock as ctxt_mock:
            migr_mock.return_value = migr_obj
            claim = self.rt.resize_claim(self.ctx, self.instance,
                                         _INSTANCE_TYPE_OBJ_FIXTURES[1], None)
            self.assertEqual(1, ctxt_mock.call_count)
        self.audit(self.rt, [self.instance], [migr_obj], self.instance)
        inst_save_mock.assert_called_once_with()
        self.assertNotEqual(expected, self.rt.compute_node)
        claim.instance.migration_context = mig_context_obj
        with mock.patch('nova.objects.MigrationContext._destroy') as destroy_m:
            claim.abort()
        self.assertTrue(obj_base.obj_equal_prims(expected,
                                                 self.rt.compute_node))
        destroy_m.assert_called_once_with(self.ctx, claim.instance.uuid)
    def test_revert_reserve_source(
            self, pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
            inst_save_mock):
        """Check that the source node of an instance migration reserves
        resources until the migration has completed, even if the migration is
        reverted.
        """
        self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
                            inst_save_mock)
        # Get our migrations, instances and itypes in a row
        src_migr = _MIGRATION_FIXTURES['source-only']
        src_instance = (
            _MIGRATION_INSTANCE_FIXTURES[src_migr['instance_uuid']].obj_clone()
        )
        src_instance.migration_context = (
            _MIGRATION_CONTEXT_FIXTURES[src_instance.uuid])
        old_itype = _INSTANCE_TYPE_FIXTURES[src_migr['old_instance_type_id']]
        dst_migr = _MIGRATION_FIXTURES['dest-only']
        dst_instance = (
            _MIGRATION_INSTANCE_FIXTURES[dst_migr['instance_uuid']].obj_clone()
        )
        new_itype = _INSTANCE_TYPE_FIXTURES[dst_migr['new_instance_type_id']]
        dst_instance.migration_context = (
            _MIGRATION_CONTEXT_FIXTURES[dst_instance.uuid])
        # Set up the destination resource tracker
        # update_available_resource to initialise extensible resource trackers
        src_rt = self.rt
        (dst_rt, _, _) = setup_rt("other-host", "other-node")
        dst_rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        inst_list_mock.return_value = objects.InstanceList(objects=[])
        dst_rt.update_available_resource(self.ctx)
        # Register the instance with dst_rt
        expected = copy.deepcopy(dst_rt.compute_node)
        with mock.patch.object(dst_instance, 'save'):
            dst_rt.instance_claim(self.ctx, dst_instance)
        self.adjust_expected(expected, new_itype)
        expected.stats = {'num_task_resize_migrating': 1,
                          'io_workload': 1,
                          'num_instances': 1,
                          'num_proj_fake-project': 1,
                          'num_vm_active': 1,
                          'num_os_type_fake-os': 1}
        expected.current_workload = 1
        expected.running_vms = 1
        self.assertTrue(obj_base.obj_equal_prims(expected,
                                                 dst_rt.compute_node))
        # Provide the migration via a mock, then audit dst_rt to check that
        # the instance + migration resources are not double-counted
        self.audit(dst_rt, [dst_instance], [dst_migr], dst_instance)
        self.assertTrue(obj_base.obj_equal_prims(expected,
                                                 dst_rt.compute_node))
        # Audit src_rt with src_migr
        expected = copy.deepcopy(src_rt.compute_node)
        self.adjust_expected(expected, old_itype)
        self.audit(src_rt, [], [src_migr], src_instance)
        self.assertTrue(obj_base.obj_equal_prims(expected,
                                                 src_rt.compute_node))
        # Flag the instance as reverting and re-audit
        src_instance['vm_state'] = vm_states.RESIZED
        src_instance['task_state'] = task_states.RESIZE_REVERTING
        self.audit(src_rt, [], [src_migr], src_instance)
        self.assertTrue(obj_base.obj_equal_prims(expected,
                                                 src_rt.compute_node))
    def test_update_available_resources_migration_no_context(self, pci_mock,
            inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock):
        """When migrating onto older nodes - it is possible for the
        migration_context record to be missing. Confirm resource audit works
        regardless.
        """
        self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
                            inst_save_mock)
        migr_obj = _MIGRATION_FIXTURES['source-and-dest']
        self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']]
        self.instance.migration_context = None
        expected = copy.deepcopy(self.rt.compute_node)
        self.adjust_expected(expected, self.flavor)
        self.audit(self.rt, [], [migr_obj], self.instance)
        self.assertTrue(obj_base.obj_equal_prims(expected,
                                                 self.rt.compute_node))
    def test_dupe_filter(self, pci_mock, inst_list_mock, inst_by_uuid,
                         migr_mock, inst_save_mock):
        # The same migration supplied twice must only be tracked once.
        self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
                            inst_save_mock)
        migr_obj = _MIGRATION_FIXTURES['source-and-dest']
        # This is good enough to prevent a lazy-load; value is unimportant
        migr_obj['updated_at'] = None
        self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']]
        self.instance.migration_context = (
            _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid])
        self.audit(self.rt, [], [migr_obj, migr_obj], self.instance)
        self.assertEqual(1, len(self.rt.tracked_migrations))
class TestInstanceInResizeState(test.NoDBTestCase):
    """Checks for resource_tracker._instance_in_resize_state()."""
    def _in_resize_state(self, vm_state, task_state):
        # Build a minimal Instance with just the two relevant state fields
        # and ask the resource tracker about it.
        inst = objects.Instance(vm_state=vm_state, task_state=task_state)
        return resource_tracker._instance_in_resize_state(inst)
    def test_active_suspending(self):
        self.assertFalse(self._in_resize_state(vm_states.ACTIVE,
                                               task_states.SUSPENDING))
    def test_resized_suspending(self):
        self.assertTrue(self._in_resize_state(vm_states.RESIZED,
                                              task_states.SUSPENDING))
    def test_resized_resize_migrating(self):
        self.assertTrue(self._in_resize_state(vm_states.RESIZED,
                                              task_states.RESIZE_MIGRATING))
    def test_resized_resize_finish(self):
        self.assertTrue(self._in_resize_state(vm_states.RESIZED,
                                              task_states.RESIZE_FINISH))
|
|
"""
Contains information relevant to Instrument object
Classes and Functions
---------------------
"""
import operator
import unidecode
from nordb.core.validationTools import validateFloat
from nordb.core.validationTools import validateInteger
from nordb.core.validationTools import validateString
from nordb.core.validationTools import validateDate
from nordb.core.utils import addString2String
from nordb.core.utils import addInteger2String
from nordb.core.utils import addFloat2String
from nordb.core.utils import stringToDate
class Instrument:
    """
    Class for instrument information. Comes from css instrument format.

    :param array data: all the relevant data for Sensor in an array. These values are accessed by its numerations.

    :ivar int header_type: 12
    :ivar Response response: response file to which this response is attached to
    :ivar string band: Frequency band. Maximum of 1 character
    :ivar string instrument_name: Name of the instrument. Maximum of 50 characters
    :ivar string instrument_type: Type of the instrument. Maximum of 6 characters
    :ivar string digital: data type: d - digital a - analog. Maximum of 1 characters
    :ivar float samprate: sampling rate in samples/sec
    :ivar float ncalib: nominal calibration (nn/count)
    :ivar float ncalper: nominal calibration period (sec)
    :ivar string resp_dir: Directory for instrument response file. Maximum of 64 characters
    :ivar string dfile: Maximum of 32 characters
    :ivar string rsptype: response type. Maximum of 6 characters
    :ivar int i_id: id of this instrument
    :ivar int css_id: css id of this instrument
    :ivar int response_id: id of the response of this instrument
    :ivar date lddate: load date
    :ivar int INSTRUMENT_NAME: Enumeration of the data list. Value of 0
    :ivar int INSTRUMENT_TYPE: Enumeration of the data list. Value of 1
    :ivar int BAND: Enumeration of the data list. Value of 2
    :ivar int DIGITAL: Enumeration of the data list. Value of 3
    :ivar int SAMPRATE: Enumeration of the data list. Value of 4
    :ivar int NCALIB: Enumeration of the data list. Value of 5
    :ivar int NCALPER: Enumeration of the data list. Value of 6
    :ivar int RESP_DIR: Enumeration of the data list. Value of 7
    :ivar int DFILE: Enumeration of the data list. Value of 8
    :ivar int RSPTYPE: Enumeration of the data list. Value of 9
    :ivar int LDDATE: Enumeration of the data list. Value of 10
    :ivar int I_ID: Enumeration of the data list. Value of 11
    :ivar int CSS_ID: Enumeration of the data list. Value of 12
    :ivar int RESPONSE_ID: Enumeration of the data list. Value of 13
    """
    header_type = 12
    # Indexes into the ``data`` array handed to __init__()
    INSTRUMENT_NAME = 0
    INSTRUMENT_TYPE = 1
    BAND = 2
    DIGITAL = 3
    SAMPRATE = 4
    NCALIB = 5
    NCALPER = 6
    RESP_DIR = 7
    DFILE = 8
    RSPTYPE = 9
    LDDATE = 10
    I_ID = 11
    CSS_ID = 12
    RESPONSE_ID = 13
    def __init__(self, data = None):
        # With no data, build an empty instrument; otherwise unpack the
        # array positionally using the enumeration constants above. Each
        # assignment goes through the validating property setters below.
        if data is None:
            self.response = None
            self.instrument_name = None
            self.instrument_type = None
            self.band = None
            self.digital = None
            self.samprate = None
            self.ncalib = None
            self.ncalper = None
            self.resp_dir = None
            self.dfile = None
            self.rsptype = None
            self.lddate = None
            self.i_id = -1
            self.css_id = -1
            self.response_id = -1
        else:
            self.response = None
            self.instrument_name = data[self.INSTRUMENT_NAME]
            self.instrument_type = data[self.INSTRUMENT_TYPE]
            self.band = data[self.BAND]
            self.digital = data[self.DIGITAL]
            self.samprate = data[self.SAMPRATE]
            self.ncalib = data[self.NCALIB]
            self.ncalper = data[self.NCALPER]
            self.resp_dir = data[self.RESP_DIR]
            self.dfile = data[self.DFILE]
            self.rsptype = data[self.RSPTYPE]
            self.lddate = data[self.LDDATE]
            self.i_id = data[self.I_ID]
            self.css_id = data[self.CSS_ID]
            self.response_id = data[self.RESPONSE_ID]
    # Each public attribute below is a property whose setter validates the
    # incoming value (type, length/range) via nordb.core.validationTools
    # before storing it on the underscore-prefixed backing attribute.
    instrument_name = property(operator.attrgetter('_instrument_name'), doc="")
    @instrument_name.setter
    def instrument_name(self, val):
        val_instrument_name = validateString(val, "instrument_name", 0, 50, None, self.header_type)
        self._instrument_name = val_instrument_name
    instrument_type = property(operator.attrgetter('_instrument_type'), doc="")
    @instrument_type.setter
    def instrument_type(self, val):
        val_instrument_type = validateString(val, "instrument_type", 0, 6, None, self.header_type)
        self._instrument_type = val_instrument_type
    band = property(operator.attrgetter('_band'), doc="")
    @band.setter
    def band(self, val):
        val_band = validateString(val, "band", 0, 1, None, self.header_type)
        self._band = val_band
    digital = property(operator.attrgetter('_digital'), doc="")
    @digital.setter
    def digital(self, val):
        val_digital = validateString(val, "digital", 0, 1, None, self.header_type)
        self._digital = val_digital
    samprate = property(operator.attrgetter('_samprate'), doc="")
    @samprate.setter
    def samprate(self, val):
        val_samprate = validateFloat(val, "samprate", 0.0, 1000.0, self.header_type)
        self._samprate = val_samprate
    ncalib = property(operator.attrgetter('_ncalib'), doc="")
    @ncalib.setter
    def ncalib(self, val):
        val_ncalib = validateFloat(val, "ncalib", -1.0, 10000.0, self.header_type)
        self._ncalib = val_ncalib
    ncalper = property(operator.attrgetter('_ncalper'), doc="")
    @ncalper.setter
    def ncalper(self, val):
        val_ncalper = validateFloat(val, "ncalper", -1.0, 10000.0, self.header_type)
        self._ncalper = val_ncalper
    resp_dir = property(operator.attrgetter('_resp_dir'), doc="")
    @resp_dir.setter
    def resp_dir(self, val):
        val_resp_dir = validateString(val, "resp_dir", 0, 64, None, self.header_type)
        self._resp_dir = val_resp_dir
    dfile = property(operator.attrgetter('_dfile'), doc="")
    @dfile.setter
    def dfile(self, val):
        val_dfile = validateString(val, "dfile", 0, 32, None, self.header_type)
        self._dfile = val_dfile
    rsptype = property(operator.attrgetter('_rsptype'), doc="")
    @rsptype.setter
    def rsptype(self, val):
        val_rsptype = validateString(val, "rsptype", 0, 6, None, self.header_type)
        self._rsptype = val_rsptype
    lddate = property(operator.attrgetter('_lddate'), doc="")
    @lddate.setter
    def lddate(self, val):
        val_lddate = validateDate(val, "lddate", self.header_type)
        self._lddate = val_lddate
    css_id = property(operator.attrgetter('_css_id'), doc="")
    @css_id.setter
    def css_id(self, val):
        val_css_id = validateInteger(val, "css_id", None, None, self.header_type)
        self._css_id = val_css_id
    response_id = property(operator.attrgetter('_response_id'), doc="")
    @response_id.setter
    def response_id(self, val):
        val_response_id = validateInteger(val, "response_id", None, None, self.header_type)
        self._response_id = val_response_id
    def __str__(self):
        """Return this instrument formatted as a fixed-width css
        instrument line (same column layout that
        readInstrumentStringToInstrument() parses).
        """
        instrumentString = ""
        instrumentString += addInteger2String(self.css_id, 8, '>')
        instrumentString += " "
        instrumentString += addString2String(self.instrument_name, 50, '<')
        instrumentString += " "
        instrumentString += addString2String(self.instrument_type, 6, '<')
        instrumentString += " "
        instrumentString += addString2String(self.band, 2, '<')
        instrumentString += addString2String(self.digital, 2, '<')
        instrumentString += addFloat2String (self.samprate, 11, 6, '>')
        instrumentString += " "
        instrumentString += addFloat2String (self.ncalib, 13, 6, '>')
        instrumentString += " "
        instrumentString += addFloat2String(self.ncalper, 13, 6, '>')
        instrumentString += " "
        instrumentString += addString2String(self.resp_dir, 64, '<')
        instrumentString += " "
        instrumentString += addString2String(self.dfile, 32, '<')
        instrumentString += " "
        instrumentString += addString2String(self.rsptype, 6, '<')
        instrumentString += " "
        instrumentString += addString2String(self.lddate.strftime("%Y-%b-%d"), 11, '<')
        return instrumentString
    def getAsList(self):
        """Return the instrument's fields as a flat list, in css column
        order (css_id first; note that i_id is NOT included).
        """
        instrument_list = [ self.css_id,
                            self.instrument_name,
                            self.instrument_type,
                            self.band,
                            self.digital,
                            self.samprate,
                            self.ncalib,
                            self.ncalper,
                            self.resp_dir,
                            self.dfile,
                            self.rsptype,
                            self.lddate,
                            self.response_id]
        return instrument_list
def readInstrumentStringToInstrument(ins_line):
    """
    Function for reading instrument info to a Instrument object

    :param str ins_line: css instrument line
    :returns: Instrument object
    """
    instrument = [None]*14
    # Fixed-width css columns; unidecode transliterates any non-ASCII text.
    instrument[Instrument.INSTRUMENT_NAME] = unidecode.unidecode(ins_line[8:58].strip())
    instrument[Instrument.INSTRUMENT_TYPE] = unidecode.unidecode(ins_line[60:67].strip())
    instrument[Instrument.BAND] = unidecode.unidecode(ins_line[67].strip())
    instrument[Instrument.DIGITAL] = unidecode.unidecode(ins_line[69].strip())
    instrument[Instrument.SAMPRATE] = unidecode.unidecode(ins_line[70:82].strip())
    instrument[Instrument.NCALIB] = unidecode.unidecode(ins_line[82:100].strip())
    instrument[Instrument.NCALPER] = unidecode.unidecode(ins_line[101:116].strip())
    instrument[Instrument.RESP_DIR] = unidecode.unidecode(ins_line[117:182].strip())
    instrument[Instrument.DFILE] = unidecode.unidecode(ins_line[182:215].strip())
    instrument[Instrument.RSPTYPE] = unidecode.unidecode(ins_line[215:228].strip())
    # BUGFIX: transliterate the raw date *text* first, then parse it.
    # Previously unidecode was applied to the result of stringToDate(),
    # but unidecode expects a string, not a parsed date value.
    instrument[Instrument.LDDATE] = stringToDate(unidecode.unidecode(ins_line[228:].strip()))
    instrument[Instrument.I_ID] = -1
    instrument[Instrument.CSS_ID] = unidecode.unidecode(ins_line[:8].strip())
    instrument[Instrument.RESPONSE_ID] = -1
    return Instrument(instrument)
|
|
import pandas as pd
import os
import subprocess as sub
import re
import sys
from Bio import SeqUtils
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from scipy import stats as st
import matplotlib as mpl
#
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
# rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
# #
#
# NOTE(review): a list-valued 'text.latex.preamble' is deprecated in newer
# matplotlib (>=3.3 expects a single string) -- confirm the pinned
# matplotlib version before upgrading.
mpl.rcParams['text.latex.preamble'] = [
       r'\usepackage{textcomp}',   # i need upright \micro symbols, but you need...
       # r'\sisetup{detect-all}',   # ...this to force siunitx to actually use your fonts
       r'\usepackage{helvet}',    # set the normal font here
       r'\usepackage{sansmath}',  # load up the sansmath so that math -> helvet
       r'\sansmath'               # <- tricky! -- gotta actually tell tex to use!
]
#
font = {#'family' : 'sans-serif',
        #'weight' : 'bold',
        'size'   :9}
rc('font', **font)
# # data loading ...
# path = os.path.join(os.path.expanduser('~'),'GENOMES_BACTER_RELEASE69/genbank')
root_path = os.path.expanduser('~')
bact_path = os.path.join(root_path,'GENOMES_BACTER_RELEASE69/genbank')
arch_path = os.path.join(root_path,'GENOMES_ARCH_SEP2015')
# this is BACTERIA script, so just do
path = bact_path
# column names used throughout: organism id and optimum growth temperature ...
uid = 'GenomicID'
topt_id = 'OptimumTemperature'
# sorted 20-letter amino acid alphabet; all 20-vectors below align with it ...
aacids = sorted(list('CMFILVWYAGTSNQDEHRKP'))
the_num_of_quantiles = 5
#######################
def get_one_trop(all_cds_grouped, idx):
    """Return the translational-optimization ('TrOp') status of organism *idx*.

    Looks up the organism's CDS rows in the grouped frame and maps their
    single shared 'TrOp' value to one of three strings:
      'none'  -- value is null (not enough ribosomal proteins to decide),
      'true'  -- organism is translationally optimized,
      'false' -- it is not.
    Raises ValueError on any unexpected value; asserts that all CDS rows
    of the organism agree on a single 'TrOp' value.
    """
    unique_trop = all_cds_grouped.get_group(idx)['TrOp'].unique()
    # every CDS row of one organism must carry the same TrOp flag ...
    assert unique_trop.size == 1
    status, = unique_trop
    if pd.isnull(status):
        # special return - not enough ribosomal proteins ...
        return 'none'
    if status == True:
        return 'true'
    if not status:
        return 'false'
    raise ValueError
def get_quantiles_summary(cds_cai_dat, num_of_quantiles, R20_vec_compare, vec_cost):
    """Summarize CDS amino-acid composition per CAI quantile.

    Splits one organism's CDS table into *num_of_quantiles* equally
    populated CAI bins (pandas qcut) and computes, per bin:
      * f_IVYWREL -- fraction of I,V,Y,W,R,E,L residues,
      * R20       -- linregress r-value between the bin's 20-aa frequency
                     vector and *R20_vec_compare*,
      * cost      -- dot product of the frequency vector with *vec_cost*
                     (e.g. Akashi metabolic cost).

    Returns a tuple of three lists (f_IVYWREL, R20, cost), one entry per
    quantile ordered from lowest to highest CAI.  Relies on the
    module-level ``aacids`` alphabet for the 20-vector ordering.
    """
    # we can use this 'qcut' function from pandas to divide our proteins by the quantiles ...
    category, bins = pd.qcut(cds_cai_dat['CAI'], q=num_of_quantiles, retbins=True, labels=False)
    fivywrel_cat, r20_cat, cost_cat = [], [], []
    for cat in range(num_of_quantiles):
        cds_cai_category = cds_cai_dat[category == cat]
        # total proteins length in quantile, for AA frequency calculations ...
        # (the per-quantile mean length computed by the previous version was
        # never used, so that dead code is dropped)
        total_length = cds_cai_category['protein'].str.len().sum()
        IVYWREL = sum(cds_cai_category['protein'].str.count(aa).sum() for aa in 'IVYWREL')
        f_IVYWREL = float(IVYWREL) / float(total_length)
        # 20-vector of amino acid composition ...
        aa_freq_20 = np.true_divide(
            [cds_cai_category['protein'].str.count(aa).sum() for aa in aacids],
            float(total_length))
        # slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
        _1, _2, R20, _4, _5 = stats.linregress(aa_freq_20, R20_vec_compare)
        # Akashi ...
        cost = np.dot(aa_freq_20, vec_cost)
        # storing info ...
        fivywrel_cat.append(f_IVYWREL)
        r20_cat.append(R20)
        cost_cat.append(cost)
    # returning ...
    return (fivywrel_cat, r20_cat, cost_cat)
def quantile_summary_storage_init(num_of_quantiles=5):
    """Build the empty result dict for the per-quantile summaries.

    Keys: organism id (uid), optimum temperature (topt_id), 'TrOp', plus
    'q%d', 'R20_q%d' and 'Akashi_q%d' for every quantile index; every
    key maps to an empty list that is appended to while iterating
    organisms.
    """
    storage = {uid: [], topt_id: [], 'TrOp': []}
    for q in range(num_of_quantiles):
        for pattern in ('q%d', 'R20_q%d', 'Akashi_q%d'):
            storage[pattern % q] = []
    return storage
# ['DbxRefs','Description','FeaturesNum','assembly_accession','GenomicLen','GenomicName','Keywords','NucsPresent','Organism_des',
# 'SourceDbxRefs','SourceOrganism','SourcePlasmid','SourceStrain','Taxonomy','BioProject','TaxonID','Organism_env',
# 'OptimumTemperature','TemperatureRange','OxygenReq','Habitat','Salinity','crit_NC','crit_WGS','crit_genlen',
# 'crit_features','crit_comp_genome','crit_plasmid']
# env_dat = pd.read_csv(os.path.join(path,"summary_organisms_interest.dat"))
# environmental/phenotype catalog: one row per complete-genome organism ...
env_dat = pd.read_csv(os.path.join(path,'env_catalog_compgenome.dat'))
#['assembly_accession','cDNA','fid','pid','product','protein','status','table','ribosomal','CAI','TrOp']
# per-CDS table for the codon-shuffled ("Rnd") genomes, with CAI per CDS ...
gen_dat = pd.read_csv(os.path.join(path,"complete_CDS_CAI_DNA_Rnd.dat"))
#
# same per-CDS table for the untouched (original) genomes ...
original_cai_dat = pd.read_csv(os.path.join(path,"complete_CDS_CAI_DNA.dat"))
#
# PROTEOME LEVEL AMINO ACID FREQUENCIES ...
# "proteome_all.dat"
# # file with the organisms of interest
# dat_fname = os.path.join(bib2_scr_path,'catalog_with_accesion.dat')
# dat = pd.read_csv(dat_fname)
#
# amino-acid cost vectors and thermophile frequency vector,
# space-separated two-column files: residue letter, value ...
cost_vec_path = path
akashi = os.path.join(cost_vec_path,'akashi-cost.d')
argentina = os.path.join(cost_vec_path,'argentina-cost.d')
#
akashi_cost = pd.read_csv(akashi,header=None,sep=' ')
argentina_cost = pd.read_csv(argentina,header=None,sep=' ')
thermo_freq = pd.read_csv(os.path.join(path,'thermo.dat'),header=None,sep=' ')
#
# index by residue letter (column 0) ...
akashi_cost.set_index(0,inplace=True)
argentina_cost.set_index(0,inplace=True)
thermo_freq.set_index(0,inplace=True)
#
# sort by residue letter so these vectors align with the sorted aacids list ...
akashi_cost.sort_index(inplace=True)
argentina_cost.sort_index(inplace=True)
thermo_freq.sort_index(inplace=True)
#
#####################################################
# QUANTILE STATISTICAL DATA EXTRACTION ....
#####################################################
gen_dat_org = gen_dat.groupby(uid)
original_gen_dat_org = original_cai_dat.groupby(uid)
#
# data structure to store info ...
stat_dat = quantile_summary_storage_init(num_of_quantiles=the_num_of_quantiles)
# data structure to ORIGINAL store info ...
original_stat_dat = quantile_summary_storage_init(num_of_quantiles=the_num_of_quantiles)
# start iterating over different organisms ...
for idx,topt in env_dat[[uid,topt_id]].itertuples(index=False):
    # NOTE(review): the < 1000 test presumably filters sentinel/placeholder
    # temperature values -- confirm the encoding used in the catalog.
    if topt < 1000:
        # halophiles already excluded ...
        cds_cai_dat = gen_dat_org.get_group(idx)
        original_cds_cai_dat = original_gen_dat_org.get_group(idx)
        # is it a translationally optimized organism ?
        # after messing up codons, 0 organisms are going to be TrOp..
        # so, just use the original definition to check if TrOp affects the results ...
        trop_status = get_one_trop(original_gen_dat_org,idx)
        #
        if trop_status != 'none':
            # fill in the record for each quantile (random-shuffled) ...
            stat_dat[uid].append(idx)
            stat_dat[topt_id].append(topt)
            stat_dat['TrOp'].append(trop_status)
            counter = 0
            # zip(*...) turns the three per-quantile lists into one
            # (f_IVYWREL, R20, cost) tuple per quantile ...
            for fivywrel_qs,r20_qs,cost_qs in zip(*get_quantiles_summary(cds_cai_dat,the_num_of_quantiles,thermo_freq[1],akashi_cost[1])):
                stat_dat['q%d'%counter].append(fivywrel_qs)
                stat_dat['R20_q%d'%counter].append(r20_qs)
                stat_dat['Akashi_q%d'%counter].append(cost_qs)
                # stat_dat['ProtLen_q%d'%counter].append(protlen_qs)
                counter += 1
            ########################################
            # fill in the record for each quantile (ORIGINAL) ...
            original_stat_dat[uid].append(idx)
            original_stat_dat[topt_id].append(topt)
            original_stat_dat['TrOp'].append(trop_status)
            counter = 0
            for fivywrel_qs,r20_qs,cost_qs in zip(*get_quantiles_summary(original_cds_cai_dat,the_num_of_quantiles,thermo_freq[1],akashi_cost[1])):
                original_stat_dat['q%d'%counter].append(fivywrel_qs)
                original_stat_dat['R20_q%d'%counter].append(r20_qs)
                original_stat_dat['Akashi_q%d'%counter].append(cost_qs)
                # original_stat_dat['ProtLen_q%d'%counter].append(protlen_qs)
                counter += 1
#
# one row per organism; split by translational-optimization status ...
cai_stats_quant = pd.DataFrame(stat_dat)
cai_stats_quant_TrOp = cai_stats_quant[cai_stats_quant['TrOp']=='true']
cai_stats_quant_noTrOp = cai_stats_quant[cai_stats_quant['TrOp']=='false']
#
original_cai_stats_quant = pd.DataFrame(original_stat_dat)
original_cai_stats_quant_TrOp = original_cai_stats_quant[original_cai_stats_quant['TrOp']=='true']
original_cai_stats_quant_noTrOp = original_cai_stats_quant[original_cai_stats_quant['TrOp']=='false']
#
###############################################
# PLOTTING ...
###############################################
# k1, k2, k3 = 'q%d'%i,'R20_q%d'%i, 'Akashi_q%d'%i
def quantile_plotter(dat,kx,fname,ax=None,savefig=False,title='',color='blue',lims=False,x_offset=0):
    """Plot per-quantile means (+/- std) of the column family *kx*, with a linear fit.

    dat      -- summary DataFrame (one row per organism),
    kx       -- key helper; kx(i, label=True) -> (column name, y-axis label),
    fname    -- output file name (used only when savefig=True),
    ax       -- existing axes to draw on; a fresh one is used when None,
    lims     -- optional (min, max) pair to unify y-ranges across calls,
    x_offset -- small horizontal shift so overlaid series don't overlap.
    Returns the axes when savefig is False; otherwise saves the figure.
    """
    #############################
    def lims_to_range(local_lims,ext_coeff = 1.1):
        # small conversion func to turn data range limits
        # into slightly extended limits for plotting ...
        # NOTE(review): extension is measured from local_lims[0] only,
        # i.e. it assumes a roughly symmetric range -- confirm intent.
        mean = 0.5*sum(local_lims)
        ext_half = ext_coeff*abs(local_lims[0] - mean)
        return (mean - ext_half, mean + ext_half)
    ###############################
    if ax is None:
        plt.clf()
        ax = plt.gca()
    else:
        pass
    ###############################
    x_toplot, y_toplot, err_plot = [],[],[]
    for i in range(the_num_of_quantiles):
        kxi,ylabel = kx(i,label=True)
        x_toplot.append(i+1+x_offset)
        y_toplot.append(dat[kxi].mean())
        err_plot.append(dat[kxi].std())
    # plotting ...
    ax.errorbar(x_toplot,y_toplot,yerr=err_plot,fmt='o',color=color, label=title, mew=0,ms=6)
    ax.set_xlim(0.5,5.5)
    ax.set_ylabel(ylabel)
    ax.set_xlabel('CAI quantile')
    #
    #########################
    # linear trend across quantiles, annotated with R and p-value ...
    a,b,r,pval,_ = st.linregress(x_toplot,y_toplot)
    # label = flabel(r,pval)
    x_toplot = np.asarray(x_toplot)
    f_label = lambda r,pval: "linear fit, R=%.2f "%r + (r"$p=%.3f$"%pval if pval>=0.001 else r"$p<0.001$")
    ax.plot(x_toplot,a*x_toplot+b,'-',color=color,lw=2,label=f_label(r,pval))
    ######################
    # # ...
    # if title:
    #     ax.set_title(title)
    if lims:
        ax.set_ylim( lims_to_range(lims) )
    # make room for legends ...
    ymin,ymax = ax.get_ylim()
    y_span = ymax-ymin
    ax.set_ylim((ymin-0.5*y_span,ymax+0.5*y_span))
    #
    # save figures on demand ...
    if savefig:
        ax.legend(loc='best',ncol=2)
        plt.savefig(fname)
    else:
        return ax
def update_lims(dat, fkey, old_lims=False):
    """Compute (min, max) of mean +/- std across all quantile columns of *dat*.

    *fkey* maps a quantile index to its column name.  When *old_lims* is
    given, the freshly computed range is merged with it so the result
    covers both; pass False (the default) to start a new range.  Uses the
    module-level ``the_num_of_quantiles``.
    """
    quantile_cols = [dat[fkey(i)] for i in range(the_num_of_quantiles)]
    lo = min(col.mean() - col.std() for col in quantile_cols)
    hi = max(col.mean() + col.std() for col in quantile_cols)
    if old_lims:
        # widen the previous range to also cover the new one ...
        return (min(lo, old_lims[0]), max(hi, old_lims[1]))
    return (lo, hi)
def IVYWREL_key(i, label=False):
    """Column key for quantile *i*'s IVYWREL fraction; (key, ylabel) if label."""
    key = 'q%d' % i
    return (key, 'IVYWREL') if label else key
def R20_key(i, label=False):
    """Column key for quantile *i*'s R20 correlation; (key, ylabel) if label."""
    key = 'R20_q%d' % i
    return (key, 'R20 self-exp_T') if label else key
def Akashi_key(i, label=False):
    """Column key for quantile *i*'s Akashi cost; (key, ylabel) if label."""
    key = 'Akashi_q%d' % i
    return (key, 'Akashi cost') if label else key
# def ProtLen_key(i,label=False):
# return ('ProtLen_q%d'%i,'mean protein length') if label else 'ProtLen_q%d'%i
# Shared y-ranges so the shuffled and original panels are directly
# comparable; only the TrOp subsets are plotted below, the other variants
# are kept commented for reference.
# unified limits for the IVYWREL ...
# IVYWREL_lims = update_lims(cai_stats_quant,IVYWREL_key)
# IVYWREL_lims = update_lims(cai_stats_quant_noTrOp,IVYWREL_key,old_lims=IVYWREL_lims)
# IVYWREL_lims = update_lims(cai_stats_quant_TrOp,IVYWREL_key,old_lims=IVYWREL_lims)
IVYWREL_lims = update_lims(cai_stats_quant_TrOp,IVYWREL_key)
# IVYWREL_lims = update_lims(original_cai_stats_quant,IVYWREL_key,old_lims=IVYWREL_lims)
# IVYWREL_lims = update_lims(original_cai_stats_quant_noTrOp,IVYWREL_key,old_lims=IVYWREL_lims)
# IVYWREL_lims = update_lims(original_cai_stats_quant_TrOp,IVYWREL_key,old_lims=IVYWREL_lims)
IVYWREL_lims = update_lims(original_cai_stats_quant_TrOp,IVYWREL_key,old_lims=IVYWREL_lims)
# unified limits for the R20 ...
# R20_lims = update_lims(cai_stats_quant,R20_key)
# R20_lims = update_lims(cai_stats_quant_noTrOp,R20_key,old_lims=R20_lims)
# R20_lims = update_lims(cai_stats_quant_TrOp,R20_key,old_lims=R20_lims)
R20_lims = update_lims(cai_stats_quant_TrOp,R20_key)
# R20_lims = update_lims(original_cai_stats_quant,R20_key,old_lims=R20_lims)
# R20_lims = update_lims(original_cai_stats_quant_noTrOp,R20_key,old_lims=R20_lims)
# R20_lims = update_lims(original_cai_stats_quant_TrOp,R20_key,old_lims=R20_lims)
R20_lims = update_lims(original_cai_stats_quant_TrOp,R20_key,old_lims=R20_lims)
# unified limits for the Akashi ...
# Akashi_lims = update_lims(cai_stats_quant,Akashi_key)
# Akashi_lims = update_lims(cai_stats_quant_noTrOp,Akashi_key,old_lims=Akashi_lims)
# Akashi_lims = update_lims(cai_stats_quant_TrOp,Akashi_key,old_lims=Akashi_lims)
Akashi_lims = update_lims(cai_stats_quant_TrOp,Akashi_key)
# Akashi_lims = update_lims(original_cai_stats_quant,Akashi_key,old_lims=Akashi_lims)
# Akashi_lims = update_lims(original_cai_stats_quant_noTrOp,Akashi_key,old_lims=Akashi_lims)
# Akashi_lims = update_lims(original_cai_stats_quant_TrOp,Akashi_key,old_lims=Akashi_lims)
Akashi_lims = update_lims(original_cai_stats_quant_TrOp,Akashi_key,old_lims=Akashi_lims)
#########################
def get_fname_title(keyid, kingdom, shuff_stat, trop_stat):
    """Build the output file name and the human-readable plot title.

    keyid      -- metric name ('IVYWREL', 'R20', 'Akashi'),
    kingdom    -- 'arch' or 'bact',
    shuff_stat -- 'shuff' (codon-shuffled) or anything else for original,
    trop_stat  -- 'trop', 'notrop', or anything else for "all organisms".
    Returns a (fname, title) tuple.
    """
    # NOTE: the 'qunatile' typo is kept on purpose so that existing output
    # file names stay stable.
    fname = "%s_%s_qunatile_trend.%s.%s.png" % (keyid, kingdom, shuff_stat, trop_stat)
    kingdom_label = 'Archaea' if kingdom == 'arch' else 'Bacteria'
    shuff_label = 'Shuffled' if shuff_stat == 'shuff' else 'Original'
    if trop_stat == 'trop':
        trop_label = 'CUS'
    elif trop_stat == 'notrop':
        trop_label = 'non-CUS'
    else:
        trop_label = 'All'
    title = "%s %s (%s)" % (shuff_label, kingdom_label, trop_label)
    return (fname, title)
# TROP ONLY ...
# For each metric, draw the shuffled (blue) and original (red) series on a
# single axes with opposite x-offsets; the figure is saved on the second call.
fname,title = get_fname_title('IVYWREL','bact','shuff','trop')
ax_IVYWREL = quantile_plotter(cai_stats_quant_TrOp,IVYWREL_key,fname,ax=None,savefig=False,title=title,lims=IVYWREL_lims,x_offset=-0.05)
fname,title = get_fname_title('IVYWREL','bact','original','trop')
quantile_plotter(original_cai_stats_quant_TrOp,IVYWREL_key,fname,ax=ax_IVYWREL,savefig=True,title=title,color='red',lims=IVYWREL_lims,x_offset=0.05)
fname,title = get_fname_title('R20','bact','shuff','trop')
ax_R20 = quantile_plotter(cai_stats_quant_TrOp,R20_key,fname,ax=None,savefig=False,title=title,lims=R20_lims,x_offset=-0.05)
fname,title = get_fname_title('R20','bact','original','trop')
quantile_plotter(original_cai_stats_quant_TrOp,R20_key,fname,ax=ax_R20,savefig=True,title=title,color='red',lims=R20_lims,x_offset=0.05)
fname,title = get_fname_title('Akashi','bact','shuff','trop')
ax_Akashi = quantile_plotter(cai_stats_quant_TrOp,Akashi_key,fname,ax=None,savefig=False,title=title,lims=Akashi_lims,x_offset=-0.05)
fname,title = get_fname_title('Akashi','bact','original','trop')
quantile_plotter(original_cai_stats_quant_TrOp,Akashi_key,fname,ax=ax_Akashi,savefig=True,title=title,color='red',lims=Akashi_lims,x_offset=0.05)
def quantile_plotter_II(dat,kx,fname,ax=None,savefig=False,title='',color='blue',lims=False,x_offset=0):
    """Bar-chart variant of quantile_plotter: per-quantile medians +/- std.

    Same arguments as quantile_plotter; draws bars instead of points,
    skips the linear fit, and shows the figure interactively instead of
    saving it to *fname*.
    """
    #############################
    def lims_to_range(local_lims,ext_coeff = 1.1):
        # small conversion func to turn data range limits
        # into slightly extended limits for plotting ...
        mean = 0.5*sum(local_lims)
        ext_half = ext_coeff*abs(local_lims[0] - mean)
        return (mean - ext_half, mean + ext_half)
    ###############################
    if ax is None:
        plt.clf()
        ax = plt.gca()
    else:
        pass
    ###############################
    x_toplot, y_toplot, err_plot = [],[],[]
    for i in range(the_num_of_quantiles):
        kxi,ylabel = kx(i,label=True)
        x_toplot.append(i+1+x_offset)
        # y_toplot.append(dat[kxi].mean())
        y_toplot.append(dat[kxi].median())
        err_plot.append(dat[kxi].std())
    # plotting ...
    #
    # ax.errorbar(x_toplot,y_toplot,yerr=err_plot,fmt='o',color=color, label=title, mew=0,ms=6)
    rects1 = ax.bar(x_toplot, y_toplot, width=0.35, color=color, yerr=err_plot, label=title,error_kw=dict(elinewidth=2,ecolor='black'))
    # error_kw=dict(elinewidth=2,ecolor='red')
    #
    #
    ax.set_xlim(0.5,5.5)
    ax.set_ylabel(ylabel)
    ax.set_xlabel('CAI quantile')
    #
    # #########################
    # a,b,r,pval,_ = st.linregress(x_toplot,y_toplot)
    # # label = flabel(r,pval)
    # x_toplot = np.asarray(x_toplot)
    # f_label = lambda r,pval: "linear fit, R=%.2f "%r + (r"$p=%.3f$"%pval if pval>=0.001 else r"$p<0.001$")
    # ax.plot(x_toplot,a*x_toplot+b,'-',color=color,lw=2,label=f_label(r,pval))
    # ######################
    # # ...
    # if title:
    #     ax.set_title(title)
    if lims:
        ax.set_ylim( lims_to_range(lims) )
    # make room for legends ...
    ymin,ymax = ax.get_ylim()
    y_span = ymax-ymin
    ax.set_ylim((ymin-0.2*y_span,ymax))
    #
    # save figures on demand ...
    if savefig:
        ax.legend(loc='lower left',ncol=2)
        # plt.savefig(fname)
        plt.show()
    else:
        return ax
# re-draw of the R20 panel (same calls as above, kept for interactive use) ...
fname,title = get_fname_title('R20','bact','shuff','trop')
ax_R20 = quantile_plotter(cai_stats_quant_TrOp,R20_key,fname,ax=None,savefig=False,title=title,lims=R20_lims,x_offset=-0.05)
fname,title = get_fname_title('R20','bact','original','trop')
quantile_plotter(original_cai_stats_quant_TrOp,R20_key,fname,ax=ax_R20,savefig=True,title=title,color='red',lims=R20_lims,x_offset=0.05)
###########################################
from scipy.stats import norm
# (norm is only needed if the commented-out fit/overlay lines below are
# re-enabled; the import is kept so they can be toggled back on.)
# Distribution plots underlying the per-quantile R20 summaries:
# one histogram row per CAI quantile, shuffled (left) vs original (right).
# original_cai_stats_quant_TrOp
ccc = ['R20_q0', 'R20_q1', 'R20_q2', 'R20_q3', 'R20_q4']
# ccc = ['Akashi_q0', 'Akashi_q1', 'Akashi_q2', 'Akashi_q3', 'Akashi_q4']
# shared bin edges spanning the original data's full range ...
xmin = original_cai_stats_quant_TrOp[ccc].min().min()
xmax = original_cai_stats_quant_TrOp[ccc].max().max()
bins = np.linspace(xmin,xmax,50)
plt.clf()
# 5 quantile rows x 2 columns (shuffled | original) with shared axes ...
fig,ax = plt.subplots(nrows=5,ncols=2,sharex=True,sharey=True,figsize=(6,6))
temp_threshold = 500
for i,c in enumerate(ccc):
    # Fit a normal distribution to the data:
    dat = cai_stats_quant_TrOp[cai_stats_quant_TrOp['OptimumTemperature']<temp_threshold][c]
    # mu, std = norm.fit(dat)
    # p = norm.pdf(bins, mu, std)
    # FIX: Series.as_matrix() was deprecated and removed in pandas 1.0;
    # .values returns the same ndarray on every pandas version.
    ax[len(ccc)-i-1,0].hist(dat.values,bins=bins,color='blue',alpha=0.4,linewidth=0,log=False)
    # ax[len(ccc)-i-1,0].plot(bins, p, color='blue', linewidth=2)
    ax[len(ccc)-i-1,0].axvline(dat.mean(), color='blue', linewidth=3, label='mean')
    #
    ax[len(ccc)-i-1,0].set_ylabel(r"quintile %d"%(i+1))
    if i==0:
        ax[len(ccc)-i-1,0].set_xlabel(r"$R_T$")
        ax[len(ccc)-i-1,0].legend(loc='upper left',frameon=False)
    if i==(len(ccc)-1):
        ax[len(ccc)-i-1,0].set_title("shuffled codons")
    ax[len(ccc)-i-1,0].yaxis.set_ticks_position('left')
    ax[len(ccc)-i-1,0].xaxis.set_ticks_position('bottom')
    #
    #
    dat = original_cai_stats_quant_TrOp[original_cai_stats_quant_TrOp['OptimumTemperature']<temp_threshold][c]
    # mu, std = norm.fit(dat)
    # p = norm.pdf(bins, mu, std)
    # FIX: .as_matrix() -> .values here as well (see note above).
    ax[len(ccc)-i-1,1].hist(dat.values,bins=bins,color='red',alpha=0.4,linewidth=0,log=False)
    # ax[len(ccc)-i-1].hist(cai_stats_quant_TrOp[c],bins=bins,color='blue',alpha=0.7)
    # ax[len(ccc)-i-1,1].plot(bins, p, color='red', linewidth=2)
    ax[len(ccc)-i-1,1].axvline(dat.mean(), color='red', linewidth=3, label='mean')
    if i==0:
        ax[len(ccc)-i-1,1].set_xlabel(r"$R_T$")
        ax[len(ccc)-i-1,1].legend(loc='upper left',frameon=False)
    if i==(len(ccc)-1):
        ax[len(ccc)-i-1,1].set_title("original data")
    ax[len(ccc)-i-1,1].yaxis.set_ticks_position('left')
    ax[len(ccc)-i-1,1].xaxis.set_ticks_position('bottom')
# plt.show()
plt.tight_layout()
plt.savefig("RT_underhood_distro.png",dpi=300)
####################################################################################
# EXTRA ANALYSIS FOR TROY'S LINEAR MODEL STUFF ...
####################################################################################
# gen_dat_org = gen_dat.groupby(uid)
# original_gen_dat_org = original_cai_dat.groupby(uid)
#
# def quantile_summary_storage_init(num_of_quantiles=5):
# data structure to store info ...
def storage_init():
    """Build the empty per-protein ('unrolled') storage dict.

    One list per output column: organism id (uid), protein id and length,
    optimum temperature (topt_id), CAI, TrOp status, plus one frequency
    column per amino acid in the module-level ``aacids`` alphabet.
    """
    storage = {uid: [],
               'prot_id': [],
               'prot_len': [],
               topt_id: [],
               'CAI': [],
               'TrOp': []}
    for aa in aacids:
        storage[aa] = []
    return storage
#
#
# data structure to store info ...
unroll_stat_dat = storage_init()
# data structure to ORIGINAL store info ...
unroll_original_stat_dat = storage_init()
# start iterating over different organisms ...
# NOTE(review): unlike the quantile loop above there is no `topt < 1000`
# filter here -- presumably safe because only TrOp=='true' organisms are
# kept, but confirm sentinel temperature values cannot slip through.
for idx,topt in env_dat[[uid,topt_id]].itertuples(index=False):
    # halophiles already excluded ...
    cds_cai_dat = gen_dat_org.get_group(idx)
    original_cds_cai_dat = original_gen_dat_org.get_group(idx)
    # is it a translationally optimized organism ?
    # after messing up codons, 0 organisms are going to be TrOp..
    # so, just use the original definition to check if TrOp affects the results ...
    trop_status = get_one_trop(original_gen_dat_org,idx)
    #
    if trop_status == 'true':
        #
        # # fill in the record for each protein ...
        # [uid, 'pid', 'protein', 'CAI', 'TrOp']
        # [uid, 'pid', 'protein', 'CAI', 'TrOp']
        # cds_cai_dat
        #
        # SHUFFLED CODONS INFORMATION GATHERING ...
        for pid,prot,cai in cds_cai_dat[['pid', 'protein', 'CAI']].itertuples(index=False):
            ########################
            # some calculations: per-protein 20-aa frequency vector ...
            prot_len = len(prot)
            aa_freq_20 = np.true_divide([prot.count(aa) for aa in aacids],float(prot_len))
            # print aa_freq_20
            ########################
            # storing the data ...
            unroll_stat_dat[uid].append(idx)
            unroll_stat_dat['prot_id'].append(pid)
            unroll_stat_dat['prot_len'].append(prot_len)
            unroll_stat_dat[topt_id].append(topt)
            unroll_stat_dat['CAI'].append(cai)
            unroll_stat_dat['TrOp'].append(trop_status)
            for aa_id, aa in enumerate(aacids):
                unroll_stat_dat[aa].append(aa_freq_20[aa_id])
        #
        # ORIGINAL GENOMES INFORMATION GATHERING ...
        for pid,prot,cai in original_cds_cai_dat[['pid', 'protein', 'CAI']].itertuples(index=False):
            ########################
            # some calculations ...
            prot_len = len(prot)
            aa_freq_20 = np.true_divide([prot.count(aa) for aa in aacids],float(prot_len))
            # print aa_freq_20
            ########################
            # storing the data ...
            unroll_original_stat_dat[uid].append(idx)
            unroll_original_stat_dat['prot_id'].append(pid)
            unroll_original_stat_dat['prot_len'].append(prot_len)
            unroll_original_stat_dat[topt_id].append(topt)
            unroll_original_stat_dat['CAI'].append(cai)
            unroll_original_stat_dat['TrOp'].append(trop_status)
            for aa_id, aa in enumerate(aacids):
                unroll_original_stat_dat[aa].append(aa_freq_20[aa_id])
#
#
# flat per-protein tables (TrOp organisms only) ...
flat_stats_shuffled = pd.DataFrame(unroll_stat_dat)
flat_stats_original = pd.DataFrame(unroll_original_stat_dat)
#
####################################################################################
# [uid, 'prot_id', 'prot_len', topt_id, 'CAI'] + aacids
|
|
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base test cases for all neutron tests.
"""
import contextlib
import gc
import logging as std_logging
import os
import os.path
import random
import traceback
import weakref
import eventlet.timeout
import fixtures
import mock
from oslo_concurrency.fixture import lockutils
from oslo_config import cfg
from oslo_messaging import conffixture as messaging_conffixture
from oslo_utils import strutils
import testtools
from neutron.agent.linux import external_process
from neutron.callbacks import manager as registry_manager
from neutron.callbacks import registry
from neutron.common import config
from neutron.common import rpc as n_rpc
from neutron.db import agentschedulers_db
from neutron import manager
from neutron import policy
from neutron.tests import fake_notifier
from neutron.tests import post_mortem_debug
# Global oslo.config object shared by every test in this module ...
CONF = cfg.CONF
CONF.import_opt('state_path', 'neutron.common.config')
LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
# tests' own directory and its bundled etc/ config files ...
ROOTDIR = os.path.dirname(__file__)
ETCDIR = os.path.join(ROOTDIR, 'etc')
def etcdir(*p):
    """Return a path under the tests' etc/ directory."""
    return os.path.join(ETCDIR, *p)
def fake_use_fatal_exceptions(*args):
    """Monkeypatch target: always report fatal exceptions as enabled."""
    return True
def fake_consume_in_threads(self):
    """Monkeypatch target: pretend no RPC consumer threads were started."""
    return []
def get_rand_name(max_length=None, prefix='test'):
    """Return a random string starting with 'prefix'.

    When 'max_length' is given, the result is exactly 'max_length'
    characters: the prefix followed by random decimal digits; a
    ValueError is raised if the prefix alone already fills (or exceeds)
    'max_length'.  When 'max_length' is None, exactly 8 random
    hexadecimal characters are appended to the prefix.
    """
    if not max_length:
        # randint range guarantees an 8-character hex suffix ...
        return prefix + hex(random.randint(0x10000000, 0x7fffffff))[2:]
    n_random = max_length - len(prefix)
    if n_random <= 0:
        raise ValueError("'max_length' must be bigger than 'len(prefix)'.")
    digits = [str(random.randint(0, 9)) for _ in range(n_random)]
    return prefix + ''.join(digits)
def bool_from_env(key, strict=False, default=False):
    """Read environment variable *key* and coerce it to a boolean.

    Delegates the string-to-bool conversion (including the *strict* and
    *default* semantics) to oslo_utils' strutils.bool_from_string; an
    unset variable yields *default*.
    """
    raw_value = os.environ.get(key)
    return strutils.bool_from_string(raw_value, strict=strict, default=default)
def get_test_timeout(default=0):
    """Return the per-test timeout (seconds) from OS_TEST_TIMEOUT.

    Falls back to *default* when the environment variable is unset.
    BUG FIX: the previous version hard-coded 0 as the fallback and
    silently ignored the *default* argument.
    """
    return int(os.environ.get('OS_TEST_TIMEOUT', default))
class AttributeDict(dict):
    """
    Provide attribute access (dict.key) to dictionary values.
    """
    def __getattr__(self, name):
        """Fall back to item lookup; AttributeError for unknown keys."""
        try:
            return self[name]
        except KeyError:
            raise AttributeError(_("Unknown attribute '%s'.") % name)
class DietTestCase(testtools.TestCase):
    """Same great taste, less filling.

    BaseTestCase is responsible for doing lots of plugin-centric setup
    that not all tests require (or can tolerate). This class provides
    only functionality that is common across all tests.
    """
    def setUp(self):
        # Wire up post-mortem debugging, logging, timeouts, temp dirs and
        # stream capture before any test body runs.
        super(DietTestCase, self).setUp()
        # Configure this first to ensure pm debugging support for setUp()
        debugger = os.environ.get('OS_POST_MORTEM_DEBUGGER')
        if debugger:
            self.addOnException(post_mortem_debug.get_exception_handler(
                debugger))
        if bool_from_env('OS_DEBUG'):
            _level = std_logging.DEBUG
        else:
            _level = std_logging.INFO
        capture_logs = bool_from_env('OS_LOG_CAPTURE')
        if not capture_logs:
            std_logging.basicConfig(format=LOG_FORMAT, level=_level)
        self.log_fixture = self.useFixture(
            fixtures.FakeLogger(
                format=LOG_FORMAT,
                level=_level,
                nuke_handlers=capture_logs,
            ))
        # OS_TEST_TIMEOUT == -1 is treated as "no timeout" ...
        test_timeout = get_test_timeout()
        if test_timeout == -1:
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        # If someone does use tempfile directly, ensure that it's cleaned up
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())
        self.addCleanup(mock.patch.stopall)
        if bool_from_env('OS_STDOUT_CAPTURE'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if bool_from_env('OS_STDERR_CAPTURE'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        self.addOnException(self.check_for_systemexit)
    def check_for_systemexit(self, exc_info):
        # Fail loudly if the code under test tried to exit the process.
        if isinstance(exc_info[1], SystemExit):
            self.fail("A SystemExit was raised during the test. %s"
                      % traceback.format_exception(*exc_info))
    @contextlib.contextmanager
    def assert_max_execution_time(self, max_execution_time=5):
        # eventlet Timeout with exception=False silently unwinds the
        # with-block on expiry, so reaching self.fail() means the body
        # did not finish in time.
        with eventlet.timeout.Timeout(max_execution_time, False):
            yield
            return
        self.fail('Execution of this test timed out')
    def assertOrderedEqual(self, expected, actual):
        # Equality check that ignores the order of any nested list values.
        expect_val = self.sort_dict_lists(expected)
        actual_val = self.sort_dict_lists(actual)
        self.assertEqual(expect_val, actual_val)
    def sort_dict_lists(self, dic):
        # Recursively sort list values in place (Python 2: iteritems).
        for key, value in dic.iteritems():
            if isinstance(value, list):
                dic[key] = sorted(value)
            elif isinstance(value, dict):
                dic[key] = self.sort_dict_lists(value)
        return dic
    def assertDictSupersetOf(self, expected_subset, actual_superset):
        """Checks that actual dict contains the expected dict.

        After checking that the arguments are of the right type, this checks
        that each item in expected_subset is in, and matches, what is in
        actual_superset. Separate tests are done, so that detailed info can
        be reported upon failure.
        """
        if not isinstance(expected_subset, dict):
            self.fail("expected_subset (%s) is not an instance of dict" %
                      type(expected_subset))
        if not isinstance(actual_superset, dict):
            self.fail("actual_superset (%s) is not an instance of dict" %
                      type(actual_superset))
        for k, v in expected_subset.items():
            self.assertIn(k, actual_superset)
            self.assertEqual(v, actual_superset[k],
                             "Key %(key)s expected: %(exp)r, actual %(act)r" %
                             {'key': k, 'exp': v, 'act': actual_superset[k]})
class ProcessMonitorFixture(fixtures.Fixture):
    """Test fixture to capture and cleanup any spawn process monitor."""
    def setUp(self):
        super(ProcessMonitorFixture, self).setUp()
        self.instances = []
        # Remember the real spawner, then divert it so every monitor that
        # tries to start its checking thread gets recorded for cleanup.
        self.old_callable = (
            external_process.ProcessMonitor._spawn_checking_thread)
        patcher = mock.patch(
            "neutron.agent.linux.external_process.ProcessMonitor."
            "_spawn_checking_thread",
            new=lambda x: self.record_calls(x))
        patcher.start()
        self.addCleanup(self.stop)
    def stop(self):
        """Stop every ProcessMonitor captured during the test."""
        for monitor in self.instances:
            monitor.stop()
    def record_calls(self, instance):
        """Run the real spawner, then remember *instance* for cleanup."""
        self.old_callable(instance)
        self.instances.append(instance)
class BaseTestCase(DietTestCase):
@staticmethod
def config_parse(conf=None, args=None):
"""Create the default configurations."""
# neutron.conf.test includes rpc_backend which needs to be cleaned up
if args is None:
args = []
args += ['--config-file', etcdir('neutron.conf.test')]
if conf is None:
config.init(args=args)
else:
conf(args)
def setUp(self):
super(BaseTestCase, self).setUp()
# suppress all but errors here
capture_logs = bool_from_env('OS_LOG_CAPTURE')
self.useFixture(
fixtures.FakeLogger(
name='neutron.api.extensions',
format=LOG_FORMAT,
level=std_logging.ERROR,
nuke_handlers=capture_logs,
))
self.useFixture(lockutils.ExternalLockFixture())
cfg.CONF.set_override('state_path', self.get_default_temp_dir().path)
self.addCleanup(CONF.reset)
self.useFixture(ProcessMonitorFixture())
self.useFixture(fixtures.MonkeyPatch(
'neutron.common.exceptions.NeutronException.use_fatal_exceptions',
fake_use_fatal_exceptions))
self.setup_rpc_mocks()
self.setup_config()
self.setup_test_registry_instance()
policy.init()
self.addCleanup(policy.reset)
def get_new_temp_dir(self):
"""Create a new temporary directory.
:returns fixtures.TempDir
"""
return self.useFixture(fixtures.TempDir())
def get_default_temp_dir(self):
"""Create a default temporary directory.
Returns the same directory during the whole test case.
:returns fixtures.TempDir
"""
if not hasattr(self, '_temp_dir'):
self._temp_dir = self.get_new_temp_dir()
return self._temp_dir
def get_temp_file_path(self, filename, root=None):
"""Returns an absolute path for a temporary file.
If root is None, the file is created in default temporary directory. It
also creates the directory if it's not initialized yet.
If root is not None, the file is created inside the directory passed as
root= argument.
:param filename: filename
:type filename: string
:param root: temporary directory to create a new file in
:type root: fixtures.TempDir
:returns absolute file path string
"""
root = root or self.get_default_temp_dir()
return root.join(filename)
def setup_rpc_mocks(self):
# don't actually start RPC listeners when testing
self.useFixture(fixtures.MonkeyPatch(
'neutron.common.rpc.Connection.consume_in_threads',
fake_consume_in_threads))
self.useFixture(fixtures.MonkeyPatch(
'oslo_messaging.Notifier', fake_notifier.FakeNotifier))
self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
self.messaging_conf.transport_driver = 'fake'
# NOTE(russellb) We want all calls to return immediately.
self.messaging_conf.response_timeout = 0
self.useFixture(self.messaging_conf)
self.addCleanup(n_rpc.clear_extra_exmods)
n_rpc.add_extra_exmods('neutron.test')
self.addCleanup(n_rpc.cleanup)
n_rpc.init(CONF)
def setup_test_registry_instance(self):
"""Give a private copy of the registry to each test."""
self._callback_manager = registry_manager.CallbacksManager()
mock.patch.object(registry, '_get_callback_manager',
return_value=self._callback_manager).start()
def setup_config(self, args=None):
"""Tests that need a non-default config can override this method."""
self.config_parse(args=args)
def config(self, **kw):
"""Override some configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a group argument is supplied, the overrides are applied to
the specified configuration option group.
All overrides are automatically cleared at the end of the current
test by the fixtures cleanup process.
"""
group = kw.pop('group', None)
for k, v in kw.iteritems():
CONF.set_override(k, v, group)
def setup_coreplugin(self, core_plugin=None):
self.useFixture(PluginFixture(core_plugin))
def setup_notification_driver(self, notification_driver=None):
    """Point notifications at the in-memory fake notifier.

    :param notification_driver: drivers to configure; defaults to the
        fake notifier module when None.
    """
    self.addCleanup(fake_notifier.reset)
    driver = (notification_driver if notification_driver is not None
              else [fake_notifier.__name__])
    cfg.CONF.set_override("notification_driver", driver)
class PluginFixture(fixtures.Fixture):
    """Fixture that installs a core plugin and guarantees its teardown."""

    def __init__(self, core_plugin=None):
        # Core plugin path to load; None keeps the currently configured one.
        self.core_plugin = core_plugin

    def setUp(self):
        super(PluginFixture, self).setUp()
        # Prevent the DHCP scheduler from spawning real periodic checks.
        self.dhcp_periodic_p = mock.patch(
            'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.'
            'start_periodic_dhcp_agent_status_check')
        self.patched_dhcp_periodic = self.dhcp_periodic_p.start()
        # Plugin cleanup should be triggered last so that
        # test-specific cleanup has a chance to release references.
        self.addCleanup(self.cleanup_core_plugin)
        if self.core_plugin is not None:
            cfg.CONF.set_override('core_plugin', self.core_plugin)

    def cleanup_core_plugin(self):
        """Ensure that the core plugin is deallocated."""
        nm = manager.NeutronManager
        if not nm.has_instance():
            return
        # TODO(marun) Fix plugins that do not properly initialize notifiers
        agentschedulers_db.AgentSchedulerDbMixin.agent_notifiers = {}
        # Perform a check for deallocation only if explicitly
        # configured to do so since calling gc.collect() after every
        # test increases test suite execution time by ~50%.
        check_plugin_deallocation = (
            bool_from_env('OS_CHECK_PLUGIN_DEALLOCATION'))
        if check_plugin_deallocation:
            # Weak reference lets us detect whether anything still holds
            # the plugin once the manager instance is cleared.
            plugin = weakref.ref(nm._instance.plugin)
        nm.clear_instance()
        if check_plugin_deallocation:
            gc.collect()
            # TODO(marun) Ensure that mocks are deallocated?
            if plugin() and not isinstance(plugin(), mock.Base):
                raise AssertionError(
                    'The plugin for this test was not deallocated.')
|
|
"""Views for the node settings page."""
# -*- coding: utf-8 -*-
import datetime
import httplib as http
from requests.exceptions import SSLError
from flask import request
from modularodm import Q
from modularodm.storage.base import KeyExistsException
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from website.addons.base import generic_views
from website.addons.dataverse import client
from website.addons.dataverse.model import DataverseProvider
from website.addons.dataverse.settings import DEFAULT_HOSTS
from website.addons.dataverse.serializer import DataverseSerializer
from dataverse.exceptions import VersionJsonNotFoundError
from website.oauth.models import ExternalAccount
from website.project.decorators import (
must_have_addon, must_be_addon_authorizer,
must_have_permission, must_not_be_registration,
must_be_contributor_or_public
)
from website.util import rubeus, api_url_for
from website.util.sanitize import assert_clean
# Addon identifiers used by the generic-view factories below.
SHORT_NAME = 'dataverse'
FULL_NAME = 'Dataverse'

# Standard account/auth/config views generated from the shared addon
# machinery, specialized with the Dataverse serializer.
dataverse_account_list = generic_views.account_list(
    SHORT_NAME,
    DataverseSerializer
)
dataverse_import_auth = generic_views.import_auth(
    SHORT_NAME,
    DataverseSerializer
)
dataverse_deauthorize_node = generic_views.deauthorize_node(
    SHORT_NAME
)
dataverse_get_config = generic_views.get_config(
    SHORT_NAME,
    DataverseSerializer
)
## Auth ##
@must_be_logged_in
def dataverse_user_config_get(auth, **kwargs):
    """View for getting a JSON representation of the logged-in user's
    Dataverse user settings.
    """
    user_addon = auth.user.get_addon('dataverse')
    user_has_auth = user_addon.has_auth if user_addon else False
    urls = {
        'create': api_url_for('dataverse_add_user_account'),
        'accounts': api_url_for('dataverse_account_list'),
    }
    return {
        'result': {
            'userHasAuth': user_has_auth,
            'urls': urls,
            'hosts': DEFAULT_HOSTS,
        },
    }, http.OK
## Config ##
@must_be_logged_in
def dataverse_add_user_account(auth, **kwargs):
    """Verifies new external account credentials and adds to user's list"""
    user = auth.user
    provider = DataverseProvider()
    # NOTE(review): assumes the request body always contains 'host'; a
    # missing key would raise AttributeError on rstrip -- confirm callers.
    host = request.json.get('host').rstrip('/')
    api_token = request.json.get('api_token')
    # Verify that credentials are valid
    client.connect_or_error(host, api_token)
    # Note: `DataverseSerializer` expects display_name to be a URL
    try:
        provider.account = ExternalAccount(
            provider=provider.short_name,
            provider_name=provider.name,
            display_name=host,  # no username; show host
            oauth_key=host,  # hijacked; now host
            oauth_secret=api_token,  # hijacked; now api_token
            provider_id=api_token,  # Change to username if Dataverse allows
        )
        provider.account.save()
    except KeyExistsException:
        # ... or get the old one
        provider.account = ExternalAccount.find_one(
            Q('provider', 'eq', provider.short_name) &
            Q('provider_id', 'eq', api_token)
        )
    if provider.account not in user.external_accounts:
        user.external_accounts.append(provider.account)
    user_addon = auth.user.get_addon('dataverse')
    if not user_addon:
        user.add_addon('dataverse')
    user.save()
    # Need to ensure that the user has dataverse enabled at this point
    user.get_or_add_addon('dataverse', auth=auth)
    user.save()
    return {}
@must_have_permission('write')
@must_have_addon(SHORT_NAME, 'user')
@must_have_addon(SHORT_NAME, 'node')
@must_be_addon_authorizer(SHORT_NAME)
def dataverse_set_config(node_addon, auth, **kwargs):
    """Saves selected Dataverse and dataset to node settings"""
    user_settings = node_addon.user_settings
    user = auth.user
    if user_settings and user_settings.owner != user:
        raise HTTPError(http.FORBIDDEN)
    try:
        assert_clean(request.json)
    except AssertionError:
        # TODO: Test me!
        raise HTTPError(http.NOT_ACCEPTABLE)
    alias = request.json.get('dataverse', {}).get('alias')
    doi = request.json.get('dataset', {}).get('doi')
    if doi is None or alias is None:
        # Bug fix: this previously *returned* the HTTPError instance,
        # which is not treated as an error response; raise it so the
        # client actually receives a 400.
        raise HTTPError(http.BAD_REQUEST)
    connection = client.connect_from_settings(node_addon)
    dataverse = client.get_dataverse(connection, alias)
    dataset = client.get_dataset(dataverse, doi)
    node_addon.set_folder(dataverse, dataset, auth)
    return {'dataverse': dataverse.title, 'dataset': dataset.title}, http.OK
@must_have_permission('write')
@must_have_addon(SHORT_NAME, 'user')
@must_have_addon(SHORT_NAME, 'node')
def dataverse_get_datasets(node_addon, **kwargs):
    """Get list of datasets from provided Dataverse alias"""
    alias = request.json.get('alias')
    connection = client.connect_from_settings(node_addon)
    dataverse = client.get_dataverse(connection, alias)
    dataset_list = [
        {'title': dataset.title, 'doi': dataset.doi}
        for dataset in client.get_datasets(dataverse)
    ]
    # include alias to verify dataset container
    return {'alias': alias, 'datasets': dataset_list}, http.OK
## Crud ##
@must_have_permission('write')
@must_not_be_registration
@must_have_addon(SHORT_NAME, 'node')
@must_be_addon_authorizer(SHORT_NAME)
def dataverse_publish_dataset(node_addon, auth, **kwargs):
    """Publish the node's linked dataset; optionally publish its
    containing dataverse first when 'publish_both' is set in the body.
    """
    node = node_addon.owner
    publish_both = request.json.get('publish_both', False)
    now = datetime.datetime.utcnow()
    connection = client.connect_from_settings_or_401(node_addon)
    dataverse = client.get_dataverse(connection, node_addon.dataverse_alias)
    dataset = client.get_dataset(dataverse, node_addon.dataset_doi)
    if publish_both:
        # Publish the containing dataverse first when requested.
        client.publish_dataverse(dataverse)
    client.publish_dataset(dataset)
    # Add a log
    node.add_log(
        action='dataverse_dataset_published',
        params={
            'project': node.parent_id,
            'node': node._id,
            'dataset': dataset.title,
        },
        auth=auth,
        log_date=now,
    )
    return {'dataset': dataset.title}, http.OK
## HGRID ##
def _dataverse_root_folder(node_addon, auth, **kwargs):
    """Build the HGrid root-folder data for the node's linked dataset.

    :returns: single-element list of rubeus root data, or an empty list
        when no dataset is linked or visible to the caller.
    """
    node = node_addon.owner
    # Bug fix: the original conditional selected 'latest-published' on
    # both of its branches (dead conditional); the published view is the
    # default, upgraded to 'latest' below for editors with no published
    # files.
    version = 'latest-published'
    # Quit if no dataset linked
    if not node_addon.complete:
        return []
    can_edit = node.can_edit(auth)
    permissions = {
        'edit': can_edit and not node.is_registration,
        'view': node.can_view(auth)
    }
    try:
        connection = client.connect_from_settings(node_addon)
        dataverse = client.get_dataverse(connection, node_addon.dataverse_alias)
        dataset = client.get_dataset(dataverse, node_addon.dataset_doi)
    except SSLError:
        # Host unreachable over SSL: show a bare root with no contents.
        return [rubeus.build_addon_root(
            node_addon,
            node_addon.dataset,
            permissions=permissions
        )]
    # Quit if doi does not produce a dataset
    if dataset is None:
        return []
    published_files = client.get_files(dataset, published=True)
    # Produce draft version or quit if no published version is available
    if not published_files:
        if can_edit:
            version = 'latest'
        else:
            return []
    urls = {
        'publish': node.api_url_for('dataverse_publish_dataset'),
    }
    # determine if there are any changes between the published and draft
    # versions of the dataset
    try:
        dataset.get_metadata('latest-published')
        dataset_is_published = True
        dataset_draft_modified = dataset.get_state() == 'DRAFT'
    except VersionJsonNotFoundError:
        dataset_is_published = False
        dataset_draft_modified = True
    # Get the dataverse host
    # (stored in oauth_key because dataverse doesn't use that)
    dataverse_host = node_addon.external_account.oauth_key
    return [rubeus.build_addon_root(
        node_addon,
        node_addon.dataset,
        urls=urls,
        permissions=permissions,
        dataset=node_addon.dataset,
        doi=dataset.doi,
        dataverse=dataverse.title,
        hasPublishedFiles=bool(published_files),
        dataverseIsPublished=dataverse.is_published,
        datasetIsPublished=dataset_is_published,
        datasetDraftModified=dataset_draft_modified,
        version=version,
        host=dataverse_host,
    )]
@must_be_contributor_or_public
@must_have_addon(SHORT_NAME, 'node')
def dataverse_root_folder(node_addon, auth, **kwargs):
    """Decorated route wrapper around _dataverse_root_folder."""
    return _dataverse_root_folder(node_addon, auth=auth)
## Widget ##
@must_be_contributor_or_public
@must_have_addon(SHORT_NAME, 'node')
def dataverse_widget(node_addon, **kwargs):
    """Return widget configuration for the node's Dataverse addon."""
    node = node_addon.owner
    # Start from the view-specific fields, then layer the addon's
    # config JSON on top (config values take precedence on collision).
    ret = dict(
        complete=node_addon.complete,
        widget_url=node.api_url_for('dataverse_get_widget_contents'),
    )
    ret.update(node_addon.config.to_json())
    return ret, http.OK
@must_be_contributor_or_public
@must_have_addon(SHORT_NAME, 'node')
def dataverse_get_widget_contents(node_addon, **kwargs):
    """Fetch citation/URL data for the widget.

    'connected' stays False until a linked dataset is resolved.
    """
    data = {'connected': False}
    if not node_addon.complete:
        return {'data': data}, http.OK
    doi = node_addon.dataset_doi
    alias = node_addon.dataverse_alias
    connection = client.connect_from_settings_or_401(node_addon)
    dataverse = client.get_dataverse(connection, alias)
    dataset = client.get_dataset(dataverse, doi)
    if dataset is None:
        return {'data': data}, http.BAD_REQUEST
    # Host is stashed in oauth_key (Dataverse has no OAuth key proper).
    host = node_addon.external_account.oauth_key
    data.update({
        'connected': True,
        'dataverse': node_addon.dataverse,
        'dataverseUrl': 'http://{0}/dataverse/{1}'.format(host, alias),
        'dataset': node_addon.dataset,
        'doi': doi,
        'datasetUrl': 'http://dx.doi.org/' + doi,
        'citation': dataset.citation,
    })
    return {'data': data}, http.OK
|
|
from .db import db
from . import app, users
import time
import json
from flask import request, jsonify
# MongoDB collection holding chat rooms; room names must be unique.
rooms = db["rooms"]
rooms.create_index("name", unique=True)
def create_room(name, creator, publicity="public"):
    """Insert a new room owned by *creator*.

    :param name: unique room name
    :param creator: user document; its 'login' becomes the owner key
    :param publicity: room visibility (defaults to "public")
    :returns: True on success, False if insertion failed
    """
    room = {
        "name": name,
        "creator": creator["login"].lower(),
        "description": "A description",
        "theme": {
            "background_colour": "#00BCD4",
            "logo-src": "/static/images/logo.png"
        },
        # The creator is pre-registered as the sole member with owner rank.
        "users": {
            creator["login"].lower(): {
                "rank": "owner",
                "tag": "owner",
                "banned": False,
                "muted": False,
                "last_message": -1,
            }
        },
        "publicity": publicity,
        "created": time.time(),
        "tags": {
            "default": {
                "name": "User",
                "colour": "#00BCD4",
            },
            "owner": {
                "name": "Owner",
                "colour": "#EEEEEE",
            }
        }
    }
    try:
        # NOTE(review): collection.insert is deprecated in modern pymongo
        # (use insert_one), and a duplicate name there raises
        # DuplicateKeyError, which ValueError would not catch -- confirm
        # which driver/wrapper `db` actually is.
        rooms.insert(room)
    except ValueError:
        return False
    return True
def get_room(room_name):
    """Fetch a room document by name, or None if absent."""
    match = rooms.find_one({"name": room_name})
    return match
def add_user(room_name, username):
    """Register *username* in the room with default membership state."""
    new_member = {
        "rank": "default",
        "tag": "default",
        "banned": False,
        "muted": False,
        "last_message": -1,
        "online": False,
        "last_ping": -1
    }
    member_field = "users.%s" % username.lower()
    return rooms.update_one({"name": room_name},
                            {"$set": {member_field: new_member}})
def remove_user(room_name, username):
    """Drop *username*'s membership entry from the room."""
    member_field = "users.%s" % username.lower()
    return rooms.update_one({"name": room_name},
                            {"$unset": {member_field: ""}})
def invite_user(room_name, username):
    """Append *username* to the room's invite list."""
    invite = {"$push": {"invited": username}}
    return rooms.update_one({"name": room_name}, invite)
def get_user_in_room(room, user):
    """Return *user*'s membership record within *room*, or None if the
    user (or the users table) is missing."""
    try:
        member_key = user["username"].lower()
        return room["users"][member_key]
    except KeyError:
        return None
def update_last_message(room_name, username, timestamp=None):
    """Record a message (and implicit ping) time for *username*.

    :param timestamp: time of the message; defaults to now.
    """
    if timestamp is None:
        timestamp = time.time()
    prefix = "users.%s" % username.lower()
    fields = {
        prefix + ".last_message": timestamp,
        prefix + ".last_ping": timestamp,
        prefix + ".online": True,
    }
    return rooms.update_one({"name": room_name}, {"$set": fields})
def update_last_ping(room_name, username, timestamp=None):
    """Record a keep-alive ping time for *username* and mark them online.

    :param timestamp: time of the ping; defaults to now.
    """
    if timestamp is None:
        timestamp = time.time()
    prefix = "users.%s" % username.lower()
    fields = {
        prefix + ".last_ping": timestamp,
        prefix + ".online": True,
    }
    return rooms.update_one({"name": room_name}, {"$set": fields})
def ban_user(room_name, username):
    """Mark *username* as banned in the room."""
    banned_field = "users.%s.banned" % username.lower()
    return rooms.update_one({"name": room_name},
                            {"$set": {banned_field: True}})
def unban_user(room_name, username):
    """Clear *username*'s banned flag in the room."""
    banned_field = "users.%s.banned" % username.lower()
    return rooms.update_one({"name": room_name},
                            {"$set": {banned_field: False}})
def set_tag(room_name, username, tag):
    """Assign *tag* to *username* in the room."""
    tag_field = "users.%s.tag" % username.lower()
    return rooms.update_one({"name": room_name},
                            {"$set": {tag_field: tag}})
def get_tag(room, user):
    """Return the tag name for *user* in *room*.

    Falls back to "default" when the room document is missing/None or
    has no such user, rather than propagating an error.
    """
    try:
        return room["users"][user]["tag"]
    except (KeyError, TypeError):
        # Narrowed from a bare except: only missing keys / None
        # documents should fall back; genuine bugs must surface.
        return "default"
def remove_tag(room_name, username):
    """Reset *username*'s tag back to "default"."""
    tag_field = "users.%s.tag" % username.lower()
    return rooms.update_one({"name": room_name},
                            {"$set": {tag_field: "default"}})
def set_user_online(room_name, username, online):
    """Set *username*'s online flag to *online* (a boolean)."""
    online_field = "users.%s.online" % username.lower()
    return rooms.update_one({"name": room_name},
                            {"$set": {online_field: online}})
def get_valid_tags(room_name):
    """Return the tag definitions for a room, or an error payload.

    :returns: the room's tags dict, or {"ok": False, ...} when the room
        does not exist or has no tags.
    """
    room = rooms.find_one({"name": room_name}, {"tags": 1})
    try:
        return room["tags"]
    except (KeyError, TypeError):
        # Narrowed from a bare except: find_one returns None for a
        # missing room (TypeError on subscript) and a room without a
        # 'tags' field raises KeyError; anything else should surface.
        return {
            "ok": False,
            "message": "Room not found.",
        }
def set_valid_tags(room_name, tags):
    """Replace the room's entire tag table with *tags*."""
    return rooms.update_one({"name": room_name},
                            {"$set": {"tags": tags}})
def safe_room(room):
    """Project a room document down to its publicly shareable fields."""
    public_fields = ("name", "creator", "description", "created")
    return {field: room[field] for field in public_fields}
@app.template_global()
def list_rooms(username=None):
    """Map room name -> public room data for rooms visible to *username*.

    Visible rooms: public rooms, rooms the user is a (non-banned)
    member of, and rooms the user has been invited to.
    """
    clauses = [{"publicity": "public"}]
    if username:
        handle = username.lower()
        clauses.append({"users.%s.banned" % handle: False})
        clauses.append({"invited": {"$in": [handle]}})
    # Bug fix: the original built the 'invited' clause with
    # username.lower() unconditionally, raising AttributeError when
    # username was None; anonymous callers now see only public rooms.
    res = rooms.find(
        {"$or": clauses},
        {"name": 1, "creator": 1, "description": 1, "created": 1}
    )
    return {room["name"]: safe_room(room) for room in res}
@app.route("/api/1/rooms/list", methods=["GET"])
def api_rooms_list():
    """List the rooms visible to the requesting user."""
    user = users.get_user()
    # jsonify keeps this route consistent with the other API endpoints;
    # returning a bare dict is not supported by older Flask versions.
    return jsonify({
        "ok": True,
        "rooms": list_rooms(user["username"])
    })
@app.route("/api/1/rooms/tags", methods=["GET", "POST"])
def api_rooms_tags():
    """Read (GET) or replace (POST) a room's tag definitions."""
    if request.method == "GET":
        room_name = request.args["room_name"]
        return jsonify(get_valid_tags(room_name))
    elif request.method == "POST":
        room_name = request.form["room_name"]
        tags = request.form["tags"]
        try:
            tags = json.loads(tags)
        except ValueError:
            return jsonify({
                "ok": False,
                "message": "Tags not valid JSON.",
            })
        try:
            valid_tags_data = {}
            # Bug fix: iterating a dict directly yields only its keys;
            # we need (tag_name, options) pairs, so iterate items().
            for tag_name, options in tags.items():
                valid_options = {}
                valid_options["name"] = options["name"]
                valid_options["text-color"] = options["text-color"]
                valid_options["bg-color"] = options["bg-color"]
                valid_tags_data[tag_name.lower()] = valid_options
        except (KeyError, TypeError, AttributeError):
            # TypeError/AttributeError cover non-dict payloads now that
            # items() is used; KeyError covers missing option fields.
            return jsonify({
                "ok": False,
                "message": "Tags data was not valid.",
            })
        if "default" not in valid_tags_data:
            return jsonify({
                "ok": False,
                "message": "Must have tag for default.",
            })
        set_valid_tags(room_name, valid_tags_data)
        return jsonify({
            "ok": True,
        })
|
|
"""File backend index."""
import copy
from collections import defaultdict
from blitzdb.backends.base import NotInTransaction
from blitzdb.backends.file.serializers import PickleSerializer as Serializer
from blitzdb.queryset import QuerySet
class Index(object):
    """File backend index.

    An index accepts key/value pairs and stores them so that they can be
    efficiently retrieved.

    :param params: Index parameters such as id and primary key
    :type params: dict
    :param serializer: Used to encode data before storing it.
    :type serializer: object
    :param deserializer: Used to decode data after retrieving it.
    :type deserializer: object
    :param store: Where the blobs are stored
    :type store: object
    """

    # magic value we use when storing undefined values
    undefined_magic_value = '5baf58af9fb144a4ba2aa4374e931539'

    def __init__(self, params, serializer, deserializer, store=None):
        """Initialize internal state."""
        self._params = params
        self._store = store
        self._serializer = serializer
        self._deserializer = deserializer
        # Pre-split dotted keys ('a.b.c') once for nested lookups.
        self._splitted_key = self.key.split('.')
        self._index = None
        self._reverse_index = None
        self._undefined_keys = None
        self.clear()
        # Without a store the index lives only in memory (ephemeral).
        if store:
            self.ephemeral = False
            self.loaded = self.load_from_store()
        else:
            self.ephemeral = True
            self.loaded = False

    def clear(self):
        """Clear index."""
        # hash(value) -> [store keys]; store key -> [hash values]
        self._index = defaultdict(list)
        self._reverse_index = defaultdict(list)
        self._undefined_keys = {}

    @property
    def key(self):
        """Return key parameter.

        An index will be created by default in which the key is the document
        primary key, but custom indices can be created for any property
        (including nested ones).

        :return: primary key
        :rtype: str
        """
        return self._params['key']

    def get_value(self, attributes):
        """Get value to be indexed from document attributes.

        :param attributes: Document attributes
        :type attributes: dict
        :return: Value to be indexed
        :rtype: object
        """
        value = attributes
        # A splitted key like 'a.b.c' goes into nested properties
        # and the value is retrieved recursively
        for elem in self._splitted_key:
            if isinstance(value, list):
                # Integer keys must be used for list properties
                value = value[int(elem)]
            else:
                value = value[elem]
        return value

    def save_to_store(self):
        """Save index to store.

        :raise AttributeError: If no datastore is defined
        """
        if not self._store:
            raise AttributeError('No datastore defined!')
        saved_data = self.save_to_data(in_place=True)
        data = Serializer.serialize(saved_data)
        # The blob name encodes the (current) on-disk format that also
        # carries undefined keys; see load_from_store for the legacy name.
        self._store.store_blob(data, 'all_keys_with_undefined')

    def get_all_keys(self):
        """Get all keys indexed.

        :return: All keys
        :rtype: list(str)
        """
        all_keys = []
        for keys in self._index.values():
            all_keys.extend(keys)
        return all_keys

    def get_index(self):
        """Get copy of the internal index structure.

        :return: Internal index structure
        :rtype: dict(str)
        """
        return copy.deepcopy(self._index)

    def load_from_store(self):
        """Load index from store.

        :return: Whether index was correctly loaded or not
        :rtype: bool
        :raise AttributeError: If no datastore is defined
        """
        if not self._store:
            raise AttributeError('No datastore defined!')
        # 'all_keys' is the legacy format without undefined-key data.
        if self._store.has_blob('all_keys'):
            data = Serializer.deserialize(self._store.get_blob('all_keys'))
            self.load_from_data(data)
            return True
        elif self._store.has_blob('all_keys_with_undefined'):
            blob = self._store.get_blob('all_keys_with_undefined')
            data = Serializer.deserialize(blob)
            self.load_from_data(data, with_undefined=True)
            return True
        else:
            return False

    def sort_keys(self, keys, order=QuerySet.ASCENDING):
        """Sort keys.

        Keys are sorted based on the value they are indexing.

        :param keys: Keys to be sorted
        :type keys: list(str)
        :param order: Order criteria (ascending or descending)
        :type order: int
        :return: Sorted keys
        :rtype: list(str)
        :raise ValueError: If invalid order value is passed
        """
        # to do: check that all reverse index values are unambiguous
        # Keys with no reverse-index entry have no value to sort by.
        missing_keys = [
            key
            for key in keys
            if not len(self._reverse_index[key])
        ]
        # NOTE(review): 'key not in missing_keys' is an O(n) list scan
        # per key -- consider a set for large key counts.
        keys_and_values = [
            (key, self._reverse_index[key][0])
            for key in keys
            if key not in missing_keys
        ]
        sorted_keys = [
            kv[0]
            for kv in sorted(
                keys_and_values,
                key=lambda x: x[1],
                reverse=True if order == QuerySet.DESCENDING else False)
        ]
        # Missing (value-less) keys sort first ascending, last descending.
        if order == QuerySet.ASCENDING:
            return missing_keys + sorted_keys
        elif order == QuerySet.DESCENDING:
            return sorted_keys + missing_keys
        else:
            raise ValueError('Unexpected order value: %d' % order)

    def save_to_data(self, in_place=False):
        """Save index to data structure.

        :param in_place: Do not copy index value to a new list object
        :type in_place: bool
        :return: Index data structure
        :rtype: list
        """
        if in_place:
            return [
                list(self._index.items()),
                list(self._undefined_keys.keys())
            ]
        # values[:] copies each key list so later mutation of the live
        # index does not affect the returned snapshot.
        return (
            [(key, values[:]) for key, values in self._index.items()],
            list(self._undefined_keys.keys()),
        )

    def load_from_data(self, data, with_undefined=False):
        """Load index structure.

        :param data: Index data as produced by save_to_data
        :param with_undefined: Load undefined keys as well
        :type with_undefined: bool
        """
        if with_undefined:
            defined_values, undefined_values = data
        else:
            defined_values = data
            undefined_values = None
        self._index = defaultdict(list, defined_values)
        # Rebuild the reverse mapping (store key -> hash values).
        self._reverse_index = defaultdict(list)
        for key, values in self._index.items():
            for value in values:
                self._reverse_index[value].append(key)
        if undefined_values:
            self._undefined_keys = {key: True for key in undefined_values}
        else:
            self._undefined_keys = {}

    def get_hash_for(self, value):
        """Get hash for a given value.

        :param value: The value to be indexed
        :type value: object
        :return: Hashed value
        :rtype: str
        """
        serialized_value = self._serializer(value)
        if isinstance(serialized_value, dict):
            # Hash each item and return the hash of all the hashes
            return hash(frozenset([
                self.get_hash_for(x)
                for x in serialized_value.items()
            ]))
        elif (isinstance(serialized_value, list)
                or isinstance(serialized_value, tuple)):
            # Hash each element and return the hash of all the hashes
            return hash(tuple([
                self.get_hash_for(x) for x in serialized_value
            ]))
        # Scalars are used directly as index keys (no hash() applied).
        return value

    def get_keys_for(self, value):
        """Get keys for a given value.

        :param value: The value to look for
        :type value: object
        :return: The keys for the given value
        :rtype: list(str)
        """
        # Callables act as custom query predicates over the index.
        if callable(value):
            return value(self)
        hash_value = self.get_hash_for(value)
        return self._index[hash_value][:]

    def get_undefined_keys(self):
        """Get undefined keys.

        :return: Undefined keys
        :rtype: list(str)
        """
        return self._undefined_keys.keys()

    # The following two operations change the value of the index

    def add_hashed_value(self, hash_value, store_key):
        """Add hashed value to the index.

        :param hash_value: The hashed value to be added to the index
        :type hash_value: str
        :param store_key: The key for the document in the store
        :type store_key: object
        """
        # Keep forward and reverse mappings duplicate-free and in sync.
        if store_key not in self._index[hash_value]:
            self._index[hash_value].append(store_key)
        if hash_value not in self._reverse_index[store_key]:
            self._reverse_index[store_key].append(hash_value)

    def add_key(self, attributes, store_key):
        """Add key to the index.

        :param attributes: Attributes to be added to the index
        :type attributes: dict(str)
        :param store_key: The key for the document in the store
        :type store_key: str
        """
        undefined = False
        try:
            value = self.get_value(attributes)
            # Guard against documents that happen to contain the
            # sentinel used for undefined values.
            if value == self.undefined_magic_value:
                raise IndexError(
                    'index value corresponds to undefined_magic_value: %s'
                    % self.undefined_magic_value)
        except (KeyError, IndexError):
            undefined = True
        # We remove old values
        self.remove_key(store_key)
        if not undefined:
            if isinstance(value, list) or isinstance(value, tuple):
                # We add an extra hash value for the list itself
                # (this allows for querying the whole list)
                values = value
                hash_value = self.get_hash_for(value)
                self.add_hashed_value(hash_value, store_key)
            else:
                values = [value]
            # Each element is also indexed individually.
            for value in values:
                hash_value = self.get_hash_for(value)
                self.add_hashed_value(hash_value, store_key)
        else:
            self.add_undefined(store_key)

    def add_undefined(self, store_key):
        """Add undefined key to the index.

        :param store_key: The key for the document in the store
        :type store_key: str
        """
        self._undefined_keys[store_key] = True

    def remove_key(self, store_key):
        """Remove key from the index.

        :param store_key: The key for the document in the store
        :type store_key: str
        """
        # The reverse index tells us exactly which buckets to clean.
        if store_key in self._reverse_index:
            for value in self._reverse_index[store_key]:
                self._index[value].remove(store_key)
            del self._reverse_index[store_key]
class TransactionalIndex(Index):
    """This class adds transaction support to the Index class.

    Mutations are buffered in add/remove caches and only applied to the
    underlying Index on commit().
    """

    def __init__(self, *args, **kwargs):
        """Initialize internal state."""
        super(TransactionalIndex, self).__init__(*args, **kwargs)
        self._in_transaction = False
        self._add_cache = None
        self._reverse_add_cache = None
        self._remove_cache = None
        self._init_cache()

    def _init_cache(self):
        """Initialize cache."""
        # store key -> pending hash values / hash value -> store keys.
        self._add_cache = defaultdict(list)
        self._reverse_add_cache = defaultdict(list)
        self._remove_cache = {}

    def begin(self):
        """Begin transaction.

        This will commit the last transaction before starting a new one.
        """
        self.commit()

    def commit(self):
        """Commit current transaction."""
        # Nothing buffered: avoid a useless save_to_store round-trip.
        if not self._add_cache and not self._remove_cache:
            return
        # Apply additions first, then removals (a remove_key() call
        # during the transaction already purged that key from the add
        # cache, so ordering here only affects keys touched by both).
        for store_key, hash_values in self._add_cache.items():
            for hash_value in hash_values:
                super(TransactionalIndex, self).add_hashed_value(
                    hash_value, store_key)
        for store_key in self._remove_cache:
            super(TransactionalIndex, self).remove_key(store_key)
        if not self.ephemeral:
            self.save_to_store()
        self._init_cache()
        # NOTE(review): commit() leaves the in-transaction flag *set*
        # (and begin() merely delegates to commit) -- this looks
        # inverted; confirm against upstream blitzdb semantics before
        # changing.
        self._in_transaction = True

    def rollback(self):
        """Drop changes from current transaction."""
        if not self._in_transaction:
            raise NotInTransaction
        self._init_cache()
        self._in_transaction = False

    def add_hashed_value(self, hash_value, store_key):
        """Add hashed value in the context of the current transaction.

        :param hash_value: The hashed value to be added to the index
        :type hash_value: str
        :param store_key: The key for the document in the store
        :type store_key: object
        """
        if hash_value not in self._add_cache[store_key]:
            self._add_cache[store_key].append(hash_value)
        if store_key not in self._reverse_add_cache[hash_value]:
            self._reverse_add_cache[hash_value].append(store_key)
        # An add supersedes any pending removal of the same key.
        if store_key in self._remove_cache:
            del self._remove_cache[store_key]

    def remove_key(self, store_key):
        """Remove key in the context of the current transaction.

        :param store_key: The key for the document in the store
        :type store_key: str
        """
        self._remove_cache[store_key] = True
        # Also retract any pending (uncommitted) additions for this key.
        if store_key in self._add_cache:
            for hash_value in self._add_cache[store_key]:
                self._reverse_add_cache[hash_value].remove(store_key)
            del self._add_cache[store_key]

    def get_keys_for(self, value, include_uncommitted=False):
        """Get keys for a given value.

        :param value: The value to look for
        :type value: object
        :param include_uncommitted: Include uncommitted values in results
        :type include_uncommitted: bool
        :return: The keys for the given value
        :rtype: list(str)
        """
        if not include_uncommitted:
            return super(TransactionalIndex, self).get_keys_for(value)
        else:
            keys = super(TransactionalIndex, self).get_keys_for(value)
            hash_value = self.get_hash_for(value)
            keys += self._reverse_add_cache[hash_value]
            return keys
|
|
#!/usr/bin/env python
import logging
import os
import inspect
import sys
from threading import Timer
from argparse import ArgumentParser
from sleekxmpp import ClientXMPP
from sleekxmpp.exceptions import XMPPError
from sleekxmpp.xmlstream.scheduler import Task, Scheduler
from base_plugin import Plugin, PluginContext
from plugin_utils import *
from base_message import Message
# If we don't have what we need, just run without the CLI.
# FORCENOCLI records whether the curses frontend is usable.
try:
    from frontend import *
    from curses import wrapper
    FORCENOCLI = False
except Exception as e:
    logging.debug("Unable to import CLI dependencies: " + str(e))
    logging.debug("Falling back to stdout")
    FORCENOCLI = True
#TODO: rip out init, channel movement, messaging, and termination into hooks and turn "xmpp" into a module. (also filters.)
class Bot(ClientXMPP):
    '''
    The primary XMPP bot class.
    '''

    def __init__(self, jid, password):
        '''
        Initialize the bot.
        :param jid: The account to log in with.
        :param password: The password to log in with.
        '''
        ClientXMPP.__init__(self, jid, password)
        PluginContext.client = self
        # This is the plugin for multi-user chat.
        self.register_plugin('xep_0045')
        self.add_event_handler("session_start", self.session_start)
        self.add_event_handler("message", self.on_event)
        self.add_event_handler("presence", self.on_event)
        self.nick = None
        self.service = None
        self.channels = []
        self.custom_plugins = []
        self.plugin_dir = "./plugins"
        self.import_plugins()

    def set_nick(self, nick):
        '''
        Set the nick or display name of the bot.
        :param nick: the nickname to use.
        '''
        self.nick = nick

    def set_service(self, service):
        '''
        Set the service URI the bot will utilize for channels.
        :param service: the URI to use.
        '''
        self.service = service

    def set_channels(self, channels):
        '''
        Set the list of channels the bot will join on startup.
        :param channels: the list of channels to join.
        '''
        self.channels = channels

    def add_channel(self, channel):
        '''
        Add a channel to the list of channels the bot will join on startup.
        :param channel: the channel to add.
        '''
        self.channels.append(channel)

    def join_channels(self):
        '''
        Join a list of channels. Must be called after startup.
        '''
        for channel in self.channels:
            self.join_channel(channel)

    def join_channel(self, channel):
        '''
        Join a single channel. Must be called after startup.
        :param channel: the channel to join.
        '''
        # Bug fix: this method previously looped over self.channels and
        # ignored its argument (the loop variable shadowed the
        # parameter), so every call re-joined *all* configured channels
        # and join_channels() joined each channel len(channels) times.
        if self.service:
            channel = channel + "@" + self.service
        self.plugin['xep_0045'].joinMUC(channel,
                                        self.nick,
                                        wait=True)

    def set_plugin_dir(self, plugin_dir):
        '''
        Set where the bot will look for plugins.
        :param plugin_dir: the path to the plugin directory. All plugins
        should be python files below this path.
        '''
        self.plugin_dir = plugin_dir

    def add_plugin(self, plugin):
        '''
        Add a Plugin class for the bot to utilize.
        The bot will create a unique instance of the Plugin class to run.
        :param plugin: the plugin class to add.
        '''
        self.custom_plugins.append(plugin(self))

    def import_plugins(self):
        '''
        Discover and import all visible plugins from the ./plugins folder in the running directory.
        Must be valid python files, implementing classes which inherit from Plugin, or utilizing
        The standalone decorators @Command and @Trigger.
        '''
        self.custom_plugins = []
        plugin_filenames = []
        # Only the top level of the plugin directory is scanned.
        for (dirpath, dirnames, filenames) in os.walk(self.plugin_dir):
            plugin_filenames = filenames
            break
        plugin_root = None
        #Accumulate each of the plugin files defined in ./plugins.
        for filename in plugin_filenames:
            base, extension = os.path.splitext(filename)
            if "__init__" not in base and ".py" == extension:
                try:
                    plugin_root = __import__("plugins." + os.path.splitext(filename)[0])
                    logging.debug("Plugin file:" + str(filename))
                except Exception as e:
                    logging.warning("PLUGIN IMPORT ERROR:" + str(e) + " FROM FILE:" + str(filename))
        logging.debug("Plugin module root:" + str(dir(plugin_root)))
        #Extract the plugin classes from the plugins module tree. There's got to be a better way to do this, but I'm bad at things.
        for module, module_name in [(getattr(plugin_root, module_name), module_name) for module_name in dir(plugin_root)]: #For components of the toplevel plugins module (we're looking for plugin files)
            try:
                # This is a dirty hack but it makes the logs much cleaner. Not really necessary, we can just fail out without it.
                if module.__class__ == os.__class__:
                    logging.info(" From Module:" + str(module))
                    for plugin, plugin_name in [(getattr(module, plugin_name), plugin_name) for plugin_name in dir(module)]: #For items internal to the modules derived from plugin files.
                        try:
                            #Total cludge to supress failed import errors on stuff that should fail while allowing errors on user stuff.
                            if inspect.isclass(plugin) \
                               and plugin != Plugin \
                               and issubclass(plugin, Plugin): #Make sure we aren't grabbing the parent class or a builtin.
                                logging.info(" Plugin:" + str(plugin))
                                self.add_plugin(plugin)
                        except Exception as e:
                            logging.warning("PLUGIN ADD ERROR:" + str(plugin) + ":" + str(e))
            except:
                continue
        logging.debug("Imported plugins: " + str(self.custom_plugins))
        return self.custom_plugins

    def session_start(self, event):
        '''
        The callback to be called when the bot first connects (after Process() is called)
        :param event: the start event that triggers this function.
        '''
        logging.info("Session start")
        self.send_presence()
        try:
            self.get_roster()
        except XMPPError as err:
            logging.error("error:" + str(err))
            self.disconnect()
        self.join_channels()

    def on_event(self, event):
        '''
        The callback to trigger any time an event is captured.
        :param event: The event that triggered this function.
        '''
        event = Message.loadXMPP(event)
        self.run_plugins(event)

    def run_plugins(self, event):
        '''
        Run all plugins on the given event, if appropriate.
        :param event: The event to run against.
        '''
        #Run plugins implemented as children of Plugin
        for plugin in self.custom_plugins:
            try:
                plugin(event)
            except Exception as e:
                logging.exception("PLUGIN RUNTIME ERROR:" + str(plugin) + ":" + str(e))
        #Below is the functionality for running the plugins implemented by standalone decorators. (@Command and @Trigger)
        #Run commands. Currently limited to private messages.
        if on_private_message(event, self):
            message_body_tokens = event.Body.split()
            for command, command_function in [(function.__name__, function) for function in PluginContext.commands]:
                if len(message_body_tokens) and message_body_tokens[0].strip() == '!' + command.strip():
                    try:
                        logging.debug([event] + message_body_tokens[1:])
                        call_function_with_variable_arguments(command_function, [event] + message_body_tokens[1:])
                    except Exception as e:
                        logging.exception("COMMAND ERROR:" + str(command) + ":" + str(e))
                        event.reply("COMMAND ERROR:" + str(e)).send()
        #Run triggered functions.
        for function, triggers in PluginContext.triggers.items():
            for trigger in triggers:
                try:
                    if trigger(event, self):
                        call_function_with_variable_arguments(function, [event] + event.Body.split()[1:])
                        break
                except Exception as e:
                    logging.exception("TRIGGER ERROR:" + str(trigger) + ":" + str(e))

    def list_commands(self):
        '''
        List all the commands currently visible to the bot.
        '''
        commands = [(function.__name__, function) for function in PluginContext.commands]
        for plugin in self.custom_plugins:
            try:
                commands += plugin._commands
            except:
                pass #In case someone wants to cram a function in as a plugin or something.
        return commands
def mainloop(bot, stdscr=None):
    '''
    The main runloop of the program. If being run in a curses session, stdscr should be populated.

    Installs a message display (curses frontend or plain stdout printer),
    starts the bot's processing thread, then blocks polling for the exit
    condition. Note: this module is Python 2 (print statements, raw_input).
    :param bot: The bot instance that has been prepared.
    :param stdscr: The curses main screen.
    '''
    #If we're running in a curses window... (for the CLI)
    if stdscr:
        console = TerminalFrontend(stdscr)
        bot.custom_plugins.append(console.display_message)
    else: #Otherwise, set up a basic message printer.
        def print_message(event):
            # Minimal fallback display: sender then body on stdout.
            print event.From
            print event.Body
        bot.custom_plugins.append(print_message)
    # block=False so the XMPP processing runs in the background while we poll.
    bot.process(block=False)
    should_halt = False
    while not should_halt:
        if stdscr:
            console.handle_user_input()
            should_halt = console.should_exit()
        else:
            # Exit on escape. Must hit enter, though.
            if raw_input() == '\x1b':
                should_halt = True
        pass
    try:
        bot.abort()
    except:
        # abort() can fail if the connection is already gone; just exit.
        exit()
def main():
    '''
    The main entry point for the bot.
    Mostly parsing args, and the config file, prepares for the mainloop.

    Resolution order: config-file values are read first, then overridden
    by any command-line flags. username/password/server/service are all
    required; missing values exit with an error.
    '''
    logging.basicConfig(level=logging.INFO,
                        format="%(levelname)-8s %(message)s")
    parser = ArgumentParser()
    parser.add_argument("-c", "--config", dest="config", default="./bot_config.conf")
    parser.add_argument("-u", "--username", dest="username", default=None)
    parser.add_argument("-p", "--password", dest="password", default=None)
    parser.add_argument("--channels", nargs='+', dest="channels", default=[])
    parser.add_argument("--server", dest="server", default=None)
    parser.add_argument("--service", dest="service", default=None)
    parser.add_argument("--nocli", action="store_true", default=False, dest="nocli")
    parser.add_argument("--plugins", dest="plugins", default=None)
    args = parser.parse_args()
    username = None
    password = None
    server = None
    service = None
    channels = []
    plugin_dir = None
    #Parse the config file. Should I use a tool for doing this? Probably, but this took all of 10 seconds.
    #Grab config file options first so command line can override them.
    if args.config:
        try:
            with open(args.config) as config_file:
                # Simple "key=value" lines; '#' starts a comment line.
                for line in config_file.readlines():
                    line = line.strip()
                    if line and line[0] != "#":
                        line_tokens = line.split("=")
                        if len(line_tokens) >= 2:
                            if line_tokens[0].strip().lower() == "username":
                                username = line_tokens[1].strip()
                            if line_tokens[0].strip().lower() == "password":
                                password = line_tokens[1].strip()
                            if line_tokens[0].strip().lower() == "server":
                                server = line_tokens[1].strip()
                            if line_tokens[0].strip().lower() == "service":
                                service = line_tokens[1].strip()
                            if line_tokens[0].strip().lower() == "channel":
                                channels.append(line_tokens[1].strip())
                            if line_tokens[0].strip().lower() == "plugins":
                                plugin_dir = line_tokens[1].strip()
        except:
            # A missing/unreadable config file is fine; CLI flags may
            # still supply everything required.
            pass
    # Command-line flags override config-file values.
    if args.username:
        username = args.username
    if args.password:
        password = args.password
    if args.server:
        server = args.server
    if args.service:
        service = args.service
    if args.channels != []:
        channels = args.channels
    if args.plugins:
        plugin_dir = args.plugins
    #FIXME: make these reqired via argparser somehow?
    if not username:
        logging.error("Error: no username was supplied.")
        sys.exit(-1)
    if not password:
        logging.error("Error: no password was supplied.")
        sys.exit(-1)
    if not server:
        logging.error("Error: no server was supplied.")
        sys.exit(-1)
    if not service:
        logging.error("Error: no service was supplied.")
        sys.exit(-1)
    logging.info("username: " + str(username))
    # Log only the first character + remaining length, not the password.
    logging.info("password: " + str(password[0]) + '+' + str(len(password)-1))
    logging.info("server: " + str(server))
    logging.info("service: " + str(service))
    logging.info("channels: " + str(channels))
    try:
        bot = Bot(username + "@" + server, password)
        bot.set_service(service)
        bot.set_nick(username)
        bot.set_plugin_dir(plugin_dir)
        bot.connect()
        logging.info("Connecting to channels:")
        for channel in channels:
            logging.info("Connecting to channel: " + str(channel))
            bot.add_channel(channel)
        # FORCENOCLI is a module-level override; otherwise run under curses.
        if args.nocli or FORCENOCLI:
            mainloop(bot)
        else:
            def mainloop_wrapper(stdscr):
                mainloop(bot, stdscr)
            wrapper(mainloop_wrapper)
    except Exception as e:
        logging.exception(e)
        exit()
# Run the bot when invoked as a script.
if __name__ == "__main__":
    main()
|
|
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see
# <http://www.gnu.org/licenses/>.
# This file is part of urlgrabber, a high-level cross-protocol url-grabber
# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
# $Id: byterange.py,v 1.9 2005/02/14 21:55:07 mstenner Exp $
import os
import stat
import urllib
import urllib2
import email.Utils
class RangeError(IOError):
    """Raised when a requested byte range cannot be satisfied."""
    pass
class HTTPRangeHandler(urllib2.BaseHandler):
    """urllib2 handler that makes HTTP Range requests usable.

    The Range header is already part of HTTP; all this handler does is
    teach urllib2 that a "206 Partial Content" response is a success,
    and turn "416 Requested Range Not Satisfiable" into a RangeError.

    Example:
        import urllib2
        import byterange

        range_handler = range.HTTPRangeHandler()
        opener = urllib2.build_opener(range_handler)

        # install it
        urllib2.install_opener(opener)

        # create Request and set Range header
        req = urllib2.Request('http://www.python.org/')
        req.header['Range'] = 'bytes=30-50'
        f = urllib2.urlopen(req)
    """

    def http_error_206(self, req, fp, code, msg, hdrs):
        # 206 is the expected success response for a ranged request, so
        # package it up exactly like an ordinary 200 result.
        resp = urllib.addinfourl(fp, hdrs, req.get_full_url())
        resp.code = code
        resp.msg = msg
        return resp

    def http_error_416(self, req, fp, code, msg, hdrs):
        # The server could not satisfy the requested byte range.
        raise RangeError('Requested Range Not Satisfiable')
class RangeableFileObject(object):
    """File object wrapper to enable raw range handling.
    This was implemented primarily for handling range
    specifications for file:// urls. This object effectively makes
    a file object look like it consists only of a range of bytes in
    the stream.

    Positions are 0-based within the range: position 0 corresponds to
    ``firstbyte`` of the underlying file, and ``lastbyte`` is exclusive.

    Examples:
        # expose 10 bytes, starting at byte position 20, from
        # /etc/aliases.
        >>> fo = RangeableFileObject(file('/etc/passwd', 'r'), (20,30))
        # seek seeks within the range (to position 23 in this case)
        >>> fo.seek(3)
        # tell tells where your at _within the range_ (position 3 in
        # this case)
        >>> fo.tell()
        # read EOFs if an attempt is made to read past the last
        # byte in the range. the following will return only 7 bytes.
        >>> fo.read(30)
    """

    def __init__(self, fo, rangetup):
        """Create a RangeableFileObject.
        fo       -- a file like object. only the read() method need be
                    supported but supporting an optimized seek() is
                    preferable.
        rangetup -- a (firstbyte,lastbyte) tuple specifying the range
                    to work over.
        The file object provided is assumed to be at byte offset 0.
        """
        self.fo = fo
        # Normalize so firstbyte is an int and lastbyte is an int or ''.
        (self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup)
        # realpos tracks the absolute offset in the wrapped file object.
        self.realpos = 0
        self._do_seek(self.firstbyte)

    def __getattr__(self, name):
        """This effectively allows us to wrap at the instance level.
        Any attribute not found in _this_ object will be searched for
        in self.fo.  This includes methods."""
        return getattr(self.fo, name)

    def tell(self):
        """Return the position within the range.
        This is different from fo.seek in that position 0 is the
        first byte position of the range tuple. For example, if
        this object was created with a range tuple of (500,899),
        tell() will return 0 when at byte position 500 of the file.
        """
        return (self.realpos - self.firstbyte)

    def seek(self, offset, whence=0):
        """Seek within the byte range.
        Positioning is identical to that described under tell().
        """
        assert whence in (0, 1, 2)
        if whence == 0:   # absolute seek
            realoffset = self.firstbyte + offset
        elif whence == 1: # relative seek
            realoffset = self.realpos + offset
        elif whence == 2: # absolute from end of file
            # XXX: are we raising the right Error here?
            raise IOError('seek from end of file not supported.')

        # do not allow seek past lastbyte in range
        if self.lastbyte and (realoffset >= self.lastbyte):
            realoffset = self.lastbyte

        # NOTE(review): a backwards seek would pass a negative delta to
        # _do_seek, tripping its assert -- only forward seeks work.
        self._do_seek(realoffset - self.realpos)

    def read(self, size=-1):
        """Read within the range.
        This method will limit the size read based on the range.
        """
        size = self._calc_read_size(size)
        rslt = self.fo.read(size)
        self.realpos += len(rslt)
        return rslt

    def readline(self, size=-1):
        """Read lines within the range.
        This method will limit the size read based on the range.
        """
        size = self._calc_read_size(size)
        rslt = self.fo.readline(size)
        self.realpos += len(rslt)
        return rslt

    def _calc_read_size(self, size):
        """Handles calculating the amount of data to read based on
        the range.

        Clamps size so a read never extends past lastbyte; size of -1
        (read everything) becomes "read up to lastbyte".
        """
        if self.lastbyte:
            if size > -1:
                if ((self.realpos + size) >= self.lastbyte):
                    size = (self.lastbyte - self.realpos)
            else:
                size = (self.lastbyte - self.realpos)
        return size

    def _do_seek(self, offset):
        """Seek based on whether wrapped object supports seek().
        offset is relative to the current position (self.realpos).
        """
        assert offset >= 0
        # Fall back to read()-based seeking when fo has no seek().
        seek = getattr(self.fo, 'seek', self._poor_mans_seek)
        seek(self.realpos + offset)
        self.realpos += offset

    def _poor_mans_seek(self, offset):
        """Seek by calling the wrapped file objects read() method.
        This is used for file like objects that do not have native
        seek support. The wrapped objects read() method is called
        to manually seek to the desired position.
        offset -- read this number of bytes from the wrapped
                  file object.
        raise RangeError if we encounter EOF before reaching the
        specified offset.
        """
        pos = 0
        bufsize = 1024
        while pos < offset:
            if (pos + bufsize) > offset:
                bufsize = offset - pos
            buf = self.fo.read(bufsize)
            # A short read means EOF before the target offset.
            if len(buf) != bufsize:
                raise RangeError('Requested Range Not Satisfiable')
            pos += bufsize
class FileRangeHandler(urllib2.FileHandler):
    """FileHandler subclass that adds Range support.
    This class handles Range headers exactly like an HTTP
    server would.
    """
    def open_local_file(self, req):
        # Serve a file:// request, honoring any Range header by slicing
        # the opened file with a RangeableFileObject.
        import mimetypes
        import email
        host = req.get_host()
        file = req.get_selector()
        localfile = urllib.url2pathname(file)
        stats = os.stat(localfile)
        size = stats[stat.ST_SIZE]
        modified = email.Utils.formatdate(stats[stat.ST_MTIME])
        mtype = mimetypes.guess_type(file)[0]
        if host:
            host, port = urllib.splitport(host)
            # NOTE(review): 'socket' is imported further down this module;
            # available at call time, not at class-definition time.
            if port or socket.gethostbyname(host) not in self.get_names():
                raise urllib2.URLError('file not on local host')
        fo = open(localfile,'rb')
        brange = req.headers.get('Range', None)
        brange = range_header_to_tuple(brange)
        # () means the Range header was present but malformed.
        assert brange != ()
        if brange:
            (fb, lb) = brange
            if lb == '':
                # Open-ended range runs to the end of the file.
                lb = size
            if fb < 0 or fb > size or lb > size:
                raise RangeError('Requested Range Not Satisfiable')
            # Content-Length reflects only the sliced portion.
            size = (lb - fb)
            fo = RangeableFileObject(fo, (fb, lb))
        headers = email.message_from_string(
            'Content-Type: %s\nContent-Length: %d\nLast-Modified: %s\n' %
            (mtype or 'text/plain', size, modified))
        return urllib.addinfourl(fo, headers, 'file:'+file)
# FTP Range Support
# Unfortunately, a large amount of base FTP code had to be copied
# from urllib and urllib2 in order to insert the FTP REST command.
# Code modifications for range support have been commented as
# follows:
# -- range support modifications start/end here
from urllib import splitport, splituser, splitpasswd, splitattr, \
unquote, addclosehook, addinfourl
import ftplib
import socket
import mimetypes
import email
class FTPRangeHandler(urllib2.FTPHandler):
    # FTPHandler with byte-range support, implemented by passing the FTP
    # REST offset down to retrfile(). Python 2 code (except X, e syntax).
    def ftp_open(self, req):
        # Open an ftp:// URL, honoring any Range header on the request.
        host = req.get_host()
        if not host:
            raise IOError('ftp error', 'no host given')
        host, port = splitport(host)
        if port is None:
            port = ftplib.FTP_PORT
        else:
            port = int(port)

        # username/password handling
        user, host = splituser(host)
        if user:
            user, passwd = splitpasswd(user)
        else:
            passwd = None
        host = unquote(host)
        user = unquote(user or '')
        passwd = unquote(passwd or '')

        try:
            host = socket.gethostbyname(host)
        except socket.error, msg:
            raise urllib2.URLError(msg)
        path, attrs = splitattr(req.get_selector())
        dirs = path.split('/')
        dirs = map(unquote, dirs)
        dirs, file = dirs[:-1], dirs[-1]
        if dirs and not dirs[0]:
            dirs = dirs[1:]
        try:
            fw = self.connect_ftp(user, passwd, host, port, dirs)
            # 'I' (binary) when fetching a file, 'D' for a listing.
            type = file and 'I' or 'D'
            for attr in attrs:
                attr, value = splitattr(attr)
                if attr.lower() == 'type' and \
                   value in ('a', 'A', 'i', 'I', 'd', 'D'):
                    type = value.upper()

            # -- range support modifications start here
            rest = None
            range_tup = range_header_to_tuple(req.headers.get('Range', None))
            # () means the Range header was present but malformed.
            assert range_tup != ()
            if range_tup:
                (fb, lb) = range_tup
                if fb > 0:
                    # REST: ask the server to start the transfer at fb.
                    rest = fb
            # -- range support modifications end here

            fp, retrlen = fw.retrfile(file, type, rest)

            # -- range support modifications start here
            if range_tup:
                (fb, lb) = range_tup
                if lb == '':
                    # Open-ended range: need the real length to size it.
                    if retrlen is None or retrlen == 0:
                        raise RangeError('Requested Range Not Satisfiable due'
                                         ' to unobtainable file length.')
                    lb = retrlen
                    retrlen = lb - fb
                    if retrlen < 0:
                        # beginning of range is larger than file
                        raise RangeError('Requested Range Not Satisfiable')
                else:
                    retrlen = lb - fb
                    # Server already skipped to fb via REST, so slice the
                    # remaining stream from 0 up to the range length.
                    fp = RangeableFileObject(fp, (0, retrlen))
            # -- range support modifications end here

            headers = ""
            mtype = mimetypes.guess_type(req.get_full_url())[0]
            if mtype:
                headers += "Content-Type: %s\n" % mtype
            if retrlen is not None and retrlen >= 0:
                headers += "Content-Length: %d\n" % retrlen
            headers = email.message_from_string(headers)
            return addinfourl(fp, headers, req.get_full_url())
        except ftplib.all_errors, msg:
            raise IOError('ftp error', msg)

    def connect_ftp(self, user, passwd, host, port, dirs):
        # Factory hook: build the REST-aware ftpwrapper defined below.
        fw = ftpwrapper(user, passwd, host, port, dirs)
        return fw
class ftpwrapper(urllib.ftpwrapper):
    # range support note:
    #  this ftpwrapper code is copied directly from
    #  urllib. The only enhancement is to add the rest
    #  argument and pass it on to ftp.ntransfercmd
    def retrfile(self, file, type, rest=None):
        # Retrieve 'file' (or a directory listing when no file/type 'D'),
        # optionally restarting the transfer at byte offset 'rest'.
        # Returns (file-like-object, length-or-None).
        self.endtransfer()
        if type in ('d', 'D'):
            cmd = 'TYPE A'
            isdir = 1
        else:
            cmd = 'TYPE ' + type
            isdir = 0
        try:
            self.ftp.voidcmd(cmd)
        except ftplib.all_errors:
            # Connection may have gone away; reconnect and retry once.
            self.init()
            self.ftp.voidcmd(cmd)
        conn = None
        if file and not isdir:
            # Use nlst to see if the file exists at all
            try:
                self.ftp.nlst(file)
            except ftplib.error_perm, reason:
                raise IOError('ftp error', reason)
            # Restore the transfer mode!
            self.ftp.voidcmd(cmd)
            # Try to retrieve as a file
            try:
                cmd = 'RETR ' + file
                conn = self.ftp.ntransfercmd(cmd, rest)
            except ftplib.error_perm, reason:
                if str(reason).startswith('501'):
                    # workaround for REST not supported error: fetch the
                    # whole file and skip 'rest' bytes client-side.
                    fp, retrlen = self.retrfile(file, type)
                    fp = RangeableFileObject(fp, (rest,''))
                    return (fp, retrlen)
                elif not str(reason).startswith('550'):
                    raise IOError('ftp error', reason)
        if not conn:
            # Set transfer mode to ASCII!
            self.ftp.voidcmd('TYPE A')
            # Try a directory listing
            if file:
                cmd = 'LIST ' + file
            else:
                cmd = 'LIST'
            conn = self.ftp.ntransfercmd(cmd)
        self.busy = 1
        # Pass back both a suitably decorated object and a retrieval length
        return (addclosehook(conn[0].makefile('rb'),
                            self.endtransfer), conn[1])
####################################################################
# Range Tuple Functions
# XXX: These range tuple functions might go better in a class.
# Compiled lazily on first use and cached at module level.
_rangere = None
def range_header_to_tuple(range_header):
    """Parse a Range header value into a (firstbyte, lastbyte) tuple.

    Range headers have the form "bytes=<firstbyte>-<lastbyte>". When the
    header omits lastbyte, the second element of the result is the empty
    string.

    Return None if range_header is None, and () if range_header does not
    conform to the range spec pattern.
    """
    global _rangere
    if range_header is None:
        return None
    if _rangere is None:
        # Deferred compile keeps the module import cheap; the compiled
        # pattern is cached for every later call.
        import re
        _rangere = re.compile(r'^bytes=(\d{1,})-(\d*)')
    parsed = _rangere.match(range_header)
    if not parsed:
        return ()
    normalized = range_tuple_normalize(parsed.group(1, 2))
    if normalized and normalized[1]:
        # Header ranges include the last byte; internal tuples treat
        # lastbyte as exclusive, so bump it by one.
        normalized = (normalized[0], normalized[1] + 1)
    return normalized
def range_tuple_to_header(range_tup):
    """Render a (firstbyte, lastbyte) tuple as a Range header value.

    Return a string of the form "bytes=<firstbyte>-<lastbyte>", or None
    when no Range header is needed (tuple is None or covers the whole
    file).
    """
    if range_tup is None:
        return None
    range_tup = range_tuple_normalize(range_tup)
    if not range_tup:
        return None
    first, last = range_tup
    if last:
        # Internal tuples use an exclusive lastbyte; the header form is
        # inclusive, so step back by one.
        last = last - 1
    return 'bytes=%s-%s' % (first, last)
def range_tuple_normalize(range_tup):
    """Normalize a (first_byte, last_byte) range tuple.

    Return a tuple whose first element is guaranteed to be an int
    and whose second element will be '' (meaning: the last byte) or
    an int. Return None if range_tup is None, or if the normalized
    tuple == (0, ''), as that is equivalent to retrieving the entire
    file.

    Raises RangeError when an explicit last byte is smaller than the
    first byte.
    """
    if range_tup is None:
        return None
    # handle first byte: missing/empty means "start of file"
    fb = range_tup[0]
    if fb in (None, ''):
        fb = 0
    else:
        fb = int(fb)
    # handle last byte: missing, None and '' all mean "end of file" ('')
    try:
        lb = range_tup[1]
    except IndexError:
        lb = ''
    else:
        if lb is None:
            lb = ''
        elif lb != '':
            lb = int(lb)
    # check if range is over the entire file
    if (fb, lb) == (0, ''):
        return None
    # check that the range is valid. Only compare when an explicit last
    # byte was given: comparing the '' sentinel against an int raises
    # TypeError on Python 3 (e.g. for an open-ended range like (5, '')).
    if lb != '' and lb < fb:
        raise RangeError('Invalid byte range: %s-%s' % (fb, lb))
    return (fb, lb)
|
|
# -*- coding: utf-8 -*-
import functools
import sys
from argparse import ArgumentParser
import tensorflow as tf
from pprint import pformat
from tensorflow.contrib.framework import arg_scope, add_arg_scope
import tfsnippet as spt
from tfsnippet.examples.utils import (MLResults,
save_images_collection,
bernoulli_as_pixel,
bernoulli_flow,
print_with_title)
class ExpConfig(spt.Config):
    """Hyper-parameters for the VAE experiment (CLI-overridable via
    spt.register_config_arguments in main())."""
    # model parameters
    z_dim = 40                      # dimensionality of the latent z
    x_dim = 784                     # flattened 28x28 MNIST input

    # training parameters
    result_dir = None               # where MLResults writes outputs
    write_summary = False           # enable TensorBoard summaries
    max_epoch = 3000
    max_step = None                 # None: no step limit, epochs only
    batch_size = 128
    l2_reg = 0.0001                 # weight for kernel L2 regularization
    initial_lr = 0.001
    lr_anneal_factor = 0.5          # lr multiplier at each anneal
    lr_anneal_epoch_freq = 300
    lr_anneal_step_freq = None

    # evaluation parameters
    test_n_z = 500                  # z samples per x for IS log-likelihood
    test_batch_size = 128

# Module-level config singleton shared by q_net / p_net / main.
config = ExpConfig()
@spt.global_reuse
@add_arg_scope
def q_net(x, observed=None, n_z=None, is_initializing=False):
    """Build the inference (encoder) network q(z|x).

    :param x: input batch, cast to float inside.
    :param observed: observed-variable dict for the BayesianNet.
    :param n_z: number of z samples per input (None: one, no sample axis).
    :param is_initializing: True for the data-dependent act_norm
        initialization pass.
    :return: the constructed :class:`spt.BayesianNet`.
    """
    net = spt.BayesianNet(observed=observed)
    # act_norm needs to know whether this is the initialization pass.
    normalizer_fn = functools.partial(
        spt.layers.act_norm, initializing=is_initializing)

    # compute the hidden features
    with arg_scope([spt.layers.dense],
                   activation_fn=tf.nn.leaky_relu,
                   normalizer_fn=normalizer_fn,
                   weight_norm=True,
                   kernel_regularizer=spt.layers.l2_regularizer(config.l2_reg)):
        h_x = tf.to_float(x)
        h_x = spt.layers.dense(h_x, 500)
        h_x = spt.layers.dense(h_x, 500)

    # sample z ~ q(z|x)
    z_mean = spt.layers.dense(h_x, config.z_dim, name='z_mean')
    z_logstd = spt.layers.dense(h_x, config.z_dim, name='z_logstd')
    z = net.add('z', spt.Normal(mean=z_mean, logstd=z_logstd), n_samples=n_z,
                group_ndims=1)

    return net
@spt.global_reuse
@add_arg_scope
def p_net(observed=None, n_z=None, is_initializing=False):
    """Build the generative (decoder) network p(z) p(x|z).

    :param observed: observed-variable dict for the BayesianNet.
    :param n_z: number of z samples from the prior (None: one).
    :param is_initializing: True for the data-dependent act_norm
        initialization pass.
    :return: the constructed :class:`spt.BayesianNet`.
    """
    net = spt.BayesianNet(observed=observed)
    # act_norm needs to know whether this is the initialization pass.
    normalizer_fn = functools.partial(
        spt.layers.act_norm, initializing=is_initializing)

    # sample z ~ p(z): standard-normal prior over the latent code
    z = net.add('z', spt.Normal(mean=tf.zeros([1, config.z_dim]),
                                logstd=tf.zeros([1, config.z_dim])),
                group_ndims=1, n_samples=n_z)

    # compute the hidden features
    with arg_scope([spt.layers.dense],
                   activation_fn=tf.nn.leaky_relu,
                   normalizer_fn=normalizer_fn,
                   weight_norm=True,
                   kernel_regularizer=spt.layers.l2_regularizer(config.l2_reg)):
        h_z = z
        h_z = spt.layers.dense(h_z, 500)
        h_z = spt.layers.dense(h_z, 500)

    # sample x ~ p(x|z): Bernoulli over binarized pixels
    x_logits = spt.layers.dense(h_z, config.x_dim, name='x_logits')
    x = net.add('x', spt.Bernoulli(logits=x_logits), group_ndims=1)

    return net
def main():
    """Train and evaluate the MNIST VAE: build the TF graph (init /
    train / test / plot variants of the model), then run the tfsnippet
    training loop with periodic evaluation and sample plotting."""
    # parse the arguments
    arg_parser = ArgumentParser()
    spt.register_config_arguments(config, arg_parser, title='Model options')
    spt.register_config_arguments(spt.settings, arg_parser, prefix='tfsnippet',
                                  title='TFSnippet options')
    arg_parser.parse_args(sys.argv[1:])

    # print the config
    print_with_title('Configurations', pformat(config.to_dict()), after='\n')

    # open the result object and prepare for result directories
    results = MLResults(config.result_dir)
    results.save_config(config)  # save experiment settings for review
    results.make_dirs('plotting', exist_ok=True)
    results.make_dirs('train_summary', exist_ok=True)

    # input placeholders
    input_x = tf.placeholder(
        dtype=tf.int32, shape=(None, config.x_dim), name='input_x')
    learning_rate = spt.AnnealingVariable(
        'learning_rate', config.initial_lr, config.lr_anneal_factor)

    # derive the output for initialization (data-dependent act_norm init;
    # histogram summaries disabled for this throwaway pass)
    with tf.name_scope('initialization'), \
            spt.utils.scoped_set_config(spt.settings, auto_histogram=False):
        init_q_net = q_net(input_x, is_initializing=True)
        init_chain = init_q_net.chain(
            p_net, observed={'x': input_x}, is_initializing=True)
        init_lb = tf.reduce_mean(init_chain.vi.lower_bound.elbo())

    # derive the loss and lower-bound for training
    with tf.name_scope('training'):
        train_q_net = q_net(input_x)
        train_chain = train_q_net.chain(p_net, observed={'x': input_x})
        vae_loss = tf.reduce_mean(train_chain.vi.training.sgvb())
        loss = vae_loss + tf.losses.get_regularization_loss()

    # derive the nll and logits output for testing (importance-sampled
    # log-likelihood over test_n_z latent samples)
    with tf.name_scope('testing'):
        test_q_net = q_net(input_x, n_z=config.test_n_z)
        test_chain = test_q_net.chain(
            p_net, latent_axis=0, observed={'x': input_x})
        test_nll = -tf.reduce_mean(test_chain.vi.evaluation.is_loglikelihood())
        test_lb = tf.reduce_mean(test_chain.vi.lower_bound.elbo())

    # derive the optimizer
    with tf.name_scope('optimizing'):
        optimizer = tf.train.AdamOptimizer(learning_rate)
        params = tf.trainable_variables()
        grads = optimizer.compute_gradients(loss, var_list=params)
        # run any pending update ops (e.g. normalizer stats) before the step
        with tf.control_dependencies(
                tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            train_op = optimizer.apply_gradients(grads)

    # derive the plotting function: a 10x10 grid of prior samples
    with tf.name_scope('plotting'):
        plot_p_net = p_net(n_z=100)
        x_plots = tf.reshape(bernoulli_as_pixel(plot_p_net['x']), (-1, 28, 28))

    def plot_samples(loop):
        # 'session' is the enclosing default session bound below.
        with loop.timeit('plot_time'):
            images = session.run(x_plots)
            save_images_collection(
                images=images,
                filename='plotting/{}.png'.format(loop.epoch),
                grid_size=(10, 10),
                results=results
            )

    # prepare for training and testing data
    (x_train, y_train), (x_test, y_test) = \
        spt.datasets.load_mnist(x_shape=[784])
    train_flow = bernoulli_flow(
        x_train, config.batch_size, shuffle=True, skip_incomplete=True)
    test_flow = bernoulli_flow(
        x_test, config.test_batch_size, sample_now=True)

    with spt.utils.create_session().as_default() as session, \
            train_flow.threaded(5) as train_flow:
        spt.utils.ensure_variables_initialized()

        # initialize the network (one batch through the init graph)
        for [x] in train_flow:
            print('Network initialized, first-batch loss is {:.6g}.\n'.
                  format(session.run(init_lb, feed_dict={input_x: x})))
            break

        # train the network
        with spt.TrainLoop(params,
                           var_groups=['q_net', 'p_net'],
                           max_epoch=config.max_epoch,
                           max_step=config.max_step,
                           summary_dir=(results.system_path('train_summary')
                                        if config.write_summary else None),
                           summary_graph=tf.get_default_graph(),
                           early_stopping=False) as loop:
            trainer = spt.Trainer(
                loop, train_op, [input_x], train_flow,
                metrics={'loss': loss},
                summaries=tf.summary.merge_all(spt.GraphKeys.AUTO_HISTOGRAM)
            )
            trainer.anneal_after(
                learning_rate,
                epochs=config.lr_anneal_epoch_freq,
                steps=config.lr_anneal_step_freq
            )
            evaluator = spt.Evaluator(
                loop,
                metrics={'test_nll': test_nll, 'test_lb': test_lb},
                inputs=[input_x],
                data_flow=test_flow,
                time_metric_name='test_time'
            )
            evaluator.events.on(
                spt.EventKeys.AFTER_EXECUTION,
                lambda e: results.update_metrics(evaluator.last_metrics_dict)
            )
            # evaluate and plot every 10 epochs, log every epoch
            trainer.evaluate_after_epochs(evaluator, freq=10)
            trainer.evaluate_after_epochs(
                functools.partial(plot_samples, loop), freq=10)
            trainer.log_after_epochs(freq=1)
            trainer.run()

    # print the final metrics and close the results object
    print_with_title('Results', results.format_metrics(), before='\n')
    results.close()
# Run the experiment when invoked as a script.
if __name__ == '__main__':
    main()
|
|
"""Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
#
# License: BSD, (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import atleast2d_or_csr
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
                           SupervisedIntegerMixin, ClassifierMixin):
    """Classifier implementing the k-nearest neighbors vote.

    Parameters
    ----------
    n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`k_neighbors` queries.

    weights : str or callable
        weight function used in prediction.  Possible values:

        - 'uniform' : uniform weights.  All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.

        Uniform weights are used by default.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`scipy.spatial.cKDtree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or cKDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.

    warn_on_equidistant : boolean, optional.  Defaults to True.
        Generate a warning if equidistant neighbors are discarded.
        For classification or regression based on k-neighbors, if
        neighbor k and neighbor k+1 have identical distances but
        different labels, then the result will be dependent on the
        ordering of the training data.
        If the fit method is ``'kd_tree'``, no warnings will be generated.

    p: integer, optional (default = 2)
        Parameter for the Minkowski metric from
        sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import KNeighborsClassifier
    >>> neigh = KNeighborsClassifier(n_neighbors=3)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    KNeighborsClassifier(...)
    >>> print(neigh.predict([[1.1]]))
    [0]
    >>> print(neigh.predict_proba([[0.9]]))
    [[ 0.66666667  0.33333333]]

    See also
    --------
    RadiusNeighborsClassifier
    KNeighborsRegressor
    RadiusNeighborsRegressor
    NearestNeighbors

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, n_neighbors=5,
                 weights='uniform',
                 algorithm='auto', leaf_size=30,
                 warn_on_equidistant=True, p=2):
        self._init_params(n_neighbors=n_neighbors,
                          algorithm=algorithm,
                          leaf_size=leaf_size,
                          warn_on_equidistant=warn_on_equidistant,
                          p=p)
        # Validate the weighting scheme eagerly so bad values fail here.
        self.weights = _check_weights(weights)

    def predict(self, X):
        """Predict the class labels for the provided data

        Parameters
        ----------
        X: array
            A 2-D array representing the test points.

        Returns
        -------
        labels: array
            List of class labels (one for each data sample).
        """
        X = atleast2d_or_csr(X)

        neigh_dist, neigh_ind = self.kneighbors(X)
        pred_labels = self._y[neigh_ind]

        weights = _get_weights(neigh_dist, self.weights)

        if weights is None:
            # Unweighted vote: plain majority among the k neighbors.
            mode, _ = stats.mode(pred_labels, axis=1)
        else:
            mode, _ = weighted_mode(pred_labels, weights, axis=1)

        # Use builtin `int` rather than the `np.int` alias: `np.int` was
        # identical to `int` and has been removed in NumPy >= 1.24.
        return mode.flatten().astype(int)

    def predict_proba(self, X):
        """Return probability estimates for the test data X.

        Parameters
        ----------
        X: array, shape = (n_samples, n_features)
            A 2-D array representing the test points.

        Returns
        -------
        probabilities : array, shape = [n_samples, n_classes]
            Probabilities of the samples for each class in the model,
            where classes are ordered arithmetically.
        """
        X = atleast2d_or_csr(X)

        neigh_dist, neigh_ind = self.kneighbors(X)
        pred_labels = self._y[neigh_ind]

        weights = _get_weights(neigh_dist, self.weights)
        if weights is None:
            # Uniform weighting: each neighbor contributes one vote.
            weights = np.ones_like(pred_labels)

        probabilities = np.zeros((X.shape[0], self._classes.size))

        # Translate class label to a column index in probabilities array.
        # This may not be needed provided classes labels are guaranteed to be
        # np.arange(n_classes) (e.g. consecutive and starting with 0)
        pred_indices = pred_labels.copy()
        for k, c in enumerate(self._classes):
            pred_indices[pred_labels == c] = k

        # a simple ':' index doesn't work right
        all_rows = np.arange(X.shape[0])
        for i, idx in enumerate(pred_indices.T):  # loop is O(n_neighbors)
            probabilities[all_rows, idx] += weights[:, i]

        # normalize 'votes' into real [0,1] probabilities
        probabilities = (probabilities.T / probabilities.sum(axis=1)).T

        return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
                                SupervisedIntegerMixin, ClassifierMixin):
    """Classifier implementing a vote among neighbors within a given radius
    Parameters
    ----------
    radius : float, optional (default = 1.0)
        Range of parameter space to use by default for
        :meth:`radius_neighbors` queries.
    weights : str or callable
        weight function used in prediction. Possible values:
        - 'uniform' : uniform weights. All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.
        Uniform weights are used by default.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:
        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`scipy.spatial.cKDtree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.
        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.
    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or cKDTree. This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree. The optimal value depends on the
        nature of the problem.
    p: integer, optional (default = 2)
        Parameter for the Minkowski metric from
        sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    outlier_label: int, optional (default = None)
        Label, which is given for outlier samples (samples with no
        neighbors on given radius).
        If set to None, ValueError is raised, when outlier is detected.
    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import RadiusNeighborsClassifier
    >>> neigh = RadiusNeighborsClassifier(radius=1.0)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    RadiusNeighborsClassifier(...)
    >>> print(neigh.predict([[1.5]]))
    [0]
    See also
    --------
    KNeighborsClassifier
    RadiusNeighborsRegressor
    KNeighborsRegressor
    NearestNeighbors
    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.
    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """
    def __init__(self, radius=1.0, weights='uniform',
                 algorithm='auto', leaf_size=30, p=2, outlier_label=None):
        self._init_params(radius=radius,
                          algorithm=algorithm,
                          leaf_size=leaf_size,
                          p=p)
        self.weights = _check_weights(weights)
        self.outlier_label = outlier_label
    def predict(self, X):
        """Predict the class labels for the provided data.
        Parameters
        ----------
        X: array
            A 2-D array representing the test points.
        Returns
        -------
        labels: array
            List of class labels (one for each data sample).
        """
        X = atleast2d_or_csr(X)
        neigh_dist, neigh_ind = self.radius_neighbors(X)
        pred_labels = [self._y[ind] for ind in neigh_ind]
        # Compare against None explicitly: the previous truthiness test
        # (`if self.outlier_label:`) silently ignored a valid outlier
        # label of 0 and raised ValueError instead of using it.
        if self.outlier_label is not None:
            outlier_label = np.array((self.outlier_label, ))
            small_value = np.array((1e-6, ))
            for i, pl in enumerate(pred_labels):
                # Check that all have at least 1 neighbor
                if len(pl) < 1:
                    pred_labels[i] = outlier_label
                    neigh_dist[i] = small_value
        else:
            for pl in pred_labels:
                # Check that all have at least 1 neighbor
                if len(pl) < 1:
                    raise ValueError('no neighbors found for a test sample, '
                                     'you can try using larger radius, '
                                     'give a label for outliers, '
                                     'or consider removing them in your '
                                     'dataset')
        weights = _get_weights(neigh_dist, self.weights)
        # `np.int` was removed in NumPy 1.24; builtin `int` is equivalent.
        if weights is None:
            mode = np.asarray([stats.mode(pl)[0] for pl in pred_labels],
                              dtype=int)
        else:
            mode = np.asarray([weighted_mode(pl, w)[0]
                               for (pl, w) in zip(pred_labels, weights)],
                              dtype=int)
        return mode.flatten().astype(int)
|
|
# Copyright (c) 2011-2015 Rackspace US, Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Simpl git utilities.
Wraps many shellouts to git creating
easy-to-handle, pythonic results.
Tested against:
git 2.1.2
"""
import atexit
import errno
import logging
import os
import pipes
import re
import shutil
import tempfile
import warnings
from six.moves import zip_longest
from simpl import exceptions
from simpl.utils import shell
# Module-level logger, named after this module.
LOG = logging.getLogger(__name__)
#: Minimum recommended git version as a (major, minor) tuple; compared
#: against the detected version by check_git_version().
MIN_GIT_VERSION = (1, 9)
def execute_git_command(command, repo_dir=None):
    """Run a git command list and return its output.

    Failures from the shell helper (CalledProcessError subclasses and
    OSError) are re-raised as
    :class:`~simpl.exceptions.SimplGitCommandError`, which carries the
    returncode and captured output of the failed attempt.
    """
    try:
        result = shell.execute(command, cwd=repo_dir)
    except exceptions.SimplCalledProcessError as err:
        raise exceptions.SimplGitCommandError(err.returncode, err.cmd,
                                              output=err.output)
    except OSError as err:
        # An OSError's errno is *not* a process returncode; report the
        # conventional 127 ("command not found") instead.
        raise exceptions.SimplGitCommandError(
            127, command, output=repr(err), oserror=err)
    return result
def git_version():
    """Return the output of `git --version`."""
    command = ['git', '--version']
    return execute_git_command(command)
def check_git_version():
    """Warn when git is missing or older than ``MIN_GIT_VERSION``.

    A GitWarning is emitted when git cannot be executed at all, and
    another when the detected version is older than the recommended
    minimum. The original motivation: with git < 1.9 a freshly init-ed
    repo cannot check out from a fetched remote until it has at least
    one commit, because HEAD refers to a refs/heads/master file that
    does not exist yet.

    .. todo::
        TODO(larsbutler): To favor compatibility over elegance we could
        automatically add an empty `git commit` after every `git init`
        (best done in :class:`GitRepo`, not the module-level helpers).
    """
    try:
        version_output = git_version()
    except exceptions.SimplGitCommandError:
        warnings.warn("Git does not appear to be installed!",
                      exceptions.GitWarning)
        return
    # `git --version` prints e.g. "git version 2.1.2"; the third token
    # is the version number.
    ver_num = version_output.split()[2]
    major, minor, _ = ver_num.split('.', 2)
    installed = (int(major), int(minor))
    if installed < MIN_GIT_VERSION:
        recommended = '.'.join((str(x) for x in MIN_GIT_VERSION))
        warnings.warn(
            "Git version %(ver)s found. %(rec)s or greater "
            "is recommended for simpl/git.py"
            % dict(ver=ver_num, rec=recommended),
            exceptions.GitWarning)
# Check the installed git version once, at import time. This only emits
# GitWarning warnings and never raises, so importing the module is safe
# even without git installed.
check_git_version()
def git_init(repo_dir):
    """Initialize a fresh git repository in `repo_dir`."""
    command = ['git', 'init']
    return execute_git_command(command, repo_dir=repo_dir)
def git_clone(target_dir, repo_location, branch_or_tag=None, verbose=True):
    """Clone repo at repo_location to target_dir and checkout branch_or_tag.

    When branch_or_tag is omitted, the HEAD of the cloned repo's primary
    branch is checked out.
    """
    destination = pipes.quote(target_dir)
    command = ['git', 'clone']
    if verbose:
        command.append('--verbose')
    # Local-directory sources get --no-hardlinks so the clone is a fully
    # independent copy.
    if os.path.isdir(repo_location):
        command.append('--no-hardlinks')
    command += [pipes.quote(repo_location), destination]
    if branch_or_tag:
        command += ['--branch', branch_or_tag]
    return execute_git_command(command)
def git_tag(repo_dir, tagname, message=None, force=True):
    """Create an annotated tag at the current head.

    The tag name doubles as the annotation message when none is given.
    """
    if not message:
        message = "%s" % tagname
    command = ['git', 'tag', '--annotate', '--message', message]
    if force:
        command.append('--force')
    # The tag name must be the final argument.
    command.append(tagname)
    return execute_git_command(command, repo_dir=repo_dir)
def git_list_config(repo_dir):
    """Return the git configuration as a {key: value} dict.

    Keys are the dotted option names printed by `git config --list`
    (e.g. 'user.name'). Only the first '=' on a line separates key from
    value, so values containing '=' stay intact. Lines without any '='
    (git emits bare keys for valueless boolean-style options) previously
    raised ValueError when unpacked; they are now skipped.
    """
    command = ['git', 'config', '--list']
    raw = execute_git_command(command, repo_dir=repo_dir).splitlines()
    output = {}
    for cfg in raw:
        key, sep, val = cfg.partition('=')
        if sep:
            output[key] = val
    # TODO(sam): maybe turn this into more easily navigable
    # nested dicts?
    # e.g. {'alias': {'branches': ..., 'remotes': ...}}
    return output
def git_list_tags(repo_dir, with_messages=False):
    """Return a list of git tags for the git repo in `repo_dir`.

    With `with_messages`, each entry becomes a (tag, message) tuple
    (`git tag -n1` output split on the first whitespace run).
    """
    command = ['git', 'tag', '-l']
    if with_messages:
        command.append('-n1')
    raw = execute_git_command(command, repo_dir=repo_dir).splitlines()
    tags = [line.strip() for line in raw if line.strip()]
    if not with_messages:
        return tags
    return [tuple(part.strip() for part in line.split(None, 1))
            for line in tags]
def git_list_branches(repo_dir):
    """Return a list of git branches for the git repo in 'repo_dir'.

    .. code-block:: python

        [
            {'branch': <branchname>,
             'commit': <commit_hash>,
             'message': <commit message>},
            {...},
        ]
    """
    command = ['git', 'branch', '--remotes', '--all',
               '--verbose', '--no-abbrev']
    output = execute_git_command(command, repo_dir=repo_dir).splitlines()
    # remove nullish lines
    lines = [l.strip() for l in output if l.strip()]
    # find the * current branch
    try:
        current_branch = [l for l in lines if l.startswith('* ')][0]
    except IndexError:
        current_branch = None
    item = None
    if current_branch:
        lines.remove(current_branch)
        current_branch = current_branch.replace('* ', '', 1)
        # Raw string: the plain literal '\(' was an invalid escape
        # sequence (DeprecationWarning, later a SyntaxWarning).
        if re.match(r'\(.*detached.+\)', current_branch):
            # Detached HEAD prints "(HEAD detached at <sha>) <sha> <msg>".
            branch, rest = current_branch.split(')', 1)
            branch = "%s)" % branch
            sha, msg = rest.split(None, 1)
            item = {'branch': branch, 'commit': sha, 'message': msg}
        else:
            lines.insert(0, current_branch)
    # <branch> <hash> <commit_message>
    # make a list of lists with clean elements of equal length
    breakout = [k.split(None, 2) for k in lines]
    # remove any strange hashless outliers; the len(k) guard keeps a
    # single-token line from raising IndexError on k[1]
    breakout = [k for k in breakout if len(k) > 1 and len(k[1]) == 40]
    headers = ['branch', 'commit', 'message']
    # use izip_longest so we fill in None if message was empty
    result = [dict(zip_longest(headers, vals))
              for vals in breakout]
    if item:
        result.append(item)
    return result
def git_list_remotes(repo_dir):
    """Return a listing of configured remotes.

    Each entry is a dict with 'name', 'location' and 'cmd' keys; fields
    missing from a line are filled with None.
    """
    command = ['git', 'remote', '--verbose', 'show']
    raw = execute_git_command(command, repo_dir=repo_dir).splitlines()
    lines = [entry.strip() for entry in raw if entry.strip()]
    # Each line looks like: <name> <location> (<cmd>)
    headers = ['name', 'location', 'cmd']
    fields = [line.split(None, len(headers)) for line in lines]
    # zip_longest pads short rows with None.
    return [dict(zip_longest(headers, vals)) for vals in fields]
def git_list_refs(repo_dir):
    """Map each *local* ref in the repo to its commit id.

    Similar to ls-remote, but for the local refs. Return format:

    .. code-block:: python

        {<ref1>: <commit_hash1>,
         <ref2>: <commit_hash2>,
         ...,
         <refN>: <commit_hashN>,
        }
    """
    command = ['git', 'show-ref', '--dereference', '--head']
    raw = execute_git_command(command, repo_dir=repo_dir).splitlines()
    refs = {}
    for line in raw:
        line = line.strip()
        if not line:
            continue
        # show-ref prints "<hash> <ref>" per line.
        commit_hash, ref = line.split(None, 1)
        refs[ref] = commit_hash
    return refs
def git_ls_remote(repo_dir, remote='origin', refs=None):
    """Run git ls-remote and map refs to commit ids.

    'remote' can be a remote ref in a local repo, e.g. origin, or the
    url of a remote repository. Return format:

    .. code-block:: python

        {<ref1>: <commit_hash1>,
         <ref2>: <commit_hash2>,
         ...,
         <refN>: <commit_hashN>,
        }
    """
    command = ['git', 'ls-remote', pipes.quote(remote)]
    if refs:
        if isinstance(refs, list):
            command.extend(refs)
        else:
            command.append(refs)
    raw = execute_git_command(command, repo_dir=repo_dir).splitlines()
    result = {}
    for line in raw:
        line = line.strip()
        # Skip blanks and the leading "From <url>" banner line.
        if not line or line.lower().startswith('from '):
            continue
        commit_hash, ref = line.split(None, 1)
        result[ref] = commit_hash
    return result
def git_branch(repo_dir, branch_name, start_point='HEAD',
               force=True, verbose=True, checkout=False):
    """Create a new branch like `git branch <branch_name> <start_point>`.

    With `checkout`, the new branch is also checked out and that
    command's output is returned instead of the branch command's.
    """
    command = ['git', 'branch']
    if verbose:
        command.append('--verbose')
    if force:
        command.append('--force')
    command += [branch_name, start_point]
    output = execute_git_command(command, repo_dir=repo_dir)
    if checkout:
        output = git_checkout(repo_dir, branch_name)
    return output
def git_checkout(repo_dir, ref, branch=None):
    """Do a git checkout of `ref` in `repo_dir`.

    If `branch` is given it names the branch to (re)create via -B.
    """
    command = ['git', 'checkout', '--force']
    if branch:
        command += ['-B', '{}'.format(branch)]
    command.append(ref)
    return execute_git_command(command, repo_dir=repo_dir)
def git_fetch(repo_dir, remote=None, refspec=None, verbose=False, tags=True):
    """Do a git fetch of `refspec` in `repo_dir`.

    When 'remote' is None, all remotes are fetched.
    """
    command = ['git', 'fetch']
    if remote:
        remote = pipes.quote(remote)
        # A specific remote may move the current head.
        command.append('--update-head-ok')
    else:
        command.append('--all')
    if tags:
        command.append('--tags')
    if verbose:
        command.append('--verbose')
    # Positional args come last: remote first, then the refspec.
    for extra in (remote, refspec):
        if extra:
            command.append(extra)
    return execute_git_command(command, repo_dir=repo_dir)
def git_pull(repo_dir, remote="origin", ref=None):
    """Do a git pull of `ref` from `remote`."""
    command = ['git', 'pull', '--update-head-ok']
    command.append(pipes.quote(remote))
    if ref:
        command.append(ref)
    return execute_git_command(command, repo_dir=repo_dir)
def git_commit(repo_dir, message=None, amend=False, stage=True):
    """Commit any changes, optionally staging all changes beforehand.

    With no `message`: an amend keeps the previous message (--no-edit),
    while a fresh commit is created with an explicitly empty message.
    """
    if stage:
        git_add_all(repo_dir)
    command = ['git', 'commit', '--allow-empty']
    if amend:
        command.append('--amend')
    if message:
        command.extend(['--message', pipes.quote(message)])
    else:
        command.append('--no-edit')
        if not amend:
            # No message and not amending: allow an empty message.
            command.extend(['--message=', '--allow-empty-message'])
    return execute_git_command(command, repo_dir=repo_dir)
def git_ls_tree(repo_dir, treeish='HEAD'):
    """Run git ls-tree.

    Returns a list of {'mode', 'type', 'object', 'file'} dicts, one per
    entry of the (recursive, full-tree) listing.
    """
    headers = ['mode', 'type', 'object', 'file']
    command = ['git', 'ls-tree', '-r', '--full-tree', treeish]
    raw = execute_git_command(command, repo_dir=repo_dir).splitlines()
    entries = []
    for line in raw:
        line = line.strip()
        if not line:
            continue
        # Each line: <mode> <type> <object> <file>
        entries.append(dict(zip(headers, line.split(None, 3))))
    return entries
def git_add_all(repo_dir):
    """Stage every change in the working tree (`git add --all`)."""
    return execute_git_command(['git', 'add', '--all'], repo_dir=repo_dir)
def git_status(repo_dir):
    """Return the `git status` report for the working tree."""
    return execute_git_command(['git', 'status'], repo_dir=repo_dir)
def git_head_commit(repo_dir):
    """Return the commit hash that HEAD currently points to."""
    return execute_git_command(['git', 'rev-parse', 'HEAD'],
                               repo_dir=repo_dir)
def git_current_branch(repo_dir):
    """Return the current branch name.

    In 'detached HEAD' state this simply returns "HEAD".
    """
    return execute_git_command(['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
                               repo_dir=repo_dir)
def is_git_repo(repo_dir):
    """Return True if the directory is inside a git repo."""
    # `git rev-parse` exits non-zero outside a repository.
    try:
        execute_git_command(['git', 'rev-parse'], repo_dir=repo_dir)
    except exceptions.SimplGitCommandError:
        return False
    return True
def git_remote_resolve_reference(repo_dir, ref, remote='origin'):
    """Try to find a revision (commit hash) for the ref at 'remote' repo.

    Once you have the revision (commit hash), you can check it out
    (possibly after fetching it first).
    Note: Borrowed these ideas from Chef
    https://github.com/chef/chef/blob/master/lib/chef/provider/git.rb
    Returns None if no revision is found.
    """
    ls_refs = git_ls_remote(repo_dir, remote=remote, refs='%s*' % ref)
    if ref == 'HEAD':
        return ls_refs['HEAD']
    # Most-specific first: dereferenced (^{}) tag/branch forms, then the
    # plain forms, then the ref as given.
    candidates = (
        'refs/tags/%s^{}' % ref,
        'refs/heads/%s^{}' % ref,
        '%s^{}' % ref,
        'refs/tags/%s' % ref,
        'refs/heads/%s' % ref,
        ref,
    )
    for candidate in candidates:
        if candidate in ls_refs:
            return ls_refs[candidate]
    return None
class GitRepo(object):
    """Wrapper on a git repository.

    Git command failures raise SimplGitCommandException which includes
    attributes about the returncode, error output, etc.
    Unless 'repo_dir' is already an initialized git repository,
    you will probably want to run one of the classmethod's to
    initialize a GitRepo, either GitRepo.init() or GitRepo.clone(),
    both of which return an instance of GitRepo.
    An attempt to instantiate GitRepo with a path that is not at/in
    a git repository will raise a SimplGitNotRepo exception.
    """
    def __init__(self, repo_dir=None):
        """Initialize wrapper and check for existence of dir.

        The init() and clone() classmethods are common ways of
        initializing an instance of GitRepo.
        Defaults to current working directory if repo_dir is not supplied.
        If the repo_dir is not a git repository, SimplGitNotRepo is raised.
        """
        repo_dir = repo_dir or os.getcwd()
        # Normalize to an absolute path with '~' expanded.
        repo_dir = os.path.abspath(
            os.path.expanduser(os.path.normpath(repo_dir)))
        if not os.path.isdir(repo_dir):
            raise OSError(errno.ENOENT, "No such directory")
        if not is_git_repo(repo_dir):
            raise exceptions.SimplGitNotRepo(
                "%s is not [in] a git repo." % repo_dir)
        self.repo_dir = repo_dir
        # True when the repo lives under the system temp dir; used by
        # __repr__ to flag throwaway repositories.
        self.temp = False
        if os.path.realpath(self.repo_dir).startswith(
                os.path.realpath(tempfile.gettempdir())):
            self.temp = True
    @classmethod
    def clone(cls, repo_location, repo_dir=None,
              branch_or_tag=None, temp=False):
        """Clone repo at repo_location into repo_dir and checkout branch_or_tag.

        Defaults into current working directory if repo_dir is not supplied.
        If 'temp' is True, a temporary directory will be created for you
        and the repository will be cloned into it. The tempdir is scheduled
        for deletion (when the process exits) through an exit function
        registered with the atexit module. If 'temp' is True, repo_dir
        is ignored.
        If branch_or_tag is not specified, the HEAD of the primary
        branch of the cloned repo is checked out.
        """
        if temp:
            # Name the tempdir after the repo (and ref) for debuggability.
            reponame = repo_location.rsplit('/', 1)[-1]
            suffix = '%s.temp_simpl_GitRepo' % '_'.join(
                [str(x) for x in (reponame, branch_or_tag) if x])
            repo_dir = create_tempdir(suffix=suffix, delete=True)
        else:
            repo_dir = repo_dir or os.getcwd()
        git_clone(repo_dir, repo_location, branch_or_tag=branch_or_tag)
        # assuming no errors
        return cls(repo_dir)
    @classmethod
    def init(cls, repo_dir=None, temp=False, initial_commit=False):
        """Run `git init` in the repo_dir.

        Defaults to current working directory if repo_dir is not supplied.
        If 'temp' is True, a temporary directory will be created for you
        and the repository will be initialized. The tempdir is scheduled
        for deletion (when the process exits) through an exit function
        registered with the atexit module. If 'temp' is True, repo_dir is
        ignored.
        """
        if temp:
            suffix = '.temp_simpl_GitRepo'
            repo_dir = create_tempdir(suffix=suffix, delete=True)
        else:
            repo_dir = repo_dir or os.getcwd()
        git_init(repo_dir)
        instance = cls(repo_dir)
        # NOTE(larsbutler): If we wanted to be defensive about this and favor
        # compatibility over elegance, we could just automatically add a
        # `git commit` (empty, no message) after every `git init`. I would
        # recommend doing this in the :class:`GitRepo` class, not in the
        # module-level util functions. Adding an extra commit shouldn't cause
        # any problems.
        if initial_commit:
            # unknown revision, needs a commit to run most commands
            instance.commit(
                message='Initial commit', amend=False, stage=False)
        return instance
    @property
    def origin(self):
        """Show where the 'origin' remote ref points.

        Returns None if the 'origin' remote ref does not exist.
        If 'origin' has different locations for different commands,
        the result is ambiguous and None is returned.
        Notes:
        A repo does not necessarily have any remotes configured.
        A repo with remotes configured does not necessarily have
        an 'origin' remote ref.
        This property is for common convenience.
        """
        remotes = self.list_remotes()
        candidates = set()
        for remote_ref in remotes:
            if remote_ref['name'] == 'origin':
                candidates.add(remote_ref['location'])
        # Exactly one location -> unambiguous; otherwise fall through
        # and implicitly return None.
        if len(candidates) == 1:
            return candidates.pop()
    @property
    def head(self):
        """Return the current commit hash."""
        return git_head_commit(self.repo_dir)
    @property
    def current_branch(self):
        """Return the current branch name.

        If the repo is in 'detached HEAD' state, this just returns "HEAD".
        """
        return git_current_branch(self.repo_dir)
    def __repr__(self):
        """Customize representation."""
        rpr = '<Simpl GitRepo'
        if self.temp:
            rpr += ' (tmp)'
        return ('%s %s at %s>'
                % (rpr, self.repo_dir,
                   hex(id(self))))
    def run_command(self, command):
        """Execute a command inside the repo."""
        return execute_git_command(command, repo_dir=self.repo_dir)
    def status(self):
        """Get the working tree status."""
        return git_status(self.repo_dir)
    def tag(self, tagname, message=None, force=True):
        """Create an annotated tag."""
        return git_tag(self.repo_dir, tagname, message=message, force=force)
    # pylint: disable=invalid-name
    def ls(self):
        """Return a list of *all* files & dirs in the repo.

        Think of this as a recursive `ls` command from the root of the repo.
        """
        tree = self.ls_tree()
        return [t.get('file') for t in tree if t.get('file')]
    def list_remotes(self):
        """List configured remotes."""
        return git_list_remotes(self.repo_dir)
    def ls_tree(self, treeish='HEAD'):
        """List *all* files/dirs in the repo at ref 'treeish'.

        Returns::

            [
                {'mode': <file permissions>,
                 'type': <git object type>,  # blob, tree, commit or tag
                 'object': <object hash>,
                 'file': <path/to/file.py>},
                {...},
            ]
        """
        return git_ls_tree(self.repo_dir, treeish=treeish)
    def list_refs(self):
        """List references available in the local repo with commit ids.

        This is similar to ls-remote, but shows the *local* refs.
        Returns::

            {'HEAD': <commit_hash0>,
             <ref1>: <commit_hash1>,
             <ref2>: <commit_hash2>,
             ...,
             <refN>: <commit_hashN>,
            }
        """
        return git_list_refs(self.repo_dir)
    def ls_remote(self, remote='origin', refs=None):
        """Return a mapping of refs to commit ids for the given remote.

        'remote' can be a remote ref in a local repo, e.g. origin,
        or url of a remote repository.
        Returns::

            {<ref1>: <commit_hash1>,
             <ref2>: <commit_hash2>,
             ...,
             <refN>: <commit_hashN>,
            }

        If 'refs' is supplied, only matching refs are returned.
        """
        return git_ls_remote(
            self.repo_dir, remote=remote, refs=refs)
    def list_tags(self, with_messages=False):
        """Return a list of git tags for the repository.

        If 'with_messages' is True, returns
        a list of (tag, message) tuples::

            [(<tag1>, <message1>), (<tag2>, <message2>)]
        """
        return git_list_tags(
            self.repo_dir, with_messages=with_messages)
    def list_config(self):
        """Return a dictionary of the git config."""
        return git_list_config(self.repo_dir)
    def list_branches(self):
        """Return a list of dicts, describing the branches.

        Returns::

            [
                {'branch': <branchname>,
                 'commit': <commit_hash>,
                 'message': <commit message>},
                {...},
            ]
        """
        return git_list_branches(self.repo_dir)
    def branch(self, branch_name, start_point='HEAD', force=True,
               checkout=False):
        """Create branch as in `git branch <branch_name> <start_point>`.

        If 'checkout' is True, checkout the branch after creation.
        """
        return git_branch(
            self.repo_dir, branch_name, start_point, force=force,
            checkout=checkout)
    def checkout(self, ref, branch=None):
        """Do a git checkout of `ref`."""
        return git_checkout(self.repo_dir, ref, branch=branch)
    def fetch(self, remote=None, refspec=None, verbose=False, tags=True):
        """Do a git fetch of `refspec`."""
        return git_fetch(self.repo_dir, remote=remote,
                         refspec=refspec, verbose=verbose, tags=tags)
    def pull(self, remote="origin", ref=None):
        """Do a git pull of `ref` from `remote`."""
        return git_pull(self.repo_dir, remote=remote, ref=ref)
    def add_all(self):
        """Stage all changes in the working tree."""
        return git_add_all(self.repo_dir)
    def commit(self, message=None, amend=False, stage=True):
        """Commit any changes, optionally staging all changes beforehand."""
        return git_commit(self.repo_dir, message=message,
                          amend=amend, stage=stage)
    def remote_resolve_reference(self, ref, remote='origin'):
        """Resolve a reference to a remote revision."""
        return git_remote_resolve_reference(self.repo_dir, ref, remote=remote)
def _cleanup_tempdir(tempdir):
    """Recursively delete `tempdir`, ignoring already-missing paths."""
    try:
        shutil.rmtree(tempdir)
    except OSError as err:
        # ENOENT means the directory is already gone, which is the goal;
        # any other failure is re-raised.
        if err.errno != errno.ENOENT:
            raise
def create_tempdir(suffix='', prefix='tmp', directory=None, delete=True):
    """Create a temporary directory and return its path.

    When `delete` is True, the directory is registered with the atexit
    module so it is removed when the process exits.
    """
    path = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=directory)
    if delete:
        # Deletion is deferred until interpreter exit, not done here.
        atexit.register(_cleanup_tempdir, path)
    return path
|
|
# -*- coding: iso-8859-15 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
# Angelos Tzotsos <tzotsos@gmail.com>
#
# Copyright (c) 2011 Tom Kralidis
# Copyright (c) 2011 Angelos Tzotsos
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import os
from lxml import etree
from pycsw import config, util
from pycsw.plugins.profiles import profile
# Location of the ISO/TC 211 gmx codelist catalogue referenced when
# emitting codelist-valued elements.
CODELIST = 'http://www.isotc211.org/2005/resources/Codelist/gmxCodelists.xml'
# codeSpace identifier for the ISO 19115 metadata standard.
CODESPACE = 'ISOTC211/19115'
class APISO(profile.Profile):
''' APISO class '''
def __init__(self, model, namespaces, context):
self.context = context
self.namespaces = {
'apiso': 'http://www.opengis.net/cat/csw/apiso/1.0',
'gco': 'http://www.isotc211.org/2005/gco',
'gmd': 'http://www.isotc211.org/2005/gmd',
'srv': 'http://www.isotc211.org/2005/srv',
'xlink': 'http://www.w3.org/1999/xlink'
}
self.inspire_namespaces = {
'inspire_ds': 'http://inspire.ec.europa.eu/schemas/inspire_ds/1.0',
'inspire_common': 'http://inspire.ec.europa.eu/schemas/common/1.0'
}
self.repository = {
'gmd:MD_Metadata': {
'outputschema': 'http://www.isotc211.org/2005/gmd',
'queryables': {
'SupportedISOQueryables': {
'apiso:Subject': {'xpath': 'gmd:identificationInfo/gmd:MD_Identification/gmd:descriptiveKeywords/gmd:MD_Keywords/gmd:keyword/gco:CharacterString|gmd:identificationInfo/gmd:MD_DataIdentification/gmd:topicCategory/gmd:MD_TopicCategoryCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:Keywords']},
'apiso:Title': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:title/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:Title']},
'apiso:Abstract': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:abstract/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:Abstract']},
'apiso:Format': {'xpath': 'gmd:distributionInfo/gmd:MD_Distribution/gmd:distributionFormat/gmd:MD_Format/gmd:name/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:Format']},
'apiso:Identifier': {'xpath': 'gmd:fileIdentifier/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:Identifier']},
'apiso:Modified': {'xpath': 'gmd:dateStamp/gco:Date', 'dbcol': self.context.md_core_model['mappings']['pycsw:Modified']},
'apiso:Type': {'xpath': 'gmd:hierarchyLevel/gmd:MD_ScopeCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:Type']},
'apiso:BoundingBox': {'xpath': 'apiso:BoundingBox', 'dbcol': self.context.md_core_model['mappings']['pycsw:BoundingBox']},
'apiso:CRS': {'xpath': 'concat("urn:ogc:def:crs:","gmd:referenceSystemInfo/gmd:MD_ReferenceSystem/gmd:referenceSystemIdentifier/gmd:RS_Identifier/gmd:codeSpace/gco:CharacterString",":","gmd:referenceSystemInfo/gmd:MD_ReferenceSystem/gmd:referenceSystemIdentifier/gmd:RS_Identifier/gmd:version/gco:CharacterString",":","gmd:referenceSystemInfo/gmd:MD_ReferenceSystem/gmd:referenceSystemIdentifier/gmd:RS_Identifier/gmd:code/gco:CharacterString")', 'dbcol': self.context.md_core_model['mappings']['pycsw:CRS']},
'apiso:AlternateTitle': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:alternateTitle/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:AlternateTitle']},
'apiso:RevisionDate': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:date/gmd:CI_Date[gmd:dateType/gmd:CI_DateTypeCode/@codeListValue="revision"]/gmd:date/gco:Date', 'dbcol': self.context.md_core_model['mappings']['pycsw:RevisionDate']},
'apiso:CreationDate': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:date/gmd:CI_Date[gmd:dateType/gmd:CI_DateTypeCode/@codeListValue="creation"]/gmd:date/gco:Date', 'dbcol': self.context.md_core_model['mappings']['pycsw:CreationDate']},
'apiso:PublicationDate': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:date/gmd:CI_Date[gmd:dateType/gmd:CI_DateTypeCode/@codeListValue="publication"]/gmd:date/gco:Date', 'dbcol': self.context.md_core_model['mappings']['pycsw:PublicationDate']},
'apiso:OrganisationName': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:pointOfContact/gmd:CI_ResponsibleParty/gmd:organisationName/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:OrganizationName']},
'apiso:HasSecurityConstraints': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:resourceConstraints/gmd:MD_SecurityConstraints', 'dbcol': self.context.md_core_model['mappings']['pycsw:SecurityConstraints']},
'apiso:Language': {'xpath': 'gmd:language/gmd:LanguageCode|gmd:language/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:Language']},
'apiso:ParentIdentifier': {'xpath': 'gmd:parentIdentifier/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:ParentIdentifier']},
'apiso:KeywordType': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:descriptiveKeywords/gmd:MD_Keywords/gmd:type/gmd:MD_KeywordTypeCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:KeywordType']},
'apiso:TopicCategory': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:topicCategory/gmd:MD_TopicCategoryCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:TopicCategory']},
'apiso:ResourceLanguage': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:code/gmd:MD_LanguageTypeCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:ResourceLanguage']},
'apiso:GeographicDescriptionCode': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:extent/gmd:EX_Extent/gmd:geographicElement/gmd:EX_GeographicDescription/gmd:geographicIdentifier/gmd:MD_Identifier/gmd:code/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:GeographicDescriptionCode']},
'apiso:Denominator': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:spatialResolution/gmd:MD_Resolution/gmd:equivalentScale/gmd:MD_RepresentativeFraction/gmd:denominator/gco:Integer', 'dbcol': self.context.md_core_model['mappings']['pycsw:Denominator']},
'apiso:DistanceValue': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:spatialResolution/gmd:MD_Resolution/gmd:distance/gco:Distance', 'dbcol': self.context.md_core_model['mappings']['pycsw:DistanceValue']},
'apiso:DistanceUOM': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:spatialResolution/gmd:MD_Resolution/gmd:distance/gco:Distance/@uom', 'dbcol': self.context.md_core_model['mappings']['pycsw:DistanceUOM']},
'apiso:TempExtent_begin': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:extent/gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:beginPosition', 'dbcol': self.context.md_core_model['mappings']['pycsw:TempExtent_begin']},
'apiso:TempExtent_end': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:extent/gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:endPosition', 'dbcol': self.context.md_core_model['mappings']['pycsw:TempExtent_end']},
'apiso:AnyText': {'xpath': '//', 'dbcol': self.context.md_core_model['mappings']['pycsw:AnyText']},
'apiso:ServiceType': {'xpath': 'gmd:identificationInfo/srv:SV_ServiceIdentification/srv:serviceType/gco:LocalName', 'dbcol': self.context.md_core_model['mappings']['pycsw:ServiceType']},
'apiso:ServiceTypeVersion': {'xpath': 'gmd:identificationInfo/srv:SV_ServiceIdentification/srv:serviceTypeVersion/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:ServiceTypeVersion']},
'apiso:Operation': {'xpath': 'gmd:identificationInfo/srv:SV_ServiceIdentification/srv:containsOperations/srv:SV_OperationMetadata/srv:operationName/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:Operation']},
'apiso:CouplingType': {'xpath': 'gmd:identificationInfo/srv:SV_ServiceIdentification/srv:couplingType/srv:SV_CouplingType', 'dbcol': self.context.md_core_model['mappings']['pycsw:CouplingType']},
'apiso:OperatesOn': {'xpath': 'gmd:identificationInfo/srv:SV_ServiceIdentification/srv:operatesOn/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:RS_Identifier/gmd:code/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:OperatesOn']},
'apiso:OperatesOnIdentifier': {'xpath': 'gmd:identificationInfo/srv:SV_ServiceIdentification/srv:coupledResource/srv:SV_CoupledResource/srv:identifier/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:OperatesOnIdentifier']},
'apiso:OperatesOnName': {'xpath': 'gmd:identificationInfo/srv:SV_ServiceIdentification/srv:coupledResource/srv:SV_CoupledResource/srv:operationName/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:OperatesOnName']},
},
'AdditionalQueryables': {
'apiso:Degree': {'xpath': 'gmd:dataQualityInfo/gmd:DQ_DataQuality/gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:pass/gco:Boolean', 'dbcol': self.context.md_core_model['mappings']['pycsw:Degree']},
'apiso:AccessConstraints': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_RestrictionCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:AccessConstraints']},
'apiso:OtherConstraints': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:otherConstraints/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:OtherConstraints']},
'apiso:Classification': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_ClassificationCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:Classification']},
'apiso:ConditionApplyingToAccessAndUse': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:useLimitation/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:ConditionApplyingToAccessAndUse']},
'apiso:Lineage': {'xpath': 'gmd:dataQualityInfo/gmd:DQ_DataQuality/gmd:lineage/gmd:LI_Lineage/gmd:statement/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:Lineage']},
'apiso:ResponsiblePartyRole': {'xpath': 'gmd:contact/gmd:CI_ResponsibleParty/gmd:role/gmd:CI_RoleCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:ResponsiblePartyRole']},
'apiso:SpecificationTitle': {'xpath': 'gmd:dataQualityInfo/gmd:DQ_DataQuality/gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:SpecificationTitle']},
'apiso:SpecificationDate': {'xpath': 'gmd:dataQualityInfo/gmd:DQ_DataQuality/gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date', 'dbcol': self.context.md_core_model['mappings']['pycsw:SpecificationDate']},
'apiso:SpecificationDateType': {'xpath': 'gmd:dataQualityInfo/gmd:DQ_DataQuality/gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode', 'dbcol': self.context.md_core_model['mappings']['pycsw:SpecificationDateType']},
'apiso:Creator': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:pointOfContact/gmd:CI_ResponsibleParty/gmd:organisationName[gmd:role/gmd:CI_RoleCode/@codeListValue="originator"]/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:Creator']},
'apiso:Publisher': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:pointOfContact/gmd:CI_ResponsibleParty/gmd:organisationName[gmd:role/gmd:CI_RoleCode/@codeListValue="publisher"]/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:Publisher']},
'apiso:Contributor': {'xpath': 'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:pointOfContact/gmd:CI_ResponsibleParty/gmd:organisationName[gmd:role/gmd:CI_RoleCode/@codeListValue="contributor"]/gco:CharacterString', 'dbcol': self.context.md_core_model['mappings']['pycsw:Contributor']},
'apiso:Relation': {'xpath': 'gmd:identificationInfo/gmd:MD_Data_Identification/gmd:aggregationInfo', 'dbcol': self.context.md_core_model['mappings']['pycsw:Relation']}
}
},
'mappings': {
'csw:Record': {
# map APISO queryables to DC queryables
'apiso:Title': 'dc:title',
'apiso:Creator': 'dc:creator',
'apiso:Subject': 'dc:subject',
'apiso:Abstract': 'dct:abstract',
'apiso:Publisher': 'dc:publisher',
'apiso:Contributor': 'dc:contributor',
'apiso:Modified': 'dct:modified',
#'apiso:Date': 'dc:date',
'apiso:Type': 'dc:type',
'apiso:Format': 'dc:format',
'apiso:Language': 'dc:language',
'apiso:Relation': 'dc:relation',
'apiso:AccessConstraints': 'dc:rights',
}
}
}
}
profile.Profile.__init__(self,
name='apiso',
version='1.0.0',
title='ISO Metadata Application Profile',
url='http://portal.opengeospatial.org/files/?artifact_id=21460',
namespace=self.namespaces['gmd'],
typename='gmd:MD_Metadata',
outputschema=self.namespaces['gmd'],
prefixes=['apiso', 'gmd'],
model=model,
core_namespaces=namespaces,
added_namespaces=self.namespaces,
repository=self.repository['gmd:MD_Metadata'])
def extend_core(self, model, namespaces, config):
''' Extend core configuration '''
# update INSPIRE vars
self.context.namespaces.update(self.inspire_namespaces)
# update harvest resource types with WMS, since WMS is not a typename,
if 'Harvest' in model['operations']:
model['operations']['Harvest']['parameters']['ResourceType']['values'].append('http://www.isotc211.org/schemas/2005/gmd/')
# set INSPIRE config
if config.has_section('metadata:inspire') and config.has_option('metadata:inspire', 'enabled') and config.get('metadata:inspire', 'enabled') == 'true':
self.inspire_config = {}
self.inspire_config['languages_supported'] = config.get('metadata:inspire', 'languages_supported')
self.inspire_config['default_language'] = config.get('metadata:inspire', 'default_language')
self.inspire_config['date'] = config.get('metadata:inspire', 'date')
self.inspire_config['gemet_keywords'] = config.get('metadata:inspire', 'gemet_keywords')
self.inspire_config['conformity_service'] = config.get('metadata:inspire', 'conformity_service')
self.inspire_config['contact_name'] = config.get('metadata:inspire', 'contact_name')
self.inspire_config['contact_email'] = config.get('metadata:inspire', 'contact_email')
self.inspire_config['temp_extent'] = config.get('metadata:inspire', 'temp_extent')
else:
self.inspire_config = None
self.ogc_schemas_base = config.get('server', 'ogc_schemas_base')
self.url = config.get('server', 'url')
def check_parameters(self, kvp):
'''Check for Language parameter in GetCapabilities request'''
if self.inspire_config is not None:
result = None
if 'language' not in kvp:
self.inspire_config['current_language'] = self.inspire_config['default_language']
else:
if kvp['language'] not in self.inspire_config['languages_supported'].split(','):
text = 'Requested Language not supported, Supported languages: %s' % self.inspire_config['languages_supported']
return {'error': 'true', 'locator': 'language', 'code': 'InvalidParameterValue', 'text': text}
else:
self.inspire_config['current_language'] = kvp['language']
return None
return None
return None
def get_extendedcapabilities(self):
''' Add child to ows:OperationsMetadata Element '''
if self.inspire_config is not None:
ex_caps = etree.Element(
util.nspath_eval('inspire_ds:ExtendedCapabilities', self.inspire_namespaces))
ex_caps.attrib[util.nspath_eval('xsi:schemaLocation', self.context.namespaces)] = \
'%s %s/inspire_ds.xsd' % \
(self.inspire_namespaces['inspire_ds'], self.inspire_namespaces['inspire_ds'])
# Resource Locator
res_loc = etree.SubElement(ex_caps,
util.nspath_eval('inspire_common:ResourceLocator', self.inspire_namespaces))
etree.SubElement(res_loc,
util.nspath_eval('inspire_common:URL', self.inspire_namespaces)).text = '%sservice=CSW&version=2.0.2&request=GetCapabilities' % (util.bind_url(self.url))
etree.SubElement(res_loc,
util.nspath_eval('inspire_common:MediaType', self.inspire_namespaces)).text = 'application/xml'
# Resource Type
etree.SubElement(ex_caps,
util.nspath_eval('inspire_common:ResourceType', self.inspire_namespaces)).text = 'service'
# Temporal Reference
temp_ref = etree.SubElement(ex_caps,
util.nspath_eval('inspire_common:TemporalReference', self.inspire_namespaces))
temp_extent = etree.SubElement(temp_ref,
util.nspath_eval('inspire_common:TemporalExtent', self.inspire_namespaces))
val = self.inspire_config['temp_extent'].split('/')
if len(val) == 1:
etree.SubElement(temp_extent,
util.nspath_eval('inspire_common:IndividualDate', self.inspire_namespaces)).text = val[0]
else:
interval_dates = etree.SubElement(temp_extent,
util.nspath_eval('inspire_common:IntervalOfDates', self.inspire_namespaces))
etree.SubElement(interval_dates,
util.nspath_eval('inspire_common:StartingDate', self.inspire_namespaces)).text = val[0]
etree.SubElement(interval_dates,
util.nspath_eval('inspire_common:EndDate', self.inspire_namespaces)).text = val[1]
# Conformity - service
cfm = etree.SubElement(ex_caps,
util.nspath_eval('inspire_common:Conformity', self.inspire_namespaces))
spec = etree.SubElement(cfm,
util.nspath_eval('inspire_common:Specification', self.inspire_namespaces))
spec.attrib[util.nspath_eval('xsi:type', self.context.namespaces)] = 'inspire_common:citationInspireInteroperabilityRegulation_eng'
etree.SubElement(spec,
util.nspath_eval('inspire_common:Title', self.inspire_namespaces)).text = 'COMMISSION REGULATION (EU) No 1089/2010 of 23 November 2010 implementing Directive 2007/2/EC of the European Parliament and of the Council as regards interoperability of spatial data sets and services'
etree.SubElement(spec,
util.nspath_eval('inspire_common:DateOfPublication', self.inspire_namespaces)).text = '2010-12-08'
etree.SubElement(spec,
util.nspath_eval('inspire_common:URI', self.inspire_namespaces)).text = 'OJ:L:2010:323:0011:0102:EN:PDF'
spec_loc = etree.SubElement(spec,
util.nspath_eval('inspire_common:ResourceLocator', self.inspire_namespaces))
etree.SubElement(spec_loc,
util.nspath_eval('inspire_common:URL', self.inspire_namespaces)).text = 'http://eur-lex.europa.eu/LexUriServ/LexUriServ.do?uri=OJ:L:2010:323:0011:0102:EN:PDF'
etree.SubElement(spec_loc,
util.nspath_eval('inspire_common:MediaType', self.inspire_namespaces)).text = 'application/pdf'
spec = etree.SubElement(cfm,
util.nspath_eval('inspire_common:Degree', self.inspire_namespaces)).text = self.inspire_config['conformity_service']
# Metadata Point of Contact
poc = etree.SubElement(ex_caps,
util.nspath_eval('inspire_common:MetadataPointOfContact', self.inspire_namespaces))
etree.SubElement(poc,
util.nspath_eval('inspire_common:OrganisationName', self.inspire_namespaces)).text = self.inspire_config['contact_name']
etree.SubElement(poc,
util.nspath_eval('inspire_common:EmailAddress', self.inspire_namespaces)).text = self.inspire_config['contact_email']
# Metadata Date
etree.SubElement(ex_caps,
util.nspath_eval('inspire_common:MetadataDate', self.inspire_namespaces)).text = self.inspire_config['date']
# Spatial Data Service Type
etree.SubElement(ex_caps,
util.nspath_eval('inspire_common:SpatialDataServiceType', self.inspire_namespaces)).text = 'discovery'
# Mandatory Keyword
mkey = etree.SubElement(ex_caps,
util.nspath_eval('inspire_common:MandatoryKeyword', self.inspire_namespaces))
mkey.attrib[util.nspath_eval('xsi:type', self.context.namespaces)] = 'inspire_common:classificationOfSpatialDataService'
etree.SubElement(mkey,
util.nspath_eval('inspire_common:KeywordValue', self.inspire_namespaces)).text = 'infoCatalogueService'
# Gemet Keywords
for gkw in self.inspire_config['gemet_keywords'].split(','):
gkey = etree.SubElement(ex_caps,
util.nspath_eval('inspire_common:Keyword', self.inspire_namespaces))
gkey.attrib[util.nspath_eval('xsi:type', self.context.namespaces)] = 'inspire_common:inspireTheme_eng'
ocv = etree.SubElement(gkey,
util.nspath_eval('inspire_common:OriginatingControlledVocabulary', self.inspire_namespaces))
etree.SubElement(ocv,
util.nspath_eval('inspire_common:Title', self.inspire_namespaces)).text = 'GEMET - INSPIRE themes'
etree.SubElement(ocv,
util.nspath_eval('inspire_common:DateOfPublication', self.inspire_namespaces)).text = '2008-06-01'
etree.SubElement(gkey,
util.nspath_eval('inspire_common:KeywordValue', self.inspire_namespaces)).text = gkw
# Languages
slang = etree.SubElement(ex_caps,
util.nspath_eval('inspire_common:SupportedLanguages', self.inspire_namespaces))
dlang = etree.SubElement(slang,
util.nspath_eval('inspire_common:DefaultLanguage', self.inspire_namespaces))
etree.SubElement(dlang,
util.nspath_eval('inspire_common:Language', self.inspire_namespaces)).text = self.inspire_config['default_language']
for l in self.inspire_config['languages_supported'].split(','):
lang = etree.SubElement(slang,
util.nspath_eval('inspire_common:SupportedLanguage', self.inspire_namespaces))
etree.SubElement(lang,
util.nspath_eval('inspire_common:Language', self.inspire_namespaces)).text = l
clang = etree.SubElement(ex_caps,
util.nspath_eval('inspire_common:ResponseLanguage', self.inspire_namespaces))
etree.SubElement(clang,
util.nspath_eval('inspire_common:Language', self.inspire_namespaces)).text = self.inspire_config['current_language']
return ex_caps
def get_schemacomponents(self):
''' Return schema components as lxml.etree.Element list '''
node1 = etree.Element(
util.nspath_eval('csw:SchemaComponent', self.context.namespaces),
schemaLanguage='XMLSCHEMA', targetNamespace=self.namespace,
parentSchema='gmd.xsd')
schema_file = os.path.join(self.context.pycsw_home,
'plugins', 'profiles', 'apiso', 'schemas', 'ogc', 'iso',
'19139', '20060504', 'gmd', 'identification.xsd')
schema = etree.parse(schema_file, self.context.parser).getroot()
node1.append(schema)
node2 = etree.Element(
util.nspath_eval('csw:SchemaComponent', self.context.namespaces),
schemaLanguage='XMLSCHEMA', targetNamespace=self.namespace,
parentSchema='gmd.xsd')
schema_file = os.path.join(self.context.pycsw_home, 'plugins',
'profiles', 'apiso', 'schemas', 'ogc', 'iso', '19139',
'20060504', 'srv', 'serviceMetadata.xsd')
schema = etree.parse(schema_file, self.context.parser).getroot()
node2.append(schema)
return [node1, node2]
def check_getdomain(self, kvp):
'''Perform extra profile specific checks in the GetDomain request'''
return None
def write_record(self, result, esn, outputschema, queryables, caps=None):
''' Return csw:SearchResults child as lxml.etree.Element '''
typename = util.getqattr(result, self.context.md_core_model['mappings']['pycsw:Typename'])
is_iso_anyway = False
xml_blob = util.getqattr(result, self.context.md_core_model['mappings']['pycsw:XML'])
if caps is None and xml_blob is not None and xml_blob.startswith('<gmd:MD_Metadata'):
is_iso_anyway = True
if (esn == 'full' and (typename == 'gmd:MD_Metadata' or is_iso_anyway)):
# dump record as is and exit
return etree.fromstring(xml_blob, self.context.parser)
if typename == 'csw:Record': # transform csw:Record -> gmd:MD_Metadata model mappings
util.transform_mappings(queryables, self.repository['mappings']['csw:Record'])
node = etree.Element(util.nspath_eval('gmd:MD_Metadata', self.namespaces))
node.attrib[util.nspath_eval('xsi:schemaLocation', self.context.namespaces)] = \
'%s %s/csw/2.0.2/profiles/apiso/1.0.0/apiso.xsd' % (self.namespace, self.ogc_schemas_base)
# identifier
idval = util.getqattr(result, self.context.md_core_model['mappings']['pycsw:Identifier'])
identifier = etree.SubElement(node, util.nspath_eval('gmd:fileIdentifier', self.namespaces))
etree.SubElement(identifier, util.nspath_eval('gco:CharacterString', self.namespaces)).text = idval
if esn in ['summary', 'full']:
# language
val = util.getqattr(result, queryables['apiso:Language']['dbcol'])
lang = etree.SubElement(node, util.nspath_eval('gmd:language', self.namespaces))
etree.SubElement(lang, util.nspath_eval('gco:CharacterString', self.namespaces)).text = val
# hierarchyLevel
mtype = util.getqattr(result, queryables['apiso:Type']['dbcol']) or None
if mtype is not None:
if mtype == 'http://purl.org/dc/dcmitype/Dataset':
mtype = 'dataset'
hierarchy = etree.SubElement(node, util.nspath_eval('gmd:hierarchyLevel', self.namespaces))
hierarchy.append(_write_codelist_element('gmd:MD_ScopeCode', mtype, self.namespaces))
if esn in ['summary', 'full']:
# contact
contact = etree.SubElement(node, util.nspath_eval('gmd:contact', self.namespaces))
if caps is not None:
CI_resp = etree.SubElement(contact, util.nspath_eval('gmd:CI_ResponsibleParty', self.namespaces))
if hasattr(caps.provider.contact, 'name'):
ind_name = etree.SubElement(CI_resp, util.nspath_eval('gmd:individualName', self.namespaces))
etree.SubElement(ind_name, util.nspath_eval('gco:CharacterString', self.namespaces)).text = caps.provider.contact.name
if hasattr(caps.provider.contact, 'organization'):
if caps.provider.contact.organization is not None:
org_val = caps.provider.contact.organization
else:
org_val = caps.provider.name
org_name = etree.SubElement(CI_resp, util.nspath_eval('gmd:organisationName', self.namespaces))
etree.SubElement(org_name, util.nspath_eval('gco:CharacterString', self.namespaces)).text = org_val
if hasattr(caps.provider.contact, 'position'):
pos_name = etree.SubElement(CI_resp, util.nspath_eval('gmd:positionName', self.namespaces))
etree.SubElement(pos_name, util.nspath_eval('gco:CharacterString', self.namespaces)).text = caps.provider.contact.position
contact_info = etree.SubElement(CI_resp, util.nspath_eval('gmd:contactInfo', self.namespaces))
ci_contact = etree.SubElement(contact_info, util.nspath_eval('gmd:CI_Contact', self.namespaces))
if hasattr(caps.provider.contact, 'phone'):
phone = etree.SubElement(ci_contact, util.nspath_eval('gmd:phone', self.namespaces))
ci_phone = etree.SubElement(phone, util.nspath_eval('gmd:CI_Telephone', self.namespaces))
voice = etree.SubElement(ci_phone, util.nspath_eval('gmd:voice', self.namespaces))
etree.SubElement(voice, util.nspath_eval('gco:CharacterString', self.namespaces)).text = caps.provider.contact.phone
if hasattr(caps.provider.contact, 'fax'):
fax = etree.SubElement(ci_phone, util.nspath_eval('gmd:facsimile', self.namespaces))
etree.SubElement(fax, util.nspath_eval('gco:CharacterString', self.namespaces)).text = caps.provider.contact.fax
address = etree.SubElement(ci_contact, util.nspath_eval('gmd:address', self.namespaces))
ci_address = etree.SubElement(address, util.nspath_eval('gmd:CI_Address', self.namespaces))
if hasattr(caps.provider.contact, 'address'):
delivery_point = etree.SubElement(ci_address, util.nspath_eval('gmd:deliveryPoint', self.namespaces))
etree.SubElement(delivery_point, util.nspath_eval('gco:CharacterString', self.namespaces)).text = caps.provider.contact.address
if hasattr(caps.provider.contact, 'city'):
city = etree.SubElement(ci_address, util.nspath_eval('gmd:city', self.namespaces))
etree.SubElement(city, util.nspath_eval('gco:CharacterString', self.namespaces)).text = caps.provider.contact.city
if hasattr(caps.provider.contact, 'region'):
admin_area = etree.SubElement(ci_address, util.nspath_eval('gmd:administrativeArea', self.namespaces))
etree.SubElement(admin_area, util.nspath_eval('gco:CharacterString', self.namespaces)).text = caps.provider.contact.region
if hasattr(caps.provider.contact, 'postcode'):
postal_code = etree.SubElement(ci_address, util.nspath_eval('gmd:postalCode', self.namespaces))
etree.SubElement(postal_code, util.nspath_eval('gco:CharacterString', self.namespaces)).text = caps.provider.contact.postcode
if hasattr(caps.provider.contact, 'country'):
country = etree.SubElement(ci_address, util.nspath_eval('gmd:country', self.namespaces))
etree.SubElement(country, util.nspath_eval('gco:CharacterString', self.namespaces)).text = caps.provider.contact.country
if hasattr(caps.provider.contact, 'email'):
email = etree.SubElement(ci_address, util.nspath_eval('gmd:electronicMailAddress', self.namespaces))
etree.SubElement(email, util.nspath_eval('gco:CharacterString', self.namespaces)).text = caps.provider.contact.email
contact_url = None
if hasattr(caps.provider, 'url'):
contact_url = caps.provider.url
if hasattr(caps.provider.contact, 'url') and caps.provider.contact.url is not None:
contact_url = caps.provider.contact.url
if contact_url is not None:
online_resource = etree.SubElement(ci_contact, util.nspath_eval('gmd:onlineResource', self.namespaces))
gmd_linkage = etree.SubElement(online_resource, util.nspath_eval('gmd:linkage', self.namespaces))
etree.SubElement(gmd_linkage, util.nspath_eval('gmd:URL', self.namespaces)).text = contact_url
if hasattr(caps.provider.contact, 'role'):
role = etree.SubElement(CI_resp, util.nspath_eval('gmd:role', self.namespaces))
role_val = caps.provider.contact.role
if role_val is None:
role_val = 'pointOfContact'
etree.SubElement(role, util.nspath_eval('gmd:CI_RoleCode', self.namespaces), codeListValue=role_val, codeList='%s#CI_RoleCode' % CODELIST).text = role_val
else:
val = util.getqattr(result, queryables['apiso:OrganisationName']['dbcol'])
if val:
CI_resp = etree.SubElement(contact, util.nspath_eval('gmd:CI_ResponsibleParty', self.namespaces))
org_name = etree.SubElement(CI_resp, util.nspath_eval('gmd:organisationName', self.namespaces))
etree.SubElement(org_name, util.nspath_eval('gco:CharacterString', self.namespaces)).text = val
# date
val = util.getqattr(result, queryables['apiso:Modified']['dbcol'])
date = etree.SubElement(node, util.nspath_eval('gmd:dateStamp', self.namespaces))
if val and val.find('T') != -1:
dateel = 'gco:DateTime'
else:
dateel = 'gco:Date'
etree.SubElement(date, util.nspath_eval(dateel, self.namespaces)).text = val
metadatastandardname = 'ISO19115'
metadatastandardversion = '2003/Cor.1:2006'
if mtype == 'service':
metadatastandardname = 'ISO19119'
metadatastandardversion = '2005/PDAM 1'
# metadata standard name
standard = etree.SubElement(node, util.nspath_eval('gmd:metadataStandardName', self.namespaces))
etree.SubElement(standard, util.nspath_eval('gco:CharacterString', self.namespaces)).text = metadatastandardname
# metadata standard version
standardver = etree.SubElement(node, util.nspath_eval('gmd:metadataStandardVersion', self.namespaces))
etree.SubElement(standardver, util.nspath_eval('gco:CharacterString', self.namespaces)).text = metadatastandardversion
# title
val = util.getqattr(result, queryables['apiso:Title']['dbcol']) or ''
identification = etree.SubElement(node, util.nspath_eval('gmd:identificationInfo', self.namespaces))
if mtype == 'service':
restagname = 'srv:SV_ServiceIdentification'
else:
restagname = 'gmd:MD_DataIdentification'
resident = etree.SubElement(identification, util.nspath_eval(restagname, self.namespaces), id=idval)
tmp2 = etree.SubElement(resident, util.nspath_eval('gmd:citation', self.namespaces))
tmp3 = etree.SubElement(tmp2, util.nspath_eval('gmd:CI_Citation', self.namespaces))
tmp4 = etree.SubElement(tmp3, util.nspath_eval('gmd:title', self.namespaces))
etree.SubElement(tmp4, util.nspath_eval('gco:CharacterString', self.namespaces)).text = val
# creation date
val = util.getqattr(result, queryables['apiso:CreationDate']['dbcol'])
if val is not None:
tmp3.append(_write_date(val, 'creation', self.namespaces))
# publication date
val = util.getqattr(result, queryables['apiso:PublicationDate']['dbcol'])
if val is not None:
tmp3.append(_write_date(val, 'publication', self.namespaces))
# revision date
val = util.getqattr(result, queryables['apiso:RevisionDate']['dbcol'])
if val is not None:
tmp3.append(_write_date(val, 'revision', self.namespaces))
if esn in ['summary', 'full']:
# abstract
val = util.getqattr(result, queryables['apiso:Abstract']['dbcol']) or ''
tmp = etree.SubElement(resident, util.nspath_eval('gmd:abstract', self.namespaces))
etree.SubElement(tmp, util.nspath_eval('gco:CharacterString', self.namespaces)).text = val
# keywords
kw = util.getqattr(result, queryables['apiso:Subject']['dbcol'])
if kw is not None:
md_keywords = etree.SubElement(resident, util.nspath_eval('gmd:descriptiveKeywords', self.namespaces))
md_keywords.append(write_keywords(kw, self.namespaces))
# spatial resolution
val = util.getqattr(result, queryables['apiso:Denominator']['dbcol'])
if val:
tmp = etree.SubElement(resident, util.nspath_eval('gmd:spatialResolution', self.namespaces))
tmp2 = etree.SubElement(tmp, util.nspath_eval('gmd:MD_Resolution', self.namespaces))
tmp3 = etree.SubElement(tmp2, util.nspath_eval('gmd:equivalentScale', self.namespaces))
tmp4 = etree.SubElement(tmp3, util.nspath_eval('gmd:MD_RepresentativeFraction', self.namespaces))
tmp5 = etree.SubElement(tmp4, util.nspath_eval('gmd:denominator', self.namespaces))
etree.SubElement(tmp5, util.nspath_eval('gco:Integer', self.namespaces)).text = str(val)
# resource language
val = util.getqattr(result, queryables['apiso:ResourceLanguage']['dbcol'])
tmp = etree.SubElement(resident, util.nspath_eval('gmd:language', self.namespaces))
etree.SubElement(tmp, util.nspath_eval('gco:CharacterString', self.namespaces)).text = val
# topic category
val = util.getqattr(result, queryables['apiso:TopicCategory']['dbcol'])
if val:
for v in val.split(','):
tmp = etree.SubElement(resident, util.nspath_eval('gmd:topicCategory', self.namespaces))
etree.SubElement(tmp, util.nspath_eval('gmd:MD_TopicCategoryCode', self.namespaces)).text = val
# bbox extent
val = util.getqattr(result, queryables['apiso:BoundingBox']['dbcol'])
bboxel = write_extent(val, self.namespaces)
if bboxel is not None and mtype != 'service':
resident.append(bboxel)
# service identification
if mtype == 'service':
# service type
# service type version
val = util.getqattr(result, queryables['apiso:ServiceType']['dbcol'])
val2 = util.getqattr(result, queryables['apiso:ServiceTypeVersion']['dbcol'])
if val is not None:
tmp = etree.SubElement(resident, util.nspath_eval('srv:serviceType', self.namespaces))
etree.SubElement(tmp, util.nspath_eval('gco:LocalName', self.namespaces)).text = val
tmp = etree.SubElement(resident, util.nspath_eval('srv:serviceTypeVersion', self.namespaces))
etree.SubElement(tmp, util.nspath_eval('gco:CharacterString', self.namespaces)).text = val2
kw = util.getqattr(result, queryables['apiso:Subject']['dbcol'])
if kw is not None:
srv_keywords = etree.SubElement(resident, util.nspath_eval('srv:keywords', self.namespaces))
srv_keywords.append(write_keywords(kw, self.namespaces))
if bboxel is not None:
bboxel.tag = util.nspath_eval('srv:extent', self.namespaces)
resident.append(bboxel)
val = util.getqattr(result, queryables['apiso:CouplingType']['dbcol'])
if val is not None:
couplingtype = etree.SubElement(resident, util.nspath_eval('srv:couplingType', self.namespaces))
etree.SubElement(couplingtype, util.nspath_eval('srv:SV_CouplingType', self.namespaces), codeListValue=val, codeList='%s#SV_CouplingType' % CODELIST).text = val
if esn in ['summary', 'full']:
# all service resources as coupled resources
coupledresources = util.getqattr(result, queryables['apiso:OperatesOn']['dbcol'])
operations = util.getqattr(result, queryables['apiso:Operation']['dbcol'])
if coupledresources:
for val2 in coupledresources.split(','):
coupledres = etree.SubElement(resident, util.nspath_eval('srv:coupledResource', self.namespaces))
svcoupledres = etree.SubElement(coupledres, util.nspath_eval('srv:SV_CoupledResource', self.namespaces))
opname = etree.SubElement(svcoupledres, util.nspath_eval('srv:operationName', self.namespaces))
etree.SubElement(opname, util.nspath_eval('gco:CharacterString', self.namespaces)).text = _get_resource_opname(operations)
sid = etree.SubElement(svcoupledres, util.nspath_eval('srv:identifier', self.namespaces))
etree.SubElement(sid, util.nspath_eval('gco:CharacterString', self.namespaces)).text = val2
# service operations
if operations:
for i in operations.split(','):
oper = etree.SubElement(resident, util.nspath_eval('srv:containsOperations', self.namespaces))
tmp = etree.SubElement(oper, util.nspath_eval('srv:SV_OperationMetadata', self.namespaces))
tmp2 = etree.SubElement(tmp, util.nspath_eval('srv:operationName', self.namespaces))
etree.SubElement(tmp2, util.nspath_eval('gco:CharacterString', self.namespaces)).text = i
tmp3 = etree.SubElement(tmp, util.nspath_eval('srv:DCP', self.namespaces))
etree.SubElement(tmp3, util.nspath_eval('srv:DCPList', self.namespaces), codeList='%s#DCPList' % CODELIST, codeListValue='HTTPGet').text = 'HTTPGet'
tmp4 = etree.SubElement(tmp, util.nspath_eval('srv:DCP', self.namespaces))
etree.SubElement(tmp4, util.nspath_eval('srv:DCPList', self.namespaces), codeList='%s#DCPList' % CODELIST, codeListValue='HTTPPost').text = 'HTTPPost'
connectpoint = etree.SubElement(tmp, util.nspath_eval('srv:connectPoint', self.namespaces))
onlineres = etree.SubElement(connectpoint, util.nspath_eval('gmd:CI_OnlineResource', self.namespaces))
linkage = etree.SubElement(onlineres, util.nspath_eval('gmd:linkage', self.namespaces))
etree.SubElement(linkage, util.nspath_eval('gmd:URL', self.namespaces)).text = util.getqattr(result, self.context.md_core_model['mappings']['pycsw:Source'])
# operates on resource(s)
if coupledresources:
for i in coupledresources.split(','):
operates_on = etree.SubElement(resident, util.nspath_eval('srv:operatesOn', self.namespaces), uuidref=i)
operates_on.attrib[util.nspath_eval('xlink:href', self.namespaces)] = '%sservice=CSW&version=2.0.2&request=GetRecordById&outputschema=http://www.isotc211.org/2005/gmd&id=%s-%s' % (util.bind_url(self.url), idval, i)
rlinks = util.getqattr(result, self.context.md_core_model['mappings']['pycsw:Links'])
if rlinks:
distinfo = etree.SubElement(node, util.nspath_eval('gmd:distributionInfo', self.namespaces))
distinfo2 = etree.SubElement(distinfo, util.nspath_eval('gmd:MD_Distribution', self.namespaces))
transopts = etree.SubElement(distinfo2, util.nspath_eval('gmd:transferOptions', self.namespaces))
dtransopts = etree.SubElement(transopts, util.nspath_eval('gmd:MD_DigitalTransferOptions', self.namespaces))
for link in rlinks.split('^'):
linkset = link.split(',')
online = etree.SubElement(dtransopts, util.nspath_eval('gmd:onLine', self.namespaces))
online2 = etree.SubElement(online, util.nspath_eval('gmd:CI_OnlineResource', self.namespaces))
linkage = etree.SubElement(online2, util.nspath_eval('gmd:linkage', self.namespaces))
etree.SubElement(linkage, util.nspath_eval('gmd:URL', self.namespaces)).text = linkset[-1]
protocol = etree.SubElement(online2, util.nspath_eval('gmd:protocol', self.namespaces))
etree.SubElement(protocol, util.nspath_eval('gco:CharacterString', self.namespaces)).text = linkset[2]
name = etree.SubElement(online2, util.nspath_eval('gmd:name', self.namespaces))
etree.SubElement(name, util.nspath_eval('gco:CharacterString', self.namespaces)).text = linkset[0]
desc = etree.SubElement(online2, util.nspath_eval('gmd:description', self.namespaces))
etree.SubElement(desc, util.nspath_eval('gco:CharacterString', self.namespaces)).text = linkset[1]
return node
def write_keywords(keywords, nsmap):
    """Build a gmd:MD_Keywords element from a comma-separated keyword string."""
    container = etree.Element(util.nspath_eval('gmd:MD_Keywords', nsmap))
    for term in keywords.split(','):
        kw_el = etree.SubElement(container, util.nspath_eval('gmd:keyword', nsmap))
        char_el = etree.SubElement(kw_el, util.nspath_eval('gco:CharacterString', nsmap))
        char_el.text = term
    return container
def write_extent(bbox, nsmap):
    """Generate a gmd:extent element holding an EX_GeographicBoundingBox.

    Returns None when bbox is None or cannot be parsed as WKT.
    """
    if bbox is None:
        return None
    try:
        bbox2 = util.wkt2geom(bbox)
    except Exception:
        # Malformed WKT: treat as "no extent" rather than failing the record.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        return None
    extent = etree.Element(util.nspath_eval('gmd:extent', nsmap))
    ex_extent = etree.SubElement(extent, util.nspath_eval('gmd:EX_Extent', nsmap))
    ge = etree.SubElement(ex_extent, util.nspath_eval('gmd:geographicElement', nsmap))
    gbb = etree.SubElement(ge, util.nspath_eval('gmd:EX_GeographicBoundingBox', nsmap))
    west = etree.SubElement(gbb, util.nspath_eval('gmd:westBoundLongitude', nsmap))
    east = etree.SubElement(gbb, util.nspath_eval('gmd:eastBoundLongitude', nsmap))
    south = etree.SubElement(gbb, util.nspath_eval('gmd:southBoundLatitude', nsmap))
    north = etree.SubElement(gbb, util.nspath_eval('gmd:northBoundLatitude', nsmap))
    # bbox2 is (minx, miny, maxx, maxy)
    etree.SubElement(west, util.nspath_eval('gco:Decimal', nsmap)).text = str(bbox2[0])
    etree.SubElement(south, util.nspath_eval('gco:Decimal', nsmap)).text = str(bbox2[1])
    etree.SubElement(east, util.nspath_eval('gco:Decimal', nsmap)).text = str(bbox2[2])
    etree.SubElement(north, util.nspath_eval('gco:Decimal', nsmap)).text = str(bbox2[3])
    return extent
def _write_date(dateval, datetypeval, nsmap):
    """Return a gmd:date element wrapping a CI_Date for the given value/type."""
    outer = etree.Element(util.nspath_eval('gmd:date', nsmap))
    ci_date = etree.SubElement(outer, util.nspath_eval('gmd:CI_Date', nsmap))
    inner = etree.SubElement(ci_date, util.nspath_eval('gmd:date', nsmap))
    # A 'T' separator means the value carries a time component.
    dateel = 'gco:DateTime' if dateval.find('T') != -1 else 'gco:Date'
    etree.SubElement(inner, util.nspath_eval(dateel, nsmap)).text = dateval
    dtype = etree.SubElement(ci_date, util.nspath_eval('gmd:dateType', nsmap))
    dtype.append(_write_codelist_element('gmd:CI_DateTypeCode', datetypeval, nsmap))
    return outer
def _get_resource_opname(operations):
for op in operations.split(','):
if op in ['GetMap', 'GetFeature', 'GetCoverage', 'GetObservation']:
return op
return None
def _write_codelist_element(codelist_element, codelist_value, nsmap):
    """Return a codelist element (e.g. gmd:CI_DateTypeCode) with codeList attributes set."""
    # Only the local part of the qualified name is needed for the codeList
    # URL fragment (the namespace prefix was previously split off into an
    # unused variable).
    codelist = codelist_element.split(':')[1]
    element = etree.Element(util.nspath_eval(codelist_element, nsmap),
        codeSpace=CODESPACE, codeList='%s#%s' % (CODELIST, codelist),
        codeListValue=codelist_value)
    element.text = codelist_value
    return element
|
|
#
# Copyright 2014 Cisco Systems,Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import fixtures
from oslotest import base
from ceilometer.network.services import discovery
from ceilometer.network.services import lbaas
from ceilometer.polling import manager
from ceilometer.polling import plugin_base
from ceilometer import service
class _BaseTestLBPollster(base.BaseTestCase):
    """Shared fixture for LBaaS v1 pollster tests.

    Builds a polling AgentManager, forces the neutron-lbaas v1 API, and
    mocks the keystone service catalog so a 'network' endpoint appears
    available to the pollsters.
    """
    def setUp(self):
        super(_BaseTestLBPollster, self).setUp()
        self.addCleanup(mock.patch.stopall)
        self.CONF = service.prepare_service([], [])
        self.manager = manager.AgentManager(0, self.CONF)
        # Force the legacy v1 API so the v1 pollsters are exercised.
        self.CONF.set_override('neutron_lbaas_version',
                               'v1',
                               group='service_types')
        plugin_base._get_keystone = mock.Mock()
        catalog = (plugin_base._get_keystone.session.auth.get_access.
                   return_value.service_catalog)
        # Pretend a 'network' endpoint exists in the catalog.
        catalog.get_endpoints = mock.MagicMock(
            return_value={'network': mock.ANY})
class TestLBPoolPollster(_BaseTestLBPollster):
    """Tests for lbaas.LBPoolPollster: sample metadata, status-to-volume
    mapping, meter naming and pool discovery."""
    def setUp(self):
        super(TestLBPoolPollster, self).setUp()
        self.pollster = lbaas.LBPoolPollster(self.CONF)
        fake_pools = self.fake_get_pools()
        self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.'
                                           'pool_get_all',
                                           return_value=fake_pools))
    @staticmethod
    def fake_get_pools():
        # One pool per status of interest; the 'error' pool must be excluded
        # by discovery (see test_pool_discovery).
        return [{'status': 'ACTIVE',
                 'lb_method': 'ROUND_ROBIN',
                 'protocol': 'HTTP',
                 'description': '',
                 'health_monitors': [],
                 'members': [],
                 'provider': 'haproxy',
                 'status_description': None,
                 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
                 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
                 'name': 'mylb',
                 'admin_state_up': True,
                 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'health_monitors_status': []},
                {'status': 'INACTIVE',
                 'lb_method': 'ROUND_ROBIN',
                 'protocol': 'HTTP',
                 'description': '',
                 'health_monitors': [],
                 'members': [],
                 'provider': 'haproxy',
                 'status_description': None,
                 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
                 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
                 'name': 'mylb02',
                 'admin_state_up': True,
                 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'health_monitors_status': []},
                {'status': 'PENDING_CREATE',
                 'lb_method': 'ROUND_ROBIN',
                 'protocol': 'HTTP',
                 'description': '',
                 'health_monitors': [],
                 'members': [],
                 'provider': 'haproxy',
                 'status_description': None,
                 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd',
                 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
                 'name': 'mylb03',
                 'admin_state_up': True,
                 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'health_monitors_status': []},
                {'status': 'UNKNOWN',
                 'lb_method': 'ROUND_ROBIN',
                 'protocol': 'HTTP',
                 'description': '',
                 'health_monitors': [],
                 'members': [],
                 'provider': 'haproxy',
                 'status_description': None,
                 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd',
                 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
                 'name': 'mylb03',
                 'admin_state_up': True,
                 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'health_monitors_status': []},
                {'status': 'error',
                 'lb_method': 'ROUND_ROBIN',
                 'protocol': 'HTTP',
                 'description': '',
                 'health_monitors': [],
                 'members': [],
                 'provider': 'haproxy',
                 'status_description': None,
                 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd',
                 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
                 'name': 'mylb_error',
                 'admin_state_up': True,
                 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'health_monitors_status': []},
                ]
    def test_pool_get_samples(self):
        samples = list(self.pollster.get_samples(
            self.manager, {},
            resources=self.fake_get_pools()))
        # Only four of the five fixture pools yield samples.
        self.assertEqual(4, len(samples))
        for field in self.pollster.FIELDS:
            self.assertEqual(self.fake_get_pools()[0][field],
                             samples[0].resource_metadata[field])
    def test_pool_volume(self):
        samples = list(self.pollster.get_samples(
            self.manager, {},
            resources=self.fake_get_pools()))
        # Status maps to volume: ACTIVE -> 1, INACTIVE -> 0,
        # PENDING_CREATE -> 2.
        self.assertEqual(1, samples[0].volume)
        self.assertEqual(0, samples[1].volume)
        self.assertEqual(2, samples[2].volume)
    def test_get_pool_meter_names(self):
        samples = list(self.pollster.get_samples(
            self.manager, {},
            resources=self.fake_get_pools()))
        self.assertEqual(set(['network.services.lb.pool']),
                         set([s.name for s in samples]))
    def test_pool_discovery(self):
        # Discovery must return every pool except the one in 'error' state.
        discovered_pools = discovery.LBPoolsDiscovery(
            self.CONF).discover(self.manager)
        self.assertEqual(4, len(discovered_pools))
        for pool in self.fake_get_pools():
            if pool['status'] == 'error':
                self.assertNotIn(pool, discovered_pools)
            else:
                self.assertIn(pool, discovered_pools)
class TestLBVipPollster(_BaseTestLBPollster):
    """Tests for lbaas.LBVipPollster: sample metadata, status-to-volume
    mapping, meter naming and VIP discovery."""
    def setUp(self):
        super(TestLBVipPollster, self).setUp()
        self.pollster = lbaas.LBVipPollster(self.CONF)
        fake_vips = self.fake_get_vips()
        self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.'
                                           'vip_get_all',
                                           return_value=fake_vips))
    @staticmethod
    def fake_get_vips():
        # One VIP per status of interest; the 'error' VIP must be excluded
        # by discovery (see test_vip_discovery).
        return [{'status': 'ACTIVE',
                 'status_description': None,
                 'protocol': 'HTTP',
                 'description': '',
                 'admin_state_up': True,
                 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'connection_limit': -1,
                 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
                 'session_persistence': None,
                 'address': '10.0.0.2',
                 'protocol_port': 80,
                 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291',
                 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
                 'name': 'myvip'},
                {'status': 'INACTIVE',
                 'status_description': None,
                 'protocol': 'HTTP',
                 'description': '',
                 'admin_state_up': True,
                 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'connection_limit': -1,
                 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
                 'session_persistence': None,
                 'address': '10.0.0.3',
                 'protocol_port': 80,
                 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291',
                 'id': 'ba6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
                 'name': 'myvip02'},
                {'status': 'PENDING_CREATE',
                 'status_description': None,
                 'protocol': 'HTTP',
                 'description': '',
                 'admin_state_up': True,
                 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'connection_limit': -1,
                 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
                 'session_persistence': None,
                 'address': '10.0.0.4',
                 'protocol_port': 80,
                 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291',
                 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
                 'name': 'myvip03'},
                {'status': 'UNKNOWN',
                 'status_description': None,
                 'protocol': 'HTTP',
                 'description': '',
                 'admin_state_up': True,
                 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'connection_limit': -1,
                 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
                 'session_persistence': None,
                 'address': '10.0.0.8',
                 'protocol_port': 80,
                 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291',
                 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
                 'name': 'myvip03'},
                {'status': 'error',
                 'status_description': None,
                 'protocol': 'HTTP',
                 'description': '',
                 'admin_state_up': True,
                 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'connection_limit': -1,
                 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
                 'session_persistence': None,
                 'address': '10.0.0.8',
                 'protocol_port': 80,
                 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291',
                 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
                 'name': 'myvip_error'},
                ]
    def test_vip_get_samples(self):
        samples = list(self.pollster.get_samples(
            self.manager, {},
            resources=self.fake_get_vips()))
        # Only four of the five fixture VIPs yield samples.
        self.assertEqual(4, len(samples))
        for field in self.pollster.FIELDS:
            self.assertEqual(self.fake_get_vips()[0][field],
                             samples[0].resource_metadata[field])
    def test_pool_volume(self):
        # NOTE(review): misnamed -- this exercises VIPs, not pools.
        # Status maps to volume: ACTIVE -> 1, INACTIVE -> 0,
        # PENDING_CREATE -> 2.
        samples = list(self.pollster.get_samples(
            self.manager, {},
            resources=self.fake_get_vips()))
        self.assertEqual(1, samples[0].volume)
        self.assertEqual(0, samples[1].volume)
        self.assertEqual(2, samples[2].volume)
    def test_get_vip_meter_names(self):
        samples = list(self.pollster.get_samples(
            self.manager, {},
            resources=self.fake_get_vips()))
        self.assertEqual(set(['network.services.lb.vip']),
                         set([s.name for s in samples]))
    def test_vip_discovery(self):
        # Discovery must return every VIP except the one in 'error' state.
        discovered_vips = discovery.LBVipsDiscovery(
            self.CONF).discover(self.manager)
        self.assertEqual(4, len(discovered_vips))
        for pool in self.fake_get_vips():
            if pool['status'] == 'error':
                self.assertNotIn(pool, discovered_vips)
            else:
                self.assertIn(pool, discovered_vips)
class TestLBMemberPollster(_BaseTestLBPollster):
    """Tests for lbaas.LBMemberPollster: sample metadata, status-to-volume
    mapping, meter naming and member discovery."""
    def setUp(self):
        super(TestLBMemberPollster, self).setUp()
        self.pollster = lbaas.LBMemberPollster(self.CONF)
        fake_members = self.fake_get_members()
        self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.'
                                           'member_get_all',
                                           return_value=fake_members))
    @staticmethod
    def fake_get_members():
        # One member per status of interest; the 'error' member must be
        # excluded by discovery (see test_members_discovery).
        return [{'status': 'ACTIVE',
                 'protocol_port': 80,
                 'weight': 1,
                 'admin_state_up': True,
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
                 'address': '10.0.0.3',
                 'status_description': None,
                 'id': '290b61eb-07bc-4372-9fbf-36459dd0f96b'},
                {'status': 'INACTIVE',
                 'protocol_port': 80,
                 'weight': 1,
                 'admin_state_up': True,
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
                 'address': '10.0.0.5',
                 'status_description': None,
                 'id': '2456661eb-07bc-4372-9fbf-36459dd0f96b'},
                {'status': 'PENDING_CREATE',
                 'protocol_port': 80,
                 'weight': 1,
                 'admin_state_up': True,
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
                 'address': '10.0.0.6',
                 'status_description': None,
                 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'},
                {'status': 'UNKNOWN',
                 'protocol_port': 80,
                 'weight': 1,
                 'admin_state_up': True,
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
                 'address': '10.0.0.6',
                 'status_description': None,
                 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'},
                {'status': 'error',
                 'protocol_port': 80,
                 'weight': 1,
                 'admin_state_up': True,
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
                 'address': '10.0.0.6',
                 'status_description': None,
                 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'},
                ]
    def test_get_samples_not_empty(self):
        samples = list(self.pollster.get_samples(
            self.manager, {},
            self.fake_get_members()))
        # Only four of the five fixture members yield samples.
        self.assertEqual(4, len(samples))
        for field in self.pollster.FIELDS:
            self.assertEqual(self.fake_get_members()[0][field],
                             samples[0].resource_metadata[field])
    def test_pool_volume(self):
        # NOTE(review): misnamed -- this exercises members, not pools.
        # Status maps to volume: ACTIVE -> 1, INACTIVE -> 0,
        # PENDING_CREATE -> 2.
        samples = list(self.pollster.get_samples(
            self.manager, {},
            self.fake_get_members()))
        self.assertEqual(1, samples[0].volume)
        self.assertEqual(0, samples[1].volume)
        self.assertEqual(2, samples[2].volume)
    def test_get_meter_names(self):
        samples = list(self.pollster.get_samples(
            self.manager, {},
            self.fake_get_members()))
        self.assertEqual(set(['network.services.lb.member']),
                         set([s.name for s in samples]))
    def test_members_discovery(self):
        # Discovery must return every member except the one in 'error' state.
        discovered_members = discovery.LBMembersDiscovery(
            self.CONF).discover(self.manager)
        self.assertEqual(4, len(discovered_members))
        for pool in self.fake_get_members():
            if pool['status'] == 'error':
                self.assertNotIn(pool, discovered_members)
            else:
                self.assertIn(pool, discovered_members)
class TestLBHealthProbePollster(_BaseTestLBPollster):
    """Tests for lbaas.LBHealthMonitorPollster: sample metadata, meter
    naming and health-monitor discovery."""
    def setUp(self):
        super(TestLBHealthProbePollster, self).setUp()
        self.pollster = lbaas.LBHealthMonitorPollster(self.CONF)
        fake_health_monitor = self.fake_get_health_monitor()
        self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.'
                                           'health_monitor_get_all',
                                           return_value=fake_health_monitor))
    @staticmethod
    def fake_get_health_monitor():
        # A single PING health monitor; health monitors have no status field,
        # so every one is expected to be sampled and discovered.
        return [{'id': '34ae33e1-0035-49e2-a2ca-77d5d3fab365',
                 'admin_state_up': True,
                 'tenant_id': "d5d2817dae6b42159be9b665b64beb0e",
                 'delay': 2,
                 'max_retries': 5,
                 'timeout': 5,
                 'pools': [],
                 'type': 'PING',
                 }]
    def test_get_samples_not_empty(self):
        samples = list(self.pollster.get_samples(
            self.manager, {},
            self.fake_get_health_monitor()))
        self.assertEqual(1, len(samples))
        for field in self.pollster.FIELDS:
            self.assertEqual(self.fake_get_health_monitor()[0][field],
                             samples[0].resource_metadata[field])
    def test_get_meter_names(self):
        samples = list(self.pollster.get_samples(
            self.manager, {},
            self.fake_get_health_monitor()))
        self.assertEqual(set(['network.services.lb.health_monitor']),
                         set([s.name for s in samples]))
    def test_probes_discovery(self):
        discovered_probes = discovery.LBHealthMonitorsDiscovery(
            self.CONF).discover(self.manager)
        self.assertEqual(discovered_probes, self.fake_get_health_monitor())
class TestLBStatsPollster(_BaseTestLBPollster):
    """Tests for the per-pool statistics pollsters (bytes/connections).

    Each pollster is driven through _check_get_samples with the expected
    meter name, volume (taken from fake_pool_stats) and sample type.
    """
    def setUp(self):
        super(TestLBStatsPollster, self).setUp()
        fake_pool_stats = self.fake_pool_stats()
        self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.'
                                           'pool_stats',
                                           return_value=fake_pool_stats))
        fake_pools = self.fake_get_pools()
        self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.'
                                           'pool_get_all',
                                           return_value=fake_pools))
    @staticmethod
    def fake_get_pools():
        # A single ACTIVE pool is enough: stats are fetched per pool.
        return [{'status': 'ACTIVE',
                 'lb_method': 'ROUND_ROBIN',
                 'protocol': 'HTTP',
                 'description': '',
                 'health_monitors': [],
                 'members': [],
                 'provider': 'haproxy',
                 'status_description': None,
                 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
                 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
                 'name': 'mylb',
                 'admin_state_up': True,
                 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'health_monitors_status': []},
                ]
    @staticmethod
    def fake_pool_stats():
        return {'stats': {'active_connections': 2,
                          'bytes_in': 1,
                          'bytes_out': 3,
                          'total_connections': 4
                          }
                }
    def _check_get_samples(self, factory, sample_name, expected_volume,
                           expected_type):
        # Generic driver: instantiate the pollster, poll once, and verify
        # exactly one sample with the expected name, volume and type, and
        # that the shared stats cache was populated.
        pollster = factory(self.CONF)
        cache = {}
        samples = list(pollster.get_samples(self.manager, cache,
                                            self.fake_get_pools()))
        self.assertEqual(1, len(samples))
        self.assertIsNotNone(samples)
        self.assertIn('lbstats', cache)
        self.assertEqual(set([sample_name]), set([s.name for s in samples]))
        match = [s for s in samples if s.name == sample_name]
        self.assertEqual(1, len(match), 'missing counter %s' % sample_name)
        self.assertEqual(expected_volume, match[0].volume)
        self.assertEqual(expected_type, match[0].type)
    def test_lb_total_connections(self):
        self._check_get_samples(lbaas.LBTotalConnectionsPollster,
                                'network.services.lb.total.connections',
                                4, 'cumulative')
    def test_lb_active_connections(self):
        self._check_get_samples(lbaas.LBActiveConnectionsPollster,
                                'network.services.lb.active.connections',
                                2, 'gauge')
    def test_lb_incoming_bytes(self):
        self._check_get_samples(lbaas.LBBytesInPollster,
                                'network.services.lb.incoming.bytes',
                                1, 'gauge')
    def test_lb_outgoing_bytes(self):
        self._check_get_samples(lbaas.LBBytesOutPollster,
                                'network.services.lb.outgoing.bytes',
                                3, 'gauge')
|
|
from __future__ import print_function
# Keep a handle on the builtin so printing still works where the name
# `print` gets rebound locally (see newton()).
old_print = print
from numpy import zeros_like, zeros
# NOTE(review): this `solve` import is immediately shadowed by the
# loop-based solve() defined below -- confirm that is intentional.
from numpy.linalg import solve
import numpy
from numba import guvectorize
from numba import double
def solve(m, sol):
    # seems to segfault on windows
    # In-place Gauss-Jordan elimination with partial pivoting over the
    # augmented matrix `m` (h rows, w columns, w > h; the last column is the
    # right-hand side). The solution is written into `sol`.
    # Deliberately written with bare loops (no numpy calls) so that
    # numba.guvectorize can compile it -- see serial_solve_numba below.
    h, w = m.shape
    for y in range(0, h):
        maxrow = y
        for y2 in range(y + 1, h): # Find max pivot
            if abs(m[y2, y]) > abs(m[maxrow, y]):
                maxrow = y2
        # Swap row y with the pivot row (element by element, numba-friendly).
        for y2 in range(0, w):
            t = m[y, y2]
            m[y, y2] = m[maxrow, y2]
            m[maxrow, y2] = t
        for y2 in range(y + 1, h): # Eliminate column y
            c = m[y2, y] / m[y, y]
            for x in range(y, w):
                m[y2, x] -= m[y, x] * c
    for y in range(h - 1, 0 - 1, -1): # Backsubstitute
        c = m[y, y]
        for y2 in range(0, y):
            for x in range(w - 1, y - 1, -1):
                m[y2, x] -= m[y, x] * m[y2, y] / c
        m[y, y] /= c
        for x in range(h, w): # Normalize row y
            m[y, x] /= c
    # Copy the normalized right-hand-side column out as the solution.
    for y in range(h):
        sol[y] = m[y, w - 1]
# Compile the loop-based solver into a gufunc: each (m, n) augmented matrix
# in a stack yields an m-vector solution.
serial_solve_numba = guvectorize("void(f8[:,:], f8[:])", "(m,n)->(m)")(solve)
# Re-import numpy's solver under a distinct name, since the name `solve` now
# refers to the function defined above.
from numpy.linalg import solve as linalg_solve
def serial_solve(A, B, diagnose=True):
    """Solve the stacked linear systems A[i] @ x = B[i] for every i.

    Parameters
    ----------
    A : ndarray, shape (N, n, n)
        Stack of square coefficient matrices.
    B : ndarray, shape (N, n)
        Stack of right-hand sides.
    diagnose : bool
        When True, solve each system with numpy.linalg.solve and raise a
        descriptive error identifying the failing system; when False, hand
        the whole stack to the numba-compiled gufunc (faster, no
        per-system diagnostics).

    Returns
    -------
    ndarray, shape (N, n)
    """
    if diagnose:
        sol = zeros_like(B)
        for i in range(sol.shape[0]):
            try:
                sol[i, :] = linalg_solve(A[i, :, :], B[i, :])
            except Exception:
                # Was a bare `except:`; narrowed so KeyboardInterrupt etc.
                # still propagate.
                # TODO: use a dedicated exception type instead of Exception.
                a = Exception("Error solving point {}".format(i))
                a.x = B[i, :]
                a.J = A[i, :, :]
                a.i = i
                raise a
        return sol
    # Fast path: build augmented matrices [A | B] and solve the whole stack.
    M = numpy.concatenate([A, B[:, :, None]], axis=2)
    sol = numpy.zeros_like(B)
    serial_solve_numba(M, sol)
    return sol
#
# return sol
import time
def newton(f, x, verbose=False, tol=1e-6, maxit=5, jactype="serial"):
    """Solve the nonlinear system f(x) = 0 with damped Newton iterations.

    Parameters
    ----------
    f : callable
        Returns a pair [v, dv]: the residual and its Jacobian at x.
    x : ndarray
        Initial guess.
    verbose : bool
        Print per-iteration diagnostics.
    tol : float
        Convergence tolerance on max(|v|).
    maxit : int
        Maximum number of Newton iterations.
    jactype : str
        'sparse' (scipy spsolve), 'full' (numpy.linalg.solve); any other
        value selects the stacked serial solver.

    Returns
    -------
    [x, it]
        Final iterate and the number of iterations performed.
    """
    # Route diagnostics through a helper instead of rebinding the builtin
    # `print` (the original shadowed it with a lambda).
    def _log(txt):
        if verbose:
            print(txt)
    it = 0
    converged = False
    maxbacksteps = 30
    if jactype == "sparse":
        from scipy.sparse.linalg import spsolve as solve
    elif jactype == "full":
        from numpy.linalg import solve
    else:
        solve = serial_solve
    while it < maxit and not converged:
        v, dv = f(x)
        error_0 = abs(v).max()
        if error_0 < tol:
            _log(
                "> System was solved after iteration {}. Residual={}".format(
                    it, error_0
                )
            )
            converged = True
        else:
            it += 1
            dx = solve(dv, v)
            # Backtracking line search: halve the step until the residual
            # decreases (at most maxbacksteps halvings).
            for bck in range(maxbacksteps):
                xx = x - dx * (2 ** (-bck))
                vm = f(xx)[0]
                err = abs(vm).max()
                if err < error_0:
                    break
            x = xx
            _log("\t> {} | {} | {}".format(it, err, bck))
    if not converged:
        import warnings
        warnings.warn("Did not converge")
    return [x, it]
serial_newton = newton
from numpy import sqrt, finfo, inf
from numpy import isinf, newaxis, diag, zeros
def SerialDifferentiableFunction(f, epsilon=1e-8):
    """Wrap `f` so it returns [value, jacobian], with the jacobian computed
    by one-sided finite differences of step `epsilon` along each input
    column. `f` must map an (N, n_x) array to an (N, n_v) array."""
    def df(x):
        base = f(x)
        n_obs, n_out = base.shape
        assert x.shape[0] == n_obs
        n_in = x.shape[1]
        jac = zeros((n_obs, n_out, n_in))
        for col in range(n_in):
            shifted = x.copy()
            shifted[:, col] += epsilon
            jac[:, :, col] = (f(shifted) - base) / epsilon
        return [base, jac]
    return df
def test_serial_solve():
    """Smoke-test serial_solve on random stacked systems, printing the
    inputs, the result, and numpy's answer for the first system."""
    count = 10
    import numpy
    lhs = numpy.random.random((count, 2, 2))
    rhs = numpy.random.random((count, 2))
    print(lhs)
    print(rhs)
    result = serial_solve(lhs, rhs)
    for label, arr in (("A", lhs), ("B", rhs), ("out", result)):
        print(label)
        print(arr)
    import numpy.linalg
    print(numpy.linalg.solve(lhs[0, :, :], rhs[0, :]))
if __name__ == "__main__":
    test_serial_solve()
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import cStringIO
from py_vulcanize import resource_loader
def _FindAllFilesRecursive(source_paths):
all_filenames = set()
for source_path in source_paths:
for dirpath, _, filenames in os.walk(source_path):
for f in filenames:
if f.startswith('.'):
continue
x = os.path.abspath(os.path.join(dirpath, f))
all_filenames.add(x)
return all_filenames
class AbsFilenameList(object):
  """An ordered collection of absolute filenames with O(1) membership tests.

  If willDirtyCallback is set, it fires before every mutation.
  """

  def __init__(self, willDirtyCallback):
    self._willDirtyCallback = willDirtyCallback
    self._filenames = []
    self._filenames_set = set()

  def _WillBecomeDirty(self):
    if self._willDirtyCallback:
      self._willDirtyCallback()

  def _Add(self, filename):
    # Keeps the ordered list and the membership set in sync.
    self._filenames.append(filename)
    self._filenames_set.add(filename)

  def append(self, filename):
    assert os.path.isabs(filename)
    self._WillBecomeDirty()
    self._Add(filename)

  def extend(self, iterable):
    self._WillBecomeDirty()
    for filename in iterable:
      assert os.path.isabs(filename)
      self._Add(filename)

  def appendRel(self, basedir, filename):
    assert os.path.isabs(basedir)
    self._WillBecomeDirty()
    self._Add(os.path.abspath(os.path.join(basedir, filename)))

  def extendRel(self, basedir, iterable):
    self._WillBecomeDirty()
    assert os.path.isabs(basedir)
    for filename in iterable:
      self._Add(os.path.abspath(os.path.join(basedir, filename)))

  def __contains__(self, x):
    return x in self._filenames_set

  def __len__(self):
    return len(self._filenames)

  def __iter__(self):
    return iter(self._filenames)

  def __repr__(self):
    return repr(self._filenames)

  def __str__(self):
    return str(self._filenames)
class Project(object):
py_vulcanize_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..'))
def __init__(self, source_paths=None):
"""
source_paths: A list of top-level directories in which modules and raw
scripts can be found. Module paths are relative to these directories.
"""
self._loader = None
self._frozen = False
self.source_paths = AbsFilenameList(self._WillPartOfPathChange)
if source_paths is not None:
self.source_paths.extend(source_paths)
def Freeze(self):
self._frozen = True
def _WillPartOfPathChange(self):
if self._frozen:
raise Exception('The project is frozen. You cannot edit it now')
self._loader = None
@staticmethod
def FromDict(d):
return Project(d['source_paths'])
def AsDict(self):
return {
'source_paths': list(self.source_paths)
}
def __repr__(self):
return "Project(%s)" % repr(self.source_paths)
def AddSourcePath(self, path):
self.source_paths.append(path)
@property
def loader(self):
if self._loader is None:
self._loader = resource_loader.ResourceLoader(self)
return self._loader
def ResetLoader(self):
self._loader = None
def _Load(self, filenames):
return [self.loader.LoadModule(module_filename=filename) for
filename in filenames]
def LoadModule(self, module_name=None, module_filename=None):
return self.loader.LoadModule(module_name=module_name,
module_filename=module_filename)
def CalcLoadSequenceForModuleNames(self, module_names):
modules = [self.loader.LoadModule(module_name=name) for
name in module_names]
return self.CalcLoadSequenceForModules(modules)
def CalcLoadSequenceForModules(self, modules):
already_loaded_set = set()
load_sequence = []
for m in modules:
m.ComputeLoadSequenceRecursive(load_sequence, already_loaded_set)
return load_sequence
def GetDepsGraphFromModuleNames(self, module_names):
modules = [self.loader.LoadModule(module_name=name) for
name in module_names]
return self.GetDepsGraphFromModules(modules)
def GetDepsGraphFromModules(self, modules):
load_sequence = self.CalcLoadSequenceForModules(modules)
g = _Graph()
for m in load_sequence:
g.AddModule(m)
for dep in m.dependent_modules:
g.AddEdge(m, dep.id)
# FIXME: _GetGraph is not defined. Maybe `return g` is intended?
return _GetGraph(load_sequence)
def GetDominatorGraphForModulesNamed(self, module_names, load_sequence):
modules = [self.loader.LoadModule(module_name=name)
for name in module_names]
return self.GetDominatorGraphForModules(modules, load_sequence)
def GetDominatorGraphForModules(self, start_modules, load_sequence):
  """Return dot text for the reverse-dependency (referrer) graph.

  Starting from `start_modules`, walks *backwards* along dependency
  edges (module -> modules that depend on it) and emits every module
  reachable that way, with edges drawn referrer -> referent.
  """
  # NOTE(review): modules_by_id is built but never read in this method.
  modules_by_id = {}
  for m in load_sequence:
    modules_by_id[m.id] = m
  # Invert dependency edges: dep -> [modules that directly require dep].
  module_referrers = collections.defaultdict(list)
  for m in load_sequence:
    for dep in m.dependent_modules:
      module_referrers[dep].append(m)
  # Now start at the top module and reverse.
  visited = set()
  g = _Graph()
  pending = collections.deque()
  pending.extend(start_modules)
  # Depth-first traversal (pop from the right end of the deque).
  while len(pending):
    cur = pending.pop()
    g.AddModule(cur)
    visited.add(cur)
    for out_dep in module_referrers[cur]:
      if out_dep in visited:
        continue
      g.AddEdge(out_dep, cur)
      visited.add(out_dep)
      pending.append(out_dep)
  # Visited -> Dot
  return g.GetDot()
class _Graph(object):
  """Accumulates graphviz node/edge statements and renders a digraph.

  Python 2 only: relies on cStringIO and dict.iteritems.
  """
  def __init__(self):
    self.nodes = []
    self.edges = []
  def AddModule(self, m):
    # Serialize the module's JS into an in-memory file purely to measure
    # its size (f.tell()) for the node label.
    f = cStringIO.StringIO()
    m.AppendJSContentsToFile(f, False, None)
    attrs = {
        'label': '%s (%i)' % (m.name, f.tell())
    }
    f.close()
    attr_items = ['%s="%s"' % (x, y) for x, y in attrs.iteritems()]
    node = 'M%i [%s];' % (m.id, ','.join(attr_items))
    self.nodes.append(node)
  def AddEdge(self, mFrom, mTo):
    # Both arguments must be module objects exposing an `id` attribute.
    edge = 'M%i -> M%i;' % (mFrom.id, mTo.id)
    self.edges.append(edge)
  def GetDot(self):
    """Render the accumulated graph as graphviz 'dot' source text."""
    return 'digraph deps {\n\n%s\n\n%s\n}\n' % (
        '\n'.join(self.nodes), '\n'.join(self.edges))
|
|
#!/usr/bin/env python
import argparse
import json
import os
import sys
from pathlib import Path
import numpy as np
GOLDEN_ROOT = Path('ci/golden')
DEFAULT_BUILD_URL = 'https://buildkite.com/plaidml'
DEFAULT_PERF_THRESHOLD = 0.7
class RawResult:
    """Raw artifacts loaded from one test output directory.

    Reads `result.json` (timings / exception info) and `result.npy`
    (correctness data) when present; anything missing stays None.
    """

    def __init__(self, path):
        self.path = path
        meta = {}
        json_path = path / 'result.json'
        if json_path.exists():
            try:
                with json_path.open() as fp:
                    meta = json.load(fp)
            except Exception as ex:
                # An unreadable result.json is itself recorded as a failure.
                meta = {'exception': str(ex)}
        self.exception = meta.get('exception')
        self.compile_duration = meta.get('compile_duration')
        # Newer runs report 'duration_per_example'; fall back to the old key.
        self.execution_duration = meta.get(
            'duration_per_example', meta.get('execution_duration'))
        self.np_data = None
        npy_path = path / 'result.npy'
        if npy_path.exists():
            try:
                self.np_data = np.load(npy_path)
            except Exception as ex:
                print(' Exception:', ex)
                if self.exception is None:
                    self.exception = str(ex)

    def exists(self):
        """True if the result directory itself exists on disk."""
        return self.path.exists()
class TestResult:
    """Accumulates errors/failures/skips for one test and derives a status."""

    def __init__(self, skip, compare):
        self.errors = []
        self.failures = []
        self.skips = []
        self.expected = None
        self.skipped = skip
        self.compare = compare

    def add_error(self, msg):
        print(' ERROR:', msg)
        self.errors.append(msg)

    def add_failure(self, msg):
        print(' FAIL:', msg)
        self.failures.append(msg)

    def add_skip(self, msg):
        print(' SKIP:', msg)
        self.skips.append(msg)

    def set_expected(self, msg):
        print(' SKIP:', msg)
        self.expected = msg

    def status(self):
        """One of ERROR/FAIL/SKIP/PASS; `skipped` downgrades problems to SKIP."""
        if self.errors:
            return 'SKIP' if self.skipped else 'ERROR'
        if self.failures:
            return 'SKIP' if self.skipped else 'FAIL'
        if self.expected or self.skips:
            return 'SKIP'
        return 'PASS'

    def reason(self):
        """All recorded messages, one per line, tagged by severity."""
        parts = ['ERROR: ' + m for m in self.errors]
        parts += ['FAIL: ' + m for m in self.failures]
        parts += ['SKIP: ' + m for m in self.skips]
        if self.expected:
            parts.append('SKIP: ' + self.expected)
        return '\n'.join(parts)

    def is_ok(self):
        return self.status() in ('SKIP', 'PASS')
class Result:
    """Pairs the current run's raw results with the golden reference.

    `cur` is loaded from `root/path`, `ref` from the checked-in golden
    tree.  `ratio` (ref duration / cur duration) is > 1 when the current
    run is faster than the reference.
    """

    def __init__(self, root, path):
        self.path = path
        # The current results
        self.cur = RawResult(root / path)
        # The last results matching the platform of the current results
        self.ref = RawResult(GOLDEN_ROOT / path)
        self.ratio = None
        if self.cur.execution_duration and self.ref.execution_duration:
            self.ratio = self.ref.execution_duration / self.cur.execution_duration

    def __repr__(self):
        return '<Result({})>'.format(self.path)

    def check_result(self, skip, compare, precision, perf_threshold, expected, correct):
        """Run all checks and return a populated TestResult.

        Args:
          skip: downgrade any error/failure to SKIP in the final status.
          compare: compare performance (and correctness) against golden.
          precision: 'untested', 'high' or 'low' correctness tolerance.
          perf_threshold: minimum acceptable ref/cur duration ratio.
          expected: substring expected in a raised exception, if any.
          correct: True if a correctness mismatch is a failure; False if
            it is merely recorded as an (expected) skip.
        """
        print(self.path, self.cur.compile_duration, self.ref.execution_duration,
              self.cur.execution_duration, self.ratio)
        if not self.cur.exists():
            print(' missing cur')
        if not compare and not self.ref.exists():
            print(' missing ref')
        test_result = TestResult(skip, compare)
        try:
            if self.cur.exception:
                first_line = self.cur.exception.split('\n')[0]
                if expected:
                    if expected not in self.cur.exception:
                        test_result.add_failure('Expected: %r' % expected)
                    else:
                        test_result.set_expected(first_line)
                else:
                    test_result.add_failure(first_line)
            elif compare:
                if not self.ref.execution_duration:
                    test_result.add_error('Missing golden duration')
                elif not self.cur.execution_duration:
                    test_result.add_error('Missing result duration')
                else:
                    if self.ratio < perf_threshold:
                        test_result.add_failure('Performance regression')
                base_output = self.ref.np_data
                if precision != 'untested':
                    # If base_output is None and precision == 'untested' then
                    # this is interpreted to mean no correctness test is desired;
                    # so no error that it's missing in result.
                    if base_output is None:
                        test_result.add_error('Golden correctness data not found')
                    else:
                        if self.cur.np_data is None:
                            test_result.add_error('Missing correctness test output')
                        else:
                            self._check_correctness(base_output, self.cur.np_data, test_result,
                                                    precision, correct)
        except Exception as ex:
            import traceback
            traceback.print_exc()
            # Any unexpected exception in the checker itself is an error.
            test_result.add_error(str(ex))
        return test_result

    def _check_correctness(self, base_output, cur_output, test_result, precision,
                           should_be_correct):
        """Compare cur_output to the golden base_output within tolerance."""
        # TODO: Parameterize relative and absolute error tolerance
        if precision == 'high':
            rel_err = 1e-04
        elif precision == 'low':
            rel_err = 0.2
        else:
            test_result.add_error('Unexpected precision {!r} in test suite'.format(precision))
            # BUG FIX: rel_err is undefined for unknown precision values; the
            # original fell through and raised NameError below.  Record the
            # error and bail out instead.
            return
        correct = np.allclose(base_output, cur_output, rtol=rel_err, atol=2e-05)
        # This duplicates allclose calculation for more detailed report
        relative_error = ((rel_err * np.absolute(base_output - cur_output)) /
                          (1e-06 + rel_err * np.absolute(cur_output)))
        max_error = np.amax(relative_error)
        max_abs_error = np.amax(np.absolute(base_output - cur_output))
        correct_entries = 0
        incorrect_entries = 0
        for x in np.nditer(relative_error):
            if x > rel_err:
                incorrect_entries += 1
            else:
                correct_entries += 1
        try:
            fail_ratio = incorrect_entries / float(correct_entries + incorrect_entries)
        except ZeroDivisionError:
            fail_ratio = 'Undefined'
        if not correct:
            if should_be_correct:
                msg = 'Correctness failure: {}, max_abs_error: {}, fail rate: {}'
                test_result.add_failure(msg.format(max_error, max_abs_error, fail_ratio))
            else:
                msg = 'Correctness failure (expected): {}, max_abs_error: {}, fail rate: {}'
                test_result.add_skip(msg.format(max_error, max_abs_error, fail_ratio))
def parse_bool(text):
    """argparse `type=` helper: parse an explicit true/false token.

    BUG FIX: the original used `type=bool`, but bool('False') is True —
    any non-empty string parsed as True, silently inverting flags such
    as `--compare False`.
    """
    value = str(text).strip().lower()
    if value in ('true', '1', 'yes', 'y'):
        return True
    if value in ('false', '0', 'no', 'n', ''):
        return False
    raise argparse.ArgumentTypeError('expected a boolean, got {!r}'.format(text))


def main():
    """CLI entry point: check one result directory and write report.json."""
    parser = argparse.ArgumentParser()
    parser.add_argument('root', type=Path)
    parser.add_argument('path', type=Path)
    parser.add_argument('--skip', type=parse_bool, default=False)
    parser.add_argument('--compare', type=parse_bool, default=True)
    parser.add_argument('--precision', choices=['untested', 'high', 'low'], default='untested')
    parser.add_argument('--threshold', type=float, default=DEFAULT_PERF_THRESHOLD)
    parser.add_argument('--expected', type=str)
    parser.add_argument('--correct', type=parse_bool, default=True)
    args = parser.parse_args()
    # Link the report back to the exact CI job when running under Buildkite.
    build_url = os.getenv('BUILDKITE_BUILD_URL')
    if build_url:
        job_id = os.getenv('BUILDKITE_JOB_ID')
        build_url = f'{build_url}#{job_id}'
    else:
        build_url = DEFAULT_BUILD_URL
    result = Result(args.root, args.path)
    test_result = result.check_result(args.skip, args.compare, args.precision, args.threshold,
                                      args.expected, args.correct)
    report = {
        'build_url': build_url,
        'compare': test_result.compare,
        'errors': test_result.errors,
        'failures': test_result.failures,
        'ratio': result.ratio,
        'reason': test_result.reason(),
        'status': test_result.status(),
        'compile_duration': result.cur.compile_duration,
        'cur.execution_duration': result.cur.execution_duration,
        'ref.execution_duration': result.ref.execution_duration,
    }
    with (args.root / args.path / 'report.json').open('w') as fp:
        json.dump(report, fp)
    # A non-zero exit code tells CI the test did not pass.
    if not test_result.is_ok():
        sys.exit(1)


if __name__ == '__main__':
    main()
|
|
#
#
#
from BCDataStream import *
from enumeration import Enumeration
from base58 import public_key_to_bc_address, hash_160_to_bc_address
import logging
import socket
import time
from util import short_hex, long_hex
def parse_CAddress(vds):
  """Deserialize a CAddress record from the data stream `vds`.

  Returns a dict with version, timestamp, service bits, the 12 reserved
  bytes (IPv6-mapped prefix), a dotted-quad IP and the port.
  """
  d = {}
  d['nVersion'] = vds.read_int32()
  d['nTime'] = vds.read_uint32()
  d['nServices'] = vds.read_uint64()
  d['pchReserved'] = vds.read_bytes(12)
  d['ip'] = socket.inet_ntoa(vds.read_bytes(4))
  # Port is serialized big-endian; htons byte-swaps it on little-endian hosts.
  d['port'] = socket.htons(vds.read_uint16())
  return d
def deserialize_CAddress(d):
  """Render a parsed CAddress dict as 'ip:port (lastseen: <ctime>)'."""
  last_seen = time.ctime(d['nTime'])
  return "%s:%s (lastseen: %s)" % (d['ip'], d['port'], last_seen)
def parse_setting(setting, vds):
  """Decode one wallet 'setting' value from `vds`, keyed by its name."""
  if setting[0] == "f":
    # Keys beginning with 'f' are boolean flags.
    return str(vds.read_boolean())
  if setting == "addrIncoming":
    return ""  # bitcoin 0.4 purposely breaks addrIncoming setting in encrypted wallets.
  if setting[0:4] == "addr":
    # Any other addr* key holds a serialized CAddress.
    return deserialize_CAddress(parse_CAddress(vds))
  if setting == "nTransactionFee":
    return vds.read_int64()
  if setting == "nLimitProcessors":
    return vds.read_int32()
  return 'unknown setting'
def parse_TxIn(vds):
  """Deserialize one transaction input (CTxIn) from the stream."""
  d = {}
  d['prevout_hash'] = vds.read_bytes(32)  # txid being spent (stored reversed)
  d['prevout_n'] = vds.read_uint32()      # output index within that tx
  d['scriptSig'] = vds.read_bytes(vds.read_compact_size())
  d['sequence'] = vds.read_uint32()
  return d
def deserialize_TxIn(d, transaction_index=None, owner_keys=None):
  """Render a parsed TxIn as a one-line human-readable summary.

  If `transaction_index` (prevout_hash -> parsed tx) is supplied, the
  value of the spent output is shown.  Python 2 only: relies on the
  'hex_codec' str codec.
  """
  if d['prevout_hash'] == "\x00"*32:
    # An all-zero prevout hash marks a coinbase (generation) input.
    result = "TxIn: COIN GENERATED"
    result += " coinbase:"+d['scriptSig'].encode('hex_codec')
  elif transaction_index is not None and d['prevout_hash'] in transaction_index:
    p = transaction_index[d['prevout_hash']]['txOut'][d['prevout_n']]
    # Values are stored in satoshis; shown here in BTC.
    result = "TxIn: value: %f"%(p['value']/1.0e8,)
    result += " prev("+long_hex(d['prevout_hash'][::-1])+":"+str(d['prevout_n'])+")"
  else:
    result = "TxIn: prev("+long_hex(d['prevout_hash'][::-1])+":"+str(d['prevout_n'])+")"
    pk = extract_public_key(d['scriptSig'])
    result += " pubkey: "+pk
    result += " sig: "+decode_script(d['scriptSig'])
  # 0xffffffff is the default; only show non-default sequence numbers.
  if d['sequence'] < 0xffffffff: result += " sequence: "+hex(d['sequence'])
  return result
def parse_TxOut(vds):
  """Deserialize one transaction output (CTxOut): value + scriptPubKey."""
  d = {}
  d['value'] = vds.read_int64()  # amount in satoshis
  d['scriptPubKey'] = vds.read_bytes(vds.read_compact_size())
  return d
def deserialize_TxOut(d, owner_keys=None):
  """Render a parsed TxOut; flags whether it pays to one of `owner_keys`."""
  result = "TxOut: value: %f"%(d['value']/1.0e8,)  # satoshis -> BTC
  pk = extract_public_key(d['scriptPubKey'])
  result += " pubkey: "+pk
  result += " Script: "+decode_script(d['scriptPubKey'])
  if owner_keys is not None:
    if pk in owner_keys: result += " Own: True"
    else: result += " Own: False"
  return result
def parse_Transaction(vds):
  """Deserialize a CTransaction: version, inputs, outputs, lock time."""
  d = {}
  d['version'] = vds.read_int32()
  n_vin = vds.read_compact_size()
  d['txIn'] = []
  for i in xrange(n_vin):
    d['txIn'].append(parse_TxIn(vds))
  n_vout = vds.read_compact_size()
  d['txOut'] = []
  for i in xrange(n_vout):
    d['txOut'].append(parse_TxOut(vds))
  d['lockTime'] = vds.read_uint32()
  return d
def deserialize_Transaction(d, transaction_index=None, owner_keys=None):
  """Render a parsed transaction, one line per input and output."""
  result = "%d tx in, %d out\n"%(len(d['txIn']), len(d['txOut']))
  for txIn in d['txIn']:
    result += deserialize_TxIn(txIn, transaction_index) + "\n"
  for txOut in d['txOut']:
    result += deserialize_TxOut(txOut, owner_keys) + "\n"
  return result
def parse_MerkleTx(vds):
  """Deserialize a CMerkleTx: a transaction plus its merkle branch."""
  d = parse_Transaction(vds)
  d['hashBlock'] = vds.read_bytes(32)
  n_merkleBranch = vds.read_compact_size()
  d['merkleBranch'] = vds.read_bytes(32*n_merkleBranch)  # 32 bytes per hash
  d['nIndex'] = vds.read_int32()
  return d
def deserialize_MerkleTx(d, transaction_index=None, owner_keys=None):
  """Render a parsed CMerkleTx: block hash + branch size, then the tx."""
  tx = deserialize_Transaction(d, transaction_index, owner_keys)
  # Hashes are stored little-endian; reverse before hex-encoding (py2 codec).
  result = "block: "+(d['hashBlock'][::-1]).encode('hex_codec')
  result += " %d hashes in merkle branch\n"%(len(d['merkleBranch'])/32,)
  return result+tx
def parse_WalletTx(vds):
  """Deserialize a CWalletTx: a CMerkleTx plus wallet-side metadata.

  Adds supporting previous transactions (vtxPrev), the free-form
  key/value mapValue, order-form pairs, and receive/spend flags.
  """
  d = parse_MerkleTx(vds)
  n_vtxPrev = vds.read_compact_size()
  d['vtxPrev'] = []
  for i in xrange(n_vtxPrev):
    d['vtxPrev'].append(parse_MerkleTx(vds))
  d['mapValue'] = {}
  n_mapValue = vds.read_compact_size()
  for i in xrange(n_mapValue):
    key = vds.read_string()
    value = vds.read_string()
    d['mapValue'][key] = value
  n_orderForm = vds.read_compact_size()
  d['orderForm'] = []
  for i in xrange(n_orderForm):
    first = vds.read_string()
    second = vds.read_string()
    d['orderForm'].append( (first, second) )
  d['fTimeReceivedIsTxTime'] = vds.read_uint32()
  d['timeReceived'] = vds.read_uint32()
  d['fromMe'] = vds.read_boolean()
  d['spent'] = vds.read_boolean()
  return d
def deserialize_WalletTx(d, transaction_index=None, owner_keys=None):
  """Render a parsed CWalletTx: the merkle tx plus wallet metadata."""
  result = deserialize_MerkleTx(d, transaction_index, owner_keys)
  result += "%d vtxPrev txns\n"%(len(d['vtxPrev']),)
  result += "mapValue:"+str(d['mapValue'])
  if len(d['orderForm']) > 0:
    result += "\n"+" orderForm:"+str(d['orderForm'])
  result += "\n"+"timeReceived:"+time.ctime(d['timeReceived'])
  result += " fromMe:"+str(d['fromMe'])+" spent:"+str(d['spent'])
  return result
# The CAuxPow (auxiliary proof of work) structure supports merged mining.
# A flag in the block version field indicates the structure's presence.
# As of 8/2011, the Original Bitcoin Client does not use it. CAuxPow
# originated in Namecoin; see
# https://github.com/vinced/namecoin/blob/mergedmine/doc/README_merged-mining.md.
def parse_AuxPow(vds):
  """Deserialize a CAuxPow merged-mining proof (see comment above)."""
  d = parse_MerkleTx(vds)
  n_chainMerkleBranch = vds.read_compact_size()
  d['chainMerkleBranch'] = vds.read_bytes(32*n_chainMerkleBranch)
  d['chainIndex'] = vds.read_int32()
  # Header of the parent-chain block that carries this proof of work.
  d['parentBlock'] = parse_BlockHeader(vds)
  return d
def parse_BlockHeader(vds):
  """Deserialize a block header; also keeps the raw bytes in '__header__'."""
  d = {}
  header_start = vds.read_cursor
  d['version'] = vds.read_int32()
  d['hashPrev'] = vds.read_bytes(32)
  d['hashMerkleRoot'] = vds.read_bytes(32)
  d['nTime'] = vds.read_uint32()
  d['nBits'] = vds.read_uint32()   # compact-format difficulty target
  d['nNonce'] = vds.read_uint32()
  header_end = vds.read_cursor
  # Preserve the raw header bytes for hashing / re-serialization.
  d['__header__'] = vds.input[header_start:header_end]
  return d
def parse_Block(vds):
  """Deserialize a full block: header plus all of its transactions."""
  d = parse_BlockHeader(vds)
  d['transactions'] = []
  # Merged-mining (auxpow) support, disabled for plain bitcoin blocks:
  # if d['version'] & (1 << 8):
  # d['auxpow'] = parse_AuxPow(vds)
  nTransactions = vds.read_compact_size()
  for i in xrange(nTransactions):
    d['transactions'].append(parse_Transaction(vds))
  return d
def deserialize_Block(d):
  """Render a parsed block: header summary then every transaction."""
  result = "Time: "+time.ctime(d['nTime'])+" Nonce: "+str(d['nNonce'])
  # NOTE(review): hex() already prepends '0x', so this prints '0x0x...'.
  result += "\nnBits: 0x"+hex(d['nBits'])
  result += "\nhashMerkleRoot: 0x"+d['hashMerkleRoot'][::-1].encode('hex_codec')
  result += "\nPrevious block: "+d['hashPrev'][::-1].encode('hex_codec')
  result += "\n%d transactions:\n"%len(d['transactions'])
  for t in d['transactions']:
    result += deserialize_Transaction(t)+"\n"
  result += "\nRaw block header: "+d['__header__'].encode('hex_codec')
  return result
def parse_BlockLocator(vds):
  """Deserialize a CBlockLocator: a list of 32-byte block hashes."""
  d = { 'hashes' : [] }
  nHashes = vds.read_compact_size()
  for i in xrange(nHashes):
    d['hashes'].append(vds.read_bytes(32))
  return d
def deserialize_BlockLocator(d):
  """Render only the tip (first) hash of a block locator."""
  result = "Block Locator top: "+d['hashes'][0][::-1].encode('hex_codec')
  return result
# Script opcode table; names and values mirror the reference client's
# script.h.  Bare names auto-increment from the previous explicit value.
opcodes = Enumeration("Opcodes", [
    ("OP_0", 0), ("OP_PUSHDATA1",76), "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", "OP_RESERVED",
    "OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7",
    "OP_8", "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
    "OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", "OP_ELSE", "OP_ENDIF", "OP_VERIFY",
    "OP_RETURN", "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", "OP_2OVER", "OP_2ROT", "OP_2SWAP",
    "OP_IFDUP", "OP_DEPTH", "OP_DROP", "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
    "OP_SWAP", "OP_TUCK", "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", "OP_INVERT", "OP_AND",
    "OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", "OP_RESERVED1", "OP_RESERVED2", "OP_1ADD", "OP_1SUB", "OP_2MUL",
    "OP_2DIV", "OP_NEGATE", "OP_ABS", "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV",
    "OP_MOD", "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR",
    "OP_NUMEQUAL", "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN",
    "OP_GREATERTHAN", "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX",
    "OP_WITHIN", "OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160",
    "OP_HASH256", "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG",
    "OP_CHECKMULTISIGVERIFY",
    "OP_NOP1", "OP_NOP2", "OP_NOP3", "OP_NOP4", "OP_NOP5", "OP_NOP6", "OP_NOP7", "OP_NOP8", "OP_NOP9", "OP_NOP10",
    ("OP_INVALIDOPCODE", 0xFF),
])
def script_GetOp(bytes):
  """Generator over (opcode, data) pairs in a serialized script.

  `data` is None for non-push opcodes, the pushed bytes otherwise, or a
  "_INVALID_"-prefixed remainder when a push overruns the script end.
  NOTE(review): `struct` is not imported in this file; it appears to
  arrive via `from BCDataStream import *` — confirm.
  """
  i = 0
  while i < len(bytes):
    vch = None
    opcode = ord(bytes[i])
    i += 1
    if opcode <= opcodes.OP_PUSHDATA4:
      # Opcodes below PUSHDATA1 push `opcode` bytes directly; the
      # PUSHDATA1/2/4 forms carry an explicit little-endian length.
      nSize = opcode
      if opcode == opcodes.OP_PUSHDATA1:
        nSize = ord(bytes[i])
        i += 1
      elif opcode == opcodes.OP_PUSHDATA2:
        (nSize,) = struct.unpack_from('<H', bytes, i)
        i += 2
      elif opcode == opcodes.OP_PUSHDATA4:
        (nSize,) = struct.unpack_from('<I', bytes, i)
        i += 4
      if i+nSize > len(bytes):
        vch = "_INVALID_"+bytes[i:]
        i = len(bytes)
      else:
        vch = bytes[i:i+nSize]
        i += nSize
    yield (opcode, vch)
def script_GetOpName(opcode):
  """Map a numeric opcode to its short name, e.g. 118 -> 'DUP'."""
  try:
    name = opcodes.whatis(opcode)
  except KeyError:
    return "InvalidOp_" + str(opcode)
  return name.replace("OP_", "")
def decode_script(bytes):
  """Render a script as space-separated "<len>:<hex>" pushes and op names."""
  result = ''
  for (opcode, vch) in script_GetOp(bytes):
    if len(result) > 0: result += " "
    if opcode <= opcodes.OP_PUSHDATA4:
      # Data push: show the byte count and a shortened hex dump.
      result += "%d:"%(opcode,)
      result += short_hex(vch)
    else:
      result += script_GetOpName(opcode)
  return result
def match_decoded(decoded, to_match):
  """True if the decoded script ops match the template `to_match`.

  A template entry of OP_PUSHDATA4 matches any push opcode, since all
  opcodes <= OP_PUSHDATA4 just push data onto the stack.
  """
  if len(decoded) != len(to_match):
    return False
  for want, got in zip(to_match, decoded):
    if want == opcodes.OP_PUSHDATA4 and got[0] <= opcodes.OP_PUSHDATA4:
      continue
    if want != got[0]:
      return False
  return True
def extract_public_key(bytes):
  """Best-effort extraction of a bitcoin address from a script.

  Recognizes the standard templates (sig+pubkey scriptSig, pay-to-pubkey,
  pay-to-pubkey-hash, BIP11 bare multisig, BIP16 P2SH) and returns the
  corresponding address string, or "(None)" if no template matches.
  """
  decoded = [ x for x in script_GetOp(bytes) ]
  # non-generated TxIn transactions push a signature
  # (seventy-something bytes) and then their public key
  # (33 or 65 bytes) onto the stack:
  match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
  if match_decoded(decoded, match):
    return public_key_to_bc_address(decoded[1][1])
  # The Genesis Block, self-payments, and pay-by-IP-address payments look like:
  # 65 BYTES:... CHECKSIG
  match = [ opcodes.OP_PUSHDATA4, opcodes.OP_CHECKSIG ]
  if match_decoded(decoded, match):
    return public_key_to_bc_address(decoded[0][1])
  # Pay-by-Bitcoin-address TxOuts look like:
  # DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG
  match = [ opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG ]
  if match_decoded(decoded, match):
    return hash_160_to_bc_address(decoded[2][1])
  # BIP11 TxOuts look like one of these:
  # Note that match_decoded is dumb, so OP_1 actually matches OP_1/2/3/etc:
  multisigs = [
    [ opcodes.OP_1, opcodes.OP_PUSHDATA4, opcodes.OP_1, opcodes.OP_CHECKMULTISIG ],
    [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_2, opcodes.OP_CHECKMULTISIG ],
    [ opcodes.OP_3, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_3, opcodes.OP_CHECKMULTISIG ]
  ]
  for match in multisigs:
    if match_decoded(decoded, match):
      # BUG FIX: the original computed `len(decoded-1)` (a TypeError on a
      # list) and concatenated a list to a string.  The pushed keys sit
      # between the leading OP_n and the trailing OP_n CHECKMULTISIG pair,
      # i.e. indices 1 .. len(decoded)-3 inclusive.
      addrs = [public_key_to_bc_address(decoded[i][1])
               for i in range(1, len(decoded) - 2)]
      return "[" + ",".join(addrs) + "]"
  # BIP16 TxOuts look like:
  # HASH160 20 BYTES:... EQUAL
  match = [ opcodes.OP_HASH160, 0x14, opcodes.OP_EQUAL ]
  if match_decoded(decoded, match):
    return hash_160_to_bc_address(decoded[1][1], version="\x05")
  return "(None)"
|
|
#!/usr/bin/python
#
# Copyright (C) 2010-2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GData definitions for Content API for Shopping"""
__author__ = 'afshar (Ali Afshar)'
import atom.core
import atom.data
import gdata.data
SC_NAMESPACE_TEMPLATE = ('{http://schemas.google.com/'
'structuredcontent/2009}%s')
SCP_NAMESPACE_TEMPLATE = ('{http://schemas.google.com/'
'structuredcontent/2009/products}%s')
class ProductId(atom.core.XmlElement):
  """sc:id element: the required, unique alphanumeric ID of a product."""
  _qname = SC_NAMESPACE_TEMPLATE % 'id'
class RequiredDestination(atom.core.XmlElement):
  """sc:required_destination element.

  Names a destination the product must appear in: "ProductSearch",
  "ProductAds" or "CommerceSearch".  Added to the app:control element
  (ProductEntry's "control" attribute).  When omitted, the API attempts
  to upload to as many destinations as possible.
  """
  _qname = SC_NAMESPACE_TEMPLATE % 'required_destination'
  dest = 'dest'  # XML attribute carrying the destination name.
class ExcludedDestination(atom.core.XmlElement):
  """sc:excluded_destination element.

  Names a destination the product must NOT appear in ("ProductSearch",
  "ProductAds" or "CommerceSearch"); the counterpart of
  RequiredDestination, likewise placed in the app:control element.
  """
  _qname = SC_NAMESPACE_TEMPLATE % 'excluded_destination'
  dest = 'dest'  # XML attribute carrying the destination name.
class ProductControl(atom.data.Control):
  """app:control element, extended with the sc destination elements."""
  required_destination = RequiredDestination
  excluded_destination = ExcludedDestination
class ContentLanguage(atom.core.XmlElement):
  """sc:content_language element: language used in the item content."""
  _qname = SC_NAMESPACE_TEMPLATE % 'content_language'
class TargetCountry(atom.core.XmlElement):
  """sc:target_country element: the target country of the product."""
  _qname = SC_NAMESPACE_TEMPLATE % 'target_country'
class ImageLink(atom.core.XmlElement):
  """sc:image_link element: URL of an associated product image.

  Use full size images (400x400 pixels or larger), not thumbnails.
  """
  _qname = SC_NAMESPACE_TEMPLATE % 'image_link'
class ExpirationDate(atom.core.XmlElement):
  """sc:expiration_date element: when the product listing expires.

  Defaults to 30 days after the product was created when omitted.
  """
  _qname = SC_NAMESPACE_TEMPLATE % 'expiration_date'
class Adult(atom.core.XmlElement):
  """sc:adult element: "true" if targeted towards adults; defaults to
  "false".
  """
  _qname = SC_NAMESPACE_TEMPLATE % 'adult'
class Author(atom.core.XmlElement):
  """scp:author element: author of the information; recommended for books."""
  _qname = SCP_NAMESPACE_TEMPLATE % 'author'
class Availability(atom.core.XmlElement):
  """scp:availability element.

  The retailer's suggested label for product availability.  Supported
  values include: 'in stock', 'out of stock', 'limited availability'.
  """
  _qname = SCP_NAMESPACE_TEMPLATE % 'availability'
class Brand(atom.core.XmlElement):
  """scp:brand element: the brand of the product."""
  _qname = SCP_NAMESPACE_TEMPLATE % 'brand'
class Color(atom.core.XmlElement):
  """scp:color element: the color of the product."""
  _qname = SCP_NAMESPACE_TEMPLATE % 'color'
class Condition(atom.core.XmlElement):
  """scp:condition element: one of "new", "used", "refurbished"."""
  _qname = SCP_NAMESPACE_TEMPLATE % 'condition'
class Edition(atom.core.XmlElement):
  """scp:edition element.

  Recommended for products with multiple editions (collectors'
  editions etc.), such as books.
  """
  _qname = SCP_NAMESPACE_TEMPLATE % 'edition'
class Feature(atom.core.XmlElement):
  """scp:feature element: one textual product feature (repeatable).

  E.g. a smartphone may have features "wifi", "gps", etc.
  """
  _qname = SCP_NAMESPACE_TEMPLATE % 'feature'
class FeaturedProduct(atom.core.XmlElement):
  """scp:featured_product element: "true"/"false" special-product flag."""
  _qname = SCP_NAMESPACE_TEMPLATE % 'featured_product'
class Genre(atom.core.XmlElement):
  """scp:genre element: e.g. "comedy"; strongly recommended for media."""
  _qname = SCP_NAMESPACE_TEMPLATE % 'genre'
class Gtin(atom.core.XmlElement):
  """scp:gtin element: GTIN of the product (isbn/upc/ean)."""
  _qname = SCP_NAMESPACE_TEMPLATE % 'gtin'
class Manufacturer(atom.core.XmlElement):
  """scp:manufacturer element: manufacturer of the product."""
  _qname = SCP_NAMESPACE_TEMPLATE % 'manufacturer'
class Mpn(atom.core.XmlElement):
  """scp:mpn element: Manufacturer's Part Number, a unique code
  determined by the manufacturer for the product.
  """
  _qname = SCP_NAMESPACE_TEMPLATE % 'mpn'
class Price(atom.core.XmlElement):
  """scp:price element (required).

  The price of the product.  The `unit` attribute must be set and
  should represent the currency.
  """
  _qname = SCP_NAMESPACE_TEMPLATE % 'price'
  unit = 'unit'  # XML attribute: currency code.
class ProductType(atom.core.XmlElement):
  """scp:product_type element.

  Describes the type of product.  A taxonomy of available product types
  is listed at http://www.google.com/basepages/producttype/taxonomy.txt
  and the entire line in the taxonomy should be included, for example
  "Electronics > Video > Projectors".
  """
  _qname = SCP_NAMESPACE_TEMPLATE % 'product_type'
class Quantity(atom.core.XmlElement):
  """scp:quantity element: items available; 0 means out of stock."""
  _qname = SCP_NAMESPACE_TEMPLATE % 'quantity'
class ShippingCountry(atom.core.XmlElement):
  """scp:shipping_country element: two-letter ISO 3166 destination
  country.  Placed inside the scp:shipping element.
  """
  _qname = SCP_NAMESPACE_TEMPLATE % 'shipping_country'
class ShippingPrice(atom.core.XmlElement):
  """scp:shipping_price element: fixed shipping price (a number), with
  the currency given in the `unit` attribute.  Placed inside
  scp:shipping.
  """
  _qname = SCP_NAMESPACE_TEMPLATE % 'shipping_price'
  unit = 'unit'  # XML attribute: currency code.
class ShippingRegion(atom.core.XmlElement):
  """scp:shipping_region element: region a shipping rate applies to,
  e.g. in the US a two-letter state abbreviation, ZIP code, or ZIP
  range using the * wildcard.  Placed inside scp:shipping.
  """
  _qname = SCP_NAMESPACE_TEMPLATE % 'shipping_region'
class ShippingService(atom.core.XmlElement):
  """scp:shipping_service element: free-form description of the service
  class or delivery speed.  Placed inside scp:shipping.
  """
  _qname = SCP_NAMESPACE_TEMPLATE % 'shipping_service'
class Shipping(atom.core.XmlElement):
  """scp:shipping element: container for the shipping rule children."""
  _qname = SCP_NAMESPACE_TEMPLATE % 'shipping'
  shipping_price = ShippingPrice
  shipping_country = ShippingCountry
  shipping_service = ShippingService
  shipping_region = ShippingRegion
class ShippingWeight(atom.core.XmlElement):
  """scp:shipping_weight element: weight value plus a `unit` attribute
  (valid units: lb, pound, oz, ounce, g, gram, kg, kilogram).
  """
  _qname = SCP_NAMESPACE_TEMPLATE % 'shipping_weight'
  unit = 'unit'  # XML attribute: weight unit.
class Size(atom.core.XmlElement):
  """scp:size element: one available size, e.g. "small", "medium",
  "large".  The product entry may contain multiple sizes to indicate
  the available sizes.
  """
  _qname = SCP_NAMESPACE_TEMPLATE % 'size'
class TaxRate(atom.core.XmlElement):
  """scp:tax_rate element: tax as a percentage of the item price.
  Placed inside the scp:tax (Tax class) element.
  """
  _qname = SCP_NAMESPACE_TEMPLATE % 'tax_rate'
class TaxCountry(atom.core.XmlElement):
  """scp:tax_country element: two-letter ISO 3166 country an item is
  taxed in.  Placed inside scp:tax.
  """
  _qname = SCP_NAMESPACE_TEMPLATE % 'tax_country'
class TaxRegion(atom.core.XmlElement):
  """scp:tax_region element: region a tax rate applies to, e.g. in the
  US a two-letter state abbreviation, ZIP code, or ZIP range using the
  * wildcard.  Placed inside scp:tax.
  """
  _qname = SCP_NAMESPACE_TEMPLATE % 'tax_region'
class TaxShip(atom.core.XmlElement):
  """scp:tax_ship element: whether tax is charged on shipping; the
  default value is "false".  Placed inside scp:tax.
  """
  _qname = SCP_NAMESPACE_TEMPLATE % 'tax_ship'
class Tax(atom.core.XmlElement):
  """scp:tax element: container for the tax_rate, tax_country,
  tax_region and tax_ship rules.
  """
  _qname = SCP_NAMESPACE_TEMPLATE % 'tax'
  tax_rate = TaxRate
  tax_country = TaxCountry
  tax_region = TaxRegion
  tax_ship = TaxShip
class Year(atom.core.XmlElement):
  """scp:year element: four-digit year the product was produced."""
  _qname = SCP_NAMESPACE_TEMPLATE % 'year'
class ProductEntry(gdata.data.BatchEntry):
  """Product entry containing product information.

  Elements come from five namespaces: atom:, app: (Atom Publishing
  Protocol), gd: (Google Data API), sc: (Content API for Shopping
  general attributes) and scp: (product attributes).  Only the sc and
  scp elements are defined here; other required elements (<title>,
  <link>, <entry>, <id>, <category>, <content>, <author>, <created>,
  <updated>) are inherited from superclasses.

  Typical usage::

      entry = ProductEntry()
      entry.title = atom.data.Title(u'32GB MP3 Player')
      entry.condition = Condition(u'new')
      entry.price = Price('20.00', unit='USD')
      entry.feature.append(Feature(u'wifi'))
      entry.image_link.append(ImageLink('http://myshop/cdplayer.jpg'))
      entry.shipping = Shipping()
      entry.shipping.shipping_price = ShippingPrice('10.00', unit='USD')
      entry.tax = Tax()
      entry.tax.tax_rate = TaxRate('17.5')

  Single-valued attributes (assign an instance of the named class):
  author (Author), product_id (ProductId), availability (Availability),
  brand (Brand), color (Color), condition (Condition),
  content_language (ContentLanguage), edition (Edition),
  expiration_date (ExpirationDate), featured_product (FeaturedProduct),
  genre (Genre), manufacturer (Manufacturer), mpn (Mpn),
  price (Price, with currency `unit`), gtin (Gtin),
  product_type (ProductType), quantity (Quantity), shipping (Shipping),
  shipping_weight (ShippingWeight, with weight `unit`),
  target_country (TargetCountry), tax (Tax), year (Year),
  control (ProductControl).

  List-valued attributes (append instances): feature (Feature),
  image_link (ImageLink), size (Size).
  """
  # NOTE(review): the previous docstring also documented a 'publisher'
  # attribute, but no such attribute is defined on this class — confirm
  # whether it was intended to exist.
  author = Author
  product_id = ProductId
  availability = Availability
  brand = Brand
  color = Color
  condition = Condition
  content_language = ContentLanguage
  edition = Edition
  expiration_date = ExpirationDate
  feature = [Feature]
  featured_product = FeaturedProduct
  genre = Genre
  image_link = [ImageLink]
  manufacturer = Manufacturer
  mpn = Mpn
  price = Price
  gtin = Gtin
  product_type = ProductType
  quantity = Quantity
  shipping = Shipping
  shipping_weight = ShippingWeight
  size = [Size]
  target_country = TargetCountry
  tax = Tax
  year = Year
  control = ProductControl
# opensearch needs overriding for wrong version
# see http://code.google.com/p/gdata-python-client/issues/detail?id=483
class TotalResults(gdata.data.TotalResults):
    """opensearch:totalResults with its qname overridden.

    The base class declares the qname for the wrong OpenSearch version;
    see http://code.google.com/p/gdata-python-client/issues/detail?id=483
    """
    _qname = gdata.data.TotalResults._qname[1]
class ItemsPerPage(gdata.data.ItemsPerPage):
    """opensearch:itemsPerPage with its qname overridden.

    Works around the wrong OpenSearch namespace version in the base class
    (gdata-python-client issue 483).
    """
    _qname = gdata.data.ItemsPerPage._qname[1]
class StartIndex(gdata.data.StartIndex):
    """opensearch:startIndex with its qname overridden.

    Works around the wrong OpenSearch namespace version in the base class
    (gdata-python-client issue 483).
    """
    _qname = gdata.data.StartIndex._qname[1]
class ProductFeed(gdata.data.BatchFeed):
    """Represents a feed of a merchant's products."""
    # entries are ProductEntry elements
    entry = [ProductEntry]
    # opensearch elements use the fixed-qname subclasses defined in this file
    total_results = TotalResults
    items_per_page = ItemsPerPage
    start_index = StartIndex
def build_entry(product_id=None, title=None, content=None, link=None, condition=None,
                target_country=None, content_language=None, price=None,
                price_unit=None, tax_rate=None, shipping_price=None,
                shipping_price_unit=None, image_links=(), expiration_date=None,
                adult=None, author=None, brand=None, color=None, edition=None,
                features=(), featured_product=None, genre=None,
                manufacturer=None, mpn=None, gtin=None, product_type=None,
                quantity=None, shipping_country=None, shipping_region=None,
                shipping_service=None, shipping_weight=None,
                shipping_weight_unit=None, sizes=(), tax_country=None,
                tax_region=None, tax_ship=None, year=None, product=None):
  """Create a new product with the required attributes.

  This function exists as an alternative constructor to help alleviate the
  boilerplate involved in creating product definitions. You may well want to
  fine-tune your products after creating them.

  Documentation of each attribute attempts to explain the "long-hand" way of
  achieving the same goal.

  :param product_id: The unique ID for this product.
    This is equivalent to creating and setting an product_id element::
      entry = ProductEntry()
      entry.product_id = ProductId(product_id)
  :param title: The title of this product.
    This is equivalent to creating and setting a title element::
      entry = ProductEntry()
      entry.title = atom.data.Title(title)
  :param content: The description of this product.
    This is equivalent to creating and setting the content element::
      entry.content = atom.data.Content(content)
  :param link: The uri of the link to a page describing the product.
    This is equivalent to creating and setting the link element::
      entry.link = atom.data.Link(href=link, rel='alternate',
                                  type='text/html')
  :param condition: The condition of the product.
    This is equivalent to creating and setting the condition element::
      entry.condition = Condition(condition)
  :param target_country: The target country of the product
    This is equivalent to creating and setting the target_country element::
      entry.target_country = TargetCountry(target_country)
  :param content_language: The language of the content
    This is equivalent to creating and setting the content_language element::
      entry.content_language = ContentLanguage(content_language)
  :param price: The price of the product
    This is equivalent to creating and setting the price element, using the
    price_unit parameter as the unit::
      entry.price = Price(price, unit=price_unit)
  :param price_unit: The price unit of the product
    See price parameter.
  :param tax_rate: The tax rate for this product
    This is equivalent to creating and setting the tax element and its required
    children::
      entry.tax = Tax()
      entry.tax.tax_rate = TaxRate(tax_rate)
  :param shipping_price: The price of shipping for this product
    This is equivalent to creating and setting the shipping element and its
    required children. The unit for the price is taken from the
    shipping_price_unit parameter::
      entry.shipping = Shipping()
      entry.shipping.shipping_price = ShippingPrice(shipping_price,
                                                    unit=shipping_price_unit)
  :param shipping_price_unit: The unit of the shipping price
    See shipping_price
  :param image_links: A sequence of links for images for this product.
    This is equivalent to creating a single image_link element for each image::
      for image_link in image_links:
        entry.image_link.append(ImageLink(image_link))
  :param expiration_date: The date that this product listing expires
    This is equivalent to creating and setting an expiration_date element::
      entry.expiration_date = ExpirationDate(expiration_date)
  :param adult: Whether this product listing contains adult content
    This is equivalent to creating and setting the adult element::
      entry.adult = Adult(adult)
  :param author: The author of the product
    This is equivalent to creating and setting the author element::
      entry.author = Author(author)
  :param brand: The brand of the product
    This is equivalent to creating and setting the brand element::
      entry.brand = Brand(brand)
  :param color: The color of the product
    This is equivalent to creating and setting the color element::
      entry.color = Color(color)
  :param edition: The edition of the product
    This is equivalent to creating and setting the edition element::
      entry.edition = Edition('1')
  :param features: Features for this product
    Each feature in the provided sequence will create a Feature element in the
    entry, equivalent to::
      for feature in features:
        entry.feature.append(Feature(feature))
  :param featured_product: Whether this product is featured
    This is equivalent to creating and setting the featured_product element::
      entry.featured_product = FeaturedProduct(featured_product)
  :param genre: The genre of the product
    This is equivalent to creating and setting the genre element::
      entry.genre = Genre(genre)
  :param manufacturer: The manufacturer of the product
    This is equivalent to creating and setting the manufacturer element::
      entry.manufacturer = Manufacturer(manufacturer)
  :param mpn: The manufacturer's part number for a product
    This is equivalent to creating and setting the mpn element::
      entry.mpn = Mpn(mpn)
  :param gtin: The gtin for a product
    This is equivalent to creating and setting the gtin element::
      entry.gtin = Gtin(gtin)
  :param product_type: The type of a product
    This is equivalent to creating and setting the product_type element::
      entry.product_type = ProductType(product_type)
  :param quantity: The quantity of the product in stock
    This is equivalent to creating and setting the quantity element::
      entry.quantity = Quantity(quantity)
  :param shipping_country: The country that this product can be shipped to
    This is equivalent to creating a Shipping element, and creating and setting
    the required element within::
      entry.shipping = Shipping()
      entry.shipping.shipping_country = ShippingCountry(shipping_country)
  :param shipping_region: The region that this product can be shipped to
    This is equivalent to creating a Shipping element, and creating and setting
    the required element within::
      entry.shipping = Shipping()
      entry.shipping.shipping_region = ShippingRegion(shipping_region)
  :param shipping_service: The service for shipping.
    This is equivalent to creating a Shipping element, and creating and setting
    the required element within::
      entry.shipping = Shipping()
      entry.shipping.shipping_service = ShippingService(shipping_service)
  :param shipping_weight: The shipping weight of a product
    Along with the shipping_weight_unit, this is equivalent to creating and
    setting the shipping_weight element::
      entry.shipping_weight = ShippingWeight(shipping_weight,
                                             unit=shipping_weight_unit)
  :param shipping_weight_unit: The unit of shipping weight
    See shipping_weight.
  :param sizes: The sizes that are available for this product.
    Each size of a list will add a size element to the entry, like so::
      for size in sizes:
        product.size.append(Size(size))
  :param tax_country: The country that tax rules will apply
    This is equivalent to creating a Tax element, and creating and setting the
    required sub-element::
      entry.tax = Tax()
      entry.tax.tax_country = TaxCountry(tax_country)
  :param tax_region: The region that the tax rule applies in
    This is equivalent to creating a Tax element, and creating and setting the
    required sub-element::
      entry.tax = Tax()
      entry.tax.tax_region = TaxRegion(tax_region)
  :param tax_ship: Whether shipping cost is taxable
    This is equivalent to creating a Tax element, and creating and setting the
    required sub-element::
      entry.tax = Tax()
      entry.tax.tax_ship = TaxShip(tax_ship)
  :param year: The year the product was created
    This is equivalent to creating and setting a year element::
      entry.year = Year('2001')
  :param product: An existing ProductEntry to update in place; a new entry is
    created when omitted.
  """
  product = product or ProductEntry()
  if product_id is not None:
    product.product_id = ProductId(product_id)
  if content is not None:
    product.content = atom.data.Content(content)
  if title is not None:
    product.title = atom.data.Title(title)
  if condition is not None:
    product.condition = Condition(condition)
  if price is not None:
    product.price = Price(price, unit=price_unit)
  if content_language is not None:
    product.content_language = ContentLanguage(content_language)
  if target_country is not None:
    product.target_country = TargetCountry(target_country)
  if tax_rate is not None:
    product.tax = Tax()
    product.tax.tax_rate = TaxRate(tax_rate)
  if shipping_price is not None:
    if shipping_price_unit is None:
      raise ValueError('Must provide shipping_price_unit if '
                       'shipping_price is provided')
    product.shipping = Shipping()
    product.shipping.shipping_price = ShippingPrice(shipping_price,
                                                    unit=shipping_price_unit)
  if link is not None:
    product.link.append(atom.data.Link(href=link, type='text/html',
                                       rel='alternate'))
  for image_link in image_links:
    product.image_link.append(ImageLink(image_link))
  if expiration_date is not None:
    product.expiration_date = ExpirationDate(expiration_date)
  if adult is not None:
    product.adult = Adult(adult)
  if author is not None:
    product.author = Author(author)
  if brand is not None:
    product.brand = Brand(brand)
  if color is not None:
    product.color = Color(color)
  if edition is not None:
    product.edition = Edition(edition)
  for feature in features:
    product.feature.append(Feature(feature))
  if featured_product is not None:
    product.featured_product = FeaturedProduct(featured_product)
  if genre is not None:
    product.genre = Genre(genre)
  if manufacturer is not None:
    product.manufacturer = Manufacturer(manufacturer)
  if mpn is not None:
    product.mpn = Mpn(mpn)
  if gtin is not None:
    product.gtin = Gtin(gtin)
  if product_type is not None:
    product.product_type = ProductType(product_type)
  if quantity is not None:
    product.quantity = Quantity(quantity)
  # BUG FIX: the shipping_* and tax_* branches below previously assigned to
  # sub-elements of product.shipping / product.tax without first creating the
  # Shipping() / Tax() parent element, raising AttributeError on None whenever
  # shipping_price / tax_rate were not also supplied (compare the
  # shipping_price branch above, which creates the parent).
  if shipping_country is not None:
    if product.shipping is None:
      product.shipping = Shipping()
    product.shipping.shipping_country = ShippingCountry(
        shipping_country)
  if shipping_region is not None:
    if product.shipping is None:
      product.shipping = Shipping()
    product.shipping.shipping_region = ShippingRegion(shipping_region)
  if shipping_service is not None:
    if product.shipping is None:
      product.shipping = Shipping()
    product.shipping.shipping_service = ShippingService(
        shipping_service)
  if shipping_weight is not None:
    product.shipping_weight = ShippingWeight(shipping_weight)
  if shipping_weight_unit is not None:
    product.shipping_weight.unit = shipping_weight_unit
  for size in sizes:
    product.size.append(Size(size))
  if tax_country is not None:
    if product.tax is None:
      product.tax = Tax()
    product.tax.tax_country = TaxCountry(tax_country)
  if tax_region is not None:
    if product.tax is None:
      product.tax = Tax()
    product.tax.tax_region = TaxRegion(tax_region)
  if tax_ship is not None:
    if product.tax is None:
      product.tax = Tax()
    product.tax.tax_ship = TaxShip(tax_ship)
  if year is not None:
    product.year = Year(year)
  return product
class Edited(atom.core.XmlElement):
    """sc:edited element -- edit timestamp for a structured-content entry."""
    _qname = SC_NAMESPACE_TEMPLATE % 'edited'
class AttributeLanguage(atom.core.XmlElement):
    """sc:attribute_language element."""
    _qname = SC_NAMESPACE_TEMPLATE % 'attribute_language'
class Channel(atom.core.XmlElement):
    """sc:channel element."""
    _qname = SC_NAMESPACE_TEMPLATE % 'channel'
class FeedFileName(atom.core.XmlElement):
    """sc:feed_file_name element."""
    _qname = SC_NAMESPACE_TEMPLATE % 'feed_file_name'
class FeedType(atom.core.XmlElement):
    """sc:feed_type element."""
    _qname = SC_NAMESPACE_TEMPLATE % 'feed_type'
class UseQuotedFields(atom.core.XmlElement):
    """sc:use_quoted_fields element."""
    _qname = SC_NAMESPACE_TEMPLATE % 'use_quoted_fields'
class FileFormat(atom.core.XmlElement):
    """sc:file_format element.

    Carries an sc:use_quoted_fields child and a 'format' XML attribute.
    """
    _qname = SC_NAMESPACE_TEMPLATE % 'file_format'
    use_quoted_fields = UseQuotedFields
    # string value maps the element's 'format' XML attribute
    # (atom.core attribute-name convention)
    format = 'format'
class ProcessingStatus(atom.core.XmlElement):
    """sc:processing_status element."""
    _qname = SC_NAMESPACE_TEMPLATE % 'processing_status'
class DatafeedEntry(gdata.data.GDEntry):
    """An entry for a Datafeed.

    Aggregates the sc:* elements that describe one merchant datafeed:
    its language/country targeting, file name and format, attribute
    language, processing status, edit time and feed type.
    """
    content_language = ContentLanguage
    target_country = TargetCountry
    feed_file_name = FeedFileName
    file_format = FileFormat
    attribute_language = AttributeLanguage
    processing_status = ProcessingStatus
    edited = Edited
    feed_type = FeedType
class DatafeedFeed(gdata.data.GDFeed):
    """A feed of DatafeedEntry elements."""
    entry = [DatafeedEntry]
class AdultContent(atom.core.XmlElement):
    """sc:adult_content element."""
    _qname = SC_NAMESPACE_TEMPLATE % 'adult_content'
class InternalId(atom.core.XmlElement):
    """sc:internal_id element."""
    _qname = SC_NAMESPACE_TEMPLATE % 'internal_id'
class ReviewsUrl(atom.core.XmlElement):
    """sc:reviews_url element."""
    _qname = SC_NAMESPACE_TEMPLATE % 'reviews_url'
class ClientAccount(gdata.data.GDEntry):
    """A multiclient account entry."""
    adult_content = AdultContent
    internal_id = InternalId
    reviews_url = ReviewsUrl
class ClientAccountFeed(gdata.data.GDFeed):
    """A feed of ClientAccount entries (multiclient accounts)."""
    entry = [ClientAccount]
###############################################################################
#
# Command line tool for scoring and managing Synapse challenges
#
# To use this script, first install the Synapse Python Client
# http://python-docs.synapse.org/
#
# Log in once using your user name and password
# import synapseclient
# syn = synapseclient.Synapse()
# syn.login(<username>, <password>, rememberMe=True)
#
# Your credentials will be saved after which you may run this script with no credentials.
#
# Author: chris.bare
#
###############################################################################
import synapseclient
import synapseclient.utils as utils
from synapseclient.exceptions import *
from synapseclient import Activity
from synapseclient import Project, Folder, File
from synapseclient import Evaluation, Submission, SubmissionStatus
from synapseclient import Wiki
from synapseclient import Column
from synapseclient.dict_object import DictObject
from synapseclient.annotations import from_submission_status_annotations
import synapseutils as synu
from collections import OrderedDict
from datetime import datetime, timedelta
from itertools import izip
from StringIO import StringIO
import copy
import argparse
import lock
import json
import math
import os
import random
import re
import sys
import tarfile
import tempfile
import time
import traceback
import urllib
import uuid
import warnings
from multiprocessing import Pool
from functools import partial
try:
import challenge_config as conf
except Exception as ex1:
sys.stderr.write("\nPlease configure your challenge. See challenge_config.template.py for an example.\n\n")
raise ex1
import messages
# Directory containing this script; per-submission logs go in its log/ subdir.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
LOG_DIR = os.path.join(SCRIPT_DIR, "log")
# BUG FIX: the 'if' statement below was missing its trailing colon, which is
# a SyntaxError that prevented the module from importing at all.
if not os.path.exists(LOG_DIR):
    os.mkdir(LOG_DIR)

# the batch size can be bigger, we do this just to demonstrate batching
BATCH_SIZE = 20

# how many times do we retry batch uploads of submission annotations
BATCH_UPLOAD_RETRY_COUNT = 5

# Matches canonical lowercase-hex UUIDs (8-4-4-4-12 hex digits)
UUID_REGEX = re.compile('[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')

# A module level variable to hold the Synapse connection
syn = None
def to_column_objects(leaderboard_columns):
    """
    Turn a list of column-configuration dictionaries (as defined in
    conf.leaderboard_columns) into a list of synapseclient Column objects.

    Only the keys Column understands are forwarded; anything else in a
    configuration dict is ignored.
    """
    recognized = ('name', 'columnType', 'maximumSize', 'enumValues', 'defaultValue')
    columns = []
    for config in leaderboard_columns:
        kwargs = {field: config[field] for field in recognized if field in config}
        columns.append(Column(**kwargs))
    return columns
def get_user_name(profile):
    """
    Build a display name from a Synapse user profile dictionary.

    Joins first and last name when present and non-blank; falls back to
    the profile's userName when neither yields anything.
    """
    name_parts = [
        profile[key]
        for key in ('firstName', 'lastName')
        if profile.get(key) and profile[key].strip()
    ]
    if not name_parts:
        name_parts = [profile['userName']]
    return " ".join(name_parts)
def update_single_submission_status(status, add_annotations, force=False):
    """
    Merge new annotations into a single submission's status.

    :param status: submission status object from syn.getSubmissionStatus()
    :param add_annotations: annotations to add, either a plain dict or in
        submission status annotations format. If a plain dict, all
        annotations are added as private.
    :param force: if True, an annotation key that already exists with the
        opposite visibility is moved instead of raising ValueError.
    :returns: the status object with its ``annotations`` replaced.
    """
    existingAnnotations = status.get("annotations", dict())
    # split the existing annotations into private/public key->value maps,
    # skipping the bookkeeping keys scopeId/objectId
    privateAnnotations = {each['key']:each['value'] for annots in existingAnnotations for each in existingAnnotations[annots] if annots not in ['scopeId','objectId'] and each['isPrivate'] == True}
    publicAnnotations = {each['key']:each['value'] for annots in existingAnnotations for each in existingAnnotations[annots] if annots not in ['scopeId','objectId'] and each['isPrivate'] == False}

    if not synapseclient.annotations.is_submission_status_annotations(add_annotations):
        # plain dict: everything added is private
        privateAddedAnnotations = add_annotations
        publicAddedAnnotations = dict()
    else:
        privateAddedAnnotations = {each['key']:each['value'] for annots in add_annotations for each in add_annotations[annots] if annots not in ['scopeId','objectId'] and each['isPrivate'] == True}
        publicAddedAnnotations = {each['key']:each['value'] for annots in add_annotations for each in add_annotations[annots] if annots not in ['scopeId','objectId'] and each['isPrivate'] == False}

    # If a private annotation is re-added as public (or vice versa), it only
    # switches sides when force=True; otherwise this is an error.
    publicConflicts = [key for key in privateAnnotations if key in publicAddedAnnotations]
    if publicConflicts:
        if not force:
            raise ValueError("You are trying to add public annotations that are already part of the existing private annotations: %s. Either change the annotation key or specify force=True" % ", ".join(publicConflicts))
        privateAnnotations = {key:privateAnnotations[key] for key in privateAnnotations if key not in publicAddedAnnotations}

    privateConflicts = [key for key in publicAnnotations if key in privateAddedAnnotations]
    if privateConflicts:
        if not force:
            raise ValueError("You are trying to add private annotations that are already part of the existing public annotations: %s. Either change the annotation key or specify force=True" % ", ".join(privateConflicts))
        publicAnnotations = {key:publicAnnotations[key] for key in publicAnnotations if key not in privateAddedAnnotations}

    privateAnnotations.update(privateAddedAnnotations)
    publicAnnotations.update(publicAddedAnnotations)

    priv = synapseclient.annotations.to_submission_status_annotations(privateAnnotations, is_private=True)
    pub = synapseclient.annotations.to_submission_status_annotations(publicAnnotations, is_private=False)
    # Fold the public annotation lists into priv.  BUG FIX: the original loop
    # re-tested `pub.get(annotType) is not None` inside a branch where it was
    # already known true, leaving an unreachable else; behavior is unchanged.
    for annotType in ['stringAnnos', 'longAnnos', 'doubleAnnos']:
        pubAnnos = pub.get(annotType)
        if pubAnnos is None:
            continue
        if priv.get(annotType) is None:
            priv[annotType] = pubAnnos
        else:
            priv[annotType].extend(pubAnnos)

    status.annotations = priv
    return status
def update_submissions_status_batch(evaluation, statuses):
    """
    Update statuses in batch. This can be much faster than individual updates,
    especially in rank based scoring methods which recalculate scores for all
    submissions each time a new submission is received.

    On a 412 ConflictingUpdateException the whole upload is retried, up to
    BATCH_UPLOAD_RETRY_COUNT times; any other HTTP error is re-raised.
    """
    for retry in range(BATCH_UPLOAD_RETRY_COUNT):
        try:
            token = None
            offset = 0
            while offset < len(statuses):
                batch = {"statuses"     : statuses[offset:offset+BATCH_SIZE],
                         "isFirstBatch" : (offset==0),
                         "isLastBatch"  : (offset+BATCH_SIZE>=len(statuses)),
                         "batchToken"   : token}
                response = syn.restPUT("/evaluation/%s/statusBatch" % evaluation.id, json.dumps(batch))
                token = response.get('nextUploadToken', None)
                offset += BATCH_SIZE
            # BUG FIX: without this break the fully-successful upload was
            # repeated on every remaining retry iteration.
            break
        except SynapseHTTPError as err:
            # on 412 ConflictingUpdateException we want to retry
            if err.response.status_code == 412:
                time.sleep(2)
            else:
                raise
class Query(object):
    """
    An object that helps with paging through annotation query results.

    Also exposes properties totalNumberOfResults, headers and rows.
    Iterating a Query yields one row's ``values`` list at a time, fetching
    further pages of ``limit`` rows on demand.
    """
    def __init__(self, query, limit=20, offset=0):
        # query: an evaluation submission annotation query string
        self.query = query
        self.limit = limit
        self.offset = offset
        # eagerly fetch the first page so headers/totalNumberOfResults exist
        self.fetch_batch_of_results()

    def fetch_batch_of_results(self):
        """Fetch the page of results starting at the current offset."""
        uri = "/evaluation/submission/query?query=" + urllib.quote_plus("%s limit %s offset %s" % (self.query, self.limit, self.offset))
        results = syn.restGET(uri)
        self.totalNumberOfResults = results['totalNumberOfResults']
        self.headers = results['headers']
        self.rows = results['rows']
        # index of the next row to yield within the current page
        self.i = 0

    def __iter__(self):
        return self

    # NOTE: Python 2 iterator protocol ('next', not '__next__')
    def next(self):
        if self.i >= len(self.rows):
            if self.offset >= self.totalNumberOfResults:
                raise StopIteration()
            # self.offset was advanced once per yielded row, so this fetch
            # resumes exactly where the previous page ended
            self.fetch_batch_of_results()
        values = self.rows[self.i]['values']
        self.i += 1
        self.offset += 1
        return values
def validate(evaluation, canCancel, dry_run=False):
    """
    Validate every RECEIVED submission in an evaluation queue.

    Each submission's status is set to VALIDATED or INVALID (stored unless
    dry_run), a FAILURE_REASON annotation is recorded, and the submitter or
    the challenge admins are notified by message.

    :param evaluation: an Evaluation object or its ID
    :param canCancel: if truthy, mark each status as cancellable
    :param dry_run: if True, do not store statuses or it is stored before
        messaging so repeated messages are avoided
    """
    if type(evaluation) != Evaluation:
        evaluation = syn.getEvaluation(evaluation)
    print "\n\nValidating", evaluation.id, evaluation.name
    print "-" * 60
    sys.stdout.flush()

    for submission, status in syn.getSubmissionBundles(evaluation, status='RECEIVED'):

        ## refetch the submission so that we get the file path
        ## to be later replaced by a "downloadFiles" flag on getSubmissionBundles
        submission = syn.getSubmission(submission)
        ex1 = None #Must define ex1 in case there is no error
        print "validating", submission.id, submission.name
        try:
            is_valid, validation_message = conf.validate_submission(evaluation, submission)
        except Exception as ex1:
            # NOTE: relies on Python 2 scoping -- 'ex1' remains bound after
            # the except block and is inspected below; Python 3 deletes it.
            is_valid = False
            print "Exception during validation:", type(ex1), ex1, ex1.message
            traceback.print_exc()
            validation_message = str(ex1)

        status.status = "VALIDATED" if is_valid else "INVALID"
        if canCancel:
            status.canCancel = True
        if not is_valid:
            failure_reason = {"FAILURE_REASON":validation_message}
        else:
            failure_reason = {"FAILURE_REASON":''}

        add_annotations = synapseclient.annotations.to_submission_status_annotations(failure_reason,is_private=True)
        status = update_single_submission_status(status, add_annotations)

        if not dry_run:
            status = syn.store(status)

        ## send message AFTER storing status to ensure we don't get repeat messages
        profile = syn.getUserProfile(submission.userId)

        if is_valid:
            messages.validation_passed(
                userIds=[submission.userId],
                username=get_user_name(profile),
                queue_name=evaluation.name,
                submission_id=submission.id,
                submission_name=submission.name)
        else:
            # AssertionError is treated as an expected validation failure and
            # reported to the submitter; any other exception goes to admins
            if isinstance(ex1, AssertionError):
                sendTo = [submission.userId]
                username = get_user_name(profile)
            else:
                sendTo = conf.ADMIN_USER_IDS
                username = "Challenge Administrator"

            messages.validation_failed(
                userIds= sendTo,
                username=username,
                queue_name=evaluation.name,
                submission_id=submission.id,
                submission_name=submission.name,
                message=validation_message)
def parallel_score(submissionId, evaluation, dry_run):
    """
    Score a single submission (worker function used by the Pool in score()).

    Writes a per-submission log file in LOG_DIR, stores the updated
    SubmissionStatus (unless dry_run) and notifies the submitter on success
    or the challenge admins on error.

    :param submissionId: ID of the submission to score
    :param evaluation: the Evaluation the submission belongs to
    :param dry_run: if True, neither the status nor the leaderboard table
        is stored
    """
    submission = syn.getSubmission(submissionId)
    status = syn.getSubmissionStatus(submissionId)
    # BUG FIX: the mode flag 'w' was previously passed to os.path.join() as an
    # extra path component instead of to open(), producing a wrong path and a
    # read-only file handle.
    logFile = open(os.path.join(LOG_DIR, status['id'] + "_log.txt"), 'w')
    status.status = "INVALID"
    try:
        score, message = conf.score_submission(evaluation, submission)
        logFile.write("scored: %s %s %s %s" % (submission.id, submission.name, submission.userId, str(score)))
        logFile.flush()

        ## fill in team in submission status annotations
        if 'teamId' in submission:
            team = syn.restGET('/team/{id}'.format(id=submission.teamId))
            if 'name' in team:
                score['team'] = team['name']
            else:
                score['team'] = submission.teamId
        elif 'userId' in submission:
            profile = syn.getUserProfile(submission.userId)
            score['team'] = get_user_name(profile)
        else:
            score['team'] = '?'

        add_annotations = synapseclient.annotations.to_submission_status_annotations(score, is_private=True)
        status = update_single_submission_status(status, add_annotations)
        status.status = "SCORED"

        ## if there's a table configured, update it
        if not dry_run and evaluation.id in conf.leaderboard_tables:
            update_leaderboard_table(conf.leaderboard_tables[evaluation.id], submission, fields=score, dry_run=False)

    except Exception as ex1:
        logFile.write('\n\nError scoring submission %s %s:\n' % (submission.name, submission.id))
        st = StringIO()
        traceback.print_exc(file=st)
        message = st.getvalue()
        logFile.write(message)
        logFile.flush()

        if conf.ADMIN_USER_IDS:
            submission_info = "submission id: %s\nsubmission name: %s\nsubmitted by user id: %s\n\n" % (submission.id, submission.name, submission.userId)
            messages.error_notification(userIds=conf.ADMIN_USER_IDS, message=submission_info+message)

    if not dry_run:
        status = syn.store(status)

    ## send message AFTER storing status to ensure we don't get repeat messages
    profile = syn.getUserProfile(submission.userId)

    if status.status == 'SCORED':
        messages.scoring_succeeded(
            userIds=[submission.userId],
            message=message,
            username=get_user_name(profile),
            queue_name=evaluation.name,
            submission_name=submission.name,
            submission_id=submission.id)
    else:
        messages.scoring_error(
            userIds=conf.ADMIN_USER_IDS,
            message=message,
            username="Challenge Administrator,",
            queue_name=evaluation.name,
            submission_name=submission.name,
            submission_id=submission.id)

    logFile.close()
def score(evaluation, canCancel, threads, dry_run=False):
if type(evaluation) != Evaluation:
evaluation = syn.getEvaluation(evaluation)
print '\n\nScoring ', evaluation.id, evaluation.name
print "-" * 60
#sys.stdout.flush()
allSubmissions = []
for submission, status in syn.getSubmissionBundles(evaluation, status='VALIDATED'):
## refetch the submission so that we get the file path
## to be later replaced by a "downloadFiles" flag on getSubmissionBundles
#submission = syn.getSubmission(submission)
allSubmissions.append(submission.id)
par_score_function = partial(parallel_score, evaluation=evaluation, dry_run=dry_run)
p = Pool(threads)
p.map(par_score_function,allSubmissions)
def invalidateSubmission(evaluation, dry_run=False):
    """
    Mark every submission whose cancellation was requested as INVALID.

    Note: ``dry_run`` is accepted for signature consistency with the other
    commands but is not consulted -- statuses are always stored.
    """
    if type(evaluation) != Evaluation:
        evaluation = syn.getEvaluation(evaluation)
    for submission, sub_status in syn.getSubmissionBundles(evaluation):
        if sub_status.cancelRequested is True:
            sub_status.status = "INVALID"
            syn.store(sub_status)
def create_leaderboard_table(name, columns, parent, evaluation, dry_run=False):
    """
    Create a leaderboard table and populate it from an evaluation's submissions.

    BUG FIX: the original referenced the undefined names ``cols`` and
    ``project`` (instead of the ``columns`` and ``parent`` parameters) and
    used ``schema`` even when dry_run skipped creating it, raising NameError.

    :param name: name for the new table schema
    :param columns: Column objects for the schema
    :param parent: the Synapse project to create the table in
    :param evaluation: the evaluation whose submissions populate the table
    :param dry_run: if True, create nothing; rows are only printed
    """
    schema = None
    if not dry_run:
        # NOTE(review): Schema is not among this module's explicit
        # synapseclient imports -- confirm it is imported at module level.
        schema = syn.store(Schema(name=name, columns=columns, parent=parent))
    for submission, status in syn.getSubmissionBundles(evaluation):
        annotations = synapseclient.annotations.from_submission_status_annotations(status.annotations) if 'annotations' in status else {}
        update_leaderboard_table(schema.id if schema is not None else None,
                                 submission, annotations, dry_run)
def update_leaderboard_table(leaderboard_table, submission, fields, dry_run=False):
    """
    Insert or update a record in a leaderboard table for a submission.

    :param leaderboard_table: Synapse ID of the leaderboard table
    :param submission: the Submission whose record is upserted
    :param fields: a dictionary including all scoring statistics plus the team name for the submission.
    :param dry_run: if True, print the row instead of storing it
    """

    ## copy fields from submission
    ## fields should already contain scoring stats
    fields['objectId'] = submission.id
    fields['userId'] = submission.userId
    fields['entityId'] = submission.entityId
    fields['versionNumber'] = submission.versionNumber
    fields['name'] = submission.name

    results = syn.tableQuery("select * from %s where objectId=%s" % (leaderboard_table, submission.id), resultsAs="rowset")
    rowset = results.asRowSet()

    ## figure out if we're inserting or updating
    if len(rowset['rows']) == 0:
        row = {'values':[]}
        rowset['rows'].append(row)
        mode = 'insert'
    elif len(rowset['rows']) == 1:
        row = rowset['rows'][0]
        mode = 'update'
    else:
        ## shouldn't happen
        raise RuntimeError("Multiple entries in leaderboard table %s for submission %s" % (leaderboard_table,submission.id))

    ## build list of fields in proper order according to headers
    row['values'] = [fields.get(col['name'], None) for col in rowset['headers']]

    if dry_run:
        print mode, "row "+row['rowId'] if 'rowId' in row else "new row", row['values']
    else:
        return syn.store(rowset)
def query(evaluation, columns, out=sys.stdout):
    """Test the query that will be run to construct the leaderboard.

    Prints the SCORED submissions as CSV to `out` and returns them as a
    pandas DataFrame.

    :param evaluation: an Evaluation object or its ID
    :param columns: leaderboard column configuration dictionaries; deep-copied,
        never mutated
    :param out: writable stream for the CSV output (defaults to stdout)
    """
    import pandas as pd
    if type(evaluation) != Evaluation:
        evaluation = syn.getEvaluation(evaluation)

    ## Note: Constructing the index on which the query operates is an
    ## asynchronous process, so we may need to wait a bit.
    results = Query(query="select * from evaluation_%s where status==\"SCORED\"" % evaluation.id)

    ## annotate each column with it's position in the query results, if it's there
    cols = copy.deepcopy(columns)
    for column in cols:
        if column['name'] in results.headers:
            column['index'] = results.headers.index(column['name'])

    indices = [column['index'] for column in cols if 'index' in column]
    column_index = {column['index']:column for column in cols if 'index' in column}

    # format a single cell according to its configured columnType
    def column_to_string(row, column_index, i):
        if column_index[i]['columnType']=="DOUBLE":
            return "%0.6f"%float(row[i])
        elif column_index[i]['columnType']=="STRING":
            # NOTE: Python 2 unicode/encode; values are utf-8 byte strings
            return "\"%s\""%unicode(row[i]).encode('utf-8')
        else:
            return unicode(row[i]).encode('utf-8')

    ## print leaderboard
    out.write(",".join([column['name'] for column in cols if 'index' in column]) + "\n")
    for row in results:
        out.write(",".join(column_to_string(row, column_index, i) for i in indices))
        out.write("\n")
    # results.rows holds only the LAST fetched page at this point -- the loop
    # above consumed the iterator page by page
    return(pd.DataFrame.from_records(map(lambda x: x['values'], results.rows), columns=results.headers))
def list_submissions(evaluation, status=None, **kwargs):
    """Print one line per submission in the given evaluation queue.

    :param evaluation: an Evaluation object or its ID
    :param status: optionally restrict to submissions with this status
    """
    if isinstance(evaluation, basestring):
        evaluation = syn.getEvaluation(evaluation)
    print '\n\nSubmissions for: %s %s' % (evaluation.id, evaluation.name.encode('utf-8'))
    print '-' * 60

    # NOTE: the loop variable 'status' shadows the parameter after this call
    for submission, status in syn.getSubmissionBundles(evaluation, status=status):
        print submission.id, submission.createdOn, status.status, submission.name.encode('utf-8'), submission.userId
def list_evaluations(project):
    """Print every evaluation queue attached to the given project."""
    print '\n\nEvaluations for project: ', utils.id_of(project)
    print '-' * 60

    evaluations = syn.getEvaluationByContentSource(project)
    for evaluation in evaluations:
        print "Evaluation: %s" % evaluation.id, evaluation.name.encode('utf-8')
def archive(evaluation, archiveType, destination=None, name=None, query=None):
    """
    Archive the submissions for the given evaluation queue and store them in the destination synapse folder.

    :param evaluation: a synapse evaluation queue or its ID
    :param archiveType: "submission" bundles the submission files plus a
        metadata CSV into one .tgz File stored under `destination`; any other
        value copies each submission's entity into a new archive Project
    :param destination: a synapse folder or its ID (tar mode only)
    :param name: filename for the tarball (tar mode only); defaults to
        submissions_<eval_id>.tgz
    :param query: a query that will return the desired submissions. At least the ID must be returned.
                  defaults to _select * from evaluation_[EVAL_ID] where status=="SCORED"_.
    :returns: the new File's ID in tar mode, otherwise a dict mapping
        copied entity IDs
    """
    tempdir = tempfile.mkdtemp()
    archive_dirname = 'submissions_%s' % utils.id_of(evaluation)

    if not query:
        query = 'select * from evaluation_%s where status=="SCORED"' % utils.id_of(evaluation)

    ## for each submission, download it's associated file and write a line of metadata
    results = Query(query=query)
    if 'objectId' not in results.headers:
        raise ValueError("Can't find the required field \"objectId\" in the results of the query: \"{0}\"".format(query))

    if archiveType == "submission":
        if not name:
            name = 'submissions_%s.tgz' % utils.id_of(evaluation)
        tar_path = os.path.join(tempdir, name)
        print "creating tar at:", tar_path
        print results.headers

        with tarfile.open(tar_path, mode='w:gz') as archive:
            with open(os.path.join(tempdir, 'submission_metadata.csv'), 'w') as f:
                # header row: query columns plus the archived filename
                f.write( (','.join(hdr for hdr in (results.headers + ['filename'])) + '\n').encode('utf-8') )
                for result in results:
                    ## retrieve file into cache and copy it to destination
                    submission = syn.getSubmission(result[results.headers.index('objectId')])
                    # prefix with the submission id to avoid name collisions
                    prefixed_filename = submission.id + "_" + os.path.basename(submission.filePath)
                    archive.add(submission.filePath, arcname=os.path.join(archive_dirname, prefixed_filename))
                    line = (','.join(unicode(item) for item in (result+[prefixed_filename]))).encode('utf-8')
                    print line
                    f.write(line + '\n')
            # the CSV is added after its 'with' closes so it is fully flushed
            archive.add(
                name=os.path.join(tempdir, 'submission_metadata.csv'),
                arcname=os.path.join(archive_dirname, 'submission_metadata.csv'))

        entity = syn.store(File(tar_path, parent=destination), evaluation_id=utils.id_of(evaluation))
        print("created:", entity.id, entity.name)
        toReturn = entity.id
    else:
        toReturn = {}
        for result in results:
            ## retrieve file into cache and copy it to destination
            submission = syn.getSubmission(result[results.headers.index('objectId')])
            projectEntity = Project('Archived %s %s %s %s' % (time.strftime("%Y%m%d"),submission.id,submission.entity.id,submission.entity.name))
            entity = syn.store(projectEntity)
            copied = synu.copy(syn, submission.entity.id, entity.id)
            toReturn.update(copied)
    return toReturn
## ==================================================
## Handlers for commands
## ==================================================
def command_list(args):
    """
    List the submissions to an evaluation queue, or the evaluation
    queues associated with a given project.
    """
    if args.all:
        # Walk every queue configured for this challenge.
        for queue_info in conf.evaluation_queues:
            list_submissions(evaluation=queue_info['id'], status=args.status)
        return
    if args.challenge_project:
        list_evaluations(project=args.challenge_project)
    elif args.evaluation:
        list_submissions(evaluation=args.evaluation, status=args.status)
    else:
        # Default: show the queues of the configured challenge project.
        list_evaluations(project=conf.CHALLENGE_SYN_ID)
def command_check_status(args):
    """Print the evaluation, submission and status records for one submission."""
    submission = syn.getSubmission(args.submission)
    status = syn.getSubmissionStatus(args.submission)
    evaluation = syn.getEvaluation(submission.evaluationId)
    ## deleting the entity key is a hack to work around a bug which prevents
    ## us from printing a submission
    del submission['entity']
    print unicode(evaluation).encode('utf-8')
    print unicode(submission).encode('utf-8')
    print unicode(status).encode('utf-8')
def command_reset(args):
    """
    Reset submission status(es) to args.status (default RECEIVED, see parser).

    Three modes: --rescore-all resets every SCORED submission in every
    configured queue; --rescore EVALUATION-ID... resets every SCORED
    submission in the given queues; otherwise an explicit list of submission
    IDs is reset.  --dry-run prints without storing.
    """
    if args.rescore_all:
        for queue_info in conf.evaluation_queues:
            for submission, status in syn.getSubmissionBundles(queue_info['id'], status="SCORED"):
                status.status = args.status
                if not args.dry_run:
                    print unicode(syn.store(status)).encode('utf-8')
    elif args.rescore:
        for queue_id in args.rescore:
            for submission, status in syn.getSubmissionBundles(queue_id, status="SCORED"):
                status.status = args.status
                if args.dry_run:
                    print "dry-run: ", submission.id, status.status
                else:
                    print "reset: ", submission.id, status.status
                    # NOTE(review): the store call below is commented out, so
                    # this branch prints "reset" without persisting anything —
                    # confirm whether that is intentional.
                    #print unicode(syn.store(status)).encode('utf-8')
    else:
        for submission in args.submission:
            status = syn.getSubmissionStatus(submission)
            status.status = args.status
            if not args.dry_run:
                print unicode(syn.store(status)).encode('utf-8')
def command_validate(args):
    """Validate RECEIVED submissions for one queue, or for every configured queue."""
    if args.all:
        for queue_info in conf.evaluation_queues:
            validate(queue_info['id'], args.canCancel, dry_run=args.dry_run)
        return
    if args.evaluation:
        validate(args.evaluation, args.canCancel, dry_run=args.dry_run)
    else:
        sys.stderr.write("\nValidate command requires either an evaluation ID or --all to validate all queues in the challenge")
def command_score(args):
    """Score VALIDATED submissions for one queue, or for every configured queue."""
    if args.all:
        for queue_info in conf.evaluation_queues:
            score(queue_info['id'], args.canCancel, threads=args.threads, dry_run=args.dry_run)
    elif args.evaluation:
        score(args.evaluation, args.canCancel, threads=args.threads, dry_run=args.dry_run)
    else:
        # Fixed "\Score" -> "\nScore": "\S" is not a recognized escape; the
        # message is meant to start on a new line like command_validate's.
        sys.stderr.write("\nScore command requires either an evaluation ID or --all to score all queues in the challenge")
def command_rank(args):
    """Rank SCORED submissions; ranking is challenge-specific, so this is a stub."""
    raise NotImplementedError('Implement a ranking function for your challenge')
def command_leaderboard(args):
    """Print the leaderboard for an evaluation, or write it to the --out file."""
    ## show columns specific to an evaluation, if available
    leaderboard_cols = conf.leaderboard_columns.get(args.evaluation, conf.LEADERBOARD_COLUMNS)
    ## write out to file if --out args given
    if args.out is not None:
        with open(args.out, 'w') as f:
            query(args.evaluation, columns=leaderboard_cols, out=f)
        print "Wrote leaderboard out to:", args.out
    else:
        query(args.evaluation, columns=leaderboard_cols)
def command_archive(args):
    """Archive an evaluation's submissions or write-ups (delegates to `archive`)."""
    archive(args.evaluation, args.archiveType, args.destination, name=args.name, query=args.query)
## ==================================================
## main method
## ==================================================
def main():
    """
    Entry point: parse the command line, acquire the scoring lock, log in to
    Synapse and dispatch to the selected sub-command handler.

    Returns 75 (EX_TEMPFAIL) when another instance already holds the lock.
    """
    if conf.CHALLENGE_SYN_ID == "":
        sys.stderr.write("Please configure your challenge. See sample_challenge.py for an example.")
    # `syn` is shared with the command_* handlers via the module global.
    global syn
    parser = argparse.ArgumentParser()
    parser.add_argument("-u", "--user", help="UserName", default=None)
    parser.add_argument("-p", "--password", help="Password", default=None)
    parser.add_argument("--notifications", help="Send error notifications to challenge admins", action="store_true", default=False)
    parser.add_argument("--send-messages", help="Send validation and scoring messages to participants", action="store_true", default=False)
    parser.add_argument("--acknowledge-receipt", help="Send confirmation message on passing validation to participants", action="store_true", default=False)
    parser.add_argument("--dry-run", help="Perform the requested command without updating anything in Synapse", action="store_true", default=False)
    parser.add_argument("--debug", help="Show verbose error output from Synapse API calls", action="store_true", default=False)
    parser.add_argument("--threads", help="Number of parallel processes to use for validation and scoring", type=int, default=1)
    subparsers = parser.add_subparsers(title="subcommand")
    parser_list = subparsers.add_parser('list', help="List submissions to an evaluation or list evaluations")
    parser_list.add_argument("evaluation", metavar="EVALUATION-ID", nargs='?', default=None)
    parser_list.add_argument("--challenge-project", "--challenge", "--project", metavar="SYNAPSE-ID", default=None)
    parser_list.add_argument("-s", "--status", default=None)
    parser_list.add_argument("--all", action="store_true", default=False)
    parser_list.set_defaults(func=command_list)
    parser_status = subparsers.add_parser('status', help="Check the status of a submission")
    parser_status.add_argument("submission")
    parser_status.set_defaults(func=command_check_status)
    parser_reset = subparsers.add_parser('reset', help="Reset a submission to RECEIVED for re-scoring (or set to some other status)")
    parser_reset.add_argument("submission", metavar="SUBMISSION-ID", type=int, nargs='*', help="One or more submission IDs, or omit if using --rescore-all")
    parser_reset.add_argument("-s", "--status", default='RECEIVED')
    parser_reset.add_argument("--rescore-all", action="store_true", default=False)
    parser_reset.add_argument("--rescore", metavar="EVALUATION-ID", type=int, nargs='*', help="One or more evaluation IDs to rescore")
    parser_reset.set_defaults(func=command_reset)
    parser_validate = subparsers.add_parser('validate', help="Validate all RECEIVED submissions to an evaluation")
    parser_validate.add_argument("evaluation", metavar="EVALUATION-ID", nargs='?', default=None)
    parser_validate.add_argument("--all", action="store_true", default=False)
    parser_validate.add_argument("--canCancel", action="store_true", default=False)
    parser_validate.set_defaults(func=command_validate)
    parser_score = subparsers.add_parser('score', help="Score all VALIDATED submissions to an evaluation")
    parser_score.add_argument("evaluation", metavar="EVALUATION-ID", nargs='?', default=None)
    parser_score.add_argument("--all", action="store_true", default=False)
    parser_score.add_argument("--canCancel", action="store_true", default=False)
    parser_score.set_defaults(func=command_score)
    parser_rank = subparsers.add_parser('rank', help="Rank all SCORED submissions to an evaluation")
    parser_rank.add_argument("evaluation", metavar="EVALUATION-ID", default=None)
    parser_rank.set_defaults(func=command_rank)
    parser_archive = subparsers.add_parser('archive', help="Archive submissions to a challenge")
    parser_archive.add_argument("evaluation", metavar="EVALUATION-ID", default=None)
    parser_archive.add_argument("archiveType",metavar="TYPE", choices=["submission","writeup"])
    parser_archive.add_argument("destination", metavar="FOLDER-ID", default=None)
    parser_archive.add_argument("-q", "--query", default=None)
    parser_archive.add_argument("-n", "--name", default=None)
    parser_archive.set_defaults(func=command_archive)
    parser_leaderboard = subparsers.add_parser('leaderboard', help="Print the leaderboard for an evaluation")
    parser_leaderboard.add_argument("evaluation", metavar="EVALUATION-ID", default=None)
    parser_leaderboard.add_argument("--out", default=None)
    parser_leaderboard.set_defaults(func=command_leaderboard)
    args = parser.parse_args()
    print "\n" * 2, "=" * 75
    print datetime.utcnow().isoformat()
    ## Acquire lock, don't run two scoring scripts at once
    try:
        update_lock = lock.acquire_lock_or_fail('challenge', max_age=timedelta(hours=4))
    except lock.LockedException:
        print u"Is the scoring script already running? Can't acquire lock."
        # can't acquire lock, so return error code 75 which is a
        # temporary error according to /usr/include/sysexits.h
        return 75
    try:
        syn = synapseclient.Synapse(debug=args.debug)
        # Fall back to environment variables for credentials.
        if not args.user:
            args.user = os.environ.get('SYNAPSE_USER', None)
        if not args.password:
            args.password = os.environ.get('SYNAPSE_PASSWORD', None)
        syn.login(email=args.user, password=args.password)
        ## initialize messages
        messages.syn = syn
        messages.dry_run = args.dry_run
        messages.send_messages = args.send_messages
        messages.send_notifications = args.notifications
        messages.acknowledge_receipt = args.acknowledge_receipt
        # Dispatch to the handler selected via set_defaults(func=...).
        args.func(args)
    except Exception as ex1:
        # Report the failure to stderr and notify the admins, but do not
        # re-raise: the lock must still be released below.
        sys.stderr.write('Error in scoring script:\n')
        st = StringIO()
        traceback.print_exc(file=st)
        sys.stderr.write(st.getvalue())
        sys.stderr.write('\n')
        if conf.ADMIN_USER_IDS:
            messages.error_notification(userIds=conf.ADMIN_USER_IDS, message=st.getvalue(), queue_name=conf.CHALLENGE_NAME)
    finally:
        update_lock.release()
    print "\ndone: ", datetime.utcnow().isoformat()
    print "=" * 75, "\n" * 2
# Script entry point.
if __name__ == '__main__':
    main()
|
|
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
iPOPO component factories repository
:author: Thomas Calmant
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import ast
import logging
import threading
import cohorte.repositories
from cohorte.repositories.beans import Factory
import cohorte.version
from pelix.ipopo.decorators import ComponentFactory, Provides, Invalidate, \
Property, Requires, Validate
from pelix.utilities import is_string
# Pelix
# Repository beans
# ------------------------------------------------------------------------------
# Bundle version
__version__ = cohorte.version.__version__
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
class ComponentFactoryVisitor(ast.NodeVisitor):
    """
    AST visitor that collects the names declared by @ComponentFactory
    decorators on module-level classes.

    ``factories`` holds the collected factory names; ``values`` maps
    module-level variable names to their string values, so a factory name
    given as a constant reference can be resolved.
    """
    # pylint: disable=invalid-name
    def __init__(self):
        """
        Sets up the visitor
        """
        ast.NodeVisitor.__init__(self)
        self.factories = set()
        self.values = {}

    def generic_visit(self, node):
        """
        Custom default visit method that avoids to visit further that the
        module level.
        """
        if type(node) is ast.Module:
            ast.NodeVisitor.generic_visit(self, node)

    def visit_ClassDef(self, node):
        """
        Found a class definition: extract the factory name from any
        @ComponentFactory decorator (positional arg, ``name=`` keyword,
        a module-level constant, or the default "<Class>Factory").
        """
        for decorator in node.decorator_list:
            try:
                if decorator.func.id != "ComponentFactory":
                    # Not a ComponentFactory decorator
                    continue
            except AttributeError:
                # Not our kind of decorator
                pass
            else:
                name = None
                if decorator.args:
                    # Name: First argument
                    argument = decorator.args[0]
                else:
                    argument = None
                    if hasattr(decorator, 'kwargs'):
                        # Before Python 3.5
                        if decorator.kwargs:
                            argument = decorator.kwargs.get('name')
                    elif hasattr(decorator, 'keywords'):
                        # Python 3.5: kwargs dictionary replaced by a list
                        # of keywords
                        for keyword in decorator.keywords:
                            if keyword.arg == 'name':
                                argument = keyword.value
                    if not argument:
                        # Default name
                        name = "{0}Factory".format(node.name)
                if name is None:
                    if hasattr(argument, 'id'):
                        # Constant: resolve through previously seen assignments
                        try:
                            name = self.values[argument.id]
                        except KeyError:
                            _logger.debug("Factory name '%s' is unknown (%s)",
                                          argument.id, node.name)
                    else:
                        # Literal
                        try:
                            name = ast.literal_eval(argument)
                        except (ValueError, SyntaxError) as ex:
                            _logger.debug(
                                "Invalid factory name for class %s: %s",
                                node.name, ex)
                if name is not None:
                    # Store the factory name
                    self.factories.add(name)

    def visit_Assign(self, node):
        """
        Found an assignment: remember module-level string constants so that
        factory names given by reference can be resolved later.
        """
        field = getattr(node.targets[0], 'id', None)
        if field:
            try:
                value = ast.literal_eval(node.value)
                if is_string(value):
                    self.values[field] = value
            except (ValueError, SyntaxError):
                # Ignore errors
                pass
def _extract_module_factories(filename):
    """
    Extracts the iPOPO factory names declared in the given Python file.

    :param filename: Path to the file to parse
    :return: The set of factory names found in the module
    :raise ValueError: Unreadable or unparsable file
    """
    visitor = ComponentFactoryVisitor()
    try:
        with open(filename,encoding="utf8") as filep:
            source = filep.read()
    except (OSError, IOError,TypeError) as ex:
        # Python 2's open() has no "encoding" argument (TypeError):
        # fall back to io.open, which accepts it on both versions.
        try:
            import io
            with io.open(filename,encoding="utf8") as filep:
                source = filep.read()
        except (OSError, IOError) as ex2:
            _logger.exception(ex2)
            raise ValueError("Error reading {0}: {1}".format(filename, ex2))
    try:
        module = ast.parse(source, filename, 'exec')
    except (ValueError, SyntaxError) as ex:
        raise ValueError("Error parsing {0}: {1}".format(filename, ex))
    try:
        visitor.visit(module)
    except Exception as ex:
        raise ValueError("Error visiting {0}: {1}".format(filename, ex))
    return visitor.factories
# ------------------------------------------------------------------------------
@ComponentFactory("cohorte-repository-factories-ipopo-factory")
@Provides(cohorte.repositories.SERVICE_REPOSITORY_FACTORIES,
          controller="_controller")
@Requires('_repositories', cohorte.repositories.SERVICE_REPOSITORY_ARTIFACTS,
          True, False,
          "({0}=python)".format(cohorte.repositories.PROP_REPOSITORY_LANGUAGE))
@Property('_model', cohorte.repositories.PROP_FACTORY_MODEL, "ipopo")
@Property('_language', cohorte.repositories.PROP_REPOSITORY_LANGUAGE, "python")
class IPopoRepository(object):
    """
    Represents a repository of iPOPO component factories, indexed both by
    factory name and by the artifact providing them.
    """
    def __init__(self):
        """
        Sets up the repository
        """
        # Properties
        self._model = 'ipopo'
        self._language = 'python'
        # Service controller
        self._controller = False
        # Injected service
        self._repositories = []
        # Name -> [Factories]
        self._factories = {}
        # Artifact -> [Factories]
        self._artifacts = {}
        # Some locking
        self.__lock = threading.RLock()

    def __contains__(self, item):
        """
        Tests if the given item is in the repository

        :param item: Item to be tested (a Factory bean or a factory name)
        :return: True if the item is in the repository
        """
        if isinstance(item, Factory):
            # Test artifact model
            if item.model != self._model:
                return False
            # Test if the name is in the factories
            return item.name in self._factories
        elif item in self._factories:
            # Item matches a factory name
            return True
        # No match
        return False

    def __len__(self):
        """
        Length of a repository <=> number of individual factories
        """
        return sum((len(factories) for factories in self._factories.values()))

    def add_artifact(self, artifact):
        """
        Adds the factories provided by the given artifact

        :param artifact: A Python Module artifact
        :raise ValueError: Unreadable file
        """
        with self.__lock:
            # Extract factories
            names = _extract_module_factories(artifact.file)
            artifact_list = self._artifacts.setdefault(artifact, [])
            for name in names:
                # Make the bean
                factory = Factory(name, self._language, self._model, artifact)
                # Factory
                factory_list = self._factories.setdefault(name, [])
                if factory not in factory_list:
                    factory_list.append(factory)
                # Artifact
                if factory not in artifact_list:
                    artifact_list.append(factory)

    def clear(self):
        """
        Clears the repository content
        """
        with self.__lock:
            self._artifacts.clear()
            self._factories.clear()

    def find_factories(self, factories):
        """
        Returns the list of artifacts that provides the given factories

        :param factories: A list of iPOPO factory names
        :return: A tuple ({Name -> [Artifacts]}, [Not found factories])
        """
        with self.__lock:
            factories_set = set(factories)
            resolution = {}
            unresolved = set()
            if not factories:
                # Nothing to do...
                return resolution, factories_set
            for name in factories_set:
                try:
                    # Get the list of factories for this name
                    factories = self._factories[name]
                    providers = resolution.setdefault(name, [])
                    providers.extend(factory.artifact for factory in factories)
                except KeyError:
                    # Factory name not found
                    unresolved.add(name)
            # Sort the artifacts
            for artifacts in resolution.values():
                artifacts.sort(reverse=True)
            return resolution, unresolved

    def find_factory(self, factory, artifact_name=None, artifact_version=None):
        """
        Find the artifacts that provides the given factory, filtered by name
        and version.

        :return: The list of artifacts providing the factory, sorted by name
                 and version
        :raise KeyError: Unknown factory, or no artifact matches the filter
        """
        with self.__lock:
            # Copy the list of artifacts for this factory
            artifacts = [factory.artifact
                         for factory in self._factories[factory]]
            if artifact_name is not None:
                # Artifact must be selected
                # Prepare the version bean
                # NOTE(review): relies on the cohorte.repositories.beans
                # submodule being importable as an attribute — confirm.
                version = cohorte.repositories.beans.Version(artifact_version)
                # Filter results
                artifacts = [artifact for artifact in artifacts
                             if artifact.name == artifact_name and
                             version.matches(artifact.version)]
                if not artifacts:
                    # No match found
                    raise KeyError("No matching artifact for {0} -> {1} {2}"
                                   .format(factory, artifact_name, version))
            # Sort results
            artifacts.sort(reverse=True)
            return artifacts

    def get_language(self):
        """
        Retrieves the language of the artifacts stored in this repository
        """
        return self._language

    def get_model(self):
        """
        Retrieves the component model that can handle the factories of this
        repository
        """
        return self._model

    def load_repositories(self):
        """
        Loads the factories according to the repositories
        """
        with self.__lock:
            if not self._repositories:
                # No repository
                return
            # Walk through artifacts
            for repository in self._repositories:
                for artifact in repository.walk():
                    try:
                        self.add_artifact(artifact)
                    except ValueError as ex:
                        # Log the exception instead of stopping here
                        _logger.warning("Error reading artifact: %s",
                                        ex, exc_info=True)

    def __initial_loading(self):
        """
        Initial repository loading; flips the service controller on when done
        so the service is only published once populated.
        """
        self.load_repositories()
        self._controller = True

    @Validate
    def validate(self, context):
        """
        Component validated
        """
        self._controller = False
        # Load repositories in another thread
        threading.Thread(target=self.__initial_loading,
                         name="iPOPO-repository-loader").start()

    @Invalidate
    def invalidate(self, context):
        """
        Component invalidated
        """
        self.clear()
|
|
import functools
import numpy as np
import pytest
import tensorflow as tf
from tfsnippet.utils import get_static_shape, ensure_variables_initialized
from tfsnippet.variational import *
from tfsnippet.variational.estimators import (_vimco_replace_diag,
_vimco_control_variate)
def prepare_test_payload(is_reparameterized):
    """Build the (x, y, z, f, log_f, log_q) tensors shared by the estimator tests."""
    np.random.seed(1234)
    # fixed random "input" tensor and the parameter the tests differentiate on
    x = tf.constant(np.random.normal(size=[7, 13]), dtype=tf.float32)
    y = tf.constant(np.random.normal(size=[13]), dtype=tf.float32)
    # the "sample": block the gradient through y when not reparameterized
    y_for_sample = y if is_reparameterized else tf.stop_gradient(y)
    z = y_for_sample * x
    log_f = y * z
    f = tf.exp(log_f)
    log_q = (x ** 2 - 1) * (y ** 3)
    return x, y, z, f, log_f, log_q
class SGVBEstimatorTestCase(tf.test.TestCase):
    """Tests for sgvb_estimator with the reparameterized payload."""

    def test_sgvb(self):
        assert_allclose = functools.partial(
            np.testing.assert_allclose, rtol=1e-5, atol=1e-6)
        with self.test_session() as sess:
            x, y, z, f, log_f, log_q = \
                prepare_test_payload(is_reparameterized=True)
            # No reduction: gradient must match the analytic d/dy of f.
            cost = sgvb_estimator(f)
            cost_shape = cost.get_shape().as_list()
            assert_allclose(*sess.run([
                tf.gradients([cost], [y])[0],
                tf.reduce_sum(2 * x * y * f, axis=0)
            ]))
            # Averaged over the sample axis (size 7): gradient scaled by 1/7.
            cost_r = sgvb_estimator(f, axis=0)
            self.assertListEqual(
                cost_shape[1:], cost_r.get_shape().as_list())
            assert_allclose(*sess.run([
                tf.gradients([cost_r], [y])[0],
                tf.reduce_sum(2 * x * y * f, axis=0) / 7
            ]))
            # Same, but keeping the reduced axis as size 1.
            cost_rk = sgvb_estimator(f, axis=0, keepdims=True)
            self.assertListEqual(
                [1] + cost_shape[1:], cost_rk.get_shape().as_list())
            assert_allclose(*sess.run([
                tf.gradients([cost_rk], [y])[0],
                tf.reduce_sum(2 * x * y * f, axis=0) / 7
            ]))
class IWAEEstimatorTestCase(tf.test.TestCase):
    """Tests for iwae_estimator with the reparameterized payload."""

    def test_error(self):
        with pytest.raises(ValueError,
                           match='iwae estimator requires multi-samples of '
                                 'latent variables'):
            x, y, z, f, log_f, log_q = \
                prepare_test_payload(is_reparameterized=True)
            _ = iwae_estimator(log_f, axis=None)

    def test_iwae(self):
        assert_allclose = functools.partial(
            np.testing.assert_allclose, rtol=1e-5, atol=1e-6)
        with self.test_session() as sess:
            x, y, z, f, log_f, log_q = \
                prepare_test_payload(is_reparameterized=True)
            # Normalized importance weights along the sample axis.
            wk_hat = f / tf.reduce_sum(f, axis=0, keepdims=True)
            cost = iwae_estimator(log_f, axis=0)
            cost_shape = cost.get_shape().as_list()
            assert_allclose(*sess.run([
                tf.gradients([cost], [y])[0],
                tf.reduce_sum(wk_hat * (2 * x * y), axis=0)
            ]))
            cost_k = iwae_estimator(log_f, axis=0, keepdims=True)
            self.assertListEqual(
                [1] + cost_shape, cost_k.get_shape().as_list())
            # Fix: differentiate `cost_k` here (the original re-ran the
            # identical check on `cost`, a copy-paste slip — compare the
            # SGVB test, which differentiates its keepdims variant).
            assert_allclose(*sess.run([
                tf.gradients([cost_k], [y])[0],
                tf.reduce_sum(wk_hat * (2 * x * y), axis=0)
            ]))
class NVILEstimatorTestCase(tf.test.TestCase):
    """Tests for nvil_estimator (non-reparameterized payload)."""

    def test_error(self):
        x, y, z, f, log_f, log_q = \
            prepare_test_payload(is_reparameterized=False)
        with pytest.raises(ValueError,
                           match='`baseline` is not specified, thus '
                                 '`center_by_moving_average` must be False'):
            _ = nvil_estimator(log_f, log_q, center_by_moving_average=False)
        with pytest.raises(ValueError,
                           match='The shape of `values` after `batch_axis` '
                                 'having been reduced must be static'):
            _ = nvil_estimator(
                tf.placeholder(dtype=tf.float32, shape=[None, None]),
                log_q,
                batch_axis=-1
            )

    def test_nvil(self):
        assert_allclose = functools.partial(
            np.testing.assert_allclose, rtol=1e-5, atol=1e-6)
        with self.test_session() as sess:
            x, y, z, f, log_f, log_q = \
                prepare_test_payload(is_reparameterized=False)
            baseline = 3.14 * tf.cos(y)
            alt_f = tf.exp(2 * y * z)
            # baseline is None, center by moving average, no sampling
            cost, baseline_cost = nvil_estimator(
                values=f,
                latent_log_joint=log_q
            )
            self.assertIsNone(baseline_cost)
            var_count = len(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))
            moving_mean = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[0]
            self.assertEqual(moving_mean.name, 'nvil_estimator/moving_mean:0')
            self.assertEqual(get_static_shape(moving_mean), (1, 1))
            ensure_variables_initialized()
            sess.run(tf.assign(moving_mean, [[6.]]))
            cost_shape = cost.get_shape().as_list()
            # Expected moving mean after one decay step (decay 0.8 of 6.0).
            moving_mean = 4.8 + .2 * tf.reduce_mean(f)
            assert_allclose(*sess.run([
                tf.gradients([cost], [y])[0],
                tf.reduce_sum(
                    z * f +
                    (f - moving_mean) * (3 * (x ** 2 - 1) * (y ** 2)),
                    axis=0)
            ]))
            # baseline is given, no center by moving average
            cost, baseline_cost = nvil_estimator(
                values=f,
                latent_log_joint=log_q,
                baseline=baseline,
                center_by_moving_average=False,
            )
            # No new variables should have been created for this mode.
            self.assertEqual(
                len(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)),
                var_count
            )
            self.assertListEqual(cost.get_shape().as_list(), cost_shape)
            assert_allclose(*sess.run([
                tf.gradients([cost], [y])[0],
                tf.reduce_sum(
                    z * f +
                    (f - 3.14 * tf.cos(y)) * (3 * (x ** 2 - 1) * (y ** 2)),
                    axis=0)
            ]))
            assert_allclose(*sess.run([
                tf.gradients([baseline_cost], [y])[0],
                # -2 * (f(x,z) - C(x)) * C'(x)
                tf.reduce_sum(
                    -2 * (f - baseline) * (-3.14 * tf.sin(y)),
                    axis=0
                )
            ]))
            # baseline is given, no center by moving average, axis = [0]
            cost, baseline_cost = nvil_estimator(
                values=f,
                latent_log_joint=log_q,
                baseline=baseline,
                center_by_moving_average=False,
                axis=[0]
            )
            self.assertEqual(
                len(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)),
                var_count
            )
            self.assertListEqual(cost.get_shape().as_list(), cost_shape[1:])
            # Averaging over the sample axis (size 7) scales gradients by 1/7.
            assert_allclose(*sess.run([
                tf.gradients([cost], [y])[0],
                tf.reduce_sum(
                    z * f +
                    (f - 3.14 * tf.cos(y)) * (3 * (x ** 2 - 1) * (y ** 2)),
                    axis=0) / 7
            ]))
            assert_allclose(*sess.run([
                tf.gradients([baseline_cost], [y])[0],
                # -2 * (f(x,z) - C(x)) * C'(x)
                tf.reduce_sum(
                    -2 * (f - baseline) * (-3.14 * tf.sin(y)),
                    axis=0) / 7
            ]))
def log_mean_exp(x, axis, keepdims=False):
    """Numerically stable log(mean(exp(x))) along *axis* (NumPy reference)."""
    peak = np.max(x, axis=axis, keepdims=True)
    shifted = np.log(np.mean(np.exp(x - peak), axis=axis, keepdims=keepdims))
    if not keepdims:
        peak = np.squeeze(peak, axis=axis)
    return peak + shifted
def slice_at(arr, axis, start, stop=None, step=None):
    """Return *arr* indexed with slice(start, stop, step) along *axis* only."""
    index = [slice(None)] * len(arr.shape)
    index[axis] = slice(start, stop, step)
    return arr[tuple(index)]
def vimco_control_variate(log_f, axis):
    """NumPy reference for the per-sample VIMCO control variate along *axis*."""
    num_samples = log_f.shape[axis]
    # Leave-one-out mean of log_f along the sample axis.
    loo_mean = (np.sum(log_f, axis=axis, keepdims=True) - log_f) / (num_samples - 1)

    def variate_at(k):
        # Substitute the k-th sample by the mean of all the others,
        # then take the log-mean-exp over the sample axis.
        pieces = [
            slice_at(log_f, axis, 0, k),
            slice_at(loo_mean, axis, k, k + 1),
            slice_at(log_f, axis, k + 1),
        ]
        return log_mean_exp(np.concatenate(pieces, axis=axis),
                            axis=axis, keepdims=True)

    return np.concatenate([variate_at(k) for k in range(num_samples)],
                          axis=axis)
class VIMCOEstimatorTestCase(tf.test.TestCase):
    """Tests for vimco_estimator and its private helpers."""

    def test_vimco_replace_diag(self):
        with self.test_session() as sess:
            # 2-d
            x = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
            y = tf.constant([[10], [11], [12]])
            z = sess.run(_vimco_replace_diag(x, y, -2))
            np.testing.assert_equal(z, [[10, 2, 3], [4, 11, 6], [7, 8, 12]])
            # 4-d
            x = np.arange(4 * 3 * 3 * 5, dtype=np.int32).reshape([4, 3, 3, 5])
            y = -np.arange(4 * 3 * 1 * 5, dtype=np.int32).reshape([4, 3, 1, 5])
            x_ph = tf.placeholder(tf.int32, [None] * 4)
            y_ph = tf.placeholder(tf.int32, [None, None, 1, None])
            diag_mask = np.eye(3, 3).reshape([1, 3, 3, 1])
            z = sess.run(_vimco_replace_diag(
                tf.convert_to_tensor(x_ph), tf.convert_to_tensor(y_ph), -3),
                feed_dict={x_ph: x, y_ph: y}
            )
            np.testing.assert_equal(z, x * (1 - diag_mask) + y * diag_mask)

    def test_vimco_control_variate(self):
        with self.test_session() as sess:
            np.random.seed(1234)
            log_f = np.random.randn(4, 5, 6, 7).astype(np.float64)
            log_f_ph = tf.placeholder(tf.float64, [None] * 4)
            rank = len(log_f.shape)
            # Check every (negative) axis, with both static and dynamic shapes,
            # against the NumPy reference implementation above.
            for axis in range(rank):
                out = sess.run(_vimco_control_variate(log_f, axis=axis - rank))
                out2 = sess.run(
                    _vimco_control_variate(log_f_ph, axis=axis - rank),
                    feed_dict={log_f_ph: log_f}
                )
                ans = vimco_control_variate(log_f, axis=axis - rank)
                np.testing.assert_allclose(out, ans)
                np.testing.assert_allclose(out2, ans)

    def test_error(self):
        x, y, z, f, log_f, log_q = \
            prepare_test_payload(is_reparameterized=False)
        with pytest.raises(ValueError,
                           match='vimco_estimator requires multi-samples of '
                                 'latent variables'):
            _ = vimco_estimator(log_f, log_q, axis=None)
        with pytest.raises(TypeError,
                           match=r'vimco_estimator only supports integer '
                                 r'`axis`: got \[0, 1\]'):
            _ = vimco_estimator(log_f, log_q, axis=[0, 1])
        with pytest.raises(ValueError,
                           match='`axis` out of range: rank 2 vs axis 2'):
            _ = vimco_estimator(log_f, log_q, axis=2)
        with pytest.raises(ValueError,
                           match='`axis` out of range: rank 2 vs axis -3'):
            _ = vimco_estimator(log_f, log_q, axis=-3)
        with pytest.raises(ValueError,
                           match='vimco_estimator only supports `log_values` '
                                 'with deterministic ndims'):
            _ = vimco_estimator(
                tf.placeholder(tf.float32, None),
                tf.zeros([1, 2]),
                axis=0
            )
        with pytest.raises(ValueError,
                           match='VIMCO requires sample size >= 2: '
                                 'sample axis is 0'):
            _ = vimco_estimator(
                tf.placeholder(tf.float32, [1, None]),
                tf.zeros([1, 2]),
                axis=0
            )
        # The size check can only fire at run time for dynamic shapes.
        with pytest.raises(Exception,
                           match='VIMCO requires sample size >= 2: '
                                 'sample axis is 1'):
            ph = tf.placeholder(tf.float32, [3, None])
            with tf.Session() as sess:
                sess.run(vimco_estimator(ph, tf.zeros([3, 1]), axis=1),
                         feed_dict={ph: np.zeros([3, 1])})

    def test_vimco(self):
        assert_allclose = functools.partial(
            np.testing.assert_allclose, rtol=1e-5, atol=1e-6)
        with self.test_session() as sess:
            x, y, z, f, log_f, log_q = \
                prepare_test_payload(is_reparameterized=False)
            # compute the gradient
            x_out, y_out, z_out, f_out, log_f_out, log_q_out = \
                sess.run([x, y, z, f, log_f, log_q])
            log_q_grad_out = (x_out ** 2 - 1) * 3 * (y_out ** 2)
            log_f_out = y_out * z_out
            t = np.sum(
                log_q_grad_out * (
                    log_mean_exp(log_f_out, axis=0, keepdims=True) -
                    vimco_control_variate(log_f_out, axis=0)
                ),
                axis=0
            )
            w_k_hat = f_out / np.sum(f_out, axis=0, keepdims=True)
            log_f_grad_out = z_out
            t += np.sum(
                w_k_hat * log_f_grad_out,
                axis=0
            )
            cost = vimco_estimator(log_f, log_q, axis=0)
            cost_shape = cost.get_shape().as_list()
            assert_allclose(sess.run(tf.gradients([cost], [y])[0]), t)
            cost_k = vimco_estimator(log_f, log_q, axis=0, keepdims=True)
            self.assertListEqual(
                [1] + cost_shape, cost_k.get_shape().as_list())
            # Fix: differentiate `cost_k` here (the original re-ran the
            # identical check on `cost`, a copy-paste slip).
            assert_allclose(sess.run(tf.gradients([cost_k], [y])[0]), t)
|
|
import re
from datetime import datetime
from django import forms
from django.conf import settings
from django.contrib.auth import authenticate, forms as auth_forms
from django.contrib.auth.forms import (PasswordResetForm as
DjangoPasswordResetForm)
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site
from django.core.cache import cache
from django.utils.http import int_to_base36
from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
from kitsune.sumo import email_utils
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.widgets import ImageWidget
from kitsune.upload.forms import clean_image_extension
from kitsune.upload.utils import check_file_size, FileTooLargeError
from kitsune.users.models import Profile
from kitsune.users.widgets import FacebookURLWidget
from kitsune.users.widgets import MonthYearWidget
# Lazily-translated validation messages and limits shared by the forms below.
USERNAME_INVALID = _lazy(u'Username may contain only English letters, '
                         'numbers and ./-/_ characters.')
USERNAME_REQUIRED = _lazy(u'Username is required.')
USERNAME_SHORT = _lazy(u'Username is too short (%(show_value)s characters). '
                       'It must be at least %(limit_value)s characters.')
USERNAME_LONG = _lazy(u'Username is too long (%(show_value)s characters). '
                      'It must be %(limit_value)s characters or less.')
EMAIL_REQUIRED = _lazy(u'Email address is required.')
EMAIL_SHORT = _lazy(u'Email address is too short (%(show_value)s characters). '
                    'It must be at least %(limit_value)s characters.')
EMAIL_LONG = _lazy(u'Email address is too long (%(show_value)s characters). '
                   'It must be %(limit_value)s characters or less.')
PASSWD_REQUIRED = _lazy(u'Password is required.')
PASSWD2_REQUIRED = _lazy(u'Please enter your password twice.')
PASSWD_MIN_LENGTH = 8
PASSWD_MIN_LENGTH_MSG = _lazy('Password must be 8 or more characters.')
# Enforces at least one digit and at least one alpha character.
password_re = re.compile(r'(?=.*\d)(?=.*[a-zA-Z])')
class SettingsForm(forms.Form):
    """Form for a user's watch/notification preferences."""
    forums_watch_new_thread = forms.BooleanField(
        required=False, initial=True,
        label=_lazy(u'Watch forum threads I start'))
    forums_watch_after_reply = forms.BooleanField(
        required=False, initial=True,
        label=_lazy(u'Watch forum threads I comment in'))
    kbforums_watch_new_thread = forms.BooleanField(
        required=False, initial=True,
        label=_lazy(u'Watch KB discussion threads I start'))
    kbforums_watch_after_reply = forms.BooleanField(
        required=False, initial=True,
        label=_lazy(u'Watch KB discussion threads I comment in'))
    questions_watch_after_reply = forms.BooleanField(
        required=False, initial=True,
        label=_lazy(u'Watch Question threads I comment in'))
    email_private_messages = forms.BooleanField(
        required=False, initial=True,
        label=_lazy(u'Send emails for private messages'))

    def save_for_user(self, user):
        """Persist each cleaned field into the user's settings, updating in
        place when the setting row exists and creating it otherwise."""
        for field in self.fields.keys():
            value = str(self.cleaned_data[field])
            setting = user.settings.filter(name=field)
            update_count = setting.update(value=value)
            if update_count == 0:
                # This user didn't have this setting so create it.
                user.settings.create(name=field, value=value)
class RegisterForm(forms.ModelForm):
    """A user registration form that requires unique email addresses.

    The default Django user creation form does not require an email address,
    let alone that it be unique. This form does, and sets a minimum length
    for usernames.
    """
    username = forms.RegexField(
        label=_lazy(u'Username:'), max_length=30, min_length=4,
        regex=r'^[\w.-]+$',
        help_text=_lazy(u'Required. 30 characters or fewer. Letters, digits '
                        u'and ./- only.'),
        error_messages={'invalid': USERNAME_INVALID,
                        'required': USERNAME_REQUIRED,
                        'min_length': USERNAME_SHORT,
                        'max_length': USERNAME_LONG})
    email = forms.EmailField(
        label=_lazy(u'Email address:'),
        error_messages={'required': EMAIL_REQUIRED,
                        'min_length': EMAIL_SHORT,
                        'max_length': EMAIL_LONG})
    password = forms.CharField(
        label=_lazy(u'Password:'),
        min_length=PASSWD_MIN_LENGTH,
        widget=forms.PasswordInput(render_value=False),
        error_messages={'required': PASSWD_REQUIRED,
                        'min_length': PASSWD_MIN_LENGTH_MSG})
    interested = forms.BooleanField(required=False)

    class Meta(object):
        model = User
        fields = ('email', 'username', 'password',)

    def clean(self):
        """Run the module-level password/username checks on top of the
        regular model-form validation."""
        super(RegisterForm, self).clean()
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        _check_password(password)
        _check_username(username)
        return self.cleaned_data

    def clean_email(self):
        """Reject email addresses already attached to an existing user."""
        email = self.cleaned_data['email']
        if User.objects.filter(email=email).exists():
            raise forms.ValidationError(_('A user with that email address '
                                          'already exists.'))
        return email

    def __init__(self, request=None, *args, **kwargs):
        # NOTE(review): `request` is forwarded positionally to ModelForm
        # (i.e. as its first argument) — confirm callers rely on this.
        super(RegisterForm, self).__init__(request, auto_id='id_for_%s',
                                           *args, **kwargs)
class AuthenticationForm(auth_forms.AuthenticationForm):
    """Overrides the default django form.
    * Doesn't prefill password on validation error.
    * Allows logging in inactive users (initialize with `only_active=False`).
    """
    username = forms.CharField(
        label=_lazy(u'Username:'),
        error_messages={'required': USERNAME_REQUIRED})
    # render_value=False keeps the password out of the re-rendered form.
    password = forms.CharField(
        label=_lazy(u'Password:'),
        widget=forms.PasswordInput(render_value=False),
        error_messages={'required': PASSWD_REQUIRED})

    def __init__(self, request=None, only_active=True, *args, **kwargs):
        # When only_active is False, inactive accounts are allowed through
        # the is_active check in clean().
        self.only_active = only_active
        super(AuthenticationForm, self).__init__(request, *args, **kwargs)

    def clean(self):
        """Authenticate the credentials and check account/cookie state."""
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        if username and password:
            self.user_cache = authenticate(username=username,
                                           password=password)
            if self.user_cache is None:
                raise forms.ValidationError(
                    _('Please enter a correct username and password. Note '
                      'that both fields are case-sensitive.'))
            elif self.only_active and not self.user_cache.is_active:
                raise forms.ValidationError(_('This account is inactive.'))
        if self.request:
            # Presumably the login view set a test cookie earlier; its
            # absence means the browser has cookies disabled -- verify
            # against the view code.
            if not self.request.session.test_cookie_worked():
                raise forms.ValidationError(
                    _("Your Web browser doesn't appear to have cookies "
                      "enabled. Cookies are required for logging in."))
        return self.cleaned_data
class ProfileForm(forms.ModelForm):
    """The form for editing the user's profile."""
    # Month/year picker spanning 1998 through the current year.
    involved_from = forms.DateField(
        required=False,
        label=_lazy(u'Involved with Mozilla from'),
        widget=MonthYearWidget(years=range(1998, datetime.today().year + 1),
                               required=False))

    class Meta(object):
        model = Profile
        fields = ('name', 'public_email', 'bio', 'website', 'twitter',
                  'facebook', 'mozillians', 'irc_handle', 'timezone', 'country', 'city',
                  'locale', 'involved_from')
        widgets = {
            'facebook': FacebookURLWidget,
        }

    def clean_facebook(self):
        """Require the facebook value to match the widget's URL pattern."""
        facebook = self.cleaned_data['facebook']
        if facebook and not re.match(FacebookURLWidget.pattern, facebook):
            raise forms.ValidationError(_(u'Please enter a facebook.com URL.'))
        return facebook
class AvatarForm(forms.ModelForm):
    """The form for editing the user's avatar."""
    avatar = forms.ImageField(required=True, widget=ImageWidget)

    def __init__(self, *args, **kwargs):
        super(AvatarForm, self).__init__(*args, **kwargs)
        self.fields['avatar'].help_text = (
            _('Your avatar will be resized to {size}x{size}').format(
                size=settings.AVATAR_SIZE))

    class Meta(object):
        model = Profile
        fields = ('avatar',)

    def clean_avatar(self):
        """Validate the uploaded avatar's file size and extension.

        Returns the avatar unchanged (possibly None) when nothing usable was
        uploaded; the field's own ``required`` validation reports that case.
        """
        # Bug fix: the old guard re-indexed self.cleaned_data['avatar'],
        # which raises KeyError when the key is absent entirely.  Use .get()
        # so an absent/falsy avatar is returned as-is.
        avatar = self.cleaned_data.get('avatar')
        if not avatar:
            return avatar
        try:
            check_file_size(avatar, settings.MAX_AVATAR_FILE_SIZE)
        except FileTooLargeError as e:
            raise forms.ValidationError(e.args[0])
        clean_image_extension(avatar)
        return avatar
class EmailConfirmationForm(forms.Form):
    """A simple form that requires an email address."""
    email = forms.EmailField(label=_lazy(u'Email address:'))
class EmailChangeForm(forms.Form):
    """A simple form that requires an email address and validates that it is
    not the current user's email."""
    email = forms.EmailField(label=_lazy(u'Email address:'))

    def __init__(self, user, *args, **kwargs):
        # Keep the user whose address is being changed; clean_email compares
        # the submitted address against it.
        super(EmailChangeForm, self).__init__(*args, **kwargs)
        self.user = user

    def clean_email(self):
        """Reject the user's current address and addresses already in use."""
        email = self.cleaned_data['email']
        if self.user.email == email:
            raise forms.ValidationError(_('This is your current email.'))
        if User.objects.filter(email=email).exists():
            raise forms.ValidationError(_('A user with that email address '
                                          'already exists.'))
        return self.cleaned_data['email']
class SetPasswordForm(auth_forms.SetPasswordForm):
    """Django's set-password form plus the site's password complexity rule."""
    new_password1 = forms.CharField(
        label=_lazy(u'New password:'),
        min_length=PASSWD_MIN_LENGTH,
        widget=forms.PasswordInput(render_value=False),
        error_messages={'required': PASSWD_REQUIRED,
                        'min_length': PASSWD_MIN_LENGTH_MSG})

    def clean(self):
        """Apply the shared password complexity check on top of Django's."""
        super(SetPasswordForm, self).clean()
        _check_password(self.cleaned_data.get('new_password1'))
        return self.cleaned_data
class PasswordChangeForm(auth_forms.PasswordChangeForm):
    """Django's change-password form plus the site's complexity rule."""
    new_password1 = forms.CharField(
        label=_lazy(u'New password:'),
        min_length=PASSWD_MIN_LENGTH,
        widget=forms.PasswordInput(render_value=False),
        error_messages={'required': PASSWD_REQUIRED,
                        'min_length': PASSWD_MIN_LENGTH_MSG})

    def clean(self):
        """Apply the shared password complexity check on top of Django's."""
        super(PasswordChangeForm, self).clean()
        _check_password(self.cleaned_data.get('new_password1'))
        return self.cleaned_data
class ForgotUsernameForm(forms.Form):
    """A simple form to retrieve username.
    Requires an email address."""
    email = forms.EmailField(label=_lazy(u'Email address:'))

    def clean_email(self):
        """
        Validates that an active user exists with the given e-mail address.
        """
        email = self.cleaned_data["email"]
        try:
            # Stash the matched user on the form; save() reads it.
            self.user = User.objects.get(email__iexact=email, is_active=True)
        except User.DoesNotExist:
            raise forms.ValidationError(
                _(u"That e-mail address doesn't have an associated user "
                  u"account. Are you sure you've registered?"))
        return email

    def save(self, text_template='users/email/forgot_username.ltxt',
             html_template='users/email/forgot_username.html', use_https=False,
             request=None):
        """Sends email with username."""
        user = self.user
        current_site = get_current_site(request)
        site_name = current_site.name
        domain = current_site.domain

        @email_utils.safe_translation
        def _send_mail(locale, user, context):
            # Build and send the reminder mail, rendered for `locale`.
            subject = _('Your username on %s') % site_name
            mail = email_utils.make_mail(
                subject=subject,
                text_template=text_template,
                html_template=html_template,
                context_vars=context,
                from_email=settings.TIDINGS_FROM_ADDRESS,
                to_email=user.email)
            email_utils.send_messages([mail])

        c = {
            'email': user.email,
            'domain': domain,
            'login_url': reverse('users.login'),
            'site_name': site_name,
            'username': user.username,
            'protocol': use_https and 'https' or 'http'}
        # The user is not logged in, the user object comes from the
        # supplied email address, and is filled in by `clean_email`. If
        # an invalid email address was given, an exception would have
        # been raised already.
        locale = user.profile.locale or settings.WIKI_DEFAULT_LANGUAGE
        _send_mail(locale, user, c)
class PasswordResetForm(DjangoPasswordResetForm):
    """Password-reset form that sends both HTML and plain-text messages."""

    def save(self, domain_override=None,
             subject_template_name='registration/password_reset_subject.txt',
             text_template=None,
             html_template=None,
             use_https=False, token_generator=default_token_generator,
             from_email=None, request=None):
        """
        Based off of django's but handles html and plain-text emails.
        """
        # Several accounts can share one address (case-insensitively); each
        # active one gets its own reset mail and token.
        users = User.objects.filter(
            email__iexact=self.cleaned_data["email"], is_active=True)
        for user in users:
            if not domain_override:
                current_site = get_current_site(request)
                site_name = current_site.name
                domain = current_site.domain
            else:
                site_name = domain = domain_override
            c = {
                'email': user.email,
                'domain': domain,
                'site_name': site_name,
                'uid': int_to_base36(user.id),
                'user': user,
                'token': token_generator.make_token(user),
                'protocol': use_https and 'https' or 'http',
            }
            subject = email_utils.render_email(subject_template_name, c)
            # Email subject *must not* contain newlines
            subject = ''.join(subject.splitlines())

            @email_utils.safe_translation
            def _make_mail(locale):
                # Render both template flavors for the requested locale.
                mail = email_utils.make_mail(
                    subject=subject,
                    text_template=text_template,
                    html_template=html_template,
                    context_vars=c,
                    from_email=from_email,
                    to_email=user.email)
                return mail

            if request:
                locale = request.LANGUAGE_CODE
            else:
                locale = settings.WIKI_DEFAULT_LANGUAGE
            email_utils.send_messages([_make_mail(locale)])
def _check_password(password):
    """Raise ValidationError unless the password has a digit and a letter."""
    # Oddly, empty password validation happens after this.
    if not password:
        return
    if password_re.search(password):
        return
    raise forms.ValidationError(
        _('At least one number and one English letter are required '
          'in the password.'))
# Cache key under which the username blacklist word list is stored.
USERNAME_CACHE_KEY = 'username-blacklist'
def username_allowed(username):
    """Returns True if the given username is not a blatant bad word.

    The blacklist file is read once and cached for an hour.
    """
    # Fixed: the policy docstring used to sit *after* the guard below, where
    # it was a dead statement rather than a docstring.
    if not username:
        return False
    blacklist = cache.get(USERNAME_CACHE_KEY)
    if blacklist is None:
        # Bug fix: use a with-block so the blacklist file handle is closed
        # (the old code leaked it).
        with open(settings.USERNAME_BLACKLIST, 'r') as f:
            blacklist = [w.strip() for w in f.readlines()]
        cache.set(USERNAME_CACHE_KEY, blacklist, 60 * 60)  # 1 hour
    # Lowercase
    username = username.lower()
    # Add lowercased and non alphanumerics to start.
    # (raw string fixes the previously unescaped "\W" pattern)
    usernames = set([username, re.sub(r"\W", "", username)])
    # Add words split on non alphanumerics.
    for u in re.findall(r'\w+', username):
        usernames.add(u)
    # Do any match the bad words?
    return not usernames.intersection(blacklist)
def _check_username(username):
    """Raise ValidationError when the username fails the blacklist check."""
    if not username:
        return
    if username_allowed(username):
        return
    raise forms.ValidationError(
        _('The user name you entered is inappropriate. Please pick '
          'another and consider that our helpers are other Firefox '
          'users just like you.'))
|
|
#!/usr/bin/python
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import nuodbawsquickstart
import json
import os
import sys
import time
import unicodedata
import urllib2
# Default EC2 instance type used when --instancetype is not supplied.
INSTANCE_TYPE = "m4.xlarge"
# Public download URL template; %s is interpolated with the NuoDB version.
NUODB_DOWNLOAD_URL = "http://download.nuohub.org/nuodb-%s.x86_64.rpm"
def save_config(config, filename):
    """Serialize *config* to *filename* as pretty-printed, key-sorted JSON.

    The second parameter was renamed from ``file`` to stop shadowing the
    Python 2 builtin; every caller in this script passes it positionally.
    """
    with open(filename, 'wt') as f:
        f.write(json.dumps(config, indent=4, sort_keys=True))
def user_prompt(prompt, valid_choices = [], default = None):
if default != None:
prompt = "%s [%s] " % (prompt, str(default))
val = raw_input(prompt).strip()
if len(valid_choices) == 0:
if default == None:
return val
else:
return default
for choice in valid_choices:
if val == str(choice):
return choice
valid_strings = []
#Handle integer inputs
for choice in valid_choices:
valid_strings.append(str(choice))
print "Invalid choice. Your choices are: [" + ",".join(valid_strings) + "]"
return user_prompt(prompt, valid_choices)
def choose_from_list(params = [], suggested = None):
    """Print a numbered menu of *params* and return the chosen 0-based index.

    suggested: optional 0-based index to highlight as "<----- SUGGESTED".
    """
    # returns index of the list you gave me
    i = 0
    options = []
    while i < len(params):
        if suggested != None and suggested == i:
            suggest_prompt = "<----- SUGGESTED"
        else:
            suggest_prompt = ""
        #print "%s) %s %s" % (i+1, params[i], suggest_prompt)
        print '{:2d}) {:25} {}'.format(i+1, params[i], suggest_prompt)
        i += 1
        options.append(i)
    # The menu shows 1-based numbers; convert back to a 0-based index.
    return user_prompt("Choose one:", options) - 1
def choose_multiple_from_list(params = []):
    """Let the user pick several entries; return their indices in *params*.

    Re-shows the menu (minus already-picked entries) until the sentinel
    "DONE CHOOSING" entry is selected or everything has been picked.
    """
    # returns list of indicies from parameters sent
    tally = []
    while True:
        # Offer only the entries that have not been picked yet.
        list_to_send = []
        for idx, param in enumerate(params):
            if idx not in tally:
                list_to_send.append(param)
        if len(list_to_send) == 0:
            return tally
        else:
            list_to_send.append("DONE CHOOSING")
        result = choose_from_list(list_to_send)
        if result == len(list_to_send) - 1:
            # The sentinel entry was picked -- we're done.
            return tally
        else:
            # Map the menu choice back to its index in the original params.
            choice = list_to_send[result]
            for idx, param in enumerate(params):
                if choice == param:
                    tally.append(idx)
def get_instance_type(c):
    """Return the EC2 instance type to use; currently always INSTANCE_TYPE.

    The config dict *c* is accepted for interface symmetry but unused here.
    """
    return INSTANCE_TYPE
def get_regions(c):
    """Return the names of every AWS region visible to these credentials."""
    conn = nuodbawsquickstart.Zone("us-east-1").connect(c["aws_access_key"],
                                                        c["aws_secret"])
    return [region.name for region in conn.get_all_regions()]
def get_zone_info(c, useCustomAmi=False):
    """Interactively collect per-region settings (servers, keypair, AMI,
    subnets, security groups) and return them as a dict keyed by region.

    c: config dict with at least aws_access_key/aws_secret (and optionally
       a prior 'zones' entry).
    useCustomAmi: when True, skip the automatic "HVM GP2" AMI selection.
    """
    # Find our how many regions
    r = {}
    available_zones = get_regions(c)
    zonecount = len(available_zones)
    zone_count_prompt = user_prompt("How many AWS regions? (1-%i)? " % zonecount, range(1,zonecount + 1))
    if zone_count_prompt == str(zonecount):
        # All regions requested -- take every one without prompting.
        for zone in available_zones:
            r[zone] = {}
    else:
        i = 0
        while i < int(zone_count_prompt):
            regionlist = []
            for zone in available_zones:
                if zone not in r:
                    regionlist.append(zone)
            get = int(choose_from_list(sorted(regionlist)))
            r[sorted(regionlist)[get]] = {}
            i += 1
    # amazon has a ton of amis named the same thing. Choose the latest one. Only reliable way I can find is to scrape their wiki. Cache this.
    page_cache = unicodedata.normalize("NFKD", unicode(urllib2.urlopen("http://aws.amazon.com/amazon-linux-ami/").read(), "utf-8"))
    for region in r:
        # Server count
        r[region]["servers"] = user_prompt(region + " --- How many servers? (1-20) ", range(1,20))
        zone_obj = nuodbawsquickstart.Zone(region)
        zone_conn = zone_obj.connect(c["aws_access_key"], c["aws_secret"])
        # Validate SSH Key
        keypairs = zone_conn.get_all_key_pairs()
        keynames = []
        if len(keypairs) == 0:
            print "Cannot find any key pairs in region %s. Please add a keypair to this region and then re-run this script." % region
            sys.exit(2)
        for keypair in keypairs:
            keynames.append(keypair.name)
        print region + " --- Choose a keypair:"
        r[region]['keypair'] = keynames[choose_from_list(keynames)]
        # Choose AMI
        print region + " --- Determining AMIs (Loading...) "
        amis = zone_obj.amis
        ami_dict = {}
        suggested = None
        for ami in amis:
            # Only 64-bit, described, non-Windows images are candidates.
            if ami.architecture == "x86_64" and ami.description != None and len(ami.description) > 0 and "ami-" in ami.id and ami.platform != "windows":
                if ami.owner_alias != None:
                    # Amazon-owned AMIs only count if the scraped page
                    # mentions them (i.e. they are current).
                    if ami.owner_alias.encode('utf-8') == u"amazon" and ami.id in page_cache:
                        ami_dict[" ".join([ami.id.encode('utf-8'), ami.description.encode('utf-8')])] = {"id": ami.id, "location": ami.location}
                    elif ami.owner_alias.encode('utf8') != u"amazon":
                        ami_dict[" ".join([ami.id.encode('utf-8'), ami.description.encode('utf-8')])] = {"id": ami.id, "location": ami.location}
        ami_descriptions = sorted(ami_dict.keys())
        ami_descriptions.append("NONE OF THE ABOVE")
        chosen_ami = None
        if not useCustomAmi:
            # Auto-pick the last "HVM GP2" image and remember its position
            # so the manual menu (if shown) can suggest it.
            for idx, desc in enumerate(ami_descriptions):
                if "HVM GP2" in desc:
                    chosen_ami = ami_dict[desc]['id']
                    suggested = idx
            r[region]["ami"] = chosen_ami
        if chosen_ami == None:
            ami_choice = choose_from_list(ami_descriptions, suggested)
            if ami_choice == len(ami_descriptions) - 1:
                # "NONE OF THE ABOVE": let the user type an AMI id directly.
                print region + " --- Choose the AMI (Loading...) "
                ami_enter = None
                ami_chosen = False
                while not ami_chosen:
                    ami_enter = user_prompt("Enter the AMI you want to use (ami-xxxxxxxx): ")
                    if not zone_obj.does_ami_exist(imageid=ami_enter):
                        print "Invalid AMI"
                    else:
                        ami_chosen = True
                r[region]["ami"] = ami_enter
            else:
                r[region]["ami"] = ami_dict[ami_descriptions[ami_choice]]['id']
        #What subnets to use?
        print region + " --- Finding subnets... "
        # NOTE(review): `'zones' in region` tests the region *string*; it
        # looks like `'zones' in c` was intended -- confirm before fixing.
        if 'zones' in region and region in c['zones'] and "subnets" in c['zones'][region] and len(c['zones'][region]['subnets']) > 0 and "vpcs" in c['zones'][region]:
            r[region]['subnets'] = c['zones'][region]['subnets']
            r[region]['vpcs'] = c['zones'][region]['vpcs']
        else:
            subnets = zone_obj.get_subnets()
            possibilities = {}
            r[region]['subnets'] = None
            for subnet in subnets:
                if subnets[subnet]['state'] == "available":
                    possibilities[subnets[subnet]['id']] = subnets[subnet]
                    if subnets[subnet]['defaultForAz'] == "true":
                        r[region]['subnets'] = [subnets[subnet]['id']]
                        r[region]['vpcs'] = [subnets[subnet]['vpc_id']]
            if r[region]['subnets']== None:
                print "ERROR: Could not automatically determine default subnet in region %s. Please choose one:" % region
                choice = choose_from_list(possibilities.keys())
                id= possibilities.keys()[choice]
                r[region]['subnets'] = [possibilities[id]['id']]
                r[region]['vpcs'] = [possibilities[id]['vpc_id']]
        #What security groups to use?
        r[region]['security_group_ids'] = []
        my_security_group = None
        for group in zone_obj.get_security_groups():
            if group.name == "NuoDB_default_ports" and group.vpc_id in r[region]['vpcs']:
                my_security_group = group.id
        if my_security_group == None:
            # No existing NuoDB group in this VPC -- offer to create one.
            res = user_prompt("I am going to create a security group in region %s. It would open the default NuoDB ports and SSH to all IPs. Is this OK? (y/n)" % region, ["y", "n"], "n")
            if res =="y":
                sg = zone_obj.edit_security_group("NuoDB_default_ports", "These are the default NuoDB ports, open to the world. Autogenerated by nuodb.nuodb_demo_storefront_setup", [{"protocol": "tcp", "from_port": 22, "to_port": 22, "cidr_ip": "0.0.0.0/0"}, {"protocol": "tcp", "from_port": 48004, "to_port": 48020, "cidr_ip": "0.0.0.0/0"}, {"protocol": "tcp", "from_port": 8888, "to_port": 9001, "cidr_ip": "0.0.0.0/0"}, {"protocol": "tcp", "from_port": 8080, "to_port": 8080, "cidr_ip": "0.0.0.0/0"}, {"protocol": "tcp", "from_port": 9001, "to_port": 9001, "cidr_ip": "0.0.0.0/0"}], r[region]['vpcs'][0])
                my_security_group = sg.id
            else:
                print "Cannot continue without a security group. Exiting."
                sys.exit()
        r[region]['security_group_ids'] = [my_security_group]
    return r
#### Create a cluster
def help():
print "%s create" % sys.argv[0]
print "%s terminate" % sys.argv[0]
def validate_download(url):
    """Return True when *url* answers an HTTP GET, False otherwise."""
    try:
        urllib2.urlopen(url)
        return True
    except Exception:
        # Bug fix: narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit are no longer swallowed while probing the server.
        return False
def __main__(cmdargs = None):
    """Entry point: create or terminate a NuoDB cluster per cmdargs.action.

    Answers are persisted to ./config.json so re-runs can offer them as
    defaults.
    """
    config_file = "./config.json"
    if cmdargs.action == "create":
        # Prompt definitions: key -> {default, prompt text}.
        params = {
            "cluster_name": { "default" : "NuoDBQuickstart", "prompt" : "What is the name of your cluster?"},
            "aws_access_key": {"default" : "", "prompt" : "What is your AWS access key?"},
            "aws_secret": {"default" : "", "prompt" : "What is your AWS secret?"},
            "domain_password": {"default": "bird", "prompt": "What is the admin password of your NuoDB domain?"},
            "alert_email" : {"default" : "","prompt" : "What email address would you like health alerts sent to?"},
        }
        #### Gather all the data we need
        c = {}
        if os.path.exists(config_file):
            with open(config_file) as f:
                static_config = json.loads(f.read())
                # Redundant: the with-block already closes the file.
                f.close()
        else:
            static_config = {}
        # Seed the prompt defaults from any previously saved answers.
        for key in static_config:
            if key in params:
                params[key]['default'] = static_config[key]
        for key in sorted(params.keys()):
            #if len(str(params[key]['default'])) > 30:
            # default = str(params[key]['default'])[0:27] + "..."
            #else:
            default = str(params[key]['default'])
            val = raw_input("%s [%s] " % (params[key]['prompt'], default)).strip()
            if len(val) == 0:
                c[key] = params[key]['default']
            else:
                c[key] = val
        save_config(c, config_file)
        #### Get Instance type
        if hasattr(cmdargs, "instancetype") and cmdargs.instancetype != "DEFAULT":
            c['instance_type'] = cmdargs.instancetype
        else:
            c['instance_type'] = get_instance_type(c)
        save_config(c, config_file)
        # Pin a specific NuoDB version only if it is actually downloadable.
        if hasattr(cmdargs, "nuodbVersion") and cmdargs.nuodbVersion != "DEFAULT":
            if validate_download(NUODB_DOWNLOAD_URL % cmdargs.nuodbVersion):
                c['nuodb_version'] = cmdargs.nuodbVersion
            else:
                print "Can't find NuoDB version %s on the public download server. Please check the version and try again." % cmdargs.nuodbVersion
                sys.exit(2)
        else:
            c['nuodb_version'] = None
        c['domain_name'] = "domain"
        save_config(c, config_file)
        c["zones"] = get_zone_info(c, useCustomAmi=cmdargs.usecustomami)
        print "Saving this information for later to %s" % config_file
        save_config(c, config_file)
        #######################################
        #### Actually do some work
        #######################################
        mycluster = nuodbawsquickstart.Cluster(
            alert_email = c['alert_email'],
            aws_access_key = c['aws_access_key'], aws_secret = c['aws_secret'],
            cluster_name = c['cluster_name'], domain_name = c['domain_name'],
            domain_password = c['domain_password'], instance_type = c['instance_type'],
            nuodbVersion = c['nuodb_version'])
        print "Creating the cluster."
        count = 0
        for zone in c['zones']:
            mycluster.connect_zone(zone)
            z = c['zones'][zone]
            for i in range(0,z['servers']):
                root_name = "%s-%i" % (c['cluster_name'], count)
                myserver = mycluster.add_host(name=root_name, zone=zone, ami=z['ami'], subnets=z['subnets'], security_group_ids = z['security_group_ids'], keypair = z['keypair']) # Mark the number of nodes to be created
                print "Added %s (%s)" % (root_name, myserver.region)
                count += 1
        print "Booting the cluster"
        # NOTE(review): uses the module-global ``args`` instead of
        # cmdargs.peer -- only works when called from the bottom of this
        # script; confirm whether cmdargs.peer was intended.
        mycluster.create_cluster(peer=args.peer) # Actually spins up the nodes.
        hosts = mycluster.get_hosts()
        print("Waiting for an available web console")
        # Poll every host's auto-console port for up to `wait` seconds.
        healthy = False
        i=0
        wait = 600 #seconds
        good_host = None
        while i < wait:
            if not healthy:
                for host_id in hosts:
                    obj = mycluster.get_host(host_id)['host']
                    address = mycluster.get_host_address(host_id)
                    url = "http://%s:%s" % (address, obj.autoconsole_port)
                    if not healthy:
                        try:
                            # 2-second timeout per probe; any failure just
                            # means "not up yet".
                            urllib2.urlopen(url, None, 2)
                            good_host = url
                            healthy = True
                        except:
                            pass
            time.sleep(1)
            i += 1
        if not healthy:
            print "Gave up trying after %s seconds. Check the server" % str(wait)
        else:
            print
            print "########################################################################################"
            print "You can now access the NuoDB Admin Center (Username: domain Password: %s) at" % c['domain_password']
            print str(good_host)
            print "where you'll find documentation, samples and demos (check out Storefront!)"
            print "and the Automation Console (create and manage databases)."
            print "Other instances in the cluster may still be booting and will join the cluster over time."
            print "########################################################################################"
            print
    ########################
    #### Terminate a cluster
    ########################
    elif cmdargs.action == "terminate":
        # Reuse saved credentials/cluster name when available.
        if os.path.exists(config_file):
            with open(config_file) as f:
                c = json.loads(f.read())
                # Redundant: the with-block already closes the file.
                f.close()
        else:
            c = {}
        params = {
            "cluster_name": { "default" : "NuoDBQuickstart", "prompt" : "What is the name of your cluster?"},
            "aws_access_key": {"default" : "", "prompt" : "What is your AWS access key?"},
            "aws_secret": {"default" : "", "prompt" : "What is your AWS secret?"},
        }
        for key in sorted(params.keys()):
            #if len(str(params[key]['default'])) > 30:
            # default = str(params[key]['default'])[0:27] + "..."
            #else:
            default = str(params[key]['default'])
            val = raw_input("%s [%s] " % (params[key]['prompt'], default)).strip()
            if len(val) == 0:
                c[key] = params[key]['default']
            else:
                c[key] = val
        mycluster = nuodbawsquickstart.Cluster(
            alert_email = "",
            aws_access_key = c['aws_access_key'], aws_secret = c['aws_secret'],
            cluster_name = c['cluster_name'], domain_name = "",
            domain_password = "", instance_type = "")
        for zone in get_regions(c):
            mycluster.connect_zone(zone)
        mycluster.terminate_hosts()
    else:
        help()
# ---- Command-line entry point ----
program_license = ""
parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("--nuodbVersion", dest="nuodbVersion", help="Which version of NuoDB to use [default: %(default)s]", default="DEFAULT", required=False)
parser.add_argument("--instancetype", dest="instancetype", help="Which type of AWS instance to use [default: %s]" % INSTANCE_TYPE, default="DEFAULT", required=False)
parser.add_argument("--use-custom-ami", dest="usecustomami", action="store_true", help="Specify a custom AMI. (Only do this if you know what you are doing)", default=False, required=False)
parser.add_argument("--peer", dest="peer", help="What your custom peer is", default=None, required=False)
parser.add_argument("action", help="What action do you want to take on the cluster? (create/terminate)", nargs="?")
args = parser.parse_args()
# The positional action is optional; fall back to an interactive prompt
# when it is missing or unrecognized.
if args.action not in ["create", "terminate"]:
    res = user_prompt("What action do you want to take on the cluster? (create/terminate): ", ["create", "terminate"])
    args.action = res.strip()
__main__(cmdargs = args)
|
|
"""
The huginn.fdm module contains classes and functions that can be used to
initialize the flight dynamics model and create a model for a simulated
aircraft
"""
from os import path
from math import degrees
import logging
from PyJSBSim import FGFDMExec
from huginn import configuration
from huginn.unit_conversions import (convert_jsbsim_acceleration,
convert_jsbsim_angular_acceleration,
convert_jsbsim_angular_velocity,
convert_jsbsim_velocity,
convert_jsbsim_pressure,
convert_jsbsim_temperature,
convert_jsbsim_density,
convert_jsbsim_force,
ur)
logger = logging.getLogger(__name__)
# JSBSim trim-mode identifiers.
TRIM_MODE_LONGITUDINAL = 0
TRIM_MODE_FULL = 1
TRIM_MODE_GROUND = 2
TRIM_MODE_PULLUP = 3
# NOTE(review): value 4 is intentionally absent here -- confirm against the
# JSBSim/PyJSBSim trim-mode enumeration before relying on the gap.
TRIM_MODE_TURN = 5
class FDMBuilder(object):
    """The FDMBuilder creates the flight dynamics model object that will be
    used by the simulator"""
    def __init__(self, data_path):
        # Root directory of the JSBSim data files.
        self.data_path = data_path
        # Defaults come from huginn.configuration; callers may override these
        # attributes before calling create_fdm().
        self.dt = configuration.DT
        self.aircraft = configuration.AIRCRAFT
        self.latitude = configuration.LATITUDE
        self.longitude = configuration.LONGITUDE
        self.altitude = configuration.ALTITUDE
        self.airspeed = configuration.AIRSPEED
        self.heading = configuration.HEADING

    def create_fdm(self):
        """Create the flight dynamics model

        Returns the configured FGFDMExec object, or None when running the
        initial conditions fails.
        """
        fdmexec = FGFDMExec()
        logger.debug("Using jsbsim data at %s", self.data_path)
        aircraft_data_path = self.data_path
        engine_data_path = path.join(self.data_path, "Engines")
        systems_data_path = path.join(self.data_path, "Systems")
        logger.debug("Using aircraft data at %s", aircraft_data_path)
        logger.debug("Using engine data at %s", engine_data_path)
        logger.debug("Using systems data at %s", systems_data_path)
        fdmexec.SetRootDir(self.data_path)
        fdmexec.SetAircraftPath("")
        fdmexec.SetEnginePath(engine_data_path)
        fdmexec.SetSystemsPath(systems_data_path)
        logger.debug("JSBSim dt is %f", self.dt)
        fdmexec.Setdt(self.dt)
        logger.debug("Loading '%s' aircraft model", self.aircraft)
        fdmexec.LoadModel(self.aircraft)
        # Convert the SI configuration values into the units the JSBSim
        # initial-condition API expects (feet, knots) via pint quantities.
        altitude = self.altitude * ur.meter
        altitude.ito(ur.foot)
        altitude_in_feet = altitude.magnitude
        airspeed = self.airspeed * ur.meters_per_second
        airspeed.ito(ur.knot)
        airspeed_in_knots = airspeed.magnitude
        logger.debug("Initial latitude: %f degrees", self.latitude)
        logger.debug("Initial longitude: %f degrees", self.longitude)
        logger.debug("Initial altitude: %f meters", self.altitude)
        logger.debug("Initial airspeed: %f meters/second", self.airspeed)
        logger.debug("Initial heading: %f degrees", self.heading)
        fdmexec.GetIC().SetLatitudeDegIC(self.latitude)
        fdmexec.GetIC().SetLongitudeDegIC(self.longitude)
        fdmexec.GetIC().SetAltitudeASLFtIC(altitude_in_feet)
        fdmexec.GetIC().SetPsiDegIC(self.heading)
        fdmexec.GetIC().SetVtrueKtsIC(airspeed_in_knots)
        if not fdmexec.RunIC():
            logger.error("Failed to run initial condition")
            return None
        return fdmexec
class Accelerations(object):
    """Read-only accessors for the acceleration data exposed by JSBSim."""

    def __init__(self, fdmexec):
        self.fdmexec = fdmexec

    @property
    def x(self):
        """Acceleration along the aircraft x axis in meters/sec^2."""
        return convert_jsbsim_acceleration(
            self.fdmexec.GetAuxiliary().GetPilotAccel(1))

    @property
    def y(self):
        """Acceleration along the aircraft y axis in meters/sec^2."""
        return convert_jsbsim_acceleration(
            self.fdmexec.GetAuxiliary().GetPilotAccel(2))

    @property
    def z(self):
        """Acceleration along the aircraft z axis in meters/sec^2."""
        return convert_jsbsim_acceleration(
            self.fdmexec.GetAuxiliary().GetPilotAccel(3))

    @property
    def p_dot(self):
        """p component of the body-axis angular acceleration in degrees/sec^2."""
        return convert_jsbsim_angular_acceleration(
            self.fdmexec.GetAccelerations().GetPQRdot(1))

    @property
    def q_dot(self):
        """q component of the body-axis angular acceleration in degrees/sec^2."""
        return convert_jsbsim_angular_acceleration(
            self.fdmexec.GetAccelerations().GetPQRdot(2))

    @property
    def r_dot(self):
        """r component of the body-axis angular acceleration in degrees/sec^2."""
        return convert_jsbsim_angular_acceleration(
            self.fdmexec.GetAccelerations().GetPQRdot(3))

    @property
    def u_dot(self):
        """u component of the body-axis acceleration in meters/sec^2."""
        return convert_jsbsim_acceleration(
            self.fdmexec.GetAccelerations().GetUVWdot(1))

    @property
    def v_dot(self):
        """v component of the body-axis acceleration in meters/sec^2."""
        return convert_jsbsim_acceleration(
            self.fdmexec.GetAccelerations().GetUVWdot(2))

    @property
    def w_dot(self):
        """w component of the body-axis acceleration in meters/sec^2."""
        return convert_jsbsim_acceleration(
            self.fdmexec.GetAccelerations().GetUVWdot(3))

    @property
    def gravity(self):
        """Magnitude of the gravitational acceleration in meters/sec^2."""
        return convert_jsbsim_acceleration(
            self.fdmexec.GetAccelerations().GetGravAccelMagnitude())
class Velocities(object):
    """Read-only accessors for the velocity data exposed by JSBSim."""

    def __init__(self, fdmexec):
        self.fdmexec = fdmexec

    @property
    def p(self):
        """p component of the body angular rates in degrees/sec."""
        return convert_jsbsim_angular_velocity(
            self.fdmexec.GetPropagate().GetPQR(1))

    @property
    def q(self):
        """q component of the body angular rates in degrees/sec."""
        return convert_jsbsim_angular_velocity(
            self.fdmexec.GetPropagate().GetPQR(2))

    @property
    def r(self):
        """r component of the body angular rates in degrees/sec."""
        return convert_jsbsim_angular_velocity(
            self.fdmexec.GetPropagate().GetPQR(3))

    @property
    def true_airspeed(self):
        """True airspeed in meters/second."""
        return convert_jsbsim_velocity(
            self.fdmexec.GetAuxiliary().GetVtrueFPS())

    @property
    def climb_rate(self):
        """Vertical velocity in meters/second."""
        # JSBSim reports the down component; negate it to get climb rate.
        return convert_jsbsim_velocity(
            -self.fdmexec.GetPropagate().GetVel(3))

    @property
    def u(self):
        """u component of the body-frame velocity vector in meters/sec."""
        return convert_jsbsim_velocity(
            self.fdmexec.GetPropagate().GetUVW(1))

    @property
    def v(self):
        """v component of the body-frame velocity vector in meters/sec."""
        return convert_jsbsim_velocity(
            self.fdmexec.GetPropagate().GetUVW(2))

    @property
    def w(self):
        """w component of the body-frame velocity vector in meters/sec."""
        return convert_jsbsim_velocity(
            self.fdmexec.GetPropagate().GetUVW(3))

    @property
    def calibrated_airspeed(self):
        """Calibrated airspeed in meters/sec."""
        return convert_jsbsim_velocity(
            self.fdmexec.GetAuxiliary().GetVcalibratedFPS())

    @property
    def equivalent_airspeed(self):
        """Equivalent airspeed in meters/sec."""
        return convert_jsbsim_velocity(
            self.fdmexec.GetAuxiliary().GetVequivalentFPS())

    @property
    def ground_speed(self):
        """Ground speed in meters/sec."""
        return convert_jsbsim_velocity(
            self.fdmexec.GetAuxiliary().GetVground())
class Position(object):
    """The Position class contains data about the position of the aircraft"""

    def __init__(self, fdmexec):
        self.fdmexec = fdmexec

    @property
    def latitude(self):
        """Latitude in degrees."""
        propagate = self.fdmexec.GetPropagate()
        return propagate.GetLatitudeDeg()

    @property
    def longitude(self):
        """Longitude in degrees."""
        propagate = self.fdmexec.GetPropagate()
        return propagate.GetLongitudeDeg()

    @property
    def altitude(self):
        """Altitude above sea level in meters."""
        propagate = self.fdmexec.GetPropagate()
        return propagate.GetAltitudeASLmeters()

    @property
    def heading(self):
        """Heading in degrees."""
        propagate = self.fdmexec.GetPropagate()
        return degrees(propagate.GetEuler(3))
class Orientation(object):
    """The Orientation class contains data about the orientation of the
    aircraft"""

    def __init__(self, fdmexec):
        self.fdmexec = fdmexec

    @property
    def phi(self):
        """The phi euler angle in degrees."""
        propagate = self.fdmexec.GetPropagate()
        return propagate.GetEulerDeg(1)

    @property
    def theta(self):
        """The theta euler angle in degrees."""
        propagate = self.fdmexec.GetPropagate()
        return propagate.GetEulerDeg(2)

    @property
    def psi(self):
        """The psi euler angle in degrees."""
        propagate = self.fdmexec.GetPropagate()
        return propagate.GetEulerDeg(3)
class Atmosphere(object):
    """Exposes the atmospheric data computed by JSBSim."""

    def __init__(self, fdmexec):
        """Create a new Atmosphere object.

        Arguments:
        fdmexec: a JSBSim FGFDMExec object
        """
        self.fdmexec = fdmexec

    @property
    def pressure(self):
        """Pressure in Pascal at the current altitude."""
        return convert_jsbsim_pressure(
            self.fdmexec.GetAtmosphere().GetPressure())

    @property
    def sea_level_pressure(self):
        """Pressure in Pascal at sea level."""
        return convert_jsbsim_pressure(
            self.fdmexec.GetAtmosphere().GetPressureSL())

    @property
    def temperature(self):
        """Temperature in kelvin at the current altitude."""
        return convert_jsbsim_temperature(
            self.fdmexec.GetAtmosphere().GetTemperature())

    @property
    def sea_level_temperature(self):
        """Temperature in kelvin at sea level."""
        return convert_jsbsim_temperature(
            self.fdmexec.GetAtmosphere().GetTemperatureSL())

    @property
    def density(self):
        """Atmospheric density in kg/meters^3 at the current altitude."""
        return convert_jsbsim_density(
            self.fdmexec.GetAtmosphere().GetDensity())

    @property
    def sea_level_density(self):
        """Atmospheric density in kg/meters^3 at sea level."""
        return convert_jsbsim_density(
            self.fdmexec.GetAtmosphere().GetDensitySL())
class Forces(object):
    """Exposes the aerodynamic and total forces acting on the aircraft,
    converted to Newtons."""
    def __init__(self, fdmexec):
        # fdmexec: a JSBSim FGFDMExec object
        self.fdmexec = fdmexec
    def _aero_body(self, axis):
        # Aerodynamic force component in the body frame (1=x, 2=y, 3=z).
        return convert_jsbsim_force(
            self.fdmexec.GetAerodynamics().GetForces(axis))
    def _aero_wind(self, axis):
        # Aerodynamic force component in the wind frame (1=x, 2=y, 3=z).
        return convert_jsbsim_force(
            self.fdmexec.GetAerodynamics().GetvFw(axis))
    def _total_body(self, axis):
        # Total force component in the body frame (1=x, 2=y, 3=z).
        return convert_jsbsim_force(
            self.fdmexec.GetAccelerations().GetForces(axis))
    @property
    def x_body(self):
        """Return the force along the x axis in the body frame, in
        Newtons."""
        return self._aero_body(1)
    @property
    def y_body(self):
        """Return the force along the y axis in the body frame, in
        Newtons."""
        return self._aero_body(2)
    @property
    def z_body(self):
        """Return the force along the z axis in the body frame, in
        Newtons."""
        return self._aero_body(3)
    @property
    def x_wind(self):
        """Return the force along the x axis in the wind frame, in
        Newtons."""
        return self._aero_wind(1)
    @property
    def y_wind(self):
        """Return the force along the y axis in the wind frame, in
        Newtons."""
        return self._aero_wind(2)
    @property
    def z_wind(self):
        """Return the force along the z axis in the wind frame, in
        Newtons."""
        return self._aero_wind(3)
    @property
    def x_total(self):
        """Return the total force along the x axis in the body frame, in
        Newtons."""
        return self._total_body(1)
    @property
    def y_total(self):
        """Return the total force along the y axis in the body frame, in
        Newtons."""
        return self._total_body(2)
    @property
    def z_total(self):
        """Return the total force along the z axis in the body frame, in
        Newtons."""
        return self._total_body(3)
class InitialCondition(object):
    """Gets and sets the simulator's initial conditions: starting
    position, heading and airspeed."""
    def __init__(self, fdmexec):
        """Create a new InitialCondition object.

        Arguments:
        fdmexec: a JSBSim FGFDMExec object
        """
        self.fdmexec = fdmexec
    @property
    def latitude(self):
        """Get the starting position latitude in degrees"""
        return self.fdmexec.GetIC().GetLatitudeDegIC()
    @latitude.setter
    def latitude(self, value):
        """Set the starting position latitude.

        Arguments:
        value: the latitude in degrees
        """
        self.fdmexec.GetIC().SetLatitudeDegIC(value)
    @property
    def longitude(self):
        """Get the starting position longitude in degrees"""
        return self.fdmexec.GetIC().GetLongitudeDegIC()
    @longitude.setter
    def longitude(self, value):
        """Set the starting position longitude.

        Arguments:
        value: the longitude in degrees
        """
        self.fdmexec.GetIC().SetLongitudeDegIC(value)
    @property
    def altitude(self):
        """Get the starting altitude in meters"""
        # JSBSim stores the IC altitude in feet; convert to meters.
        feet = self.fdmexec.GetIC().GetAltitudeASLFtIC() * ur.foot
        return feet.to(ur.meter).magnitude
    @altitude.setter
    def altitude(self, value):
        """Set the starting altitude.

        Arguments:
        value: the altitude in meters
        """
        meters = value * ur.meter
        self.fdmexec.GetIC().SetAltitudeASLFtIC(meters.to(ur.foot).magnitude)
    @property
    def heading(self):
        """Get the starting heading in degrees"""
        return self.fdmexec.GetIC().GetPsiDegIC()
    @heading.setter
    def heading(self, value):
        """Set the starting heading.

        Arguments:
        value: the heading in degrees
        """
        self.fdmexec.GetIC().SetPsiDegIC(value)
    @property
    def airspeed(self):
        """Get the starting airspeed in meters/second"""
        # JSBSim stores the IC true airspeed in knots; convert to m/s.
        knots = self.fdmexec.GetIC().GetVtrueKtsIC() * ur.knot
        return knots.to(ur.meters_per_second).magnitude
    @airspeed.setter
    def airspeed(self, value):
        """Set the starting airspeed.

        Arguments:
        value: the airspeed in meters/second
        """
        speed = value * ur.meters_per_second
        self.fdmexec.GetIC().SetVtrueKtsIC(speed.to(ur.knot).magnitude)
class FDM(object):
    """The FDM object is a wrapper around the JSBSim objects that contains
    the values of the flight dynamics model.

    Each data domain (accelerations, velocities, position, orientation,
    atmosphere, forces, initial conditions) gets its own wrapper object,
    all sharing the same FGFDMExec instance.
    """
    def __init__(self, fdmexec):
        # fdmexec: a JSBSim FGFDMExec object
        self.fdmexec = fdmexec
        # Declarative table of (attribute name, wrapper class) pairs.
        wrappers = (
            ("accelerations", Accelerations),
            ("velocities", Velocities),
            ("position", Position),
            ("orientation", Orientation),
            ("atmosphere", Atmosphere),
            ("forces", Forces),
            ("initial_condition", InitialCondition),
        )
        for attr_name, wrapper_cls in wrappers:
            setattr(self, attr_name, wrapper_cls(fdmexec))