gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class GroupByWindowTest(test.TestCase):
  """Tests for the `group_by_window` dataset transformation."""

  def testSimple(self):
    # Square each element, then group by parity of the squared value into
    # batches of (at most) 4.
    components = np.random.randint(100, size=(200,)).astype(np.int64)
    iterator = dataset_ops.Iterator.from_dataset(
        dataset_ops.Dataset.from_tensor_slices(components).map(lambda x: x * x)
        .apply(dataset_ops.group_by_window(lambda x: x % 2,
                                           lambda _, xs: xs.batch(4), 4)))
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)
      counts = []
      with self.assertRaises(errors.OutOfRangeError):
        while True:
          result = sess.run(get_next)
          # BUG FIX: the original passed a bare generator expression to
          # `assertTrue()`, which is always truthy, so the parity check was
          # a no-op (and `all(x % 2 == 1)` was malformed). Each batch must
          # be homogeneous: all even or all odd.
          self.assertTrue(
              all(x % 2 == 0 for x in result) or
              all(x % 2 == 1 for x in result))
          counts.append(result.shape[0])

      # Every input element appears in exactly one output batch.
      self.assertEqual(len(components), sum(counts))
      num_full_batches = len([c for c in counts if c == 4])
      self.assertGreaterEqual(num_full_batches, 23)
      self.assertTrue(all(c == 4 for c in counts[:num_full_batches]))

  def testImmediateOutput(self):
    components = np.array(
        [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 2, 2, 0, 0], dtype=np.int64)
    iterator = dataset_ops.Iterator.from_dataset(
        dataset_ops.Dataset.from_tensor_slices(components).repeat(-1).apply(
            dataset_ops.group_by_window(lambda x: x % 3,
                                        lambda _, xs: xs.batch(4), 4)))
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)
      # The input is infinite, so this test demonstrates that:
      # 1. We produce output without having to consume the entire input,
      # 2. Different buckets can produce output at different rates, and
      # 3. For deterministic input, the output is deterministic.
      for _ in range(3):
        self.assertAllEqual([0, 0, 0, 0], sess.run(get_next))
        self.assertAllEqual([1, 1, 1, 1], sess.run(get_next))
        self.assertAllEqual([2, 2, 2, 2], sess.run(get_next))
        self.assertAllEqual([0, 0, 0, 0], sess.run(get_next))

  def testSmallGroups(self):
    components = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], dtype=np.int64)
    iterator = dataset_ops.Iterator.from_dataset(
        dataset_ops.Dataset.from_tensor_slices(components).apply(
            dataset_ops.group_by_window(lambda x: x % 2,
                                        lambda _, xs: xs.batch(4), 4)))
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)
      self.assertAllEqual([0, 0, 0, 0], sess.run(get_next))
      self.assertAllEqual([1, 1, 1, 1], sess.run(get_next))
      # The small outputs at the end are deterministically produced in key
      # order.
      self.assertAllEqual([0, 0, 0], sess.run(get_next))
      self.assertAllEqual([1], sess.run(get_next))

  def testReduceFuncError(self):
    components = np.random.randint(100, size=(200,)).astype(np.int64)

    def reduce_func(_, xs):
      # Introduce an incorrect padded shape that cannot (currently) be
      # detected at graph construction time.
      return xs.padded_batch(
          4,
          padded_shapes=(tensor_shape.TensorShape([]),
                         constant_op.constant([5], dtype=dtypes.int64) * -1))

    iterator = dataset_ops.Iterator.from_dataset(
        dataset_ops.Dataset.from_tensor_slices(components)
        .map(lambda x: (x, ops.convert_to_tensor([x * x]))).apply(
            dataset_ops.group_by_window(lambda x, _: x % 2, reduce_func, 32)))
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)
      # The invalid (negative) padded shape is only detected at run time.
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(get_next)

  def testConsumeWindowDatasetMoreThanOnce(self):
    components = np.random.randint(50, size=(200,)).astype(np.int64)

    def reduce_func(key, window):
      # Apply two different kinds of padding to the input: tight
      # padding, and quantized (to a multiple of 10) padding.
      return dataset_ops.Dataset.zip((
          window.padded_batch(
              4, padded_shapes=tensor_shape.TensorShape([None])),
          window.padded_batch(
              4, padded_shapes=ops.convert_to_tensor([(key + 1) * 10])),))

    iterator = dataset_ops.Iterator.from_dataset(
        dataset_ops.Dataset.from_tensor_slices(components)
        .map(lambda x: array_ops.fill([math_ops.cast(x, dtypes.int32)], x))
        .apply(dataset_ops.group_by_window(
            lambda x: math_ops.cast(array_ops.shape(x)[0] // 10, dtypes.int64),
            reduce_func, 4)))
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)
      counts = []
      with self.assertRaises(errors.OutOfRangeError):
        while True:
          tight_result, multiple_of_10_result = sess.run(get_next)
          # The quantized batch is padded to a multiple of 10 and its
          # leading columns must equal the tightly padded batch.
          self.assertEqual(0, multiple_of_10_result.shape[1] % 10)
          self.assertAllEqual(tight_result,
                              multiple_of_10_result[:, :tight_result.shape[1]])
          counts.append(tight_result.shape[0])
      self.assertEqual(len(components), sum(counts))
# NOTE(mrry): These tests are based on the tests in bucket_ops_test.py.
# Currently, they use a constant batch size, though should be made to use a
# different batch size per key.
class BucketTest(test.TestCase):
  """Bucketing tests for `group_by_window` (mirrors bucket_ops_test.py)."""

  def _dynamicPad(self, bucket, window, window_size):
    # TODO(mrry): To match `tf.contrib.training.bucket()`, implement a
    # generic form of padded_batch that pads every component
    # dynamically and does not rely on static shape information about
    # the arguments.
    # FIX: honor the `window_size` argument instead of a hard-coded 32
    # (all current callers pass 32, so behavior is unchanged).
    return dataset_ops.Dataset.zip(
        (dataset_ops.Dataset.from_tensors(bucket), window.padded_batch(
            window_size,
            (tensor_shape.TensorShape([]), tensor_shape.TensorShape([None]),
             tensor_shape.TensorShape([3])))))

  def testSingleBucket(self):

    def _map_fn(v):
      return (v, array_ops.fill([v], v),
              array_ops.fill([3], string_ops.as_string(v)))

    input_dataset = (
        dataset_ops.Dataset.from_tensor_slices(math_ops.range(32)).map(_map_fn))

    bucketed_dataset = input_dataset.apply(
        dataset_ops.group_by_window(
            lambda x, y, z: 0,
            lambda k, bucket: self._dynamicPad(k, bucket, 32), 32))

    iterator = dataset_ops.Iterator.from_dataset(bucketed_dataset)
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)

      which_bucket, bucketed_values = sess.run(get_next)

      self.assertEqual(0, which_bucket)

      expected_scalar_int = np.arange(32, dtype=np.int64)
      expected_unk_int64 = np.zeros((32, 31)).astype(np.int64)
      for i in range(32):
        expected_unk_int64[i, :i] = i
      expected_vec3_str = np.vstack(3 * [np.arange(32).astype(bytes)]).T

      self.assertAllEqual(expected_scalar_int, bucketed_values[0])
      self.assertAllEqual(expected_unk_int64, bucketed_values[1])
      self.assertAllEqual(expected_vec3_str, bucketed_values[2])

  def testEvenOddBuckets(self):

    def _map_fn(v):
      return (v, array_ops.fill([v], v),
              array_ops.fill([3], string_ops.as_string(v)))

    input_dataset = (
        dataset_ops.Dataset.from_tensor_slices(math_ops.range(64)).map(_map_fn))

    bucketed_dataset = input_dataset.apply(
        dataset_ops.group_by_window(
            lambda x, y, z: math_ops.cast(x % 2, dtypes.int64),
            lambda k, bucket: self._dynamicPad(k, bucket, 32), 32))

    iterator = dataset_ops.Iterator.from_dataset(bucketed_dataset)
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)

      # Get two minibatches (one containing even values, one containing odds)
      which_bucket_even, bucketed_values_even = sess.run(get_next)
      which_bucket_odd, bucketed_values_odd = sess.run(get_next)

      # Count number of bucket_tensors.
      self.assertEqual(3, len(bucketed_values_even))
      self.assertEqual(3, len(bucketed_values_odd))

      # Ensure bucket 0 was used for all minibatch entries.
      self.assertAllEqual(0, which_bucket_even)
      self.assertAllEqual(1, which_bucket_odd)

      # Test the first bucket outputted, the events starting at 0
      expected_scalar_int = np.arange(0, 32 * 2, 2, dtype=np.int64)
      expected_unk_int64 = np.zeros((32, 31 * 2)).astype(np.int64)
      for i in range(0, 32):
        expected_unk_int64[i, :2 * i] = 2 * i
      expected_vec3_str = np.vstack(
          3 * [np.arange(0, 32 * 2, 2).astype(bytes)]).T

      self.assertAllEqual(expected_scalar_int, bucketed_values_even[0])
      self.assertAllEqual(expected_unk_int64, bucketed_values_even[1])
      self.assertAllEqual(expected_vec3_str, bucketed_values_even[2])

      # Test the second bucket outputted, the odds starting at 1
      expected_scalar_int = np.arange(1, 32 * 2 + 1, 2, dtype=np.int64)
      expected_unk_int64 = np.zeros((32, 31 * 2 + 1)).astype(np.int64)
      for i in range(0, 32):
        expected_unk_int64[i, :2 * i + 1] = 2 * i + 1
      expected_vec3_str = np.vstack(
          3 * [np.arange(1, 32 * 2 + 1, 2).astype(bytes)]).T

      self.assertAllEqual(expected_scalar_int, bucketed_values_odd[0])
      self.assertAllEqual(expected_unk_int64, bucketed_values_odd[1])
      self.assertAllEqual(expected_vec3_str, bucketed_values_odd[2])

  def testEvenOddBucketsFilterOutAllOdd(self):

    def _map_fn(v):
      return {
          "x": v,
          "y": array_ops.fill([v], v),
          "z": array_ops.fill([3], string_ops.as_string(v))
      }

    def _dynamic_pad_fn(bucket, window, _):
      return dataset_ops.Dataset.zip(
          (dataset_ops.Dataset.from_tensors(bucket), window.padded_batch(
              32, {
                  "x": tensor_shape.TensorShape([]),
                  "y": tensor_shape.TensorShape([None]),
                  "z": tensor_shape.TensorShape([3])
              })))

    input_dataset = (
        dataset_ops.Dataset.from_tensor_slices(math_ops.range(128)).map(_map_fn)
        .filter(lambda d: math_ops.equal(d["x"] % 2, 0)))

    bucketed_dataset = input_dataset.apply(
        dataset_ops.group_by_window(
            lambda d: math_ops.cast(d["x"] % 2, dtypes.int64),
            lambda k, bucket: _dynamic_pad_fn(k, bucket, 32), 32))

    iterator = dataset_ops.Iterator.from_dataset(bucketed_dataset)
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)

      # Get two minibatches ([0, 2, ...] and [64, 66, ...])
      which_bucket0, bucketed_values_even0 = sess.run(get_next)
      which_bucket1, bucketed_values_even1 = sess.run(get_next)

      # Ensure that bucket 1 was completely filtered out
      self.assertAllEqual(0, which_bucket0)
      self.assertAllEqual(0, which_bucket1)
      self.assertAllEqual(
          np.arange(0, 64, 2, dtype=np.int64), bucketed_values_even0["x"])
      self.assertAllEqual(
          np.arange(64, 128, 2, dtype=np.int64), bucketed_values_even1["x"])

  def testDynamicWindowSize(self):
    components = np.arange(100).astype(np.int64)

    # Key fn: even/odd
    # Reduce fn: batches of 5
    # Window size fn: even=5, odd=10

    def window_size_func(key):
      window_sizes = constant_op.constant([5, 10], dtype=dtypes.int64)
      return window_sizes[key]

    dataset = dataset_ops.Dataset.from_tensor_slices(components).apply(
        dataset_ops.group_by_window(
            lambda x: x % 2, lambda _, xs: xs.batch(20), None,
            window_size_func))

    iterator = dataset_ops.Iterator.from_dataset(dataset)
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)
      with self.assertRaises(errors.OutOfRangeError):
        batches = 0
        while True:
          result = sess.run(get_next)
          is_even = all(x % 2 == 0 for x in result)
          is_odd = all(x % 2 == 1 for x in result)
          self.assertTrue(is_even or is_odd)
          expected_batch_size = 5 if is_even else 10
          self.assertEqual(expected_batch_size, result.shape[0])
          batches += 1

      self.assertEqual(batches, 15)
if __name__ == "__main__":
  # Run all tests in this module.
  test.main()
| |
import unittest
#from pyramid import testing
class UtilTests(unittest.TestCase):
    """Unit tests for the m4ed Base62 utility."""

    def setUp(self):
        # No fixtures are required for the Base62 tests.
        pass

    def test_Base62(self):
        from m4ed.util.base62 import Base62
        b = Base62()
        # Default value renders as '0'.
        self.assertEqual(str(b), '0')
        # String inputs are taken as already base-62 encoded.
        self.assertEqual(str(Base62('10')), '10')
        # Integer inputs are converted: 10 -> 'a'.
        self.assertEqual(str(Base62(10)), 'a')
        self.assertEqual(str(Base62(1)), '1')
        # Constructing a Base62 from another Base62 is not supported.
        self.assertRaises(TypeError, Base62, Base62())
        b += 11
        a = b + '10'
        self.assertEqual(str(a), '1b')
        c = Base62('Z') + Base62('Z')
        self.assertEqual(str(c), '1Y')
        c += Base62('1')
        self.assertEqual(str(c), '1Z')
        b.increment()
        self.assertEqual(int(b), 12)
        self.assertEqual(str(b), 'c')
        b += '1'
        self.assertEqual(str(b), 'd')
        # NOTE(review): `unicode` exists only on Python 2; this suite (like
        # `string.letters` elsewhere in the file) targets py2.
        self.assertEqual(unicode(b), u'd')
class FunctionalTests(unittest.TestCase):
    """End-to-end tests that drive the WSGI app through WebTest."""

    def setUp(self):
        from pyramid import paster
        # NOTE! Be sure to run `export TEST_INI='development.ini'` so that
        # os.environ can find it!
        app = paster.get_app('test.ini')  # os.environ['TEST_INI']
        from webtest import TestApp
        self.testapp = TestApp(app)

    def _login(self, name='superuser', password='1234'):
        # Can be used to log into the app.
        params = {
            'name': name,
            'password': password,
            'form.submitted': 'true'  # The value of this field does not matter
        }
        return self.testapp.post('/login', params=params)

    def test_root(self):
        self._login()
        self.testapp.get('/')

    def test_login_valid_password(self):
        # First a legit login.
        self.testapp.reset()
        res = self.testapp.get('/login')
        form = res.form
        form['name'] = 'user'
        form['password'] = '1234'
        res = form.submit('form.submitted')
        self.failUnless(res.status == '302 Found')
        res = res.follow()
        self.failUnless(res.request.url == 'http://localhost/')
        # Duplicated login should redirect to root.
        res = self.testapp.get('/login')
        self.failUnless(res.status == '302 Found')
        res = res.follow()
        self.failUnless(res.request.url == 'http://localhost/')

    def test_login_invalid_password(self):
        self.testapp.reset()
        res = self.testapp.get('/login')
        form = res.form
        form['name'] = 'user'
        form['password'] = 'invalid_password'
        res = form.submit('form.submitted')
        # A failed login stays on the login page.
        self.failUnless(res.request.url == 'http://localhost/login')

    def test_logout(self):
        self._login()
        # Normal logout.
        res = self.testapp.get('/logout')
        self.failUnless(res.status == '302 Found')
        res = res.follow()
        self.failUnless(res.request.url == 'http://localhost/')
        # Logout without login redirects to root as well.
        res = self.testapp.get('/logout')
        self.failUnless(res.status == '302 Found')
        res = res.follow()
        self.failUnless(res.request.url == 'http://localhost/')

    def test_api_items_get(self):
        self._login()
        res = self.testapp.get('/api/items')
        json = res.json
        self.failUnless(len(json) > 0)
        _id = json[0].get('_id')
        res = self.testapp.get('/api/items/{0}'.format(_id))
        self.failUnless(str(res.json.get('_id')) == str(_id))

    def test_api_assets_get(self):
        self._login()
        res = self.testapp.get('/api/assets')
        json = res.json
        self.failUnless(len(json) > 0)
        object_id = json[0].get('_id')
        short_id = json[0].get('id')
        # Assets are addressable by both the Mongo ObjectId and the short id.
        res = self.testapp.get('/api/assets/{0}'.format(object_id))
        self.failUnless(str(res.json.get('_id')) == str(object_id))
        res = self.testapp.get('/api/assets/{0}'.format(short_id))
        self.failUnless(str(res.json.get('id')) == str(short_id))
        self.failUnless(len(res.json.keys()) > 2)

    def test_api_items_put_valid(self):
        self._login()
        res = self.testapp.get('/api/items')
        json = res.json
        self.failUnless(len(json) > 0)
        object_id = json[0].get('_id')
        params = dict(
            _id=object_id,
            listIndex=0,
            type='lesson',
            title='something or other',
            desc='Nothing',
            text=''
        )
        self.testapp.put_json('/api/items/{0}'.format(object_id), params=params)

    def test_api_items_put_invalid(self):
        self._login()
        res = self.testapp.get('/api/items')
        json = res.json
        self.failUnless(len(json) > 0)
        object_id = json[0].get('_id')
        # First test non-json put.
        params = dict(
            _id=object_id,
            listIndex=0,
            type='lesson',
            title='something or other',
            desc='Nothing',
            text=''
        )
        # Should return 406 Not Acceptable
        self.testapp.put('/api/items/{0}'.format(object_id), params=params, status=406)
        # Test a request with no _id supplied.
        params = dict(
            listIndex=0,
            type='lesson',
            title='something or other',
            desc='Nothing',
            text=''
        )
        # Should return 503
        self.testapp.put_json('/api/items/{0}'.format(object_id), params=params, status=503)

    def test_api_assets_get_invalid_id(self):
        # No login needed: a garbage id must 404 regardless of session.
        self.testapp.get('/api/assets/#!@"()/[]}{+?\\&`', status=404)

    def test_misaka_post(self):
        import random
        import string
        char_list = string.letters + string.digits
        # Random payload guarantees a cache miss for the `$ {} $` term below.
        random_markdown = ''.join(random.choice(char_list) for _ in range(100))
        params = {
            'md': (r'$ \alpha = \beta $'  # Math syntax
                   r'$ \alpha = \beta $'  # Image cache hit
                   r'$ {} $'  # To ensure a cache miss
                   '## Random heading'  # Normal markdown
                   ''  # Image tags
                   '').format(random_markdown)
        }
        # Anonymous posts are forbidden.
        self.testapp.post('/misaka', params=params, status=403)
        self._login()
        self.testapp.post('/misaka', params=params)

    def test_get_asset_thumb_image_valid_id(self):
        self._login()
        res = self.testapp.get('/api/assets')
        json = res.json
        short_id = json[0].get('id')
        # Thumb/image endpoints redirect (303) to the CDN.
        res = self.testapp.get('/api/assets/{id}/thumb'.format(id=short_id), status=303)
        self.failUnless('rackcdn.com' in res)
        res = self.testapp.get('/api/assets/{id}/image'.format(id=short_id), status=303)
        self.failUnless('rackcdn.com' in res)

    def test_api_item_get_not_logged_in(self):
        self.testapp.reset()
        self.testapp.get('/api/items', status=403)

    def test_api_asset_get_not_logged_in(self):
        self.testapp.reset()
        self.testapp.get('/api/assets', status=403)
| |
"""CatBoost coding"""
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
import category_encoders.utils as util
from sklearn.utils.random import check_random_state
__author__ = 'Jan Motl'
class CatBoostEncoder(BaseEstimator, util.TransformerWithTargetMixin):
    """CatBoost coding for categorical features.

    Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper.

    This is very similar to leave-one-out encoding, but calculates the
    values "on-the-fly". Consequently, the values naturally vary
    during the training phase and it is not necessary to add random noise.

    Beware, the training data have to be randomly permutated. E.g.:

        # Random permutation
        perm = np.random.permutation(len(X))
        X = X.iloc[perm].reset_index(drop=True)
        y = y.iloc[perm].reset_index(drop=True)

    This is necessary because some data sets are sorted based on the target
    value and this coder encodes the features on-the-fly in a single pass.

    Parameters
    ----------
    verbose: int
        integer indicating verbosity of the output. 0 for none.
    cols: list
        a list of columns to encode, if None, all string columns will be encoded.
    drop_invariant: bool
        boolean for whether or not to drop columns with 0 variance.
    return_df: bool
        boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array).
    handle_missing: str
        options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean.
    handle_unknown: str
        options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean.
    sigma: float
        adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched).
        sigma gives the standard deviation (spread or "width") of the normal distribution.
    a: float
        additive smoothing (it is the same variable as "m" in m-probability estimate). By default set to 1.

    Example
    -------
    >>> from category_encoders import *
    >>> import pandas as pd
    >>> from sklearn.datasets import load_boston
    >>> bunch = load_boston()
    >>> y = bunch.target
    >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)
    >>> enc = CatBoostEncoder(cols=['CHAS', 'RAD']).fit(X, y)
    >>> numeric_dataset = enc.transform(X)
    >>> print(numeric_dataset.info())
    <class 'pandas.core.frame.DataFrame'>
    RangeIndex: 506 entries, 0 to 505
    Data columns (total 13 columns):
    CRIM       506 non-null float64
    ZN         506 non-null float64
    INDUS      506 non-null float64
    CHAS       506 non-null float64
    NOX        506 non-null float64
    RM         506 non-null float64
    AGE        506 non-null float64
    DIS        506 non-null float64
    RAD        506 non-null float64
    TAX        506 non-null float64
    PTRATIO    506 non-null float64
    B          506 non-null float64
    LSTAT      506 non-null float64
    dtypes: float64(13)
    memory usage: 51.5 KB
    None

    References
    ----------

    .. [1] Transforming categorical features to numerical features, from
    https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/

    .. [2] CatBoost: unbiased boosting with categorical features, from
    https://arxiv.org/abs/1706.09516

    """

    def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True,
                 handle_unknown='value', handle_missing='value', random_state=None, sigma=None, a=1):
        self.return_df = return_df
        self.drop_invariant = drop_invariant
        self.drop_cols = []
        self.verbose = verbose
        self.use_default_cols = cols is None  # if True, even a repeated call of fit() will select string columns from X
        self.cols = cols
        self._dim = None
        self.mapping = None
        self.handle_unknown = handle_unknown
        self.handle_missing = handle_missing
        self._mean = None
        self.random_state = random_state
        self.sigma = sigma
        self.feature_names = None
        self.a = a

    def fit(self, X, y, **kwargs):
        """Fit encoder according to X and y.

        Parameters
        ----------

        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------

        self : encoder
            Returns self.

        """
        # unite the input into pandas types
        X = util.convert_input(X)
        y = util.convert_input_vector(y, X.index).astype(float)

        if X.shape[0] != y.shape[0]:
            raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")

        self._dim = X.shape[1]

        # if columns aren't passed, just use every string column
        if self.use_default_cols:
            self.cols = util.get_obj_cols(X)
        else:
            self.cols = util.convert_cols_to_list(self.cols)

        if self.handle_missing == 'error':
            if X[self.cols].isnull().any().any():
                raise ValueError('Columns to be encoded can not contain null')

        categories = self._fit(
            X, y,
            cols=self.cols
        )
        self.mapping = categories

        X_temp = self.transform(X, y, override_return_df=True)
        self.feature_names = X_temp.columns.tolist()

        if self.drop_invariant:
            self.drop_cols = []
            generated_cols = util.get_generated_cols(X, X_temp, self.cols)
            self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5]
            try:
                # Side-effecting list comprehension replaced by a plain loop.
                for x in self.drop_cols:
                    self.feature_names.remove(x)
            except (KeyError, ValueError) as e:
                # BUG FIX: `list.remove` raises ValueError, not KeyError, so
                # the original handler could never fire and a missing column
                # crashed fit(). Catch both to keep backward compatibility.
                if self.verbose > 0:
                    print("Could not remove column from feature names."
                          "Not found in generated cols.\n{}".format(e))

        return self

    def transform(self, X, y=None, override_return_df=False):
        """Perform the transformation to new categorical data.

        Parameters
        ----------

        X : array-like, shape = [n_samples, n_features]
        y : array-like, shape = [n_samples] when transform by leave one out
            None, when transform without target information (such as transform test set)

        Returns
        -------

        p : array, shape = [n_samples, n_numeric + N]
            Transformed values with encoding applied.

        """
        if self.handle_missing == 'error':
            if X[self.cols].isnull().any().any():
                raise ValueError('Columns to be encoded can not contain null')

        if self._dim is None:
            raise ValueError('Must train encoder before it can be used to transform data.')

        # unite the input into pandas types
        X = util.convert_input(X)

        # then make sure that it is the right size
        if X.shape[1] != self._dim:
            raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim,))

        # if we are encoding the training data, we have to check the target
        if y is not None:
            y = util.convert_input_vector(y, X.index).astype(float)
            if X.shape[0] != y.shape[0]:
                raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")

        if not list(self.cols):
            return X

        X = self._transform(
            X, y,
            mapping=self.mapping
        )

        if self.drop_invariant:
            for col in self.drop_cols:
                # Keyword `axis` instead of the deprecated positional form.
                X.drop(col, axis=1, inplace=True)

        if self.return_df or override_return_df:
            return X
        else:
            return X.values

    def _fit(self, X_in, y, cols=None):
        # Build the per-column {category -> (sum, count)} statistics table.
        X = X_in.copy(deep=True)

        if cols is None:
            cols = X.columns.values

        self._mean = y.mean()

        return {col: self._fit_column_map(X[col], y) for col in cols}

    def _fit_column_map(self, series, y):
        # Group the target by category code; NaN gets its own trailing code.
        category = pd.Categorical(series)

        categories = category.categories
        codes = category.codes.copy()

        codes[codes == -1] = len(categories)
        categories = np.append(categories, np.nan)

        return_map = pd.Series(dict([(code, category) for code, category in enumerate(categories)]))

        result = y.groupby(codes).agg(['sum', 'count'])
        return result.rename(return_map)

    def _transform(self, X_in, y, mapping=None):
        """
        The model uses a single column of floats to represent the means of the target variables.
        """
        X = X_in.copy(deep=True)
        random_state_ = check_random_state(self.random_state)

        # Prepare the data
        if y is not None:
            # Convert bools to numbers (the target must be summable)
            y = y.astype('double')

        for col, colmap in mapping.items():
            level_notunique = colmap['count'] > 1

            unique_train = colmap.index
            unseen_values = pd.Series([x for x in X_in[col].unique() if x not in unique_train], dtype=unique_train.dtype)

            is_nan = X_in[col].isnull()
            is_unknown_value = X_in[col].isin(unseen_values.dropna().astype(object))

            if self.handle_unknown == 'error' and is_unknown_value.any():
                raise ValueError('Columns to be encoded can not contain new values')

            if y is None:    # Replace level with its mean target; if level occurs only once, use global mean
                level_means = ((colmap['sum'] + self._mean) / (colmap['count'] + self.a)).where(level_notunique, self._mean)
                X[col] = X[col].map(level_means)
            else:
                # Simulation of CatBoost implementation, which calculates leave-one-out on the fly.
                # The nice thing about this is that it helps to prevent overfitting. The bad thing
                # is that CatBoost uses many iterations over the data. But we run just one iteration.
                # Still, it works better than leave-one-out without any noise.
                # See:
                # https://tech.yandex.com/catboost/doc/dg/concepts/algorithm-main-stages_cat-to-numberic-docpage/
                # Cumsum does not work nicely with None (while cumcount does).
                # As a workaround, we cast the grouping column as string.
                # See: issue #209
                temp = y.groupby(X[col].astype(str)).agg(['cumsum', 'cumcount'])
                X[col] = (temp['cumsum'] - y + self._mean) / (temp['cumcount'] + self.a)

            if self.handle_unknown == 'value':
                if X[col].dtype.name == 'category':
                    X[col] = X[col].astype(float)
                X.loc[is_unknown_value, col] = self._mean
            elif self.handle_unknown == 'return_nan':
                X.loc[is_unknown_value, col] = np.nan

            if self.handle_missing == 'value':
                X.loc[is_nan & unseen_values.isnull().any(), col] = self._mean
            elif self.handle_missing == 'return_nan':
                X.loc[is_nan, col] = np.nan

            if self.sigma is not None and y is not None:
                X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0])

        return X

    def get_feature_names(self):
        """
        Returns the names of all transformed / added columns.

        Returns
        -------

        feature_names: list
            A list with all feature names transformed or added.

        Note: potentially dropped features are not included!

        """
        if not isinstance(self.feature_names, list):
            raise ValueError('Must fit data first. Affected feature names are not known before.')
        else:
            return self.feature_names
| |
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rdflib
import core.descriptors.class_descriptor as class_descriptor
import core.descriptors.property_descriptor as property_descriptor
import core.descriptors.enum_descriptor as enum_descriptor
import utils.utils as utils
import utils.constants as constants
import json
import collections
from jinja2 import Environment, FileSystemLoader
from bs4 import BeautifulSoup
from typing import Dict, Set, Tuple
from utils.utils import PropertyToParent as PropertyToParent
class SchemaGenerator():
"""The SchemaGenerator is a class that generates protocol buffer code given
a schema.
Args:
src_file_path (str): Path to the file containing schema.
Attributes:
graph (rdflib.Graph): Graph which is result of parsing the schema.
"""
def __init__(self, src_file_path: str):
assert isinstance(
src_file_path, str), "Invalid parameter 'src_file_path' must be 'str'."
self.graph = rdflib.Graph()
self.graph.parse(
src_file_path,
format=rdflib.util.guess_format(src_file_path))
def write_proto(self, dst_path: str, package_name: str):
"""Write the protobuf code for the graph to file.
Args:
dst_path (str): Path to the output directory where code has to be
written.
package_name (str): Package name for the proto code.
"""
assert isinstance(
dst_path, str), "Invalid parameter 'dst_path' must be 'str'."
outFile = open(dst_path + 'schema.proto', 'w')
class_to_prop, prop_to_class, enumerations = self.__get_values()
proto_string = ''
proto_string += self.__get_header(package_name)
proto_string += self.__get_options()
proto_string += self.__get_datatypes()
proto_string += self.__class_to_proto(class_to_prop, enumerations)
proto_string += self.__enum_to_proto(class_to_prop, enumerations)
proto_string += self.__prop_to_proto(prop_to_class,
set(class_to_prop.keys()))
outFile.write(proto_string)
outFile.close()
outFile = open(dst_path + 'schema_descriptor.json', 'w')
json_descriptor = self.__get_json_descriptor(
class_to_prop, prop_to_class, enumerations)
json.dump(json_descriptor, outFile, indent=4)
outFile.close()
def __class_to_proto(self,
class_to_prop: Dict[str, Set[PropertyToParent]],
enumerations: Set[str]):
"""Call ClassDescriptor.to_proto() and get proto code for every schema
class.
Args:
class_to_prop (dict(set): Dictionary containing set of properties
for every class.
enumerations (set): Set containing the enumerations in the schema.
Returns:
str: The proto code for all the schema classes in class_to_prop as
a string.
"""
proto_class = '// Definition of classes begin here.\n\n'
for x in sorted(class_to_prop.keys()):
if ((x not in enumerations) and (x not in constants.schema_datatypes) and (
x not in constants.schema_primitives)):
comment = ''
for _, _, c in self.graph.triples(
(utils.add_url(x), constants.schema_constants['Comment'], None)):
comment += c
soup = BeautifulSoup(comment, 'html.parser')
comment = soup.get_text()
proto_class += class_descriptor.ClassDescriptor(
x, list(class_to_prop[x])).to_proto(comment)
proto_class += '\n'
return proto_class
def __prop_to_proto(self,
prop_to_class: Dict[str, Set[str]],
class_list: Set[str]):
"""Call PropertyDescriptor.to_proto() and get proto code for every
schema property.
Args:
prop_to_class (dict(set)): Dictionary containing range of
class/datatypes for every property.
class_list (set): Set of defined classes.
Returns:
str: The proto code for all the schema property in prop_to_class as
a string.
"""
proto_property = '// Definition of properties begin here.\n\n'
for x in sorted(prop_to_class.keys()):
if len(prop_to_class[x]) > 0:
comment = ''
for _, _, c in self.graph.triples(
(utils.add_url(x), constants.schema_constants['Comment'], None)):
comment += c
soup = BeautifulSoup(comment, 'html.parser')
comment = soup.get_text()
proto_property += property_descriptor.PropertyDescriptor(
x, list(prop_to_class[x]), list(class_list)).to_proto(comment)
proto_property += '\n'
return proto_property
def __enum_to_proto(self,
class_to_prop: Dict[str, Set[PropertyToParent]],
enumerations: Set[str]):
"""Call EnumDescriptor.to_proto() and get proto code for every schema
enumeration.
Args:
class_to_prop (dict(set): Dictionary containing set of properties
for every class.
enumerations (set): Set containing the enumerations in the schema.
Returns:
str: The proto code for all the schema enumerations in enumerations
as a string.
"""
proto_enum = '// Definition of enumerations begin here.\n\n'
for x in sorted(enumerations):
enum_values = set()
for ev, _, _ in self.graph.triples(
(None, constants.schema_constants['Type'], utils.add_url(x))):
enum_values.add(utils.strip_url(ev))
comment = ''
for _, _, c in self.graph.triples(
(utils.add_url(x), constants.schema_constants['Comment'], None)):
comment += c
soup = BeautifulSoup(comment, 'html.parser')
comment = soup.get_text()
proto_enum += enum_descriptor.EnumDescriptor(x, list(
class_to_prop[x]), list(enum_values)).to_proto(comment)
proto_enum += '\n'
return proto_enum
    def __get_values(
            self) -> Tuple[Dict[str, Set[PropertyToParent]], Dict[str, Set[str]], Set[str]]:
        """Build the class/property/enumeration mappings for the schema.

        Walks the RDF graph to collect the properties declared on every
        class, propagates inherited properties down the subclass hierarchy
        (after a topological sort of the inheritance graph), and collects
        the range of every property.

        Returns:
            dict[str, set[PropertyToParent]]: Dictionary containing set of
                properties for every class.
            dict[str, set[str]]: Dictionary containing range of
                class/datatypes for every property.
            set[str]: Set containing the enumerations in the schema.
        """
        class_to_prop = dict()
        inheritance_graph = dict()
        # Seed every schema class with the properties whose domain names it
        # directly (via schema:domainIncludes).
        for class_name, _, _ in self.graph.triples(
                (None, constants.schema_constants['Type'], constants.schema_constants['Class'])):
            class_to_prop[utils.strip_url(class_name)] = set()
            for property_name, _, _ in self.graph.triples(
                    (None, constants.schema_constants['domainIncludes'], class_name)):
                prop = utils.PropertyToParent(
                    utils.strip_url(property_name),
                    utils.strip_url(class_name))
                class_to_prop[utils.strip_url(class_name)].add(prop)
        # Build the inheritance graph: parent URI -> set of direct children.
        for class_name, _, _ in self.graph.triples(
                (None, constants.schema_constants['Type'], constants.schema_constants['Class'])):
            if class_name not in inheritance_graph:
                inheritance_graph[class_name] = set()
            for _, _, parent_class in self.graph.triples(
                    (class_name, constants.schema_constants['subClassOf'], None)):
                if parent_class not in inheritance_graph:
                    inheritance_graph[parent_class] = set()
                inheritance_graph[parent_class].add(class_name)
        topsort_order = utils.topological_sort(inheritance_graph)
        # Visit classes in topological order so each parent's property set is
        # already complete when it is merged into its children.
        for class_name in topsort_order:
            for _, _, parent_class in self.graph.triples(
                    (class_name, constants.schema_constants['subClassOf'], None)):
                if utils.strip_url(parent_class) in class_to_prop:
                    class_to_prop[utils.strip_url(class_name)] = class_to_prop[utils.strip_url(
                        class_name)] | class_to_prop[utils.strip_url(parent_class)]
        enumerations = set()
        # Enumerations are the direct subclasses of schema:Enumeration.
        for enum, _, _ in self.graph.triples(
                (None, constants.schema_constants['subClassOf'], constants.schema_constants['Enumeration'])):
            enumerations.add(utils.strip_url(enum))
        class_to_children = utils.get_children(inheritance_graph)
        # Temporary Code
        # class_to_children[rdflib.URIRef('http://schema.org/Audience')].add(rdflib.URIRef("http://schema.org/Researcher"))
        # class_to_prop["SteeringPositionValue"] = class_to_prop["Enumeration"]
        # class_to_prop["DriveWheelConfigurationValue"] = class_to_prop["Enumeration"]
        # enumerations.add("SteeringPositionValue")
        # enumerations.add("DriveWheelConfigurationValue")
        # End of temporary code
        prop_to_class = dict()
        # The range of a property is every declared range class plus all of
        # that class's descendants; Number and Text get implicit extras.
        for property_name, _, _ in self.graph.triples(
                (None, constants.schema_constants['Type'], constants.schema_constants['Property'])):
            prop_to_class[utils.strip_url(property_name)] = set()
            for _, _, class_name in self.graph.triples(
                    (property_name, constants.schema_constants['rangeIncludes'], None)):
                prop_to_class[utils.strip_url(property_name)].add(
                    utils.strip_url(class_name))
                if class_name in class_to_children:
                    prop_to_class[utils.strip_url(property_name)] = prop_to_class[utils.strip_url(
                        property_name)] | set(map(utils.strip_url, class_to_children[class_name]))
                if class_name == constants.schema_constants['Number']:
                    # schema:Number implicitly admits Integer and Float.
                    prop_to_class[utils.strip_url(property_name)].add(
                        utils.strip_url(constants.schema_constants['Integer']))
                    prop_to_class[utils.strip_url(property_name)].add(
                        utils.strip_url(constants.schema_constants['Float']))
                if class_name == constants.schema_constants['Text']:
                    # schema:Text implicitly admits URL.
                    prop_to_class[utils.strip_url(property_name)].add(
                        utils.strip_url(constants.schema_constants['URL']))
        return class_to_prop, prop_to_class, enumerations
def __get_header(self, package_name: str) -> str:
"""Return the header for proto code file.
Args:
package_name (str): Package name for the proto code.
Returns:
str: The proto code of header as a string.
"""
file_loader = FileSystemLoader('./core/templates')
env = Environment(loader=file_loader)
proto_header = env.get_template(
'header.txt').render(package_name=package_name)
return proto_header
def __get_options(self) -> str:
"""Return the options for JSONLD serializer.
Returns:
str: The proto code of options for JSONLD serializer as a string.
"""
file_loader = FileSystemLoader('./core/templates')
env = Environment(loader=file_loader)
proto_options = env.get_template('options.txt').render()
return proto_options
def __get_datatypes(self) -> str:
"""Return the datatypes in accordance with schemaorg.
Returns:
str: The proto code of datatypes in accordance with schemaorg as a
string.
"""
file_loader = FileSystemLoader('./core/templates')
env = Environment(loader=file_loader)
proto_datatypes = env.get_template('datatypes.txt').render()
return proto_datatypes
    def __get_json_descriptor(self,
                              class_to_prop: Dict[str, Set[PropertyToParent]],
                              prop_to_class: Dict[str, Set[str]],
                              enumerations: Set[str]) -> Dict:
        """Return a json descriptor for the given schema.

        Args:
            class_to_prop (dict[str, set[PropertyToParent]]): Dictionary
                containing set of properties for every class.
            prop_to_class (dict[str, set[str]]): Dictionary containing range
                of class/datatypes for every property.
            enumerations (set[str]): Set containing the enumerations in the
                schema.

        Returns:
            dict: The json descriptor for the schema, with a 'messages' map
                and a 'primitives' list.
        """
        defined_classes = set(class_to_prop.keys())
        total_classes = set()
        # NOTE: the loop variable here is the *object* of a rangeIncludes
        # triple, i.e. a class/datatype used as a property range, despite
        # being named 'property_name'.
        for _, _, property_name in self.graph.triples(
                (None, utils.constants.schema_constants['rangeIncludes'], None)):
            total_classes.add(utils.strip_url(property_name))
        # Classes referenced as a range but never defined, plus the schema
        # primitives, are reported under 'primitives'.
        undefined_classes = total_classes.difference(defined_classes)
        undefined_classes = undefined_classes | set(
            utils.constants.schema_primitives.keys())
        message_descriptor = {}
        # One entry per ordinary class (not an enum/datatype/primitive):
        # '@id' first, then the class's own properties sorted by name, then
        # inherited properties grouped by (sorted) parent class.
        for x in sorted(class_to_prop.keys()):
            if ((x not in enumerations) and (x not in constants.schema_datatypes) and (
                    x not in constants.schema_primitives)):
                o = {}
                o['@type'] = utils.strip_url(x)
                prop_from_self = list()
                prop_inherited = dict()
                o['fields'] = list()
                o['fields'].append('@id')
                for p in class_to_prop[x]:
                    if p.parent == x:
                        prop_from_self.append(p.name)
                    else:
                        if p.parent not in prop_inherited:
                            prop_inherited[p.parent] = list()
                        prop_inherited[p.parent].append(p.name)
                prop_from_self = sorted(prop_from_self)
                prop_inherited = collections.OrderedDict(
                    sorted(prop_inherited.items()))
                for p in prop_from_self:
                    o['fields'].append(p)
                for ky in prop_inherited:
                    props = sorted(prop_inherited[ky])
                    o['fields'].extend(props)
                message_descriptor[x] = o
        # One entry per property that has a non-empty range.
        for x in sorted(prop_to_class.keys()):
            if len(prop_to_class[x]) > 0:
                o = {}
                o['@type'] = 'Property'
                o['fields'] = sorted(list(prop_to_class[x]))
                message_descriptor[x] = o
        # Each enumeration yields two entries: an EnumWrapper under the enum
        # name and a class-style message under '<name>Class'.
        for x in sorted(enumerations):
            enum_values = set()
            for ev, _, _ in self.graph.triples(
                    (None, constants.schema_constants['Type'], utils.add_url(x))):
                # NOTE(review): unlike __enum_to_proto, the value is added
                # without utils.strip_url(), so full URIs end up in 'values'
                # -- confirm this asymmetry is intended.
                enum_values.add(ev)
            o = {}
            o['@type'] = 'EnumWrapper'
            o['values'] = sorted(list(enum_values))
            # 'Unknown' is always the first (default) enum value.
            o['values'].insert(0, 'Unknown')
            o['fields'] = ['id', x + 'Class']
            o2 = {}
            o2['@type'] = x
            prop_from_self = list()
            prop_inherited = dict()
            o2['fields'] = list()
            o2['fields'].append('@id')
            for p in class_to_prop[x]:
                if p.parent == x:
                    prop_from_self.append(p.name)
                else:
                    if p.parent not in prop_inherited:
                        prop_inherited[p.parent] = list()
                    prop_inherited[p.parent].append(p.name)
            prop_from_self = sorted(prop_from_self)
            prop_inherited = collections.OrderedDict(
                sorted(prop_inherited.items()))
            for p in prop_from_self:
                o2['fields'].append(p)
            for ky in prop_inherited:
                props = sorted(prop_inherited[ky])
                o2['fields'].extend(props)
            message_descriptor[x] = o
            message_descriptor[x + 'Class'] = o2
        # Hand-written descriptors for the special schema.org datatypes.
        message_descriptor['Date'] = {}
        message_descriptor['Date']['@type'] = 'DatatypeDate'
        message_descriptor['DateTime'] = {}
        message_descriptor['DateTime']['@type'] = 'DatatypeDateTime'
        message_descriptor['Time'] = {}
        message_descriptor['Time']['@type'] = 'DatatypeTime'
        message_descriptor['Duration'] = {}
        message_descriptor['Duration']['@type'] = 'DatatypeDuration'
        message_descriptor['Distance'] = {}
        message_descriptor['Distance']['@type'] = 'DatatypeQuantitative'
        message_descriptor['Energy'] = {}
        message_descriptor['Energy']['@type'] = 'DatatypeQuantitative'
        message_descriptor['Mass'] = {}
        message_descriptor['Mass']['@type'] = 'DatatypeQuantitative'
        json_descriptor = {}
        json_descriptor['messages'] = message_descriptor
        json_descriptor['primitives'] = list(sorted(undefined_classes))
        return json_descriptor
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""States record the grammar parsing tree information."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import nltk
import numpy as np
from neural_guided_symbolic_regression.utils import postprocessor
class StateBase(object):
  """Abstract state for Monte Carlo Tree Search.

  Concrete subclasses must provide:
    * is_terminal
    * copy
    * _equal
    * _info
  """

  def is_terminal(self):
    """Reports whether this state is terminal.

    Returns:
      Boolean.
    """
    raise NotImplementedError('Must be implemented by subclass.')

  def copy(self):
    """Returns an independent copy of this state.

    Returns:
      State object.
    """
    raise NotImplementedError('Must be implemented by subclass.')

  def __eq__(self, other):
    """Compares this state with another object for equality.

    Equality holds only when `other` has the same type as this state and the
    subclass-defined `_equal()` check passes.

    Args:
      other: Another State object.

    Returns:
      Boolean whether two states equal.
    """
    if not isinstance(other, type(self)):
      return False
    return self._equal(other)

  def _equal(self, other):
    """Subclass hook called by __eq__() once the types already match.

    Args:
      other: Another State object.

    Returns:
      Boolean whether two states equal.
    """
    raise NotImplementedError('Must be implemented by subclass.')

  def __repr__(self):
    """Returns a human-readable representation of this state.

    Returns:
      String.
    """
    return '%s [%s]' % (self.__class__.__name__, self._info())

  def _info(self):
    """Subclass hook supplying the detail text shown in __repr__().

    Returns:
      String.
    """
    raise NotImplementedError('Must be implemented by subclass.')
class ExpressionStateBase(StateBase):
  """Base state for expression generation.

  Concrete subclasses must provide:
    * get_expression
  """

  def get_expression(self):
    """Returns the expression string of the current state.

    Returns:
      String.
    """
    raise NotImplementedError('Must be implemented by subclass.')
class ProductionRulesState(ExpressionStateBase):
  """Records the grammar parsing tree by grammar production rules sequence."""

  def __init__(self, production_rules_sequence, stack=None):
    """Initializer.

    If this state is the initial state with no production rules sequence, pass
    a list of one symbol string to stack argument. This will enforce the next
    production rule to append starting with this symbol.

    Args:
      production_rules_sequence: List of nltk.grammar.Production objects. This
          sequence is obtained by a preorder traversal of the context-free
          grammar parsing tree. Note the list is stored without copying, so
          append_production_rule() also mutates the caller's list.
      stack: GrammarLhsStack object or list, the stack to store the string of
          left hand side symbol. The left hand side symbol of valid production
          rule to append must match the top element in the stack. If the input
          is a list, the last element in the list is the top element in the
          stack.

    Raises:
      ValueError: If stack is not list, GrammarLhsStack or None.
    """
    self._production_rules_sequence = production_rules_sequence
    if stack is None:
      # Reconstruct the stack of pending left-hand-side symbols from the
      # sequence itself.
      self._stack = postprocessor.production_rules_sequence_to_stack(
          production_rules_sequence)
    elif isinstance(stack, list):
      self._stack = postprocessor.GrammarLhsStack(stack)
    elif isinstance(stack, postprocessor.GrammarLhsStack):
      self._stack = stack.copy()
    else:
      raise ValueError('stack is expected to be list, GrammarLhsStack or '
                       'None, but got %s.' % type(stack))
    # Log the state information defined in __repr__.
    logging.info('Create %s', self)

  @property
  def production_rules_sequence(self):
    """Gets a shallow copy of the production rules sequence.

    Returns:
      List of nltk.grammar.Production objects.
    """
    return self._production_rules_sequence[:]

  def generate_history(self):
    """Generates the history of the expression generation.

    For example, if the current production rules in production_rules_sequence
    is ['S -> S "+" T', 'S -> T', 'T -> "y"', 'T -> "x"']
    The expression generation history when each production rule is appended is
    ['S + T', 'T + T', 'y + T', 'y + x'].

    Returns:
      List of expression strings, one per prefix of the sequence.
    """
    production_rules_sequence = self.production_rules_sequence
    history = []
    # Render the partial expression after each prefix of the rule sequence;
    # check_all_terminal=False because intermediate expressions still contain
    # non-terminal symbols.
    for partial_sequence_length in range(1, len(production_rules_sequence) + 1):
      history.append(
          postprocessor.production_rules_sequence_to_expression_string(
              prod_rules_sequence=production_rules_sequence[
                  :partial_sequence_length],
              delimiter=' ',
              check_all_terminal=False))
    return history

  def is_valid_to_append(self, production_rule):
    """Whether a production rule is valid to append.

    The left hand side symbol of production rule need to match the top symbol
    in the grammar left hand side symbol stack.

    Args:
      production_rule: nltk.grammar.Production object. The production rule to
          append on the production rule sequence in the current state.

    Returns:
      Boolean.
    """
    return self.stack_peek() == production_rule.lhs().symbol()

  def stack_peek(self):
    """Gets the top symbol in stack.

    The next non terminal symbol to expand.

    Returns:
      String of symbol.
    """
    return self._stack.peek()

  def append_production_rule(self, production_rule):
    """Appends a production rule to this state's sequence, in place.

    The top of the left-hand-side symbol stack is popped and replaced by the
    non-terminal symbols on the rule's right hand side (pushed in reverse so
    they are expanded left to right). Note this mutates the current state and
    returns None; use copy() first to preserve the original state.

    Args:
      production_rule: nltk.grammar.Production object. The production rule to
          append on the production rule sequence in the current state.

    Raises:
      ValueError: If the left hand side symbol of production rule does not
          match the top symbol in the grammar left hand side stack.
    """
    if not self.is_valid_to_append(production_rule):
      raise ValueError('The left hand side symbol of production rule %s does '
                       'not match the top symbol in the grammar left hand side '
                       'stack (%s)' % (production_rule, self.stack_peek()))
    self._stack.pop()
    self._stack.push_reversed_list(
        postprocessor.get_non_terminal_rhs(production_rule))
    self._production_rules_sequence.append(production_rule)
    logging.info('Append production rule: %s, %s', production_rule, self)

  def is_terminal(self):
    """Whether the expression generation is finished.

    The state is terminal when the left hand side symbol stack is empty, i.e.
    there is no non-terminal symbol left to expand.

    Returns:
      Boolean whether current state is terminal.
    """
    return self._stack.is_empty()

  def copy(self):
    """Gets a copy of current state.

    Returns:
      ProductionRulesState object.
    """
    logging.info('Create a copy of ProductionRulesState.')
    # production_rules_sequence property already returns a copy of the list;
    # the stack is copied explicitly.
    return ProductionRulesState(
        production_rules_sequence=self.production_rules_sequence,
        stack=self._stack.copy())

  def _equal(self, other):
    """Defines the equality operator for ProductionRulesState.

    This private method will be called in __eq__(). Two states are equal when
    their production rule sequences match element-wise.

    Args:
      other: Another State object.

    Returns:
      Boolean whether two states equal.
    """
    if len(self.production_rules_sequence) != len(
        other.production_rules_sequence):
      return False
    else:
      return all(
          rule1 == rule2 for rule1, rule2 in zip(
              self.production_rules_sequence, other.production_rules_sequence))

  def get_expression(self, coefficients=None):
    """Gets the expression of current state.

    Args:
      coefficients: Dict of coefficients values in expression string.
          {coefficient_symbol: value}. If not None, the values of the
          coefficients will replace the symbols of coefficients in the
          expression string.

    Returns:
      String.
    """
    return _numericalize_coefficients(self._get_expression()[1], coefficients)

  def _get_expression(self):
    """Gets the expression string and symbol list of current state.

    Returns:
      expression: String, the symbols joined by single spaces.
      symbols: List of symbols.
    """
    symbols = postprocessor.production_rules_sequence_to_symbols(
        prod_rules_sequence=self.production_rules_sequence)
    return ' '.join([str(symbol) for symbol in symbols]), symbols

  def _info(self):
    """Defines information to display when __repr__() is called.

    Returns:
      String summarizing the expression, sequence length, stack top and the
      fraction of terminal symbols.
    """
    expression, symbols = self._get_expression()
    num_terminals = sum(
        nltk.grammar.is_terminal(symbol) for symbol in symbols)
    num_symbols = len(symbols)
    if num_symbols:
      terminal_ratio = float(num_terminals) / num_symbols
    else:
      # No symbols yet: the ratio is undefined.
      terminal_ratio = np.nan
    return ('symbols: %s, '
            'length_production_rules_sequence: %d, '
            'stack top: %s, '
            'num_terminals / num_symbols: %d / %d, '
            'terminal_ratio: %4.2f'
            % (expression,
               len(self.production_rules_sequence),
               self.stack_peek(),
               num_terminals,
               num_symbols,
               terminal_ratio))
def _numericalize_coefficients(raw_symbols, coefficients):
"""Replaces the symbols of coefficients in the expression string with values.
If there is coefficient symbol in raw_symbols which is not in coefficients
dict, it will remain symbolic in the expression string.
Args:
raw_symbols: List of context-free grammar symbols or strings.
coefficients: Dict of coefficients values in expression string.
{coefficient_symbol: value}. If not None, the values of the
coefficients will replace the symbols of coefficients in the
expression string.
Returns:
Expression string.
"""
if coefficients is None:
coefficients = {}
symbols = []
for symbol in map(str, raw_symbols):
if symbol in coefficients:
symbols.append(str(coefficients[symbol]))
else:
symbols.append(symbol)
return ' '.join(symbols)
| |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gcloud storage client for interacting with API."""
from gcloud._helpers import _LocalStack
from gcloud.client import JSONClient
from gcloud.exceptions import NotFound
from gcloud.iterator import Iterator
from gcloud.storage.batch import Batch
from gcloud.storage.bucket import Bucket
from gcloud.storage.connection import Connection
class Client(JSONClient):
    """Client to bundle configuration needed for API requests.

    :type project: string
    :param project: the project which the client acts on behalf of. Will be
                    passed when creating a topic.  If not passed,
                    falls back to the default inferred from the environment.

    :type credentials: :class:`oauth2client.client.OAuth2Credentials` or
                       :class:`NoneType`
    :param credentials: The OAuth2 Credentials to use for the connection
                        owned by this client. If not passed (and if no ``http``
                        object is passed), falls back to the default inferred
                        from the environment.

    :type http: :class:`httplib2.Http` or class that defines ``request()``.
    :param http: An optional HTTP object to make requests. If not passed, an
                 ``http`` object is created that is bound to the
                 ``credentials`` for the current object.
    """

    _connection_class = Connection

    def __init__(self, project=None, credentials=None, http=None):
        # ``_connection`` must exist before the base constructor runs because
        # it assigns through the ``connection`` setter defined below.
        self._connection = None
        super(Client, self).__init__(project=project, credentials=credentials,
                                     http=http)
        self._batch_stack = _LocalStack()

    @property
    def connection(self):
        """Connection (or currently-active batch) for this client.

        :rtype: :class:`gcloud.storage.connection.Connection`
        :returns: The batch at the top of the batch stack when one is
                  active, otherwise the plain connection.
        """
        active_batch = self.current_batch
        return self._connection if active_batch is None else active_batch

    @connection.setter
    def connection(self, value):
        """Set connection on the client; allowed exactly once.

        Intended to be used by the constructor (the base class performs
        ``self.connection = connection``).

        :type value: :class:`gcloud.storage.connection.Connection`
        :param value: The connection set on the client.

        :raises: :class:`ValueError` if connection has already been set.
        """
        if self._connection is not None:
            raise ValueError('Connection already set on client')
        self._connection = value

    def _push_batch(self, batch):
        """Make ``batch`` the currently-active batch.

        "Protected", intended for use by batch context mgrs.

        :type batch: :class:`gcloud.storage.batch.Batch`
        :param batch: newly-active batch
        """
        self._batch_stack.push(batch)

    def _pop_batch(self):
        """Remove and return the currently-active batch.

        "Protected", intended for use by batch context mgrs.

        :raises: IndexError if the stack is empty.
        :rtype: :class:`gcloud.storage.batch.Batch`
        :returns: the top-most batch/transaction, after removing it.
        """
        return self._batch_stack.pop()

    @property
    def current_batch(self):
        """Currently-active batch, or ``None`` when no batch is active.

        :rtype: :class:`gcloud.storage.batch.Batch` or ``NoneType``
        :returns: The batch at the top of the batch stack.
        """
        return self._batch_stack.top

    def bucket(self, bucket_name):
        """Factory constructor for bucket object.

        .. note::
          No HTTP request is made; this simply instantiates a bucket
          object owned by this client.

        :type bucket_name: string
        :param bucket_name: The name of the bucket to be instantiated.

        :rtype: :class:`gcloud.storage.bucket.Bucket`
        :returns: The bucket object created.
        """
        return Bucket(client=self, name=bucket_name)

    def batch(self):
        """Factory constructor for batch object.

        .. note::
          No HTTP request is made; this simply instantiates a batch
          object owned by this client.

        :rtype: :class:`gcloud.storage.batch.Batch`
        :returns: The batch object created.
        """
        return Batch(client=self)

    def get_bucket(self, bucket_name):
        """Fetch a bucket by name, raising if it does not exist.

        For example::

          >>> try:
          >>>   bucket = client.get_bucket('my-bucket')
          >>> except gcloud.exceptions.NotFound:
          >>>   print 'Sorry, that bucket does not exist!'

        This implements "storage.buckets.get".

        :type bucket_name: string
        :param bucket_name: The name of the bucket to get.

        :rtype: :class:`gcloud.storage.bucket.Bucket`
        :returns: The bucket matching the name provided.
        :raises: :class:`gcloud.exceptions.NotFound`
        """
        found = Bucket(self, name=bucket_name)
        found.reload(client=self)
        return found

    def lookup_bucket(self, bucket_name):
        """Get a bucket by name, returning None if not found.

        Use this to check for a ``None`` value instead of catching an
        exception::

          >>> bucket = client.lookup_bucket('doesnt-exist')
          >>> print bucket
          None
          >>> bucket = client.lookup_bucket('my-bucket')
          >>> print bucket
          <Bucket: my-bucket>

        :type bucket_name: string
        :param bucket_name: The name of the bucket to get.

        :rtype: :class:`gcloud.storage.bucket.Bucket`
        :returns: The bucket matching the name provided or None if not found.
        """
        try:
            return self.get_bucket(bucket_name)
        except NotFound:
            return None

    def create_bucket(self, bucket_name):
        """Create a new bucket.

        For example::

          >>> bucket = client.create_bucket('my-bucket')
          >>> print bucket
          <Bucket: my-bucket>

        This implements "storage.buckets.insert". If the bucket already
        exists, will raise :class:`gcloud.exceptions.Conflict`.

        :type bucket_name: string
        :param bucket_name: The bucket name to create.

        :rtype: :class:`gcloud.storage.bucket.Bucket`
        :returns: The newly created bucket.
        """
        new_bucket = Bucket(self, name=bucket_name)
        new_bucket.create(client=self)
        return new_bucket

    def list_buckets(self, max_results=None, page_token=None, prefix=None,
                     projection='noAcl', fields=None):
        """Get all buckets in the project associated to the client.

        This will not populate the list of blobs available in each bucket.

          >>> for bucket in client.list_buckets():
          >>>   print bucket

        This implements "storage.buckets.list".

        :type max_results: integer or ``NoneType``
        :param max_results: Optional. Maximum number of buckets to return.

        :type page_token: string or ``NoneType``
        :param page_token: Optional. Opaque marker for the next "page" of
                           buckets. If not passed, will return the first page
                           of buckets.

        :type prefix: string or ``NoneType``
        :param prefix: Optional. Filter results to buckets whose names begin
                       with this prefix.

        :type projection: string or ``NoneType``
        :param projection: If used, must be 'full' or 'noAcl'. Defaults to
                           'noAcl'. Specifies the set of properties to return.

        :type fields: string or ``NoneType``
        :param fields: Selector specifying which fields to include in a
                       partial response. Must be a list of fields. For example
                       to get a partial response with just the next page token
                       and the language of each bucket returned:
                       'items/id,nextPageToken'

        :rtype: iterable of :class:`gcloud.storage.bucket.Bucket` objects.
        :returns: All buckets belonging to this project.
        """
        params = {'project': self.project, 'projection': projection}
        if max_results is not None:
            params['maxResults'] = max_results
        if prefix is not None:
            params['prefix'] = prefix
        if fields is not None:
            params['fields'] = fields
        iterator = _BucketIterator(client=self, extra_params=params)
        # Page token must be handled specially since the base `Iterator`
        # class has it as a reserved property; set it after construction.
        if page_token is not None:
            iterator.next_page_token = page_token
        return iterator
class _BucketIterator(Iterator):
    """An iterator listing all buckets for a project.

    You shouldn't have to use this directly; use the helper methods on
    :class:`gcloud.storage.connection.Connection` objects instead.

    :type client: :class:`gcloud.storage.client.Client`
    :param client: The client to use for making connections.

    :type extra_params: dict or ``NoneType``
    :param extra_params: Extra query string parameters for the API call.
    """

    def __init__(self, client, extra_params=None):
        super(_BucketIterator, self).__init__(client=client, path='/b',
                                              extra_params=extra_params)

    def get_items_from_response(self, response):
        """Yield :class:`.Bucket` objects built from one API response page.

        :type response: dict
        :param response: The JSON API response for a page of buckets.
        """
        for entry in response.get('items', []):
            bucket = Bucket(self.client, entry.get('name'))
            bucket._set_properties(entry)
            yield bucket
| |
import re
import numpy as np
def parse_file(self):
    """Dispatch parsing to the routine matching the generating program.

    Supported values of ``self.prog`` are "GAUSSIAN" and "CQ"; any other
    value is silently ignored, matching the original if/elif behavior.
    """
    parsers = {
        "GAUSSIAN": parse_file_gaussian,
        "CQ": parse_file_cq,
    }
    parser = parsers.get(self.prog)
    if parser is not None:
        parser(self)
def parse_file_cq(self):
    """Parse ChronusQ (CQ) real-time output from two CSV files.

    Reads the applied-field file (``self.fieldFile``: time, Ex, Ey, Ez) and
    the dipole file (``self.dipoleFile``: time, energy, mu_x, mu_y, mu_z),
    discarding the header row of each, and stores the columns on ``self``
    (``time``, ``electricField``, ``total_steps``, ``step_size``, ``energy``,
    ``electricDipole``).

    NOTE(review): the dipole columns are scaled by 0.393456 (the Debye->a.u.
    factor also used by the Gaussian parser) even though the CQ quantities
    are described as already being in atomic units -- confirm intended.
    """
    # Applied field: drop the header row, then split columns.
    field_data = np.delete(np.genfromtxt(self.fieldFile, delimiter=','), 0, 0)
    self.time = np.asarray(field_data[:, 0])
    self.electricField.x = np.asarray(field_data[:, 1])
    self.electricField.y = np.asarray(field_data[:, 2])
    self.electricField.z = np.asarray(field_data[:, 3])
    self.total_steps = len(self.time)
    # Need at least two samples to infer the step size; the previous guard
    # (`if self.total_steps:`) raised IndexError on self.time[1] for a
    # single-step run.
    if self.total_steps > 1:
        self.step_size = self.time[1] - self.time[0]
    # Dipole file also carries the energy column.
    dipole_data = np.delete(np.genfromtxt(self.dipoleFile, delimiter=','), 0, 0)
    self.energy = np.asarray(dipole_data[:, 1])
    self.electricDipole.x = np.asarray(dipole_data[:, 2]) * 0.393456
    self.electricDipole.y = np.asarray(dipole_data[:, 3]) * 0.393456
    self.electricDipole.z = np.asarray(dipole_data[:, 4]) * 0.393456
def parse_file_gaussian(self):
    """Extract important attributes from the Gaussian realtime logfile.

    Scans ``self.logfile`` line by line, matching fixed-offset text markers
    (note the slices like ``line[1:26]`` assume Gaussian's one-space leading
    indent), and accumulates time series which are then stored on ``self``:
    electric/magnetic dipoles (Debye values converted to a.u. for the
    electric dipole), applied electric/magnetic fields, time, energy, and --
    as flagged below -- HOMO/LUMO occupations specific to an H2+ Rabi study.
    Also fills ``self.envelope`` with the external-field parameters and
    ``self.iops`` with the IOps parsed from the route section.
    """
    filename = self.logfile
    lines = [line.rstrip('\n') for line in open(filename)]
    # Per-step accumulators; converted to numpy arrays at the end.
    muX = []
    muY = []
    muZ = []
    mX = []
    mY = []
    mZ = []
    eX = []
    eY = []
    eZ = []
    bX = []
    bY = []
    bZ = []
    t = []
    en = []
    #FIXME: FOR H2+ RABI ONLY
    HOMO= []
    LUMO= []
    for idx, line in enumerate(lines):
        # Route-section IOp overlay 5, options up to 12 (e.g. "5/33=1/12").
        r = re.findall(r'5/.*/12',line)
        if line[1:26] == 'External field Parameters':
            self.envelope['Field'] = True
            # The field block spans at most the next 15 lines; scan them for
            # known keys.
            for jdx in range(1,16):
                # control for newlines (length zero)
                #print lines[idx+jdx].split()
                if not len(lines[idx+jdx]):
                    continue
                elif 'Envelope' in lines[idx+jdx].split()[0]:
                    self.envelope['Envelope'] = lines[idx+jdx].split()[2] # string
                elif 'Gauge' in lines[idx+jdx].split()[0]:
                    self.envelope['Gauge'] = lines[idx+jdx].split()[2] # string
                elif 'Ex' in lines[idx+jdx].split()[0]:
                    self.envelope['Ex'] = float(lines[idx+jdx].split()[2]) # au
                elif 'Ey' in lines[idx+jdx].split()[0]:
                    self.envelope['Ey'] = float(lines[idx+jdx].split()[2]) # au
                elif 'Ez' in lines[idx+jdx].split()[0]:
                    self.envelope['Ez'] = float(lines[idx+jdx].split()[2]) # au
                elif 'Bx' in lines[idx+jdx].split()[0]:
                    self.envelope['Bx'] = float(lines[idx+jdx].split()[2]) # au
                elif 'By' in lines[idx+jdx].split()[0]:
                    self.envelope['By'] = float(lines[idx+jdx].split()[2]) # au
                elif 'Bz' in lines[idx+jdx].split()[0]:
                    self.envelope['Bz'] = float(lines[idx+jdx].split()[2]) # au
                elif 'Range' in lines[idx+jdx].split()[0]:
                    self.envelope['Sigma'] = float(lines[idx+jdx].split()[5]) # au
                elif 'Frequency' in lines[idx+jdx].split()[0]:
                    self.envelope['Frequency'] = float(lines[idx+jdx].split()[2]) # au
                elif 'Phase' in lines[idx+jdx].split()[0]:
                    self.envelope['Phase'] = float(lines[idx+jdx].split()[2]) # au
                elif 't(on)' in lines[idx+jdx].split()[0]:
                    self.envelope['TOn'] = float(lines[idx+jdx].split()[2]) # au
                elif 't(off)' in lines[idx+jdx].split()[0]:
                    # Exception to fix user setting Toff to obscenely large values
                    try:
                        self.envelope['TOff'] = float(lines[idx+jdx].split()[2]) # au
                    except ValueError:
                        self.envelope['TOff'] = 100000000.000 # au
                elif 'Terms' in lines[idx+jdx].split()[0]:
                    self.envelope['Terms'] = lines[idx+jdx].split()[3:] # multistring
            #break
        elif line[1:27] == 'No external field applied.':
            self.envelope['Field'] = False
        elif r:
            # Split "5/key=val,key=val/12" into individual IOp assignments.
            iops = r[0].split('/')[1:-1][0].split(',')
            for iop in iops:
                key = iop.split('=')[0]
                val = iop.split('=')[1]
                self.iops[key] = [val]
        elif line[1:33] == '  Number of steps               =':
            self.total_steps = int(lines[idx].split()[4])
        elif line[1:33] == '  Step size                     =':
            self.step_size = float(lines[idx].split()[3])
        elif line[1:33] == '  Orthonormalization method     =':
            self.orthonorm = lines[idx].split()[3]
        elif line[1:34] == 'Alpha orbital occupation numbers:':
            #FIXME ONLY FOR H2+ RABI
            HOMO.append(float(lines[idx+1].split()[0]))
            # A fully-occupied HOMO leaves no second column; treat the LUMO
            # occupation as zero in that case.
            try:
                LUMO.append(float(lines[idx+1].split()[1]))
            except IndexError:
                LUMO.append(0.0)
        elif line[1:7] == 'Time =':
            time = line.split()
            t.append(float(time[2]))
        elif line[1:22] == 'Dipole Moment (Debye)':
            # Convert Debye -> atomic units (1 Debye = 0.393456 a.u.).
            dipole = lines[idx+1].split()
            muX.append(float(dipole[1])*0.393456)
            muY.append(float(dipole[3])*0.393456)
            muZ.append(float(dipole[5])*0.393456)
        elif line[1:31] == 'Magnetic Dipole Moment (a.u.):':
            dipole = lines[idx+1].split()
            mX.append(float(dipole[1]))
            mY.append(float(dipole[3]))
            mZ.append(float(dipole[5]))
        elif line[1:9] == 'Energy =':
            energy = line.split()
            en.append(float(energy[2]))
        elif line[1:38] == 'Current electromagnetic field (a.u.):':
            # Electric field on the next line, magnetic field on the one after.
            efield = lines[idx+1].split()
            bfield = lines[idx+2].split()
            eX.append(float(efield[1]))
            eY.append(float(efield[3]))
            eZ.append(float(efield[5]))
            bX.append(float(bfield[1]))
            bY.append(float(bfield[3]))
            bZ.append(float(bfield[5]))
        elif line[1:27] == '  Restart MMUT every      ':
            self.mmut_restart = line.split()[3]
    # Save to object, if it exists
    if(muX and muY and muZ):
        self.electricDipole.x = np.asarray(muX)
        self.electricDipole.y = np.asarray(muY)
        self.electricDipole.z = np.asarray(muZ)
    if(mX and mY and mZ):
        self.magneticDipole.x = np.asarray(mX)
        self.magneticDipole.y = np.asarray(mY)
        self.magneticDipole.z = np.asarray(mZ)
    if(eX and eY and eZ):
        self.electricField.x = np.asarray(eX)
        self.electricField.y = np.asarray(eY)
        self.electricField.z = np.asarray(eZ)
    if(bX and bY and bZ):
        self.magneticField.x = np.asarray(bX)
        self.magneticField.y = np.asarray(bY)
        self.magneticField.z = np.asarray(bZ)
    if(t):
        self.time = np.asarray(t)
    if(en):
        self.energy = np.asarray(en)
    #FIXME FOR H2+ RABI ONLY
    if(HOMO):
        self.HOMO = np.asarray(HOMO)
    if(LUMO):
        self.LUMO = np.asarray(LUMO)
def clean_data(self):
"""Make all the data arrays the same length, in case the log file
did not finish a full time step (e.g. you killed the job early or are
monitoring a job in progess. Furthermore, delete redundant time steps
corresponding to when MMUT restarts"""
def get_length(data):
"""Get length of array. If array is 'None', make it seem impossibly
large"""
if data.size:
return len(data)
else:
return 1e100
# if doMMUT == True, we will delete duplicate data from MMUT restart
doMMUT = False
lengths = []
for x in self.propertyarrays:
try:
# If it is an array, remove MMUT steps, and grab its length
#FIXME Not sure if MMUT steps are actually double printed in latest
if (doMMUT):
self.__dict__[x] = np.delete(self.__dict__[x],
list(range(int(self.mmut_restart)-1,
self.__dict__[x].shape[0],
int(self.mmut_restart))),
axis=0)
lengths.append(get_length(self.__dict__[x]))
except AttributeError:
try:
# Dipoles, fields, etc., are objects and we want their x/y/z
for q in ['_x','_y','_z']:
#FIXME Again, not sure about MMUT duplicates
if (doMMUT):
self.__dict__[x].__dict__[q] = \
np.delete(self.__dict__[x].__dict__[q],
list(range(int(self.mmut_restart)-1,
self.__dict__[x].__dict__[q].shape[0],
int(self.mmut_restart))),
axis=0)
lengths.append(get_length(self.__dict__[x].__dict__[q]))
except:
#print "Unknown data type: "+str(x)+str(q)
pass
self.min_length = min(lengths)
# truncate all the arrays so they are the same length
truncate(self,self.min_length)
def truncate(self,length):
""" Truncates the property arrays to a given *length* (integer) """
for x in self.propertyarrays:
try:
# If it is an array, truncate its length
self.__dict__[x] = self.__dict__[x][:length]
except TypeError:
try:
# Dipoles, fields, etc., are objects and we want their x/y/z
for q in ['_x','_y','_z']:
self.__dict__[x].__dict__[q] = \
self.__dict__[x].__dict__[q][:length]
except:
#print "Unknown data type: "+str(x)+str(q)
pass
def decode_iops(self):
for iop in self.iops:
# OLD
if iop == '132':
key = int(self.iops[iop][0])
if key == 0:
self.iops[iop].append('Ehrenfest: do 10 Microiterations')
elif key < 0:
self.iops[iop].append('Ehrenfest: Frozen Nuclei')
else:
self.iops[iop].append(str(key)+' Fock updates per nuclear step')
elif iop == '134':
key = int(self.iops[iop][0])
if key == 0:
self.iops[iop].append('0.05 au step size')
else:
self.iops[iop].append(str(key*0.00001)+' au step size')
elif iop == '133':
key = int(self.iops[iop][0])
if (key % 10) == 0:
self.iops[iop].append('First call to l512')
elif (key % 10) == 1:
self.iops[iop].append('First call to l512')
elif (key % 10) == 2:
self.iops[iop].append('Not first call to l512')
elif iop == '177':
key = int(self.iops[iop][0])
if key == 0:
self.iops[iop].append('Propagation for 50 steps')
else:
self.iops[iop].append('Propagation for '+str(abs(key))+' steps')
elif iop == '136':
key = int(self.iops[iop][0])
if key == 0:
self.iops[iop].append('Lowdin')
elif key == 1:
self.iops[iop].append('Lowdin')
elif key == 2:
self.iops[iop].append('Cholesky')
elif iop == '137':
key = int(self.iops[iop][0])
if key == 0:
self.iops[iop].append('')
else:
self.iops[iop].append('')
elif iop == '138':
key = int(self.iops[iop][0])
if key == 0:
self.iops[iop].append('No external field')
if (key % 1000 % 10) == 1:
self.iops[iop].append('Electric Dipole')
if (key % 1000 % 100)/10 == 1:
self.iops[iop].append('Electric Quadrupole')
if (key % 1000 % 1000)/100 == 1:
self.iops[iop].append('Magnetic Dipole')
if (key // 1000) == 1:
self.iops[iop].append('Velocity Gauge')
else:
self.iops[iop].append('Length Gauge')
elif iop == '139':
key = int(self.iops[iop][0])
if key == 0:
self.iops[iop].append('')
else:
self.iops[iop].append('')
elif iop == '140':
key = int(self.iops[iop][0])
if key == -1:
self.iops[iop].append('Overlay 6 Pop at very end')
elif key == 0:
self.iops[iop].append('Overlay 6 Pop every 50 steps')
else:
self.iops[iop].append('Overlay 6 Pop every '+str(key)+' steps')
elif iop == '141':
key = int(self.iops[iop][0])
if key == -1:
self.iops[iop].append('No additional print')
elif (key % 10) == 1:
self.iops[iop].append('Print orbital occu. num')
elif (key % 10) == 2:
self.iops[iop].append('Print orbital energy + orbital occu. num')
elif (key % 100)/10 == 1:
self.iops[iop].append('Print electron density difference')
elif (key % 100)/100 == 1:
self.iops[iop].append('Debug print')
elif iop == '142':
key = int(self.iops[iop][0])
if key == 0:
self.iops[iop].append('Print every step')
else:
self.iops[iop].append('Print every '+str(key)+' steps')
elif iop == '143':
key = int(self.iops[iop][0])
if key <= 0:
self.iops[iop].append('Do not restart MMUT')
elif key == 0:
self.iops[iop].append('Restart MMUT every 50 steps')
else:
self.iops[iop].append('Restart MMUT every '+str(key)+' steps')
elif iop == '144':
key = int(self.iops[iop][0])
if key == 0:
self.iops[iop].append('Print HOMO-6 to LUMO+10')
elif key == -1:
self.iops[iop].append('Print all orbitals')
else:
self.iops[iop].append('Print HOMO-6*N to LUMO+6*N+4')
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# name: file.py
# author: Harold Bradley III
# email: harold@bradleystudio.net
# created on: 11/04/2015
#
# pylint: disable=no-member,line-too-long
"""
ext_pylib.files.file
~~~~~~~~~~~~~~~~~~~~
A class to manage and create files. Also includes three mixin classes Parsable,
Section, and Template.
"""
from __future__ import absolute_import, print_function, unicode_literals
from os import remove
import re
from .dir import Dir
from .node import Node
from ..input import prompt
from ..meta import setdynattr
class File(Node):
    """A class to manage a file's permissions, ownership, and path. Extends Node.

    See Node class for atts to pass in at init.

    The Section mixin adds methods useful for processing template section
    files. A section file is a template of a configuration file that only
    represents a particular section of that file. It begins and ends with a
    delineator (for example: ## START:SECTION_NAME ## and ## END:SECTION_NAME
    ##). A use case would be how WordPress delineates a particular section of
    the htaccess file in its root directory with a start line and an end line.
    This is a section of the full htaccess file and could be managed by a
    Section mixin.

    The Template mixin adds a method useful for processing a regular template
    file: apply_using(). It assumes that the file contains placeholder text to
    be replaced by actual data. The placeholders and actual data are passed
    into the method as a dict. The resulting data is returned (presumably to be
    saved in another file.)

    The Parsable mixin adds a method useful for parsing (presumably)
    configuration files. It takes a dict of attribute names and regexes to be
    used. When setup_parsing() is called, a dynamic property is created for
    getting and setting a value in self.data based on the regex.

    :param atts: See notes in node.py

    Usage::

        >>> from ext_pylib.files import File
        >>> a_file = File({'path' : '/the/path/file', 'perms' : 0o600, 'owner' : 'root', 'group' : 'root'})
        >>> a_file.path
        '/the/path/file'
        >>> a_file.read()
        'The data...
    """

    def __init__(self, atts=None):
        """Initializes a new File instance."""
        super(File, self).__init__(atts)
        self.data = ''  # In-memory copy of the file contents.

    def __str__(self):
        """Returns the path, or a stub marker when there is no path."""
        if not self.path:
            return '<file.File:stub>'
        return self.path

    def create(self, data=None):  # pylint: disable=arguments-differ
        """Creates the file (and any missing parent directories).

        :param data: optional contents to write into the new file.
        :returns: True on success, False on failure or user cancellation.
        """
        # pylint: disable=attribute-defined-outside-init
        if not self.path:  # For stubs, just return True
            return True
        if self.exists():
            print(self.path + ' already exists.')
            if not prompt('Replace it?', False):
                return False
        print('Creating ' + self.path + '... ', end=' ')

        # Create parent directories
        if not self.parent_dir.exists():
            try:
                print('')
                self.parent_dir.create()
            except Exception as error:  # pylint: disable=broad-except
                print('[ERROR]')
                print(error)
                # NOTE(review): creation deliberately continues; the open()
                # below will fail and report its own error if the parent
                # directory really could not be created.

        # Create the file; the with-block guarantees the handle is closed
        # even if write() raises.
        try:
            with open(self.path, 'w') as file_handle:
                if data:  # If data was passed, remember it.
                    self.data = data
                if getattr(self, 'data', None):  # Write data if there is any.
                    self.write(self.data, False, file_handle)
            print('[OK]')
        except Exception as error:  # pylint: disable=broad-except
            print('[ERROR]')
            print(error)
            return False
        return all([self.chmod(), self.chown()])

    def remove(self, ask=True):  # pylint: disable=arguments-differ
        """Removes the file.

        :param ask: when True, prompt for confirmation first.
        :returns: True if the file is gone (or never existed); False if the
            user declined the prompt.
        """
        if not self.path:  # Stubs have nothing to remove.
            return True
        if not self.exists():
            print(self.path + ' doesn\'t exist.')
            return True
        if not ask or prompt('Remove ' + self.path + '?'):
            remove(self.path)  # os.remove, imported at module level
            return True
        return False  # BUG FIX: previously fell through and returned None.

    def read(self, flush_memory=False):
        """Returns the contents of the file.

        If the file doesn't exist, returns an empty string.
        Note that this method first attempts to return the contents as in
        memory (which might differ from what is on disk).

        :param flush_memory: discard the in-memory copy and re-read the disk.
        """
        if flush_memory:  # Empty memory to force reading from disk
            self.data = ''
        if self.data != '':
            return self.data
        if not self.exists():  # If no data in memory and doesn't exist,
            self.data = ''     # return an empty string.
            return self.data
        try:  # Otherwise, try to read the file from disk.
            with open(self.path, 'r') as file_handle:
                self.data = file_handle.read()
            return self.data
        except Exception:  # pylint: disable=broad-except
            print('[ERROR]')
            raise

    def readlines(self):
        """Returns the contents of the file as a list for iteration."""
        return self.read().split('\n')

    def write(self, data=None, append=True, handle=None):
        """Writes data to the file.

        :param data: text to write; falls back to self.data when omitted.
        :param append: append (True) or overwrite (False); ignored when a
            handle is supplied.
        :param handle: an already-open file handle; the caller is then
            responsible for opening and closing it.
        :returns: True on success, False on failure.
        :raises UnboundLocalError: when no data is passed and none is cached.
        """
        # pylint: disable=attribute-defined-outside-init
        if data:
            self.data = data  # Keep the data we're saving in memory.
        else:
            if self.data == '':
                raise UnboundLocalError('Must pass data to write method of File class.')
            else:
                data = self.data
        try:
            if handle:  # When passed a handle, rely on the caller to open/close the file
                handle.write(data)
            else:
                flags = 'a' if append else 'w'
                with open(self.path, flags) as file_handle:
                    file_handle.write(data)
            return True
        except Exception:  # pylint: disable=broad-except
            print('[ERROR]')
            return False

    def append(self, data, handle=None):
        """Appends the file with data. Just a wrapper."""
        return self.write(data, True, handle)

    def overwrite(self, data, handle=None):
        """Overwrites the file with data. Just a wrapper."""
        return self.write(data, False, handle)

    @Node.path.setter
    def path(self, path):
        """Sets the path; a File path may not end in '/'."""
        if path is None:  # Stub file: nothing to set.
            return
        if path.endswith('/'):
            raise ValueError('"path" cannot end in "/" in a file.File class.')
        Node.path.fset(self, path)

    @property
    def parent_dir(self):
        """Returns a Dir instance representing the parent directory."""
        return Dir(self.parent_node.get_atts())
class Section(object):
    """A mixin class to work with a section template file.

    See Node class for atts to pass in at init.

    :param atts: See notes in node.py

    Usage::

        >>> from ext_pylib.files import File, Section
        >>> class SectionFile(Section, File): pass
        >>> a_file = SectionFile({'path' : '/the/path/file', 'perms' : 0o600, 'owner' : 'root', 'group' : 'root'})
        >>> a_file.is_applied(File({'path' : '/another/path/file'}).read())
        True
    """

    def is_applied(self, data):
        """Returns True when *data* already contains this section verbatim."""
        return self.read() in data

    def is_in(self, data):
        """Returns True when *data* contains the section markers, whether or
        not the section is applied exactly.

        Side effect: caches the marker positions for apply_to().
        """
        # pylint: disable=attribute-defined-outside-init
        start = data.find(self.start_section)
        end = data.find(self.end_section)
        self._start_pos, self._end_pos = start, end
        if start < 0 and end < 0:
            return False
        if start < end:
            return True
        raise ValueError('Data passed to is_in() not formatted properly.')

    def apply_to(self, data, overwrite=False):
        """Returns a string in which the section is applied to the data."""
        if self.is_applied(data):
            return data
        if not self.is_in(data):
            # No markers yet: append the whole section.
            return data + '\n' + self.read() + '\n'
        if not overwrite:
            raise ValueError('[WARN] Section already exists, but overwrite flag was not set.')
        # Replace everything between (and including) the cached markers.
        head = data[:self._start_pos]
        tail = data[self._end_pos + len(self.end_section) + 1:]
        return head + self.read() + '\n' + tail

    @property
    def start_section(self):
        """Returns the string that denotes the start of the section."""
        if not self.read():
            raise EOFError('Section file has no data')
        return self.readlines()[0]

    @property
    def end_section(self):
        """Returns the string that denotes the end of the section."""
        if not self.read():
            raise EOFError('Section file has no data')
        lines = self.readlines()
        if len(lines) < 2:
            raise ValueError('Not a valid section file.')
        # A trailing blank line means the real marker is the line before it.
        return lines[-2] if lines[-1] == '' else lines[-1]
class SectionFile(Section, File):
    """A File class implementing the Section Mixin (see Section for usage)."""
class Template(object):  # pylint: disable=too-few-public-methods
    """A mixin to work with a template file with placeholders.

    See Node class for atts to pass in at init.

    :param atts: See notes in node.py

    Usage::

        >>> from ext_pylib.files import File, Template
        >>> class TemplateFile(Template, File): pass
        >>> a_file = File({'path' : '/the/path/file', 'perms' : 0o600, 'owner' : 'root', 'group' : 'root'})
        >>> a_file.apply_using({'placeholder' : 'value'})
        The data...
    """

    def apply_using(self, placeholders):
        """Returns the template text with every placeholder replaced.

        :param placeholders: dict mapping placeholder text -> replacement.
        :returns: the rendered string (the file itself is not modified).
        """
        rendered = self.read()
        for marker, replacement in placeholders.items():
            rendered = rendered.replace(marker, replacement)
        return rendered
class TemplateFile(Template, File):
    """A File class implementing the Template Mixin (see Template for usage)."""
class Parsable(object):
    """A mixin to be used with a File class to allow parsing.

    See Node class for atts to pass in at init.

    :param atts: See notes in node.py

    Usage::

        >>> from ext_pylib.files import File, Parsable
        >>> class ParsableFile(Parsable, File): pass
        >>> a_file = File({'path' : '/the/path/file', 'perms' : 0o600, 'owner' : 'root', 'group' : 'root'})
        >>> a_file.setup_parsing('htdocs' : 'DocumentRoot (.*)')
        >>> a_file.htdocs
        'example.com'
    """
    def setup_parsing(self, regexes=None):
        """Takes a dict of name:regex to parse self.data with.
        regex is either a string, a tuple of one, or a tuple of two with the
        second element being the regex mask used for assigning a new value
        to this property. It must contain '{}' to be the marker for the
        placeholder of the new value."""
        if not regexes:
            regexes = self.regexes  # Fall back to the class-level regexes.
        for attribute, regex in regexes.items():
            # Refuse to clobber a real attribute; an existing DynamicProperty
            # (left over from a previous setup_parsing call) may be replaced.
            att = getattr(self.__class__, attribute, None)
            if hasattr(self, attribute) and \
                    not att.__class__.__name__ == 'DynamicProperty':
                raise AttributeError('Attribute "' + attribute + \
                        '" already in use.')
            self.create_parseable_attr(attribute, regex)

    def create_parseable_attr(self, attribute, regex_tuple):
        """Creates a dynamic attribute on the Parsable class.

        This dynamically creates a property with a getter and setter. The regex
        is a closure. Each time the attribute is accessed, the regex is run
        against the data in memory. When the attribute is set to a new value,
        this value is changed in memory. file.write() must be called to write
        the changes to disk.

        NOTE: Because these are dynamic properties they are on the Class NOT
        the instance. This can cause difficult-to-find bugs when using the
        class with multiple regexes and the same named attributes. Be sure to
        call setup_parsing() every time you change these regexes (especially
        when changing to a regex/mask tuple or just a regex string).
        """
        # Normalize regex_tuple into (regex, mask); mask defaults to '{}'.
        if isinstance(regex_tuple, tuple):
            if len(regex_tuple) == 2:
                regex, mask = regex_tuple
            else:
                regex, mask = regex_tuple[0], '{}'
        else:
            regex, mask = regex_tuple, '{}'
        def getter_func(self):
            """Parsable dynamic attribute getter function.
            Returns None (no match), the single match, or a list of matches."""
            results = re.findall(regex, self.read())
            if not results:
                return None
            elif len(results) == 1:
                return results[0]
            return results
        def setter_func(self, value):
            """Parsable dynamic attribute setter function.
            Note that this is only changing the value in memory. You must call
            write()."""
            if not re.findall(regex, self.read()):
                # If the value doesn't exist, add it to the end of data
                self.data = self.data + '\n' + mask.format(value)
            else: # otherwise just change it everywhere it exists
                self.data = re.sub(regex, mask.format(value), self.read())
        setdynattr(self, attribute, getter_func, setter_func)
class ParsableFile(Parsable, File):
    """A File class implementing the Parsable Mixin (see Parsable for usage)."""
| |
# Copyright 2013, Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from django.utils.datastructures import SortedDict
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import neutron
neutronclient = neutron.neutronclient
class IKEPolicy(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron VPN IKEPolicy."""
    # Pass-through __init__, kept for symmetry with the sibling wrappers.
    def __init__(self, apiresource):
        super(IKEPolicy, self).__init__(apiresource)
class IPSecPolicy(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron VPN IPSecPolicy."""
    # Pass-through __init__, kept for symmetry with the sibling wrappers.
    def __init__(self, apiresource):
        super(IPSecPolicy, self).__init__(apiresource)
class IPSecSiteConnection(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron IPSecSiteConnection."""
    # Pass-through __init__, kept for symmetry with the sibling wrappers.
    def __init__(self, apiresource):
        super(IPSecSiteConnection, self).__init__(apiresource)
class VPNService(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron VPNService."""
    # Pass-through __init__, kept for symmetry with the sibling wrappers.
    def __init__(self, apiresource):
        super(VPNService, self).__init__(apiresource)
def vpnservice_create(request, **kwargs):
    """Create VPNService

    :param request: request context
    :param admin_state_up: admin state (default on)
    :param name: name for VPNService
    :param description: description for VPNService
    :param router_id: router id for router of VPNService
    :param subnet_id: subnet id for subnet of VPNService
    """
    # All fields are required; a missing kwarg raises KeyError as before.
    fields = ('admin_state_up', 'name', 'description',
              'router_id', 'subnet_id')
    body = {'vpnservice': dict((field, kwargs[field]) for field in fields)}
    response = neutronclient(request).create_vpnservice(body)
    return VPNService(response.get('vpnservice'))
def vpnservice_list(request, **kwargs):
    """Return all VPN services with subnet, router and connection info
    expanded. Thin wrapper around _vpnservice_list()."""
    return _vpnservice_list(request, expand_subnet=True, expand_router=True,
                            expand_conns=True, **kwargs)
def _vpnservice_list(request, expand_subnet=False, expand_router=False,
                     expand_conns=False, **kwargs):
    """List VPN services, optionally annotating each one.

    :param request: request context
    :param expand_subnet: attach 'subnet_name' (the subnet CIDR)
    :param expand_router: attach 'router_name'
    :param expand_conns: attach 'ipsecsiteconns' (connection ids)
    :returns: list of VPNService wrappers
    """
    vpnservices = neutronclient(request).list_vpnservices(
        **kwargs).get('vpnservices')
    if expand_subnet:
        subnets = neutron.subnet_list(request)
        subnet_dict = SortedDict((s.id, s) for s in subnets)
        for s in vpnservices:
            subnet = subnet_dict.get(s['subnet_id'])
            # Guard against a stale/invisible subnet reference: previously a
            # missing subnet raised AttributeError and broke the whole list.
            s['subnet_name'] = subnet.cidr if subnet else None
    if expand_router:
        routers = neutron.router_list(request)
        router_dict = SortedDict((r.id, r) for r in routers)
        for s in vpnservices:
            router = router_dict.get(s['router_id'])
            # Same guard as above for the router reference.
            s['router_name'] = router.name_or_id if router else None
    if expand_conns:
        ipsecsiteconns = _ipsecsiteconnection_list(request, **kwargs)
        for s in vpnservices:
            s['ipsecsiteconns'] = [c.id for c in ipsecsiteconns
                                   if c.vpnservice_id == s['id']]
    return [VPNService(v) for v in vpnservices]
def vpnservice_get(request, vpnservice_id):
    """Return one VPN service with subnet, router and connections expanded.
    Thin wrapper around _vpnservice_get()."""
    return _vpnservice_get(request, vpnservice_id, expand_subnet=True,
                           expand_router=True, expand_conns=True)
def _vpnservice_get(request, vpnservice_id, expand_subnet=False,
                    expand_router=False, expand_conns=False):
    """Fetch one VPN service, optionally expanding related resources."""
    vpnservice = neutronclient(request).show_vpnservice(
        vpnservice_id).get('vpnservice')
    if expand_subnet:
        vpnservice['subnet'] = neutron.subnet_get(request,
                                                  vpnservice['subnet_id'])
    if expand_router:
        vpnservice['router'] = neutron.router_get(request,
                                                  vpnservice['router_id'])
    if expand_conns:
        conns = _ipsecsiteconnection_list(request)
        vpnservice['ipsecsiteconns'] = [
            conn for conn in conns
            if conn.vpnservice_id == vpnservice['id']]
    return VPNService(vpnservice)
def vpnservice_update(request, vpnservice_id, **kwargs):
    """Update a VPN service; kwargs form the update body."""
    vpnservice = neutronclient(request).update_vpnservice(
        vpnservice_id, kwargs).get('vpnservice')
    return VPNService(vpnservice)
def vpnservice_delete(request, vpnservice_id):
    """Delete the VPN service with the given id."""
    neutronclient(request).delete_vpnservice(vpnservice_id)
def ikepolicy_create(request, **kwargs):
    """Create IKEPolicy

    :param request: request context
    :param name: name for IKEPolicy
    :param description: description for IKEPolicy
    :param auth_algorithm: authorization algorithm for IKEPolicy
    :param encryption_algorithm: encryption algorithm for IKEPolicy
    :param ike_version: IKE version for IKEPolicy
    :param lifetime: Lifetime Units and Value for IKEPolicy
    :param pfs: Perfect Forward Secrecy for IKEPolicy
    :param phase1_negotiation_mode: IKE Phase1 negotiation mode for IKEPolicy
    """
    # All fields are required; a missing kwarg raises KeyError as before.
    fields = ('name', 'description', 'auth_algorithm',
              'encryption_algorithm', 'ike_version', 'lifetime',
              'pfs', 'phase1_negotiation_mode')
    body = {'ikepolicy': dict((field, kwargs[field]) for field in fields)}
    response = neutronclient(request).create_ikepolicy(body)
    return IKEPolicy(response.get('ikepolicy'))
def ikepolicy_list(request, **kwargs):
    """Return all IKE policies with connection ids expanded."""
    return _ikepolicy_list(request, expand_conns=True, **kwargs)
def _ikepolicy_list(request, expand_conns=False, **kwargs):
    """List IKE policies, optionally annotating each with the ids of the
    connections that use it."""
    ikepolicies = neutronclient(request).list_ikepolicies(
        **kwargs).get('ikepolicies')
    if expand_conns:
        conns = _ipsecsiteconnection_list(request, **kwargs)
        for policy in ikepolicies:
            policy['ipsecsiteconns'] = [conn.id for conn in conns
                                        if conn.ikepolicy_id == policy['id']]
    return [IKEPolicy(p) for p in ikepolicies]
def ikepolicy_get(request, ikepolicy_id):
    """Return one IKE policy with its connections expanded."""
    return _ikepolicy_get(request, ikepolicy_id, expand_conns=True)
def _ikepolicy_get(request, ikepolicy_id, expand_conns=False):
    """Fetch one IKE policy, optionally listing its connections."""
    ikepolicy = neutronclient(request).show_ikepolicy(
        ikepolicy_id).get('ikepolicy')
    if expand_conns:
        conns = _ipsecsiteconnection_list(request)
        ikepolicy['ipsecsiteconns'] = [
            conn for conn in conns
            if conn.ikepolicy_id == ikepolicy['id']]
    return IKEPolicy(ikepolicy)
def ikepolicy_update(request, ikepolicy_id, **kwargs):
    """Update an IKE policy; kwargs form the update body."""
    ikepolicy = neutronclient(request).update_ikepolicy(
        ikepolicy_id, kwargs).get('ikepolicy')
    return IKEPolicy(ikepolicy)
def ikepolicy_delete(request, ikepolicy_id):
    """Delete the IKE policy with the given id."""
    neutronclient(request).delete_ikepolicy(ikepolicy_id)
def ipsecpolicy_create(request, **kwargs):
    """Create IPSecPolicy

    :param request: request context
    :param name: name for IPSecPolicy
    :param description: description for IPSecPolicy
    :param auth_algorithm: authorization algorithm for IPSecPolicy
    :param encapsulation_mode: encapsulation mode for IPSecPolicy
    :param encryption_algorithm: encryption algorithm for IPSecPolicy
    :param lifetime: Lifetime Units and Value for IPSecPolicy
    :param pfs: Perfect Forward Secrecy for IPSecPolicy
    :param transform_protocol: Transform Protocol for IPSecPolicy
    """
    # All fields are required; a missing kwarg raises KeyError as before.
    fields = ('name', 'description', 'auth_algorithm',
              'encapsulation_mode', 'encryption_algorithm', 'lifetime',
              'pfs', 'transform_protocol')
    body = {'ipsecpolicy': dict((field, kwargs[field]) for field in fields)}
    response = neutronclient(request).create_ipsecpolicy(body)
    return IPSecPolicy(response.get('ipsecpolicy'))
def ipsecpolicy_list(request, **kwargs):
    """Return all IPSec policies with connection ids expanded."""
    return _ipsecpolicy_list(request, expand_conns=True, **kwargs)
def _ipsecpolicy_list(request, expand_conns=False, **kwargs):
    """List IPSec policies, optionally annotating each with the ids of the
    connections that use it."""
    ipsecpolicies = neutronclient(request).list_ipsecpolicies(
        **kwargs).get('ipsecpolicies')
    if expand_conns:
        conns = _ipsecsiteconnection_list(request, **kwargs)
        for policy in ipsecpolicies:
            policy['ipsecsiteconns'] = [conn.id for conn in conns
                                        if conn.ipsecpolicy_id == policy['id']]
    return [IPSecPolicy(p) for p in ipsecpolicies]
def ipsecpolicy_get(request, ipsecpolicy_id):
    """Return one IPSec policy with its connections expanded."""
    return _ipsecpolicy_get(request, ipsecpolicy_id, expand_conns=True)
def _ipsecpolicy_get(request, ipsecpolicy_id, expand_conns=False):
    """Fetch one IPSec policy, optionally listing its connections."""
    ipsecpolicy = neutronclient(request).show_ipsecpolicy(
        ipsecpolicy_id).get('ipsecpolicy')
    if expand_conns:
        conns = _ipsecsiteconnection_list(request)
        ipsecpolicy['ipsecsiteconns'] = [
            conn for conn in conns
            if conn.ipsecpolicy_id == ipsecpolicy['id']]
    return IPSecPolicy(ipsecpolicy)
def ipsecpolicy_update(request, ipsecpolicy_id, **kwargs):
    """Update an IPSec policy; kwargs form the update body."""
    ipsecpolicy = neutronclient(request).update_ipsecpolicy(
        ipsecpolicy_id, kwargs).get('ipsecpolicy')
    return IPSecPolicy(ipsecpolicy)
def ipsecpolicy_delete(request, ipsecpolicy_id):
    """Delete the IPSec policy with the given id."""
    neutronclient(request).delete_ipsecpolicy(ipsecpolicy_id)
def ipsecsiteconnection_create(request, **kwargs):
    """Create IPSecSiteConnection

    :param request: request context
    :param name: name for IPSecSiteConnection
    :param description: description for IPSecSiteConnection
    :param dpd: dead peer detection action, interval and timeout
    :param ikepolicy_id: IKEPolicy associated with this connection
    :param initiator: initiator state
    :param ipsecpolicy_id: IPsecPolicy associated with this connection
    :param mtu: MTU size for the connection
    :param peer_address: Peer gateway public address
    :param peer_cidrs: remote subnet(s) in CIDR format
    :param peer_id: Peer router identity for authentication"
    :param psk: Pre-Shared Key string
    :param vpnservice_id: VPNService associated with this connection
    :param admin_state_up: admin state (default on)
    """
    # All fields are required; a missing kwarg raises KeyError as before.
    fields = ('name', 'description', 'dpd', 'ikepolicy_id', 'initiator',
              'ipsecpolicy_id', 'mtu', 'peer_address', 'peer_cidrs',
              'peer_id', 'psk', 'vpnservice_id', 'admin_state_up')
    body = {'ipsec_site_connection':
            dict((field, kwargs[field]) for field in fields)}
    response = neutronclient(request).create_ipsec_site_connection(body)
    return IPSecSiteConnection(response.get('ipsec_site_connection'))
# NOTE(review): @memoized caches the result — presumably keyed on the call
# arguments; verify against horizon.utils.memoized before relying on it.
@memoized
def ipsecsiteconnection_list(request, **kwargs):
    """Return all IPSec site connections with related policy/service names
    expanded. Thin wrapper around _ipsecsiteconnection_list()."""
    return _ipsecsiteconnection_list(request, expand_ikepolicies=True,
                                     expand_ipsecpolicies=True,
                                     expand_vpnservices=True, **kwargs)
@memoized
def _ipsecsiteconnection_list(request, expand_ikepolicies=False,
                              expand_ipsecpolicies=False,
                              expand_vpnservices=False, **kwargs):
    """List IPSec site connections, optionally annotating each with the
    display names of its IKE policy, IPSec policy and VPN service.

    :param request: request context
    :param expand_ikepolicies: attach 'ikepolicy_name' to each connection
    :param expand_ipsecpolicies: attach 'ipsecpolicy_name'
    :param expand_vpnservices: attach 'vpnservice_name'
    :returns: list of IPSecSiteConnection wrappers
    """
    ipsecsiteconnections = neutronclient(request).list_ipsec_site_connections(
        **kwargs).get('ipsec_site_connections')
    if expand_ikepolicies:
        ikepolicies = _ikepolicy_list(request, **kwargs)
        policy_dict = SortedDict((p.id, p) for p in ikepolicies)
        for c in ipsecsiteconnections:
            policy = policy_dict.get(c['ikepolicy_id'])
            # Guard against a stale policy reference: previously a missing
            # policy raised AttributeError and broke the whole listing.
            c['ikepolicy_name'] = policy.name_or_id if policy else None
    if expand_ipsecpolicies:
        ipsecpolicies = _ipsecpolicy_list(request, **kwargs)
        policy_dict = SortedDict((p.id, p) for p in ipsecpolicies)
        for c in ipsecsiteconnections:
            policy = policy_dict.get(c['ipsecpolicy_id'])
            # Same guard as above.
            c['ipsecpolicy_name'] = policy.name_or_id if policy else None
    if expand_vpnservices:
        vpnservices = _vpnservice_list(request, **kwargs)
        service_dict = SortedDict((s.id, s) for s in vpnservices)
        for c in ipsecsiteconnections:
            service = service_dict.get(c['vpnservice_id'])
            # Same guard as above.
            c['vpnservice_name'] = service.name_or_id if service else None
    return [IPSecSiteConnection(v) for v in ipsecsiteconnections]
def ipsecsiteconnection_get(request, ipsecsiteconnection_id):
    """Return one IPSec site connection with its IKE policy, IPSec policy
    and VPN service expanded. Thin wrapper around
    _ipsecsiteconnection_get()."""
    return _ipsecsiteconnection_get(request, ipsecsiteconnection_id,
                                    expand_ikepolicies=True,
                                    expand_ipsecpolicies=True,
                                    expand_vpnservices=True)
def _ipsecsiteconnection_get(request, ipsecsiteconnection_id,
                             expand_ikepolicies=False,
                             expand_ipsecpolicies=False,
                             expand_vpnservices=False):
    """Fetch one IPSec site connection, optionally expanding its IKE policy,
    IPSec policy and VPN service.

    The expand_* parameters default to False for consistency with the other
    private _*_get() helpers (previously they were required arguments).
    """
    ipsecsiteconnection = neutronclient(request).show_ipsec_site_connection(
        ipsecsiteconnection_id).get('ipsec_site_connection')
    if expand_ikepolicies:
        ipsecsiteconnection['ikepolicy'] = _ikepolicy_get(
            request, ipsecsiteconnection['ikepolicy_id'])
    if expand_ipsecpolicies:
        ipsecsiteconnection['ipsecpolicy'] = _ipsecpolicy_get(
            request, ipsecsiteconnection['ipsecpolicy_id'])
    if expand_vpnservices:
        ipsecsiteconnection['vpnservice'] = _vpnservice_get(
            request, ipsecsiteconnection['vpnservice_id'])
    return IPSecSiteConnection(ipsecsiteconnection)
def ipsecsiteconnection_update(request, ipsecsiteconnection_id, **kwargs):
    """Update an IPSec site connection; kwargs form the update body."""
    ipsecsiteconnection = neutronclient(request).update_ipsec_site_connection(
        ipsecsiteconnection_id, kwargs).get('ipsec_site_connection')
    return IPSecSiteConnection(ipsecsiteconnection)
def ipsecsiteconnection_delete(request, ipsecsiteconnection_id):
    """Delete the IPSec site connection with the given id."""
    neutronclient(request).delete_ipsec_site_connection(ipsecsiteconnection_id)
| |
# -*- coding: utf-8 -*-
""" General approach to calucations """
import math
from dynamic_dynamodb.log_handler import LOGGER as logger
def decrease_reads_in_percent(
        current_provisioning, percent, min_provisioned_reads, log_tag):
    """ Decrease the current_provisioning with percent %

    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type percent: int
    :param percent: How many percent should we decrease with
    :type min_provisioned_reads: int
    :param min_provisioned_reads: Configured min provisioned reads
    :type log_tag: str
    :param log_tag: Prefix for the log
    :returns: int -- New provisioning value
    """
    # int() truncates towards zero, matching the historical behaviour.
    decrement = int(float(current_provisioning) * (float(percent) / 100))
    updated_provisioning = current_provisioning - decrement
    floor = __get_min_reads(
        current_provisioning, min_provisioned_reads, log_tag)
    if updated_provisioning < floor:
        logger.info(
            '{0} - Reached provisioned reads min limit: {1:d}'.format(
                log_tag, int(floor)))
        return floor
    logger.debug(
        '{0} - Read provisioning will be decreased to {1:d} units'.format(
            log_tag, int(updated_provisioning)))
    return updated_provisioning
def decrease_reads_in_units(
        current_provisioning, units, min_provisioned_reads, log_tag):
    """ Decrease the current_provisioning with units units

    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type units: int
    :param units: How many units should we decrease with
    :type min_provisioned_reads: int
    :param min_provisioned_reads: Configured min provisioned reads
    :type log_tag: str
    :param log_tag: Prefix for the log
    :returns: int -- New provisioning value
    """
    updated_provisioning = int(current_provisioning) - int(units)
    floor = __get_min_reads(
        current_provisioning, min_provisioned_reads, log_tag)
    if updated_provisioning < floor:
        logger.info(
            '{0} - Reached provisioned reads min limit: {1:d}'.format(
                log_tag, int(floor)))
        return floor
    logger.debug(
        '{0} - Read provisioning will be decreased to {1:d} units'.format(
            log_tag, int(updated_provisioning)))
    return updated_provisioning
def decrease_writes_in_percent(
        current_provisioning, percent, min_provisioned_writes, log_tag):
    """ Decrease the current_provisioning with percent %

    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type percent: int
    :param percent: How many percent should we decrease with
    :type min_provisioned_writes: int
    :param min_provisioned_writes: Configured min provisioned writes
    :type log_tag: str
    :param log_tag: Prefix for the log
    :returns: int -- New provisioning value
    """
    # int() truncates towards zero, matching the historical behaviour.
    decrement = int(float(current_provisioning) * (float(percent) / 100))
    updated_provisioning = current_provisioning - decrement
    floor = __get_min_writes(
        current_provisioning, min_provisioned_writes, log_tag)
    if updated_provisioning < floor:
        logger.info(
            '{0} - Reached provisioned writes min limit: {1:d}'.format(
                log_tag, int(floor)))
        return floor
    logger.debug(
        '{0} - Write provisioning will be decreased to {1:d} units'.format(
            log_tag, int(updated_provisioning)))
    return updated_provisioning
def decrease_writes_in_units(
        current_provisioning, units, min_provisioned_writes, log_tag):
    """ Decrease the current provisioning by a fixed number of write units
    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type units: int
    :param units: How many units should we decrease with
    :type min_provisioned_writes: int
    :param min_provisioned_writes: Configured min provisioned writes
    :type log_tag: str
    :param log_tag: Prefix for the log
    :returns: int -- New provisioning value
    """
    proposed = int(current_provisioning) - int(units)
    # Resolve the effective floor (configured minimum, capped by the
    # maximum allowed scale-up relative to the current provisioning).
    min_writes = __get_min_writes(
        current_provisioning,
        min_provisioned_writes,
        log_tag)
    if proposed < min_writes:
        logger.info(
            '{0} - Reached provisioned writes min limit: {1:d}'.format(
                log_tag,
                int(min_writes)))
        return min_writes
    logger.debug(
        '{0} - Write provisioning will be decreased to {1:d} units'.format(
            log_tag,
            int(proposed)))
    return proposed
def increase_reads_in_percent(
        current_provisioning, percent, max_provisioned_reads,
        consumed_read_units_percent, log_tag):
    """ Increase the current_provisioning with percent %
    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type percent: int
    :param percent: How many percent should we increase with
    :type max_provisioned_reads: int
    :param max_provisioned_reads: Configured max provisioned reads
    :type consumed_read_units_percent: float
    :param consumed_read_units_percent: Number of consumed read units
    :type log_tag: str
    :param log_tag: Prefix for the log
    :returns: int -- New provisioning value
    """
    current_provisioning = float(current_provisioning)
    consumed_read_units_percent = float(consumed_read_units_percent)
    percent = float(percent)
    # Throughput implied by the actual consumption; used as the base for
    # the increase when consumption already exceeds what is provisioned.
    # int() here (instead of the original float()) keeps this consistent
    # with increase_writes_in_percent and makes the result an int.
    consumption_based_current_provisioning = \
        int(math.ceil(current_provisioning*(consumed_read_units_percent/100)))
    if consumption_based_current_provisioning > current_provisioning:
        increase = int(
            math.ceil(consumption_based_current_provisioning*(percent/100)))
        updated_provisioning = consumption_based_current_provisioning + increase
    else:
        increase = int(math.ceil(current_provisioning*(percent/100)))
        # Cast back to int so the function returns an int as documented
        # (the original returned a float on this branch).
        updated_provisioning = int(current_provisioning) + increase
    if max_provisioned_reads > 0:
        if updated_provisioning > max_provisioned_reads:
            logger.info(
                '{0} - Reached provisioned reads max limit: {1}'.format(
                    log_tag,
                    max_provisioned_reads))
            return max_provisioned_reads
    logger.debug(
        '{0} - Read provisioning will be increased to {1} units'.format(
            log_tag,
            updated_provisioning))
    return updated_provisioning
def increase_reads_in_units(
        current_provisioning, units, max_provisioned_reads,
        consumed_read_units_percent, log_tag):
    """ Increase the current provisioning by a fixed number of read units
    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type units: int
    :param units: How many units should we increase with
    :type max_provisioned_reads: int
    :param max_provisioned_reads: Configured max provisioned reads
    :type consumed_read_units_percent: float
    :param consumed_read_units_percent: Number of consumed read units
    :type log_tag: str
    :param log_tag: Prefix for the log
    :returns: int -- New provisioning value
    """
    step = int(units)
    provisioned = float(current_provisioning)
    utilization = float(consumed_read_units_percent)
    # Throughput implied by the actual consumption.
    consumption_based = int(
        math.ceil(provisioned * (utilization / 100)))
    if consumption_based > provisioned:
        # Consumption exceeds provisioning: grow from the consumed level.
        updated_provisioning = consumption_based + step
    else:
        updated_provisioning = int(provisioned) + step
    if max_provisioned_reads > 0:
        if updated_provisioning > max_provisioned_reads:
            logger.info(
                '{0} - Reached provisioned reads max limit: {1}'.format(
                    log_tag,
                    max_provisioned_reads))
            return max_provisioned_reads
    logger.debug(
        '{0} - Read provisioning will be increased to {1:d} units'.format(
            log_tag,
            int(updated_provisioning)))
    return updated_provisioning
def increase_writes_in_percent(
        current_provisioning, percent, max_provisioned_writes,
        consumed_write_units_percent, log_tag):
    """ Increase the current_provisioning with percent %
    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type percent: int
    :param percent: How many percent should we increase with
    :type max_provisioned_writes: int
    :param max_provisioned_writes: Configured max provisioned writes
    :type consumed_write_units_percent: float
    :param consumed_write_units_percent: Number of consumed write units
    :type log_tag: str
    :param log_tag: Prefix for the log
    :returns: int -- New provisioning value
    """
    current_provisioning = float(current_provisioning)
    consumed_write_units_percent = float(consumed_write_units_percent)
    percent = float(percent)
    # Throughput implied by the actual consumption; used as the base for
    # the increase when consumption already exceeds what is provisioned.
    consumption_based_current_provisioning = \
        int(math.ceil(current_provisioning*(consumed_write_units_percent/100)))
    if consumption_based_current_provisioning > current_provisioning:
        increase = int(
            math.ceil(consumption_based_current_provisioning*(percent/100)))
        updated_provisioning = consumption_based_current_provisioning + increase
    else:
        # percent was already cast to float above; the original re-cast it
        # redundantly here.
        increase = int(math.ceil(current_provisioning*(percent/100)))
        # Cast back to int so the function returns an int as documented
        # (the original returned a float on this branch).
        updated_provisioning = int(current_provisioning) + increase
    if max_provisioned_writes > 0:
        if updated_provisioning > max_provisioned_writes:
            logger.info(
                '{0} - Reached provisioned writes max limit: {1}'.format(
                    log_tag,
                    max_provisioned_writes))
            return max_provisioned_writes
    logger.debug(
        '{0} - Write provisioning will be increased to {1:d} units'.format(
            log_tag,
            int(updated_provisioning)))
    return updated_provisioning
def increase_writes_in_units(
        current_provisioning, units, max_provisioned_writes,
        consumed_write_units_percent, log_tag):
    """ Increase the current provisioning by a fixed number of write units
    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type units: int
    :param units: How many units should we increase with
    :type max_provisioned_writes: int
    :param max_provisioned_writes: Configured max provisioned writes
    :type consumed_write_units_percent: float
    :param consumed_write_units_percent: Number of consumed write units
    :type log_tag: str
    :param log_tag: Prefix for the log
    :returns: int -- New provisioning value
    """
    step = int(units)
    provisioned = float(current_provisioning)
    utilization = float(consumed_write_units_percent)
    # Throughput implied by the actual consumption.
    consumption_based = int(
        math.ceil(provisioned * (utilization / 100)))
    if consumption_based > provisioned:
        # Consumption exceeds provisioning: grow from the consumed level.
        updated_provisioning = consumption_based + step
    else:
        updated_provisioning = int(provisioned) + step
    if max_provisioned_writes > 0:
        if updated_provisioning > max_provisioned_writes:
            logger.info(
                '{0} - Reached provisioned writes max limit: {1}'.format(
                    log_tag,
                    max_provisioned_writes))
            return max_provisioned_writes
    logger.debug(
        '{0} - Write provisioning will be increased to {1:d} units'.format(
            log_tag,
            int(updated_provisioning)))
    return updated_provisioning
def __get_min_reads(current_provisioning, min_provisioned_reads, log_tag):
    """ Get the minimum number of reads to current_provisioning
    :type current_provisioning: int
    :param current_provisioning: Current provisioned reads
    :type min_provisioned_reads: int
    :param min_provisioned_reads: Configured min provisioned reads
    :type log_tag: str
    :param log_tag: Prefix for the log
    :returns: int -- Minimum number of reads
    """
    # Fallback value to ensure that we always have at least 1 read
    reads = 1
    if min_provisioned_reads:
        reads = int(min_provisioned_reads)
        # Cap at twice the current provisioning, per the max scale-up
        # of 100% noted in the log message below.
        if reads > int(current_provisioning * 2):
            reads = int(current_provisioning * 2)
            logger.debug(
                '{0} - '
                'Cannot reach min-provisioned-reads as max scale up '
                'is 100% of current provisioning'.format(log_tag))
    # Log the value actually used, not the raw configuration value --
    # they differ when the cap above applied or nothing was configured.
    logger.debug(
        '{0} - Setting min provisioned reads to {1}'.format(
            log_tag, reads))
    return reads
def __get_min_writes(current_provisioning, min_provisioned_writes, log_tag):
    """ Get the minimum number of writes to current_provisioning
    :type current_provisioning: int
    :param current_provisioning: Current provisioned writes
    :type min_provisioned_writes: int
    :param min_provisioned_writes: Configured min provisioned writes
    :type log_tag: str
    :param log_tag: Prefix for the log
    :returns: int -- Minimum number of writes
    """
    # Fallback value to ensure that we always have at least 1 write
    writes = 1
    if min_provisioned_writes:
        writes = int(min_provisioned_writes)
        # Cap at twice the current provisioning, per the max scale-up
        # of 100% noted in the log message below.
        if writes > int(current_provisioning * 2):
            writes = int(current_provisioning * 2)
            logger.debug(
                '{0} - '
                'Cannot reach min-provisioned-writes as max scale up '
                'is 100% of current provisioning'.format(log_tag))
    # Log the value actually used, not the raw configuration value --
    # they differ when the cap above applied or nothing was configured.
    logger.debug(
        '{0} - Setting min provisioned writes to {1}'.format(
            log_tag, writes))
    return writes
| |
#------------------------------------------------------------------------------
# Copyright (c) 2009 Richard W. Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#------------------------------------------------------------------------------
""" Defines convenient pyparsing constructs and token converters.
References:
sparser.py by Tim Cera timcera@earthlink.net
"""
#------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
import re
import itertools
from pyparsing import \
TokenConverter, oneOf, string, Literal, Group, Word, Optional, Combine, \
sglQuotedString, dblQuotedString, restOfLine, nums, removeQuotes, Regex, \
OneOrMore, hexnums, alphas, alphanums, CaselessLiteral, And, NotAny, Or, \
White, QuotedString
from godot.common import color_schemes
#------------------------------------------------------------------------------
# Punctuation:
#------------------------------------------------------------------------------
# Single-character punctuation tokens shared by the grammar definitions
# in this module.
colon = Literal(":")
lbrace = Literal("{")
rbrace = Literal("}")
lbrack = Literal("[")
rbrack = Literal("]")
lparen = Literal("(")
rparen = Literal(")")
equals = Literal("=")
comma = Literal(",")
dot = Literal(".")
slash = Literal("/")
bslash = Literal("\\")
star = Literal("*")
semi = Literal(";")
at = Literal("@")
minus = Literal("-")
pluss = Literal("+")
quote = Literal('"')# | Literal("'")
#------------------------------------------------------------------------------
# Compass point:
#------------------------------------------------------------------------------
# Compass-point tokens ("n", "ne", ... "c", "_") as used for node ports
# in the DOT language; matched case-insensitively.
north = CaselessLiteral("n")
northeast = CaselessLiteral("ne")
east = CaselessLiteral("e")
southeast = CaselessLiteral("se")
south = CaselessLiteral("s")
southwest = CaselessLiteral("sw")
west = CaselessLiteral("w")
northwest = CaselessLiteral("nw")
middle = CaselessLiteral("c")
underscore = CaselessLiteral("_")
# Any one of the points above.
compass_pt = (north | northeast | east | southeast | south | southwest |
              west | northwest | middle | underscore)
#------------------------------------------------------------------------------
# Convenient pyparsing constructs.
#------------------------------------------------------------------------------
# Shared lexical helpers.
decimal_sep = "."
sign = oneOf("+ -")
# Suppressed statement separator.
scolon = Literal(";").suppress()
# MATLAB-style '%' comment running to end of line (discarded).
matlab_comment = Group(Literal('%') + restOfLine).suppress()
# PSS/E-style '@!' comment.
psse_comment = Literal('@!') + Optional(restOfLine)
# part of printables without decimal_sep, +, -
# NOTE(review): 'string' here is the module re-exported by (old) pyparsing;
# string.replace(s, old, new) is the Python 2 function form of str.replace --
# confirm this still resolves under the project's pyparsing version.
special_chars = string.replace(
    '!"#$%&\'()*,./:;<=>?@[\\]^_`{|}~', decimal_sep, ""
)
#------------------------------------------------------------------------------
# "ToBoolean" class:
#------------------------------------------------------------------------------
class ToBoolean(TokenConverter):
    """ Converter to make token boolean """
    def postParse(self, instring, loc, tokenlist):
        """ Converts the first token to boolean.
        bool() on any non-empty string -- including "0" and "False" --
        is True, so the original bool(tokenlist[0]) mapped every matched
        token to True.  Compare against the accepted "true" spellings
        instead (the `true` alternatives below yield "True" or "1").
        """
        return str(tokenlist[0]).lower() in ("true", "1")
#------------------------------------------------------------------------------
# "ToInteger" class:
#------------------------------------------------------------------------------
class ToInteger(TokenConverter):
    """ Token converter producing an int from the matched text. """
    def postParse(self, instring, loc, tokenlist):
        """ Returns the first parsed token converted to an integer. """
        first = tokenlist[0]
        return int(first)
#------------------------------------------------------------------------------
# "ToFloat" class:
#------------------------------------------------------------------------------
class ToFloat(TokenConverter):
    """ Token converter producing a float from the matched text. """
    def postParse(self, instring, loc, tokenlist):
        """ Returns the first parsed token converted to a float. """
        first = tokenlist[0]
        return float(first)
#------------------------------------------------------------------------------
# "ToTuple" class:
#------------------------------------------------------------------------------
class ToTuple(TokenConverter):
    """ Token converter collecting the matched tokens into a tuple. """
    def postParse(self, instring, loc, tokenlist):
        """ Returns all parsed tokens as a tuple. """
        values = tuple(tokenlist)
        return values
#------------------------------------------------------------------------------
# "ToList" class:
#------------------------------------------------------------------------------
class ToList(TokenConverter):
    """ Token converter collecting the matched tokens into a plain list. """
    def postParse(self, instring, loc, tokenlist):
        """ Returns all parsed tokens as a list. """
        values = list(tokenlist)
        return values
# Integer ---------------------------------------------------------------------
# An optionally signed, optionally double-quoted run of digits.
integer = ToInteger(
    Optional(quote).suppress() +
    Combine(Optional(sign) + Word(nums)) +
    Optional(quote).suppress()
).setName("integer")
# Digits with an optional leading "+".
# NOTE(review): setName("integer") on the two parsers below looks
# copy-pasted; parse error messages will not distinguish the positive and
# negative forms -- confirm this is intended.
positive_integer = ToInteger(
    Combine(Optional("+") + Word(nums))
).setName("integer")
# Digits with a mandatory leading "-".
negative_integer = ToInteger(
    Combine("-" + Word(nums))
).setName("integer")
# Boolean ---------------------------------------------------------------------
#boolean = ToBoolean(ToInteger(Word("01", exact=1))).setName("bool")
# Accepted spellings: "True"/"1" for true, "False"/"0" for false
# (CaselessLiteral matches regardless of input case).
true = CaselessLiteral("True") | Literal("1") #And(integer, NotAny(Literal("0")))
false = CaselessLiteral("False") | Literal("0")
boolean = ToBoolean(true | false).setResultsName("boolean")
# Real ------------------------------------------------------------------------
# A real number: optional sign, "12", "12.34" or ".34", optional exponent,
# optionally wrapped in double quotes.
real = ToFloat(
    Optional(quote).suppress() +
    Combine(
        Optional(sign) +
        # Parenthesise the alternatives explicitly: '+' (And) binds tighter
        # than '|' (MatchFirst) in pyparsing, so without these parens the
        # expression grouped as (sign + "12[.34]") | (".34" + exponent) --
        # the sign never applied to the ".34" form and the exponent never
        # applied to the "12[.34]" form.
        ((Word(nums) + Optional(decimal_sep + Word(nums))) |
         (decimal_sep + Word(nums))) +
        Optional(oneOf("E e") + Word(nums))
    ) +
    Optional(quote).suppress()
).setName("real")
# TODO: Positive real number between zero and one.
decimal = real
# String ----------------------------------------------------------------------
# Either a single- or double-quoted string.
q_string = (sglQuotedString | dblQuotedString).setName("q_string")
#double_quoted_string = QuotedString('"', multiline=True,escChar="\\",
#    unquoteResults=True) # dblQuotedString
# Double-quoted string allowing escaped quotes/backslashes; may span lines.
double_quoted_string = Regex(r'\"(?:\\\"|\\\\|[^"])*\"', re.MULTILINE)
double_quoted_string.setParseAction(removeQuotes)
# DOT-style string concatenation: "a" + "b" combines into a single token.
quoted_string = Combine(
    double_quoted_string+
    Optional(OneOrMore(pluss+double_quoted_string)), adjacent=False
)
word = quoted_string.setName("word") # Word(alphanums)
# Graph attributes ------------------------------------------------------------
# One colour channel: exactly two hex digits.
hex_color = Word(hexnums, exact=2) #TODO: Optional whitespace
rgb = Literal("#").suppress() + hex_color.setResultsName("red") + \
    hex_color.setResultsName("green") + hex_color.setResultsName("blue")
rgba = rgb + hex_color.setResultsName("alpha")
hsv = decimal.setResultsName("hue") + decimal.setResultsName("saturation") + \
    decimal.setResultsName("value")
color_name = double_quoted_string | Word(alphas)
# Most specific colour forms are tried first (rgba before rgb, etc.).
colour = rgba | rgb | hsv | color_name
#------------------------------------------------------------------------------
# A convenient function for calculating a unique name given a list of
# existing names.
#------------------------------------------------------------------------------
def make_unique_name(base, existing=(), format="%s_%s"):
    """
    Return a name, unique within a context, based on the specified name.
    base: the desired base name of the generated unique name.
    existing: a sequence of the existing names to avoid returning.
    format: a formatting specification for how the name is made unique.
    """
    # The default is an immutable tuple rather than the original mutable
    # "existing=[]": the argument is only read here, but the tuple removes
    # the shared-mutable-default pitfall entirely.
    count = 2
    name = base
    # Append an increasing counter until an unused name is found.
    while name in existing:
        name = format % (base, count)
        count += 1
    return name
#------------------------------------------------------------------------------
# "nsplit" function:
#------------------------------------------------------------------------------
def nsplit(seq, n=2):
    """ Split a sequence into pieces of length n
    If the length of the sequence isn't a multiple of n, the rest is discarded.
    Note that nsplit will split strings into individual characters.
    Examples:
    >>> nsplit("aabbcc")
    [("a", "a"), ("b", "b"), ("c", "c")]
    >>> nsplit("aabbcc",n=3)
    [("a", "a", "b"), ("b", "c", "c")]
    # Note that cc is discarded
    >>> nsplit("aabbcc",n=4)
    [("a", "a", "b", "b")]
    """
    # zip(*[iter(seq)] * n) hands the same iterator to zip n times, so each
    # output tuple consumes n consecutive items; any leftover shorter than n
    # is dropped.  The builtin zip gives the same materialized result as the
    # original itertools.izip and also works on Python 3, where izip was
    # removed.
    return list(zip(*[iter(seq)] * n))
#------------------------------------------------------------------------------
# "windows" function:
#------------------------------------------------------------------------------
def windows(iterable, length=2, overlap=0, padding=True):
    """ Yield sliding windows of `length` items over `iterable`, successive
    windows overlapping by `overlap` items.  If `padding` is true, a final
    partial window is padded with None up to `length` and yielded too.
    (Code snippet from Python Cookbook, 2nd Edition by David Ascher,
    Alex Martelli and Anna Ravenscroft; O'Reilly 2005.)
    """
    step = length - overlap
    source = iter(iterable)
    window = list(itertools.islice(source, length))
    # Keep yielding as long as a complete window could be filled.
    while len(window) == length:
        yield window
        # Drop the consumed prefix, then pull in the next `step` items.
        window = window[step:]
        window.extend(itertools.islice(source, step))
    if padding and window:
        # Pad the trailing partial window out to the full length.
        window.extend(itertools.repeat(None, length - len(window)))
        yield window
if __name__ == "__main__":
    # Small manual smoke test / demo (Python 2 print statements).
    l = [1,2,3,4]
    for j, k in windows(l, length=2, overlap=1, padding=False):
        print j, k
    print nsplit(l)
# EOF -------------------------------------------------------------------------
| |
import os
import re
import json
import time
import logging
import pytest
try:
from unittest.mock import patch, call, MagicMock # Python 3
except ImportError:
from mock import patch, call, MagicMock # Python 2
from requests_oauthlib import OAuth1Session
import requests
import twarc
"""
You will need to have these environment variables set to run these tests:
* CONSUMER_KEY
* CONSUMER_SECRET
* ACCESS_TOKEN
* ACCESS_TOKEN_SECRET
"""
# Send log output to a file so test runs stay readable.
logging.basicConfig(filename="test.log", level=logging.INFO)
# Module-level client shared by every test below; reads the Twitter
# credentials from the environment variables listed in the string above.
T = twarc.Twarc()
def test_version():
    # The version advertised by setup.py must match the twarc package.
    import setup
    assert setup.__version__ == twarc.__version__
def test_search():
    # Live API: the first ten search results should all carry an id.
    count = 0
    for tweet in T.search('obama'):
        assert tweet['id_str']
        count += 1
        if count == 10:
            break
    assert count == 10
def test_search_max_pages():
    # A search page holds at most 100 tweets, so max_pages bounds the total.
    tweets = list(T.search('obama', max_pages=1))
    assert 0 < len(tweets) <= 100
    tweets = list(T.search('obama', max_pages=2))
    assert 100 < len(tweets) <= 200
def test_since_id():
    # Grab the newest matching tweet id, then ask only for newer tweets.
    for tweet in T.search('obama'):
        id = tweet['id_str']
        break
    assert id
    time.sleep(5)
    for tweet in T.search('obama', since_id=id):
        # NOTE(review): this compares numeric ids as strings; that only
        # matches numeric order while both ids have the same number of
        # digits -- confirm that is acceptable for these tests.
        assert tweet['id_str'] > id
def test_max_id():
    # Grab the newest matching tweet id, then ask only for older tweets.
    for tweet in T.search('obama'):
        id = tweet['id_str']
        break
    assert id
    time.sleep(5)
    count = 0
    for tweet in T.search('obama', max_id=id):
        count += 1
        assert tweet['id_str'] <= id
        if count > 100:
            break
def test_max_and_since_ids():
    # Record the newest (max_id) and oldest (since_id) ids over a first
    # pass, then check a bounded search stays inside that window.
    max_id = since_id = None
    count = 0
    for tweet in T.search('obama'):
        count += 1
        if not max_id:
            max_id = tweet['id_str']
        since_id = tweet['id_str']
        if count > 500:
            break
    count = 0
    for tweet in T.search('obama', max_id=max_id, since_id=since_id):
        count += 1
        assert tweet['id_str'] <= max_id
        assert tweet['id_str'] > since_id
def test_paging():
    # pages are 100 tweets big so if we can get 500 paging is working
    count = 0
    for tweet in T.search('obama'):
        count += 1
        if count == 500:
            break
    assert count == 500
def test_geocode():
    # look for tweets from New York ; the search radius is larger than NYC
    # so hopefully we'll find one from New York in the first 100?
    count = 0
    found = False
    for tweet in T.search(None, geocode='40.7484,-73.9857,1mi'):
        # 'place' can be null even on geocoded tweets, hence the "or {}".
        if (tweet['place'] or {}).get('name') == 'Manhattan':
            found = True
            break
        if count > 100:
            break
        count += 1
    assert found
def test_track():
    # The first streamed tweet on a track filter should mention the term.
    tweet = next(T.filter(track="obama"))
    json_str = json.dumps(tweet)
    assert re.search('obama', json_str, re.IGNORECASE)
    # reconnect to close streaming connection for other tests
    T.connect()
def test_keepalive():
    # A term that should match nothing lets keep-alive events through.
    for event in T.filter(track="abcdefghiklmno", record_keepalive=True):
        if event == 'keep-alive':
            break
    # reconnect to close streaming connection for other tests
    T.connect()
def test_follow():
    # Live streaming test: the first tweet on a follow filter should be
    # from, replying to, retweeting or quoting one of the followed accounts.
    user_ids = [
        "87818409",  # @guardian
        "428333",  # @cnnbrk
        "5402612",  # @BBCBreaking
        "2467791",  # @washingtonpost
        "1020058453",  # @BuzzFeedNews
        "23484039",  # WSJbreakingnews
        "384438102",  # ABCNewsLive
        "15108702",  # ReutersLive
        "87416722"  # SkyNewsBreak
    ]
    found = False
    for tweet in T.filter(follow=','.join(user_ids)):
        assert tweet['id_str']
        if tweet['user']['id_str'] in user_ids:
            found = True
        elif tweet.get('in_reply_to_user_id_str') in user_ids:
            found = True
        # use .get() chains here: 'retweeted_status' is only present on
        # retweets, so the original dict indexing raised KeyError on any
        # non-retweet instead of falling through to the next check
        elif tweet.get('retweeted_status', {}).get(
                'user', {}).get('id_str') in user_ids:
            found = True
        elif 'quoted_status' in tweet and \
                tweet['quoted_status']['user']['id_str'] in user_ids:
            found = True
        break
    if not found:
        # logging.warn is a deprecated alias of logging.warning
        logging.warning(
            "couldn't find user in response: %s", json.dumps(tweet, indent=2))
    assert found
    # reconnect to close streaming connection for other tests
    T.connect()
def test_locations():
    # look for tweets from New York ; the bounding box is larger than NYC
    # so hopefully we'll find one from New York in the first 100?
    count = 0
    found = False
    for tweet in T.filter(locations="-74,40,-73,41"):
        # NOTE(review): assumes location-filtered tweets always carry a
        # non-null 'place'; a null place would raise TypeError here.
        if tweet['place']['name'] == 'Manhattan':
            found = True
            break
        if count > 100:
            break
        count += 1
    assert found
    # reconnect to close streaming connection for other tests
    T.connect()
def test_languages():
    # Every tweet on a language-filtered stream should be in those languages.
    count = 0
    ok = True
    langs = ['fr', 'es']
    for tweet in T.filter('paris,madrid', lang=langs):
        if tweet['lang'] not in langs:
            ok = False
            break
        if count > 25:
            break
        count += 1
    assert ok
    # reconnect to close streaming connection for other tests
    T.connect()
def test_timeline_by_user_id():
    # looks for recent tweets and checks if tweets are of provided user_id
    user_id = "87818409"
    for tweet in T.timeline(user_id=user_id):
        assert tweet['user']['id_str'] == user_id
    # Make sure that passing an int user_id behaves as expected. Issue #235
    user_id = 87818409
    all_tweets = list(T.timeline(user_id=user_id))
    assert len(all_tweets)
    for tweet in all_tweets:
        assert tweet['user']['id'] == user_id
def test_timeline_max_pages():
    # A timeline page holds up to 200 tweets; max_pages=1 bounds the result.
    user_id = "87818409"
    first_page = list(T.timeline(user_id=user_id, max_pages=1))
    assert 0 < len(first_page) <= 200
    all_pages = list(T.timeline(user_id=user_id))
    assert len(all_pages) > len(first_page)
def test_timeline_by_screen_name():
    # looks for recent tweets and checks if tweets are of provided screen_name
    screen_name = "guardian"
    for tweet in T.timeline(screen_name=screen_name):
        assert tweet['user']['screen_name'].lower() == screen_name.lower()
def test_home_timeline():
    # The authenticated user's home timeline should yield at least one tweet.
    found = False
    for tweet in T.timeline():
        found = True
        break
    assert found
def test_timeline_arg_handling():
    # Confirm that only user_id *or* screen_name is valid for timeline
    screen_name = "guardian"
    user_id = "87818409"
    with pytest.raises(ValueError):
        for t in T.timeline(screen_name=screen_name, user_id=user_id):
            pass
def test_timeline_with_since_id():
    # Walk 11 tweets back; since_id on the oldest should return the 10 newer.
    count = 0
    tweet_id = None
    for tweet in T.timeline(screen_name='guardian'):
        tweet_id = tweet['id_str']
        count += 1
        if count > 10:
            break
    tweets = list(T.timeline(screen_name='guardian', since_id=tweet_id))
    assert len(tweets) == 10
def test_trends_available():
    # fetches all available trend regions and checks presence of likely member
    trends = T.trends_available()
    worldwide = [t for t in trends if t['placeType']['name'] == 'Supername']
    assert worldwide[0]['name'] == 'Worldwide'
def test_trends_place():
    # fetches recent trends for Amsterdam, WOEID 727232
    trends = T.trends_place(727232)
    assert len(list(trends[0]['trends'])) > 0
def test_trends_closest():
    # fetches regions bounding the specified lat and lon
    trends = T.trends_closest(38.883137, -76.990228)
    assert len(list(trends)) > 0
def test_trends_place_exclude():
    # fetches recent trends for Amsterdam, WOEID 727232, sans hashtags
    trends = T.trends_place(727232, exclude='hashtags')[0]['trends']
    # with exclude='hashtags' no trend name should start with '#'
    hashtag_trends = [t for t in trends if t['name'].startswith('#')]
    assert len(hashtag_trends) == 0
def test_follower_ids():
    # Follower ids come 5000 per page; collecting 10001 proves paging works.
    count = 0
    for id in T.follower_ids('justinbieber'):
        count += 1
        if count == 10001:
            break
    assert count == 10001
def test_follower_ids_with_user_id():
    # Same as above, but addressing the account by numeric user id.
    count = 0
    for id in T.follower_ids(27260086):
        count += 1
        if count > 10001:
            break
    assert count > 10001
def test_follower_ids_max_pages():
    # max_pages bounds the result to multiples of the 5000-id page size.
    ids = list(T.follower_ids(27260086, max_pages=1))
    assert 0 < len(ids) <= 5000
    ids = list(T.follower_ids(27260086, max_pages=2))
    assert 5000 < len(ids) <= 10000
def test_friend_ids():
    # Friend (following) ids page the same way as follower ids above.
    count = 0
    for id in T.friend_ids('justinbieber'):
        count += 1
        if count == 10001:
            break
    assert count == 10001
def test_friend_ids_with_user_id():
    # Friend ids addressed by numeric user id.
    count = 0
    for id in T.friend_ids(27260086):
        count += 1
        if count > 10001:
            break
    assert count > 10001
def test_friend_ids_max_pages():
    # max_pages bounds the result to multiples of the 5000-id page size.
    ids = list(T.friend_ids(27260086, max_pages=1))
    assert 0 < len(ids) <= 5000
    ids = list(T.friend_ids(27260086, max_pages=2))
    assert 5000 < len(ids) <= 10000
def test_user_lookup_by_user_id():
    # looks for the user with given user_id
    user_ids = [
        '87818409',  # @guardian
        '807095',  # @nytimes
        '428333',  # @cnnbrk
        '5402612',  # @BBCBreaking
        '2467791',  # @washingtonpost
        '1020058453',  # @BuzzFeedNews
        '23484039',  # WSJbreakingnews
        '384438102',  # ABCNewsLive
        '15108702',  # ReutersLive
        '87416722'  # SkyNewsBreak
    ]
    uids = []
    for user in T.user_lookup(ids=user_ids):
        uids.append(user['id_str'])
    # lookup order is not guaranteed, so compare as sets
    assert set(user_ids) == set(uids)
def test_user_lookup_by_screen_name():
    # looks for the user with given screen_names
    screen_names = ["guardian", "nytimes", "cnnbrk", "BBCBreaking",
                    "washingtonpost", "BuzzFeedNews", "WSJbreakingnews",
                    "ABCNewsLive", "ReutersLive", "SkyNewsBreak"]
    names = []
    for user in T.user_lookup(ids=screen_names, id_type="screen_name"):
        names.append(user['screen_name'].lower())
    # screen names are case-insensitive, so compare lowercased sets
    assert set(names) == set(map(lambda x: x.lower(), screen_names))
def test_tweet():
    # tweet id 20 is the first tweet ever posted; its text is stable
    t = T.tweet("20")
    assert t['full_text'] == 'just setting up my twttr'
def test_dehydrate():
    # dehydrate turns a stream of tweet JSON strings back into bare ids
    tweets = [
        '{"text": "test tweet 1", "id_str": "800000000000000000"}',
        '{"text": "test tweet 2", "id_str": "800000000000000001"}',
    ]
    ids = list(T.dehydrate(iter(tweets)))
    assert len(ids) == 2
    assert "800000000000000000" in ids
    assert "800000000000000001" in ids
def test_hydrate():
    # hydrate fetches the full tweet for each id (100 ids per request);
    # the fixed list below is big enough to force more than one request
    ids = [
        "501064188211765249", "501064196642340864", "501064197632167936",
        "501064196931330049", "501064198005481472", "501064198009655296",
        "501064198059597824", "501064198513000450", "501064180468682752",
        "501064199142117378", "501064171707170816", "501064200186118145",
        "501064200035516416", "501064201041743872", "501064201251880961",
        "501064198973960192", "501064201256071168", "501064202027798529",
        "501064202245521409", "501064201503113216", "501064202363359232",
        "501064202295848960", "501064202380115971", "501064202904403970",
        "501064203135102977", "501064203508412416", "501064203516407810",
        "501064203546148864", "501064203697156096", "501064204191690752",
        "501064204288540672", "501064197396914176", "501064194309906436",
        "501064204989001728", "501064204980592642", "501064204661850113",
        "501064205400039424", "501064205089665024", "501064206666702848",
        "501064207274868736", "501064197686296576", "501064207623000064",
        "501064207824351232", "501064208083980290", "501064208277319680",
        "501064208398573568", "501064202794971136", "501064208789045248",
        "501064209535614976", "501064209551994881", "501064141332029440",
        "501064207387742210", "501064210177331200", "501064210395037696",
        "501064210693230592", "501064210840035329", "501064211855069185",
        "501064192024006657", "501064200316125184", "501064205642903552",
        "501064212547137536", "501064205382848512", "501064213843169280",
        "501064208562135042", "501064214211870720", "501064214467731457",
        "501064215160172545", "501064209648848896", "501064215990648832",
        "501064216241897472", "501064215759568897", "501064211858870273",
        "501064216522932227", "501064216930160640", "501064217667960832",
        "501064211997274114", "501064212303446016", "501064213675012096",
        "501064218343661568", "501064213951823873", "501064219467341824",
        "501064219677044738", "501064210080473088", "501064220415229953",
        "501064220847656960", "501064222340423681", "501064222772445187",
        "501064222923440130", "501064220121632768", "501064222948593664",
        "501064224936714240", "501064225096499201", "501064225142624256",
        "501064225314185216", "501064225926561794", "501064226451259392",
        "501064226816143361", "501064227302674433", "501064227344646144",
        "501064227688558592", "501064228288364546", "501064228627705857",
        "501064229764751360", "501064229915729921", "501064231304065026",
        "501064231366983681", "501064231387947008", "501064231488200704",
        "501064231941570561", "501064232188665856", "501064232449114112",
        "501064232570724352", "501064232700350464", "501064233186893824",
        "501064233438568450", "501064233774510081", "501064235107897344",
        "619172347640201216", "619172347275116548", "619172341944332288",
        "619172340891578368", "619172338177843200", "619172335426244608",
        "619172332100284416", "619172331592773632", "619172331584376832",
        "619172331399725057", "619172328249757696", "619172328149118976",
        "619172326886674432", "619172324600745984", "619172323447324672",
        "619172321564098560", "619172320880533504", "619172320360333312",
        "619172319047647232", "619172314710609920", "619172313846693890",
        "619172312122814464", "619172306338709504", "619172304191401984",
        "619172303654518784", "619172302878408704", "619172300689031168",
        "619172298310840325", "619172295966392320", "619172293936291840",
        "619172293680345089", "619172285501456385", "619172282183725056",
        "619172281751711748", "619172281294655488", "619172278086070272",
        "619172275741298688", "619172274235535363", "619172257789706240",
        "619172257278111744", "619172253075378176", "619172242736308224",
        "619172236134588416", "619172235488718848", "619172232120692736",
        "619172227813126144", "619172221349662720", "619172216349917184",
        "619172214475108352", "619172209857327104", "619172208452182016",
        "619172208355749888", "619172193730199552", "619172193482768384",
        "619172184922042368", "619172182548049920", "619172179960328192",
        "619172175820357632", "619172174872469504", "619172173568053248",
        "619172170233679872", "619172165959708672", "619172163912908801",
        "619172162608463873", "619172158741303297", "619172157197819905",
        "501064235175399425", "501064235456401410", "615973042443956225",
        "618602288781860864"
    ]
    count = 0
    for tweet in T.hydrate(iter(ids)):
        assert tweet['id_str']
        count += 1
    assert count > 100 # may need to adjust as these might get deleted
@patch("twarc.client.OAuth1Session", autospec=True)
def test_connection_error_get(session_cls):
    """GET retries `connection_errors` times, then re-raises ConnectionError."""
    session = MagicMock(spec=OAuth1Session)
    session.get.side_effect = requests.exceptions.ConnectionError
    session_cls.return_value = session
    client = twarc.Twarc(
        "consumer_key",
        "consumer_secret",
        "access_token",
        "access_token_secret",
        connection_errors=3,
        validate_keys=False,
    )
    with pytest.raises(requests.exceptions.ConnectionError):
        client.get("https://api.twitter.com")
    # One attempt per allowed connection error.
    assert session.get.call_count == 3
@patch("twarc.client.OAuth1Session", autospec=True)
def test_connection_error_post(session_cls):
    """POST retries `connection_errors` times, then re-raises ConnectionError."""
    session = MagicMock(spec=OAuth1Session)
    session.post.side_effect = requests.exceptions.ConnectionError
    session_cls.return_value = session
    client = twarc.Twarc(
        "consumer_key",
        "consumer_secret",
        "access_token",
        "access_token_secret",
        connection_errors=2,
        validate_keys=False,
    )
    with pytest.raises(requests.exceptions.ConnectionError):
        client.post("https://api.twitter.com")
    # One attempt per allowed connection error.
    assert session.post.call_count == 2
def test_http_error_sample():
    """sample() re-raises HTTPError once the http_errors retry budget is spent."""
    client = twarc.Twarc(
        "consumer_key",
        "consumer_secret",
        "access_token",
        "access_token_secret",
        http_errors=2,
        validate_keys=False,
    )
    with pytest.raises(requests.exceptions.HTTPError):
        next(client.sample())
def test_http_error_filter():
    """filter() re-raises HTTPError once the http_errors retry budget is spent."""
    client = twarc.Twarc(
        "consumer_key",
        "consumer_secret",
        "access_token",
        "access_token_secret",
        http_errors=3,
        validate_keys=False,
    )
    with pytest.raises(requests.exceptions.HTTPError):
        next(client.filter(track="test"))
def test_retweets():
    """The fixture tweet is known to have exactly two retweets."""
    retweets = list(T.retweets('795972820413140992'))
    assert len(retweets) == 2
def test_oembed():
    """oembed() echoes back the canonical URL of the requested tweet."""
    tweet = next(T.search('obama'))
    url = 'https://twitter.com/{}/status/{}'.format(
        tweet['user']['screen_name'], tweet['id_str'])
    oembed = T.oembed(url)
    assert oembed['url'] == url
def test_oembed_params():
    """Extra keyword arguments (e.g. theme) are forwarded to the oEmbed endpoint."""
    tweet = next(T.search('obama'))
    url = 'https://twitter.com/{}/status/{}'.format(
        tweet['user']['screen_name'], tweet['id_str'])
    oembed = T.oembed(url, theme="dark")
    assert 'data-theme="dark"' in oembed['html']
def test_replies():
    # this test will look at trending hashtags, and do a search
    # to find a popular tweet that uses it, and then makes a
    # big assumption that someone must have responded to the tweet
    # get the top hashtag that is trending; "1" is the worldwide
    # trends location, and sorting by tweet_volume puts the busiest
    # hashtag first (None volumes sort as 0)
    trends = T.trends_place("1")[0]["trends"]
    trends.sort(key=lambda a: a['tweet_volume'] or 0, reverse=True)
    top_hashtag = trends[0]["name"].strip('#')
    logging.info("top hashtag %s" % top_hashtag)
    tries = 0
    for top_tweet in T.search(top_hashtag, result_type="popular"):
        logging.info("testing %s" % top_tweet['id_str'])
        # get replies to the top tweet
        replies = T.replies(top_tweet)
        # the first tweet should be the base tweet, or the tweet that
        # we are looking for replies to
        me = next(replies)
        assert me['id_str'] == top_tweet['id_str']
        try:
            # any further tweet yielded must be a direct reply to the base tweet
            reply = next(replies)
            assert reply['in_reply_to_status_id_str'] == top_tweet['id_str']
            break
        except StopIteration:
            pass # didn't find a reply; move on to the next popular tweet
        # give up after examining 10 popular tweets without finding a reply
        tries += 1
        if tries > 10:
            break
def test_lists_members():
    """List members can be fetched by slug + owner screen name."""
    members = list(T.list_members(slug='bots', owner_screen_name='edsu'))
    assert members
    assert members[0]['screen_name']
def test_lists_members_owner_id():
    """List members can be fetched by slug + numeric owner id."""
    members = list(T.list_members(slug='bots', owner_id='14331818'))
    assert members
    assert members[0]['screen_name']
def test_lists_list_id():
    """List members can be fetched directly by list id."""
    members = list(T.list_members(list_id='197880909'))
    assert members
    assert members[0]['screen_name']
def test_extended_compat():
    """tweet_mode controls whether tweets carry full_text or truncated text."""
    t_compat = twarc.Twarc(tweet_mode="compat")
    extended_tweets = [next(T.search('obama')),
                       next(T.timeline(screen_name="BarackObama"))]
    compat_tweets = [next(t_compat.search("obama")),
                     next(t_compat.timeline(screen_name="BarackObama"))]
    # extended mode (T's default) exposes the full body; compat mode
    # falls back to the classic truncated field
    assert all('full_text' in tweet for tweet in extended_tweets)
    assert all('text' in tweet for tweet in compat_tweets)
def test_invalid_credentials():
    """validate_keys() raises RuntimeError for missing or bogus credentials.

    The shared client ``T`` is mutated during the test, so the original
    consumer key is restored in a ``finally`` block; otherwise a failed
    assertion would leave ``T`` broken for every later test in the module.
    """
    old_consumer_key = T.consumer_key
    try:
        # A missing key must be rejected.
        T.consumer_key = None
        with pytest.raises(RuntimeError):
            T.validate_keys()
        # A present-but-invalid key must also be rejected.
        T.consumer_key = 'Definitely not a valid key'
        with pytest.raises(RuntimeError):
            T.validate_keys()
    finally:
        T.consumer_key = old_consumer_key
| |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from oscar.core.compat import AUTH_USER_MODEL, AUTH_USER_MODEL_NAME
class Migration(SchemaMigration):
    """Initial schema migration for the ``customer`` app: adds ProductAlert.

    ProductAlert records a request (by a registered user, or anonymously by
    email) to be notified about a product, keyed by a confirmation key and
    tracked through status/date fields.
    """
    # ProductAlert has a foreign key to catalogue.Product, so the catalogue
    # app's initial migration must be applied first.
    depends_on = (
        ('catalogue', '0001_initial'),
    )
    def forwards(self, orm):
        """Create the ``customer_productalert`` table."""
        # Adding model 'ProductAlert'
        db.create_table('customer_productalert', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('product', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalogue.Product'])),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='alerts', null=True, to=orm[AUTH_USER_MODEL])),
            ('email', self.gf('django.db.models.fields.EmailField')(db_index=True, max_length=75, null=True, blank=True)),
            ('key', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, db_index=True)),
            ('status', self.gf('django.db.models.fields.CharField')(default='Active', max_length=20)),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('date_confirmed', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('date_cancelled', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('date_closed', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
        ))
        db.send_create_signal('customer', ['ProductAlert'])
    def backwards(self, orm):
        """Drop the ``customer_productalert`` table."""
        # Deleting model 'ProductAlert'
        db.delete_table('customer_productalert')
    # South's "frozen ORM": a snapshot of every model definition this
    # migration can see, used to build the `orm` object passed above.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        AUTH_USER_MODEL: {
            'Meta': {'object_name': AUTH_USER_MODEL_NAME},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 9, 26, 13, 49, 39, 401244)'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 9, 26, 13, 49, 39, 401151)'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'catalogue.attributeentity': {
            'Meta': {'object_name': 'AttributeEntity'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
        },
        'catalogue.attributeentitytype': {
            'Meta': {'object_name': 'AttributeEntityType'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'})
        },
        'catalogue.attributeoption': {
            'Meta': {'object_name': 'AttributeOption'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'catalogue.attributeoptiongroup': {
            'Meta': {'object_name': 'AttributeOptionGroup'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'catalogue.category': {
            'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
            'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'full_name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1024', 'db_index': 'True'})
        },
        'catalogue.option': {
            'Meta': {'object_name': 'Option'},
            'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
        },
        'catalogue.product': {
            'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
            'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
            'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
            'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
            'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
            'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
            'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
        },
        'catalogue.productattribute': {
            'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
            'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
            'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
            'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
            'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
        },
        'catalogue.productattributevalue': {
            'Meta': {'object_name': 'ProductAttributeValue'},
            'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}),
            'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
            'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
            'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        'catalogue.productcategory': {
            'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
        },
        'catalogue.productclass': {
            'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
            'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
            'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        'catalogue.productrecommendation': {
            'Meta': {'object_name': 'ProductRecommendation'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
            'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
            'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'customer.communicationeventtype': {
            'Meta': {'object_name': 'CommunicationEventType'},
            'category': ('django.db.models.fields.CharField', [], {'default': "u'Order related'", 'max_length': '255'}),
            'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'email_body_html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'email_body_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'email_subject_template': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'sms_template': ('django.db.models.fields.CharField', [], {'max_length': '170', 'blank': 'True'})
        },
        'customer.email': {
            'Meta': {'object_name': 'Email'},
            'body_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'body_text': ('django.db.models.fields.TextField', [], {}),
            'date_sent': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'subject': ('django.db.models.fields.TextField', [], {'max_length': '255'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'emails'", 'to': "orm['{0}']".format(AUTH_USER_MODEL)})
        },
        'customer.notification': {
            'Meta': {'ordering': "('-date_sent',)", 'object_name': 'Notification'},
            'body': ('django.db.models.fields.TextField', [], {}),
            'category': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'date_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'date_sent': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'default': "'Inbox'", 'max_length': '32'}),
            'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notifications'", 'to': "orm['{0}']".format(AUTH_USER_MODEL)}),
            'sender': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(AUTH_USER_MODEL), 'null': 'True'}),
            'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'customer.productalert': {
            'Meta': {'object_name': 'ProductAlert'},
            'date_cancelled': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'date_confirmed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'Active'", 'max_length': '20'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'alerts'", 'null': 'True', 'to': "orm['{0}']".format(AUTH_USER_MODEL)})
        }
    }
    complete_apps = ['customer']
| |
# pyOCD debugger
# Copyright (c) 2015-2020 Arm Limited
# Copyright (c) 2021 Chris Reed
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from typing import (IO, Any, Callable, Dict, Iterator, List, NamedTuple, Optional, Sequence, TYPE_CHECKING)
import six
import pprint
import subprocess
from shutil import get_terminal_size
from ..core import exceptions
from ..coresight.ap import MEM_AP
from ..utility.strings import UniquePrefixMatcher
from ..utility.cmdline import split_command_line
if TYPE_CHECKING:
from ..debug.svd.model import SVDPeripheral
LOG = logging.getLogger(__name__)
class CommandSet:
    """@brief Holds a set of command classes."""

    ## Whether command and infos modules have been loaded yet.
    DID_LOAD_COMMAND_MODULES = False

    def __init__(self):
        """@brief Constructor; starts empty after loading the standard command modules."""
        # Command name -> command class.
        self._commands = {}
        self._command_classes = set()
        self._command_matcher = UniquePrefixMatcher()
        # Value name -> value class.
        self._values = {}
        self._value_classes = set()
        self._value_matcher = UniquePrefixMatcher()
        # Importing the commands/values modules populates the ALL_COMMANDS dicts.
        self._load_modules()

    @classmethod
    def _load_modules(cls):
        """@brief Lazily import the standard commands and values modules.

        Loading is lazy because those modules import much of pyocd and would
        otherwise create import cycles.
        """
        if cls.DID_LOAD_COMMAND_MODULES:
            return
        from . import commands
        from . import values
        cls.DID_LOAD_COMMAND_MODULES = True

    @property
    def commands(self):
        """@brief Dict mapping command names to command classes."""
        return self._commands

    @property
    def command_classes(self):
        """@brief Set of registered command classes."""
        return self._command_classes

    @property
    def command_matcher(self):
        """@brief Prefix matcher over registered command names."""
        return self._command_matcher

    @property
    def values(self):
        """@brief Dict mapping value names to value classes."""
        return self._values

    @property
    def value_classes(self):
        """@brief Set of registered value classes."""
        return self._value_classes

    @property
    def value_matcher(self):
        """@brief Prefix matcher over registered value names."""
        return self._value_matcher

    def add_command_group(self, group_name):
        """@brief Add commands belonging to a group to the command set.
        @param self The command set.
        @param group_name String with the name of the group to add.
        """
        from .base import ALL_COMMANDS
        self.add_commands(ALL_COMMANDS.get(group_name, set()))

    def add_commands(self, commands):
        """@brief Add some commands to the command set.
        @param self The command set.
        @param commands Set of command classes to register.
        """
        from .base import ValueBase
        # Partition the incoming classes into value commands and plain commands.
        values = {klass for klass in commands if issubclass(klass, ValueBase)}
        plain = commands - values
        cmd_name_map = {name: klass
                        for klass in plain
                        for name in klass.INFO['names']}
        self._commands.update(cmd_name_map)
        self._command_classes.update(plain)
        self._command_matcher.add_items(cmd_name_map.keys())
        value_name_map = {name: klass
                         for klass in values
                         for name in klass.INFO['names']}
        self._values.update(value_name_map)
        self._value_classes.update(values)
        self._value_matcher.add_items(value_name_map.keys())
class CommandInvocation(NamedTuple):
    """@brief Groups the command name with an iterable of args and a handler function.
    The handler is a callable that will evaluate the command. It accepts a single argument of the
    CommandInvocation instance.
    """
    # Command name, or the entire remainder of the line for Python ("$") and system ("!") commands.
    cmd: str
    # Positional argument strings; empty for Python and system commands.
    args: Sequence[str]
    # Callable that executes the invocation; receives this instance as its only argument.
    handler: Callable[["CommandInvocation"], None] # type:ignore # mypy doesn't support recursive types yet!
class CommandExecutionContext:
"""@brief Manages command execution.
This class holds persistent state for command execution, and provides the interface for executing
commands and command lines.
"""
    def __init__(self, no_init: bool = False, output_stream: Optional[IO[str]] = None):
        """@brief Constructor.
        @param self This object.
        @param no_init Whether the board and target will be initialized when attach_session() is called.
            Defaults to False.
        @param output_stream Stream object to which command output and errors will be written. If not provided,
            output will be written to sys.stdout.
        """
        self._no_init = no_init
        self._output = output_stream or sys.stdout
        # Namespace dict shared by all "$" Python commands.
        self._python_namespace: Dict[str, Any] = {}
        self._command_set = CommandSet()
        # State attributes.
        self._session = None
        self._selected_core = None
        self._selected_ap_address = None
        # Cache of SVD peripherals keyed by lowercased name; filled lazily by the
        # `peripherals` property.
        self._peripherals: Dict[str, "SVDPeripheral"] = {}
        self._loaded_peripherals = False
        # Add in the standard commands.
        self._command_set.add_command_group('standard')
def write(self, message='', **kwargs):
"""@brief Write a fixed message to the output stream.
The message is written to the output stream passed to the constructor, terminated with
a newline by default. The `end` keyword argument can be passed to change the terminator. No
formatting is applied to the message. If formatting is required, use the writei() or writef()
methods instead.
@param self This object.
@param message The text to write to the output. If not a string object, it is run through str().
"""
if self._output is None:
return
end = kwargs.pop('end', "\n")
if not isinstance(message, str):
message = str(message)
self._output.write(message + end)
def writei(self, fmt, *args, **kwargs):
"""@brief Write an interpolated string to the output stream.
The formatted string is written to the output stream passed to the constructor, terminated with
a newline by default. The `end` keyword argument can be passed to change the terminator.
@param self This object.
@param fmt Format string using printf-style "%" formatters.
"""
assert isinstance(fmt, str)
message = fmt % args
self.write(message, **kwargs)
def writef(self, fmt, *args, **kwargs):
"""@brief Write a formatted string to the output stream.
The formatted string is written to the output stream passed to the constructor, terminated with
a newline by default. The `end` keyword argument can be passed to change the terminator.
@param self This object.
@param fmt Format string using the format() mini-language.
"""
assert isinstance(fmt, str)
message = fmt.format(*args, **kwargs)
self.write(message, **kwargs)
    def attach_session(self, session):
        """@brief Associate a session with the command context.
        Various data for the context are initialized. This includes selecting the initially selected core and MEM-AP,
        and getting an ELF file that was set on the target.
        @param self This object.
        @param session A @ref pyocd.core.session.Session "Session" instance.
        @retval True Session attached and context state inited successfully.
        @retval False An error occurred when opening the session or initing the context state.
        """
        # A context can only ever be attached once.
        assert self._session is None
        # The session must already be open unless initialization was deferred.
        assert session.is_open or self._no_init
        self._session = session
        assert self.target
        # Select the first core's MEM-AP by default.
        if not self._no_init:
            try:
                # Selected core defaults to the target's default selected core.
                if self.selected_core is None:
                    self.selected_core = self.target.selected_core
                # Get the AP for the selected core.
                if self.selected_core is not None:
                    self.selected_ap_address = self.selected_core.ap.address
            except IndexError:
                # No core available; leave selected_ap_address unset and fall through.
                pass
            # Fall back to the first MEM-AP.
            if self.selected_ap_address is None:
                for ap_num in sorted(self.target.aps.keys()):
                    if isinstance(self.target.aps[ap_num], MEM_AP):
                        self.selected_ap_address = ap_num
                        break
        # Add user-defined commands once we know we have a session created.
        self.command_set.add_command_group('user')
        return True
    @property
    def session(self):
        """@brief The attached Session, or None before attach_session() is called."""
        return self._session
    @property
    def board(self):
        """@brief The session's board, or None when no session is attached."""
        return self._session and self._session.board
    @property
    def target(self):
        """@brief The session's target, or None when no session is attached."""
        return self._session and self._session.target
    @property
    def probe(self):
        """@brief The session's debug probe, or None when no session is attached."""
        return self._session and self._session.probe
    @property
    def elf(self):
        """@brief The target's ELF file object, or None if there is no target or ELF."""
        return self.target and self.target.elf
    @property
    def command_set(self):
        """@brief CommandSet with commands available in this context."""
        return self._command_set
@property
def peripherals(self):
"""@brief Dict of SVD peripherals."""
assert self.target
if self.target.svd_device and not self._loaded_peripherals:
for p in self.target.svd_device.peripherals:
self._peripherals[p.name.lower()] = p
self._loaded_peripherals = True
return self._peripherals
    @property
    def output_stream(self):
        """@brief Stream to which command output is currently written."""
        return self._output
    @output_stream.setter
    def output_stream(self, stream):
        # Redirect all subsequent command output to a different stream.
        self._output = stream
    @property
    def selected_core(self):
        """@brief The Target instance for the selected core."""
        return self._selected_core
    @selected_core.setter
    def selected_core(self, value):
        self._selected_core = value
    @property
    def selected_ap_address(self):
        """@brief Address of the currently selected access port, or None if none is selected."""
        return self._selected_ap_address
    @selected_ap_address.setter
    def selected_ap_address(self, value):
        self._selected_ap_address = value
    @property
    def selected_ap(self):
        """@brief The selected AP object looked up on the target, or None when no AP is selected."""
        if self.selected_ap_address is None:
            return None
        else:
            assert self.target
            return self.target.aps[self.selected_ap_address]
def process_command_line(self, line: str) -> None:
"""@brief Run a command line consisting of one or more semicolon-separated commands.
@param self
@param line Complete command line string.
"""
for args in self._split_commands(line):
assert args
invoc = self.parse_command(args)
invoc.handler(invoc)
def process_command_file(self, cmd_file: IO[str]) -> None:
"""@brief Run commands contained in a file.
@param self
@param cmd_file File object containing commands to run. Must be opened in text mode. When this method returns,
the file will be closed. This is true even if an exception is raised during command execution.
"""
try:
for line in cmd_file:
line = line.strip()
# Skip empty or comment lines.
if (len(line) == 0) or (line[0] == '#'):
continue
self.process_command_line(line)
finally:
cmd_file.close()
def _split_commands(self, line: str) -> Iterator[List[str]]:
"""@brief Generator yielding commands separated by semicolons.
Python and system commands are handled specially. For these we yield a list of 2 elements: the command,
either "$" or "!", followed by the unmodified remainder of the command line. For these commands,
splitting on semicolons is not supported.
"""
parts = split_command_line(line.strip())
# Check for Python or system command. For these we yield a list of 2 elements: the command
# followed by the rest of the command line as it was originally.
if parts and (parts[0] in '$!'):
# Remove the Python/system command prefix from the command line. Can't use str.removeprefix()
# since it was added in 3.9.
line_remainder = line.strip()
assert line_remainder.find(parts[0]) == 0
line_remainder = line_remainder[len(parts[0]):].strip()
yield [parts[0], line_remainder]
return
result: List[str] = []
for p in parts:
if p == ';':
if result:
yield result
result = []
else:
result.append(p)
if result:
yield result
def parse_command(self, cmdline: List[str]) -> CommandInvocation:
    """@brief Create a CommandInvocation from a single command.
    @param self
    @param cmdline Command words, or for Python/system commands the 2-element
        ["$"|"!", remainder] list produced by _split_commands().
    @return CommandInvocation with its handler set to handle_python, handle_system,
        or execute_command as appropriate.
    @exception CommandError Raised for an ambiguous or unrecognized command name.
    """
    # Check for Python or system command lines.
    first_char = cmdline[0]
    if first_char in '$!':
        # cmdline parameters that are for Python and system commands must be a 2-element list,
        # as generated by _split_commands().
        assert len(cmdline) == 2
        # Return the invocation instance with the handler set appropriately.
        if first_char == '$':
            return CommandInvocation(cmdline[1], [], self.handle_python)
        elif first_char == '!':
            return CommandInvocation(cmdline[1], [], self.handle_system)
    # Split command into words.
    # NOTE(review): cmdline is annotated List[str] but is passed to split_command_line(),
    # which _split_commands() calls with a str — confirm the expected argument type.
    args = split_command_line(cmdline)
    cmd = args[0].lower()
    args = args[1:]
    # Look up shortened unambiguous match for the command name.
    matched_command = self._command_set.command_matcher.find_one(cmd)
    # Check for valid command.
    if matched_command is None:
        all_matches = self._command_set.command_matcher.find_all(cmd)
        if len(all_matches) > 1:
            raise exceptions.CommandError("command '%s' is ambiguous; matches are %s" % (cmd,
                ", ".join("'%s'" % c for c in all_matches)))
        else:
            raise exceptions.CommandError("unrecognized command '%s'" % cmd)
    return CommandInvocation(matched_command, args, self.execute_command)
def execute_command(self, invocation: CommandInvocation) -> None:
    """@brief Execute a single command.
    @param self
    @param invocation Parsed CommandInvocation naming the command and its arguments.
    """
    # Must have an attached session to run commands, except for certain commands.
    assert (self.session is not None) or (invocation.cmd in ('list', 'help', 'exit'))

    # Instantiate and validate the command object.
    command = self._command_set.commands[invocation.cmd](self)
    command.check_arg_count(invocation.args)
    command.parse(invocation.args)

    if not self.session:
        command.execute()
    else:
        # Reroute print() in user-defined functions so it will come out our output stream.
        with self.session.user_script_print_proxy.push_target(self.write):
            command.execute()
def _build_python_namespace(self) -> None:
    """@brief Construct the dictionary used as the namespace for python commands.
    Starts from the user script proxy's namespace and adds the ELF and memory map objects.
    """
    assert self.session
    assert self.target
    namespace = self.session.user_script_proxy.namespace
    namespace['elf'] = self.elf
    namespace['map'] = self.target.memory_map
    self._python_namespace = namespace
def handle_python(self, invocation: CommandInvocation) -> None:
    """@brief Evaluate a python expression.
    @param self
    @param invocation CommandInvocation whose .cmd attribute holds the expression text.
    @exception CommandError Wraps any exception raised while evaluating the expression;
        the original exception is chained as the cause (consistent with handle_system()).
    """
    assert self.session
    try:
        # Lazily build the python environment.
        if not self._python_namespace:
            self._build_python_namespace()

        # Reroute print() in user-defined functions so it will come out our output stream. Not that
        # we expect much use of print() from expressions...
        with self.session.user_script_print_proxy.push_target(self.write):
            # eval() of user input is intentional: this is an interactive console feature.
            result = eval(invocation.cmd, self._python_namespace)
            if result is not None:
                if isinstance(result, int):
                    # Show integers in both hex and decimal.
                    self.writei("0x%08x (%d)", result, result)
                else:
                    w, h = get_terminal_size()
                    self.write(pprint.pformat(result, indent=2, width=w, depth=10))
    except Exception as e:
        # Log the traceback before raising the exception.
        if self.session and self.session.log_tracebacks:
            LOG.error("Exception while executing expression: %s", e, exc_info=True)
        # Chain the original exception as the cause, matching handle_system().
        raise exceptions.CommandError("exception while executing expression: %s" % e) from e
def handle_system(self, invocation: CommandInvocation) -> None:
    """@brief Evaluate a system call command.
    @param self
    @param invocation CommandInvocation whose .cmd attribute holds the shell command line.
    @exception CommandError Raised when the command exits with a non-zero status.
    """
    try:
        # shell=True is deliberate: this implements the console's shell-escape feature.
        cmd_output = subprocess.check_output(invocation.cmd, shell=True, stderr=subprocess.STDOUT)
        self.write(six.ensure_str(cmd_output), end='')
    except subprocess.CalledProcessError as err:
        raise exceptions.CommandError(str(err)) from err
| |
# -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_frules
~~~~~~~~~~~~~~~~~~~~~~~~~
Tests of iptables rules generation function.
"""
import logging
from mock import Mock, patch, call, ANY
from calico.felix import frules
from calico.felix.config import Config
from calico.felix.fiptables import IptablesUpdater
from calico.felix.frules import (
profile_to_chain_name, rules_to_chain_rewrite_lines, UnsupportedICMPType,
_rule_to_iptables_fragment
)
from calico.felix.test.base import BaseTestCase
_log = logging.getLogger(__name__)

# Fragment appended at the end of every generated chain: marks packets that
# matched none of the profile rules.
DEFAULT_MARK = ('--append chain-foo --match comment '
                '--comment "Mark as not matched" --jump MARK --set-mark 1')

# Test vectors for rules_to_chain_rewrite_lines().
# Each entry is (rules, ip_version, expected_iptables_fragments).
RULES_TESTS = [
    ([{"src_net": "10.0.0.0/8"},], 4,
     ["--append chain-foo --source 10.0.0.0/8 --jump RETURN",
      DEFAULT_MARK]),

    ([{"protocol": "icmp",
       "src_net": "10.0.0.0/8",
       "icmp_type": 7,
       "icmp_code": 123},], 4,
     ["--append chain-foo --protocol icmp --source 10.0.0.0/8 "
      "--match icmp --icmp-type 7/123 "
      "--jump RETURN",
      DEFAULT_MARK]),

    ([{"protocol": "icmp",
       "src_net": "10.0.0.0/8",
       "icmp_type": 7},], 4,
     ["--append chain-foo --protocol icmp --source 10.0.0.0/8 "
      "--match icmp --icmp-type 7 "
      "--jump RETURN",
      DEFAULT_MARK]),

    ([{"protocol": "icmpv6",
       "src_net": "1234::beef",
       "icmp_type": 7},], 6,
     ["--append chain-foo --protocol icmpv6 --source 1234::beef "
      "--match icmp6 --icmpv6-type 7 "
      "--jump RETURN",
      DEFAULT_MARK]),

    # src_tag is translated to an ipset match via IP_SET_MAPPING below.
    ([{"protocol": "tcp",
       "src_tag": "tag-foo",
       "src_ports": ["0:12", 13]}], 4,
     ["--append chain-foo --protocol tcp "
      "--match set --match-set ipset-foo src "
      "--match multiport --source-ports 0:12,13 --jump RETURN",
      DEFAULT_MARK]),

    # multiport accepts at most 15 port specs, so long port lists are split
    # across multiple fragments.
    ([{"protocol": "tcp",
       "src_ports": [0, "2:3", 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]}], 4,
     ["--append chain-foo --protocol tcp "
      "--match multiport --source-ports 0,2:3,4,5,6,7,8,9,10,11,12,13,14,15 "
      "--jump RETURN",
      "--append chain-foo --protocol tcp "
      "--match multiport --source-ports 16,17 "
      "--jump RETURN",
      DEFAULT_MARK]),
]

# Maps Calico tag names to the ipset names used in the --match set fragments above.
IP_SET_MAPPING = {
    "tag-foo": "ipset-foo",
    "tag-bar": "ipset-bar",
}
class TestRules(BaseTestCase):
    """Tests for the iptables rule/chain generation helpers in calico.felix.frules."""

    def test_profile_to_chain_name(self):
        """Chain names encode the profile id and traffic direction with a felix-p prefix."""
        self.assertEqual(profile_to_chain_name("inbound", "prof1"),
                         "felix-p-prof1-i")
        self.assertEqual(profile_to_chain_name("outbound", "prof1"),
                         "felix-p-prof1-o")

    def test_split_port_lists(self):
        """_split_port_lists() chunks port specs into groups of <=15 (multiport limit).
        Each range (e.g. "2:3") counts as a single spec here; all output values are strings.
        """
        self.assertEqual(
            frules._split_port_lists([1, 2, 3, 4, 5, 6, 7, 8, 9,
                                      10, 11, 12, 13, 14, 15]),
            [['1', '2', '3', '4', '5', '6', '7', '8', '9',
              '10', '11', '12', '13', '14', '15']]
        )
        self.assertEqual(
            frules._split_port_lists([1, 2, 3, 4, 5, 6, 7, 8, 9,
                                      10, 11, 12, 13, 14, 15, 16]),
            [['1', '2', '3', '4', '5', '6', '7', '8', '9',
              '10', '11', '12', '13', '14', '15'],
             ['16']]
        )
        self.assertEqual(
            frules._split_port_lists([1, "2:3", 4, 5, 6, 7, 8, 9,
                                      10, 11, 12, 13, 14, 15, 16, 17]),
            [['1', '2:3', '4', '5', '6', '7', '8', '9',
              '10', '11', '12', '13', '14', '15'],
             ['16', '17']]
        )

    def test_rules_generation(self):
        """Run every vector in RULES_TESTS through rules_to_chain_rewrite_lines()."""
        for rules, ip_version, expected_output in RULES_TESTS:
            fragments = rules_to_chain_rewrite_lines(
                "chain-foo",
                rules,
                ip_version,
                IP_SET_MAPPING,
                on_allow="RETURN",
            )
            self.assertEqual(fragments, expected_output)

    def test_bad_icmp_type(self):
        """ICMP type 255 is out of range and must raise UnsupportedICMPType."""
        with self.assertRaises(UnsupportedICMPType):
            _rule_to_iptables_fragment("foo", {"icmp_type": 255}, 4, {})

    def test_bad_protocol_with_ports(self):
        """Port matches are only valid for port-bearing protocols (tcp/udp/...)."""
        with self.assertRaises(AssertionError):
            _rule_to_iptables_fragment("foo", {"protocol": "10",
                                               "src_ports": [1]}, 4, {})

    def test_build_input_chain(self):
        """Basic INPUT chain: metadata, DHCP and DNS accepts, then default DROP."""
        chain, deps = frules._build_input_chain("tap+",
                                                "123.0.0.1",
                                                1234,
                                                546, 547,
                                                False,
                                                "DROP")
        self.assertEqual(chain, [
            '--append felix-INPUT ! --in-interface tap+ --jump RETURN',
            '--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
            '--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
            '--append felix-INPUT --protocol tcp --destination 123.0.0.1 --dport 1234 --jump ACCEPT',
            '--append felix-INPUT --protocol udp --sport 546 --dport 547 --jump ACCEPT',
            '--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
            '--append felix-INPUT --jump DROP',
        ])
        self.assertEqual(deps, set())

    def test_build_input_chain_ipip(self):
        """With an IP-in-IP hosts ipset, an ipencap filter rule is prepended."""
        chain, deps = frules._build_input_chain("tap+",
                                                "123.0.0.1",
                                                1234,
                                                546, 547,
                                                False,
                                                "DROP",
                                                "felix-hosts")
        self.assertEqual(chain, [
            '--append felix-INPUT --protocol ipencap --match set ! --match-set felix-hosts src --jump DROP',
            '--append felix-INPUT ! --in-interface tap+ --jump RETURN',
            '--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
            '--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
            '--append felix-INPUT --protocol tcp --destination 123.0.0.1 --dport 1234 --jump ACCEPT',
            '--append felix-INPUT --protocol udp --sport 546 --dport 547 --jump ACCEPT',
            '--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
            '--append felix-INPUT --jump DROP',
        ])
        self.assertEqual(deps, set())

    def test_build_input_chain_return(self):
        """IPv6 variant (no metadata): ICMPv6 ND/MLD accepts plus a jump to the
        per-endpoint chain, which becomes a chain dependency."""
        chain, deps = frules._build_input_chain("tap+",
                                                None,
                                                None,
                                                546, 547,
                                                True,
                                                "RETURN")
        self.assertEqual(chain, [
            '--append felix-INPUT ! --in-interface tap+ --jump RETURN',
            '--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
            '--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
            '--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 130',
            '--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 131',
            '--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 132',
            '--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 133',
            '--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 135',
            '--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 136',
            '--append felix-INPUT --protocol udp --sport 546 --dport 547 --jump ACCEPT',
            '--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
            '--append felix-INPUT --jump felix-FROM-ENDPOINT',
        ])
        self.assertEqual(deps, set(["felix-FROM-ENDPOINT"]))

    @patch("calico.felix.futils.check_call", autospec=True)
    @patch("calico.felix.frules.devices", autospec=True)
    @patch("calico.felix.frules.HOSTS_IPSET_V4", autospec=True)
    def test_install_global_rules(self, m_ipset, m_devices, m_check_call):
        """install_global_rules() should create the tunnel device when missing and
        rewrite/insert the global felix chains.
        NOTE(review): `async` became a reserved word in Python 3.7; this module
        targets Python 2 — confirm before porting.
        """
        m_devices.interface_exists.return_value = False
        m_devices.interface_up.return_value = False

        m_config = Mock(spec=Config)
        m_config.IP_IN_IP_ENABLED = True
        m_config.IP_IN_IP_MTU = 1480
        m_config.METADATA_IP = "123.0.0.1"
        m_config.METADATA_PORT = 1234
        m_config.DEFAULT_INPUT_CHAIN_ACTION = "RETURN"
        m_config.IFACE_PREFIX = "tap"

        m_v4_upd = Mock(spec=IptablesUpdater)
        m_v6_upd = Mock(spec=IptablesUpdater)
        m_v4_nat_upd = Mock(spec=IptablesUpdater)

        frules.install_global_rules(m_config, m_v4_upd, m_v6_upd, m_v4_nat_upd)

        # The tunl0 device did not exist, so it must be created and brought up.
        m_ipset.ensure_exists.assert_called_once_with()
        self.assertEqual(
            m_check_call.mock_calls,
            [
                call(["ip", "tunnel", "add", "tunl0", "mode", "ipip"]),
                call(["ip", "link", "set", "tunl0", "mtu", "1480"]),
                call(["ip", "link", "set", "tunl0", "up"]),
            ]
        )

        expected_chains = {
            'felix-INPUT': [
                '--append felix-INPUT ! --in-interface tap+ --jump RETURN',
                '--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
                '--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
                '--append felix-INPUT --protocol tcp --destination 123.0.0.1 --dport 1234 --jump ACCEPT',
                '--append felix-INPUT --protocol udp --sport 68 --dport 67 --jump ACCEPT',
                '--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
                '--append felix-INPUT --jump felix-FROM-ENDPOINT'
            ],
            'felix-FORWARD': [
                '--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
                '--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate INVALID --jump DROP',
                '--append felix-FORWARD --in-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN',
                '--append felix-FORWARD --out-interface tap+ --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN',
                '--append felix-FORWARD --jump felix-FROM-ENDPOINT --in-interface tap+',
                '--append felix-FORWARD --jump felix-TO-ENDPOINT --out-interface tap+',
                '--append felix-FORWARD --jump ACCEPT --in-interface tap+',
                '--append felix-FORWARD --jump ACCEPT --out-interface tap+'
            ]
        }
        m_v4_upd.rewrite_chains.assert_called_once_with(
            expected_chains,
            {
                'felix-INPUT': set(['felix-FROM-ENDPOINT']),
                'felix-FORWARD': set([
                    'felix-FROM-ENDPOINT',
                    'felix-TO-ENDPOINT'
                ])
            },
            async=False
        )

        self.assertEqual(
            m_v4_upd.ensure_rule_inserted.mock_calls,
            [
                call("INPUT --jump felix-INPUT", async=False),
                call("FORWARD --jump felix-FORWARD", async=False),
            ]
        )
| |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
from six import iteritems, text_type, string_types, PY2
"""
frappe.translate
~~~~~~~~~~~~~~~~
Translation tools for frappe
"""
import frappe, os, re, io, codecs, json
from frappe.model.utils import render_include, InvalidIncludePath
from frappe.utils import strip
from jinja2 import TemplateError
import itertools, operator
def guess_language(lang_list=None):
    """Set `frappe.local.lang` from HTTP headers at beginning of request"""
    accepted = frappe.request.accept_languages.values()
    if not accepted:
        return frappe.local.lang

    if not lang_list:
        lang_list = get_all_languages() or []

    matched = None
    for raw_code in accepted:
        lang_code = raw_code.strip()
        if not isinstance(lang_code, text_type):
            lang_code = text_type(lang_code, 'utf-8')
        if lang_code == "en" or lang_code in lang_list:
            matched = lang_code
            break

        # check if parent language (pt) is setup, if variant (pt-BR)
        if "-" in lang_code:
            parent = lang_code.split("-")[0]
            if parent in lang_list:
                matched = parent
                break

    return matched or frappe.local.lang
def get_user_lang(user=None):
    """Set frappe.local.lang from user preferences on session beginning or resumption"""
    if not user:
        user = frappe.session.user

    # via cache
    lang = frappe.cache().hget("lang", user)
    if lang:
        return lang

    # Cache miss: user profile, then site default, then current/en; cache the result.
    lang = (frappe.db.get_value("User", user, "language")
        or frappe.db.get_default("lang")
        or frappe.local.lang or 'en')
    frappe.cache().hset("lang", user, lang)
    return lang
def get_lang_code(lang):
    """Return the language code for a full language name, falling back to the input value."""
    code = frappe.db.get_value('Language', {'language_name': lang})
    return code or lang
def set_default_language(lang):
    """Set Global default language
    :param lang: Language code to persist as the site default and use for this request."""
    frappe.db.set_default("lang", lang)
    frappe.local.lang = lang
def get_all_languages():
    """Returns all language codes ar, ch etc"""
    def _fetch_from_db():
        # The cache loader may run before a DB connection exists.
        if not frappe.db:
            frappe.connect()
        return frappe.db.sql_list('select name from tabLanguage')
    return frappe.cache().get_value('languages', _fetch_from_db)
def get_lang_dict():
    """Returns all languages in dict format, full name is the key e.g. `{"english":"en"}`"""
    rows = frappe.db.sql('select language_name, name from tabLanguage')
    return dict(rows)
def get_dict(fortype, name=None):
    """Returns translation dict for a type of object.
    :param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot`
    :param name: name of the document for which assets are to be returned.
    """
    fortype = fortype.lower()
    cache = frappe.cache()
    # Assets are cached per language under a "fortype:name" key.
    asset_key = fortype + ":" + (name or "-")
    translation_assets = cache.hget("translation_assets", frappe.local.lang, shared=True) or {}

    if not asset_key in translation_assets:
        if fortype=="doctype":
            messages = get_messages_from_doctype(name)
        elif fortype=="page":
            messages = get_messages_from_page(name)
        elif fortype=="report":
            messages = get_messages_from_report(name)
        elif fortype=="include":
            messages = get_messages_from_include_files()
        elif fortype=="jsfile":
            messages = get_messages_from_file(name)
        elif fortype=="boot":
            # Boot assets combine include-file messages with names/labels of
            # several core doctypes so they can be translated client-side.
            messages = get_messages_from_include_files()
            messages += frappe.db.sql("select 'Print Format:', name from `tabPrint Format`")
            messages += frappe.db.sql("select 'DocType:', name from tabDocType")
            messages += frappe.db.sql("select 'Role:', name from tabRole")
            messages += frappe.db.sql("select 'Module:', name from `tabModule Def`")
            messages += frappe.db.sql("select 'Module:', label from `tabDesktop Icon` where standard=1 or owner=%s",
                frappe.session.user)
        # NOTE(review): an unrecognized fortype leaves `messages` unbound and would
        # raise NameError below — confirm callers only pass the documented values.
        message_dict = make_dict_from_messages(messages)
        message_dict.update(get_dict_from_hooks(fortype, name))
        # remove untranslated
        message_dict = {k:v for k, v in iteritems(message_dict) if k!=v}
        translation_assets[asset_key] = message_dict
        cache.hset("translation_assets", frappe.local.lang, translation_assets, shared=True)

    return translation_assets[asset_key]
def get_dict_from_hooks(fortype, name):
    """Collect translation dict entries registered via the `get_translated_dict` hook
    for the given (fortype, name) pair."""
    out = {}
    hooks = frappe.get_hooks("get_translated_dict")
    for hook_key in hooks:
        if hook_key == (fortype, name):
            for method in hooks[hook_key]:
                out.update(frappe.get_attr(method)())
    return out
def add_lang_dict(code):
    """Extracts messages and returns Javascript code snippet to be appened at the end
    of the given script
    :param code: Javascript code snippet to which translations needs to be appended."""
    messages = extract_messages_from_code(code)
    # Fix: keep the (position, message) tuples as-is — make_dict_from_messages()
    # reads the message from index 1 of each entry, so stripping the positions here
    # made it index into the message string instead of using the message.
    code += "\n\n$.extend(frappe._messages, %s)" % json.dumps(make_dict_from_messages(messages))
    return code
def make_dict_from_messages(messages, full_dict=None):
    """Returns translated messages as a dict in Language specified in `frappe.local.lang`
    :param messages: List of (position, message) tuples of untranslated messages.
    :param full_dict: Optional full translation dict; loaded for the current
        language when omitted.
    """
    out = {}
    # Fix: compare against None with `is`, not `==` (PEP 8; also avoids invoking
    # a custom __eq__ on dict-like arguments).
    if full_dict is None:
        full_dict = get_full_dict(frappe.local.lang)

    for m in messages:
        # m is a (position, message) tuple; only keep messages that have a translation.
        if m[1] in full_dict:
            out[m[1]] = full_dict[m[1]]

    return out
def get_lang_js(fortype, name):
    """Returns code snippet to be appended at the end of a JS script.
    :param fortype: Type of object, e.g. `DocType`
    :param name: Document name
    """
    translations = json.dumps(get_dict(fortype, name))
    return "\n\n$.extend(frappe._messages, %s)" % translations
def get_full_dict(lang):
    """Load and return the entire translations dictionary for a language from :meth:`frape.cache`
    :param lang: Language Code, e.g. `hi`
    """
    if not lang:
        return {}

    # found in local, return!
    cached = getattr(frappe.local, 'lang_full_dict', None)
    if cached and cached.get(lang, None):
        return cached

    frappe.local.lang_full_dict = load_lang(lang)

    try:
        # Overlay user-specific translation overrides, best-effort.
        overrides = get_user_translations(lang)
    except Exception:
        overrides = None

    if overrides:
        frappe.local.lang_full_dict.update(overrides)

    return frappe.local.lang_full_dict
def load_lang(lang, apps=None):
    """Combine all translations from `.csv` files in all `apps`.
    For derivative languages (es-GT), take translations from the
    base language (es) and then update translations from the child (es-GT)"""
    if lang=='en':
        return {}

    merged = frappe.cache().hget("lang_full_dict", lang, shared=True)
    if not merged:
        merged = {}
        for app in (apps or frappe.get_all_apps(True)):
            csv_path = os.path.join(frappe.get_pymodule_path(app), "translations", lang + ".csv")
            merged.update(get_translation_dict_from_file(csv_path, lang, app) or {})

        if '-' in lang:
            # Start from the base language, then overlay the variant's entries.
            base_dict = load_lang(lang.split('-')[0])
            base_dict.update(merged)
            merged = base_dict

        frappe.cache().hset("lang_full_dict", lang, merged, shared=True)

    return merged or {}
def get_translation_dict_from_file(path, lang, app):
    """load translation dict from given path"""
    translations = {}
    if not os.path.exists(path):
        return translations

    for row in read_csv_file(path):
        if len(row)==3:
            # with file and line numbers
            translations[row[1]] = strip(row[2])
        elif len(row)==2:
            translations[row[0]] = strip(row[1])
        elif row:
            # Non-empty row with an unexpected column count is a data error.
            raise Exception("Bad translation in '{app}' for language '{lang}': {values}".format(
                app=app, lang=lang, values=repr(row).encode("utf-8")
            ))

    return translations
def get_user_translations(lang):
    """Return user-defined Translation document overrides for *lang*, cached."""
    cached = frappe.cache().hget('lang_user_translations', lang)
    if cached is not None:
        return cached

    translations = {}
    for row in frappe.get_all('Translation',
        fields=["source_name", "target_name"], filters={'language': lang}):
        translations[row.source_name] = row.target_name
    frappe.cache().hset('lang_user_translations', lang, translations)
    return translations
def clear_cache():
    """Clear all translation assets from :meth:`frappe.cache`"""
    cache = frappe.cache()
    # "bootinfo" also carries translations, so it must be invalidated too.
    for key in ("langinfo", "bootinfo", "lang_user_translations"):
        cache.delete_key(key)
    for shared_key in ("lang_full_dict", "translation_assets"):
        cache.delete_key(shared_key, shared=True)
def get_messages_for_app(app):
    """Returns all messages (list) for a specified `app`.
    Collects (position, message) tuples from doctypes, pages and reports in the
    app's modules, plus workflow/custom-field fixtures, JS files and tagged
    server-side strings, then deduplicates them.
    """
    messages = []
    modules = ", ".join(['"{}"'.format(m.title().replace("_", " "))
        for m in frappe.local.app_modules[app]])

    # doctypes
    if modules:
        for name in frappe.db.sql_list("""select name from tabDocType
            where module in ({})""".format(modules)):
            messages.extend(get_messages_from_doctype(name))

        # pages
        for name, title in frappe.db.sql("""select name, title from tabPage
            where module in ({})""".format(modules)):
            messages.append((None, title or name))
            messages.extend(get_messages_from_page(name))

        # reports
        for name in frappe.db.sql_list("""select tabReport.name from tabDocType, tabReport
            where tabReport.ref_doctype = tabDocType.name
            and tabDocType.module in ({})""".format(modules)):
            messages.append((None, name))
            messages.extend(get_messages_from_report(name))

    # Sanity check: every entry must be a (position, message) tuple.
    # Fix: the bare `raise Exception` gave no hint about what went wrong.
    for i in messages:
        if not isinstance(i, tuple):
            raise Exception(
                "get_messages_for_app: expected (position, message) tuple, got {0!r}".format(i))

    # workflow based on app.hooks.fixtures
    messages.extend(get_messages_from_workflow(app_name=app))
    # custom fields based on app.hooks.fixtures
    messages.extend(get_messages_from_custom_fields(app_name=app))
    # app_include_files
    messages.extend(get_all_messages_from_js_files(app))
    # server_messages
    messages.extend(get_server_messages(app))
    return deduplicate_messages(messages)
def get_messages_from_doctype(name):
    """Extract all translatable messages for a doctype. Includes labels, Python code,
    Javascript code, html templates"""
    meta = frappe.get_meta(name)
    # Fix: removed a dead `messages = []` assignment that was immediately overwritten.
    messages = [meta.name, meta.module]

    if meta.description:
        messages.append(meta.description)

    # translations of field labels, description and options
    for d in meta.get("fields"):
        messages.extend([d.label, d.description])

        if d.fieldtype=='Select' and d.options:
            options = d.options.split('\n')
            if not "icon" in options[0]:
                messages.extend(options)

    # translations of roles
    for d in meta.get("permissions"):
        if d.role:
            messages.append(d.role)

    # Drop empties, tag each message with its doctype source, and filter out
    # non-translatable strings (icon classes, pixel sizes, eval expressions).
    messages = [message for message in messages if message]
    messages = [('DocType: ' + name, message) for message in messages if is_translatable(message)]

    # extract from js, py files
    if not meta.custom:
        doctype_file_path = frappe.get_module_path(meta.module, "doctype", meta.name, meta.name)
        for suffix in (".js", "_list.js", "_list.html", "_calendar.js", "_dashboard.html"):
            messages.extend(get_messages_from_file(doctype_file_path + suffix))

    # workflow based on doctype
    messages.extend(get_messages_from_workflow(doctype=name))
    return messages
def get_messages_from_workflow(doctype=None, app_name=None):
    """Return (context, message) tuples for Workflow states, messages and transition
    actions — either for one doctype or for the Workflow fixtures of an app."""
    assert doctype or app_name, 'doctype or app_name should be provided'

    # translations for Workflows
    workflows = []
    if doctype:
        workflows = frappe.get_all('Workflow', filters={'document_type': doctype})
    else:
        fixtures = frappe.get_hooks('fixtures', app_name=app_name) or []
        for fixture in fixtures:
            # Fix: this comparison previously used the misspelling 'Worflow', so
            # plain-string Workflow fixtures were never picked up.
            if isinstance(fixture, string_types) and fixture == 'Workflow':
                workflows = frappe.get_all('Workflow')
                break
            elif isinstance(fixture, dict) and fixture.get('dt', fixture.get('doctype')) == 'Workflow':
                workflows.extend(frappe.get_all('Workflow', filters=fixture.get('filters')))

    messages = []
    for w in workflows:
        states = frappe.db.sql(
            'select distinct state from `tabWorkflow Document State` where parent=%s',
            (w['name'],), as_dict=True)
        messages.extend([('Workflow: ' + w['name'], state['state']) for state in states if is_translatable(state['state'])])

        states = frappe.db.sql(
            'select distinct message from `tabWorkflow Document State` where parent=%s and message is not null',
            (w['name'],), as_dict=True)
        messages.extend([("Workflow: " + w['name'], state['message'])
            for state in states if is_translatable(state['message'])])

        actions = frappe.db.sql(
            'select distinct action from `tabWorkflow Transition` where parent=%s',
            (w['name'],), as_dict=True)
        messages.extend([("Workflow: " + w['name'], action['action'])
            for action in actions if is_translatable(action['action'])])

    return messages
def get_messages_from_custom_fields(app_name):
    """Return (context, message) tuples for Custom Field labels, descriptions and
    Select options declared in the app's fixtures."""
    fixtures = frappe.get_hooks('fixtures', app_name=app_name) or []
    custom_fields = []

    for fixture in fixtures:
        if isinstance(fixture, string_types) and fixture == 'Custom Field':
            custom_fields = frappe.get_all('Custom Field', fields=['name','label', 'description', 'fieldtype', 'options'])
            break
        elif isinstance(fixture, dict) and fixture.get('dt', fixture.get('doctype')) == 'Custom Field':
            custom_fields.extend(frappe.get_all('Custom Field', filters=fixture.get('filters'),
                fields=['name','label', 'description', 'fieldtype', 'options']))

    messages = []
    for cf in custom_fields:
        for prop in ('label', 'description'):
            if not cf.get(prop) or not is_translatable(cf[prop]):
                continue
            messages.append(('Custom Field - {}: {}'.format(prop, cf['name']), cf[prop]))
        # Fix: select fields have fieldtype 'Select' (as used elsewhere in this module),
        # not 'Selection' — the old comparison never matched, so options were skipped.
        if cf['fieldtype'] == 'Select' and cf.get('options'):
            for option in cf['options'].split('\n'):
                if option and 'icon' not in option and is_translatable(option):
                    messages.append(('Custom Field - Description: ' + cf['name'], option))

    return messages
def get_messages_from_page(name):
    """Returns all translatable strings from a :class:`frappe.core.doctype.Page`
    :param name: Page name; messages come from its .py/.js/.html files."""
    return _get_messages_from_page_or_report("Page", name)
def get_messages_from_report(name):
    """Returns all translatable strings from a :class:`frappe.core.doctype.Report`
    :param name: Report name; messages come from its files, its SQL query column
        labels, and the report name itself."""
    report = frappe.get_doc("Report", name)
    messages = _get_messages_from_page_or_report("Report", name,
        frappe.db.get_value("DocType", report.ref_doctype, "module"))
    # TODO position here!
    if report.query:
        # Pull out quoted "Label:..." column headers from the report's SQL query.
        # NOTE(review): the character class '[^:,^"]' excludes ':', ',', '^' and '"'
        # — the '^' looks accidental; confirm intended label syntax.
        messages.extend([(None, message) for message in re.findall('"([^:,^"]*):', report.query) if is_translatable(message)])
    messages.append((None,report.report_name))
    return messages
def _get_messages_from_page_or_report(doctype, name, module=None):
    """Collect messages from the document's .py file plus any .js/.html files
    located in its module folder."""
    module = module or frappe.db.get_value(doctype, name, "module")
    doc_path = frappe.get_module_path(module, doctype, name)

    messages = get_messages_from_file(os.path.join(doc_path, frappe.scrub(name) +".py"))
    if os.path.exists(doc_path):
        for fname in os.listdir(doc_path):
            if fname.endswith((".js", ".html")):
                messages += get_messages_from_file(os.path.join(doc_path, fname))
    return messages
def get_server_messages(app):
    """Extracts all translatable strings (tagged with :func:`frappe._`) from Python modules
    inside an app"""
    messages = []
    skip_dirs = (".git", "public", "locale")
    extensions = ('.py', '.html', '.js', '.vue')
    for basepath, folders, files in os.walk(frappe.get_pymodule_path(app)):
        # Prune ignored directories in place so os.walk never descends into them.
        folders[:] = [d for d in folders if d not in skip_dirs]

        for fname in files:
            fname = frappe.as_unicode(fname)
            if fname.endswith(extensions):
                messages.extend(get_messages_from_file(os.path.join(basepath, fname)))

    return messages
def get_messages_from_include_files(app_name=None):
    """Returns messages from js files included at time of boot like desk.min.js for desk and web"""
    messages = []
    app_js = frappe.get_hooks("app_include_js", app_name=app_name) or []
    web_js = frappe.get_hooks("web_include_js", app_name=app_name) or []
    for file in app_js + web_js:
        messages.extend(get_messages_from_file(os.path.join(frappe.local.sites_path, file)))
    return messages
def get_all_messages_from_js_files(app_name=None):
    """Extracts all translatable strings from app `.js` files"""
    messages = []
    apps = [app_name] if app_name else frappe.get_installed_apps()
    for app in apps:
        public_path = frappe.get_app_path(app, "public")
        if not os.path.exists(public_path):
            continue
        for basepath, folders, files in os.walk(public_path):
            # Bundled third-party libraries are not translated.
            if "frappe/public/js/lib" in basepath:
                continue
            for fname in files:
                if fname.endswith((".js", ".html")):
                    messages.extend(get_messages_from_file(os.path.join(basepath, fname)))
    return messages
def get_messages_from_file(path):
    """Returns a list of transatable strings from a code file
    :param path: path of the code file
    :return: list of ("relative/path +line_no", message) tuples; empty list when
        the file does not exist.
    """
    apps_path = get_bench_dir()
    if os.path.exists(path):
        with open(path, 'r') as sourcefile:
            # Each position is the line number reported by extract_messages_from_code();
            # it is appended to the path as " +<line>" before making it bench-relative.
            return [(os.path.relpath(" +".join([path, str(pos)]), apps_path),
                message) for pos, message in extract_messages_from_code(sourcefile.read(), path.endswith(".py"))]
    else:
        # print "Translate: {0} missing".format(os.path.abspath(path))
        return []
def extract_messages_from_code(code, is_py=False):
    """Extracts translatable srings from a code file
    :param code: code from which translatable files are to be extracted
    :param is_py: include messages in triple quotes e.g. `_('''message''')`
    :return: list of (line_no, message) tuples."""
    try:
        code = frappe.as_unicode(render_include(code))
    except (TemplateError, ImportError, InvalidIncludePath):
        # Exception will occur when it encounters John Resig's microtemplating code
        pass

    messages = []
    # Fix: use raw strings for the regex patterns — '\(' is an invalid escape in a
    # normal string literal (DeprecationWarning on Python 3.6+); the patterns are
    # unchanged otherwise.
    messages += [(m.start(), m.groups()[0]) for m in re.compile(r'_\("([^"]*)"').finditer(code)]
    messages += [(m.start(), m.groups()[0]) for m in re.compile(r"_\('([^']*)'").finditer(code)]
    if is_py:
        messages += [(m.start(), m.groups()[0]) for m in re.compile(r'_\("{3}([^"]*)"{3}.*\)').finditer(code)]

    messages = [(pos, message) for pos, message in messages if is_translatable(message)]
    return pos_to_line_no(messages, code)
def is_translatable(m):
    """Heuristic filter: keep only strings containing letters that are not icon
    classes ("fa fa-*"), pixel sizes ("*px"), or eval expressions ("eval:*")."""
    has_letters = re.search("[a-zA-Z]", m) is not None
    return (has_letters
        and not m.startswith(("fa fa-", "eval:"))
        and not m.endswith("px"))
def pos_to_line_no(messages, code):
    """Convert character offsets to 1-based line numbers.
    :param messages: list of (char_offset, message) tuples.
    :param code: source text the offsets refer to.
    :return: list of (line_no, message) tuples, ordered by offset."""
    by_offset = sorted(messages, key=lambda entry: entry[0])
    # A message's line number is one more than the count of newlines before its offset.
    return [(code.count('\n', 0, offset) + 1, message) for offset, message in by_offset]
def read_csv_file(path):
    """Read CSV file and return as list of list
    :param path: File path
    :return: list of rows, each a list of unicode cell values."""
    from csv import reader
    if PY2:
        with codecs.open(path, 'r', 'utf-8') as msgfile:
            data = msgfile.read()

            # for japanese! #wtf
            # Strips ASCII file/group separator control characters (U+001C/U+001D)
            # found in some exported files.
            data = data.replace(chr(28), "").replace(chr(29), "")
            # Python 2 csv.reader only handles byte strings, so encode for parsing
            # and decode each cell back to unicode afterwards.
            data = reader([r.encode('utf-8') for r in data.splitlines()])
            newdata = [[text_type(val, 'utf-8') for val in row] for row in data]
    else:
        # Python 3: csv.reader handles unicode natively; newline='' per the csv docs.
        with io.open(path, mode='r', encoding='utf-8', newline='') as msgfile:
            data = reader(msgfile)
            newdata = [[ val for val in row ] for row in data]
    return newdata
def write_csv_file(path, app_messages, lang_dict):
    """Write translation CSV file.
    :param path: File path, usually `[app]/translations`.
    :param app_messages: Translatable strings for this app.
    :param lang_dict: Full translated dict.
    """
    # Sort by message text so the generated CSV is stable across runs.
    app_messages.sort(key = lambda x: x[1])
    from csv import writer
    # NOTE(review): binary mode plus explicit .encode() is the Python 2 csv idiom;
    # under Python 3 this writer would reject the str/bytes mix — confirm target runtime.
    with open(path, 'wb') as msgfile:
        w = writer(msgfile, lineterminator='\n')
        for p, m in app_messages:
            t = lang_dict.get(m, '')
            # strip whitespaces
            # Normalizes placeholders like "{ 0 }" back to "{0}".
            t = re.sub('{\s?([0-9]+)\s?}', "{\g<1>}", t)
            w.writerow([p.encode('utf-8') if p else '', m.encode('utf-8'), t.encode('utf-8')])
def get_untranslated(lang, untranslated_file, get_all=False):
    """Returns all untranslated strings for a language and writes in a file
    :param lang: Language code.
    :param untranslated_file: Output file path.
    :param get_all: Return all strings, translated or not."""
    clear_cache()
    apps = frappe.get_all_apps(True)

    messages = []
    untranslated = []
    for app in apps:
        messages.extend(get_messages_for_app(app))

    messages = deduplicate_messages(messages)

    def escape_newlines(s):
        # Encode escaped and literal newlines so each message occupies exactly one
        # line of the output file; reversed by restore_newlines() in update_translations().
        return (s.replace("\\\n", "|||||")
            .replace("\\n", "||||")
            .replace("\n", "|||"))

    if get_all:
        print(str(len(messages)) + " messages")
        with open(untranslated_file, "w") as f:
            for m in messages:
                # replace \n with ||| so that internal linebreaks don't get split
                # NOTE(review): .encode() on a text-mode file handle is a Python 2
                # idiom; this raises TypeError on Python 3 — confirm target runtime.
                f.write((escape_newlines(m[1]) + os.linesep).encode("utf-8"))
    else:
        full_dict = get_full_dict(lang)

        for m in messages:
            if not full_dict.get(m[1]):
                untranslated.append(m[1])

        if untranslated:
            print(str(len(untranslated)) + " missing translations of " + str(len(messages)))
            with open(untranslated_file, "w") as f:
                for m in untranslated:
                    # replace \n with ||| so that internal linebreaks don't get split
                    f.write((escape_newlines(m) + os.linesep).encode("utf-8"))
        else:
            print("all translated!")
def update_translations(lang, untranslated_file, translated_file):
    """Update translations from a source and target file for a given language.

    :param lang: Language code (e.g. `en`).
    :param untranslated_file: File path with the messages in English.
    :param translated_file: File path with messages in language to be updated."""
    clear_cache()
    full_dict = get_full_dict(lang)

    def restore_newlines(text):
        # Undo the ||| escaping done by get_untranslated; also tolerate
        # spaced-out variants ("| | |") some translation tools produce.
        for token, original in (
            ("|||||", "\\\n"),
            ("| | | | |", "\\\n"),
            ("||||", "\\n"),
            ("| | | |", "\\n"),
            ("|||", "\n"),
            ("| | |", "\n"),
        ):
            text = text.replace(token, original)
        return text

    sources = frappe.get_file_items(untranslated_file, ignore_empty_lines=False)
    targets = frappe.get_file_items(translated_file, ignore_empty_lines=False)
    translation_dict = {
        restore_newlines(source): restore_newlines(target)
        for source, target in zip(sources, targets)
    }

    full_dict.update(translation_dict)

    for app in frappe.get_all_apps(True):
        write_translations_file(app, lang, full_dict)
def import_translations(lang, path):
    """Import translations from file in standard format"""
    clear_cache()
    # Merge the imported entries over the current full dict, then rewrite
    # every app's translation CSV with the merged result.
    merged = get_full_dict(lang)
    merged.update(get_translation_dict_from_file(path, lang, 'import'))
    for app_name in frappe.get_all_apps(True):
        write_translations_file(app_name, lang, merged)
def rebuild_all_translation_files():
    """Rebuild all translation files: `[app]/translations/[lang].csv`."""
    # Regenerate every (language, app) CSV from the current message catalog.
    for language in get_all_languages():
        for app_name in frappe.get_all_apps():
            write_translations_file(app_name, language)
def write_translations_file(app, lang, full_dict=None, app_messages=None):
    """Write a translation file for a given language.

    :param app: `app` for which translations are to be written.
    :param lang: Language code.
    :param full_dict: Full translated language dict (optional).
    :param app_messages: Source strings (optional).
    """
    messages = app_messages or get_messages_for_app(app)
    if not messages:
        # Nothing translatable in this app — no file to write.
        return

    translations_folder = frappe.get_pymodule_path(app, "translations")
    frappe.create_folder(translations_folder)
    csv_path = os.path.join(translations_folder, lang + ".csv")
    write_csv_file(csv_path, messages, full_dict or get_full_dict(lang))
def send_translations(translation_dict):
    """Append translated dict in `frappe.local.response`"""
    # Lazily create the "__messages" bucket, then merge in the new entries.
    response = frappe.local.response
    response.setdefault("__messages", {}).update(translation_dict)
def deduplicate_messages(messages):
    """Drop duplicate messages, keeping one entry per message text.

    Messages are (position, text) tuples. The result is sorted by text and,
    for each distinct text, keeps the first tuple in original order
    (the sort is stable).
    """
    by_text = operator.itemgetter(1)
    return [
        next(group)
        for _, group in itertools.groupby(sorted(messages, key=by_text), by_text)
    ]
def get_bench_dir():
    """Return the bench root: four levels above the `frappe` package file."""
    return os.path.join(frappe.__file__, *(os.pardir,) * 4)
def rename_language(old_name, new_name):
if not frappe.db.exists('Language', new_name):
return
language_in_system_settings = frappe.db.get_single_value("System Settings", "language")
if language_in_system_settings == old_name:
frappe.db.set_value("System Settings", "System Settings", "language", new_name)
frappe.db.sql("""update `tabUser` set language=%(new_name)s where language=%(old_name)s""",
{ "old_name": old_name, "new_name": new_name })
@frappe.whitelist()
def update_translations_for_source(source=None, translation_dict=None):
    """Sync Translation records for one source string from a JSON
    language -> target-text mapping. Existing records are updated or
    deleted; languages with no record yet get new documents."""
    if not (source and translation_dict):
        return

    translation_dict = json.loads(translation_dict)

    # Reconcile the Translation records that already exist for this source.
    translation_records = frappe.db.get_values(
        'Translation', {'source_name': source}, ['name', 'language'], as_dict=1)
    for record in translation_records:
        target = translation_dict.get(record.language, None)
        if target:
            doc = frappe.get_doc('Translation', record.name)
            doc.target_name = target
            doc.save()
            # Handled — drop it so only brand-new languages remain below.
            translation_dict.pop(record.language)
        else:
            # No translation supplied for this language any more.
            frappe.delete_doc('Translation', record.name)

    # Whatever is left has no existing record: insert fresh documents.
    for lang, target_name in iteritems(translation_dict):
        doc = frappe.new_doc('Translation')
        doc.language = lang
        doc.source_name = source
        doc.target_name = target_name
        doc.save()

    return translation_records
| |
import unittest
from unittest import mock
import lib.bitcoin as bitcoin
import lib.keystore as keystore
import lib.storage as storage
import lib.wallet as wallet
from plugins.trustedcoin import trustedcoin
# TODO passphrase/seed_extension
class TestWalletKeystoreAddressIntegrity(unittest.TestCase):
    # Each test derives a keystore from a fixed seed (or xprv/xpub) test
    # vector, checks the derived extended keys against known-good constants,
    # then builds a wallet and checks its first receiving/change addresses.
    # WalletStorage._write is patched in every test, so nothing touches disk.

    gap_limit = 1  # make tests run faster

    def _check_seeded_keystore_sanity(self, ks):
        # A seed-derived keystore is deterministic and spendable, keeps its
        # seed, and does not allow importing individual keys.
        self.assertTrue(ks.is_deterministic())
        self.assertFalse(ks.is_watching_only())
        self.assertFalse(ks.can_import())
        self.assertTrue(ks.has_seed())

    def _check_xpub_keystore_sanity(self, ks):
        # A keystore built from a bare xpub is watching-only and seedless.
        self.assertTrue(ks.is_deterministic())
        self.assertTrue(ks.is_watching_only())
        self.assertFalse(ks.can_import())
        self.assertFalse(ks.has_seed())

    def _create_standard_wallet(self, ks):
        """Build a single-keystore wallet around `ks` and derive its addresses."""
        store = storage.WalletStorage('if_this_exists_mocking_failed_648151893')
        store.put('keystore', ks.dump())
        store.put('gap_limit', self.gap_limit)
        w = wallet.Standard_Wallet(store)
        w.synchronize()
        return w

    def _create_multisig_wallet(self, ks1, ks2, ks3=None):
        """Creates a 2-of-2 or 2-of-3 multisig wallet."""
        store = storage.WalletStorage('if_this_exists_mocking_failed_648151893')
        store.put('x%d/' % 1, ks1.dump())
        store.put('x%d/' % 2, ks2.dump())
        if ks3 is None:
            multisig_type = "%dof%d" % (2, 2)
        else:
            multisig_type = "%dof%d" % (2, 3)
            store.put('x%d/' % 3, ks3.dump())
        store.put('wallet_type', multisig_type)
        store.put('gap_limit', self.gap_limit)
        w = wallet.Multisig_Wallet(store)
        w.synchronize()
        return w

    @mock.patch.object(storage.WalletStorage, '_write')
    def test_electrum_seed_standard(self, mock_write):
        # Electrum-native 'standard' seed -> xprv/xpub keystore -> p2pkh wallet.
        seed_words = 'cycle rocket west magnet parrot shuffle foot correct salt library feed song'
        self.assertEqual(bitcoin.seed_type(seed_words), 'standard')
        ks = keystore.from_seed(seed_words, '', False)
        self._check_seeded_keystore_sanity(ks)
        self.assertTrue(isinstance(ks, keystore.BIP32_KeyStore))
        self.assertEqual(ks.xprv, 'xprv9s21ZrQH143K32jECVM729vWgGq4mUDJCk1ozqAStTphzQtCTuoFmFafNoG1g55iCnBTXUzz3zWnDb5CVLGiFvmaZjuazHDL8a81cPQ8KL6')
        self.assertEqual(ks.xpub, 'xpub661MyMwAqRbcFWohJWt7PHsFEJfZAvw9ZxwQoDa4SoMgsDDM1T7WK3u9E4edkC4ugRnZ8E4xDZRpk8Rnts3Nbt97dPwT52CwBdDWroaZf8U')
        w = self._create_standard_wallet(ks)
        self.assertEqual(w.txin_type, 'p2pkh')
        self.assertEqual(w.get_receiving_addresses()[0], '1NNkttn1YvVGdqBW4PR6zvc3Zx3H5owKRf')
        self.assertEqual(w.get_change_addresses()[0], '1KSezYMhAJMWqFbVFB2JshYg69UpmEXR4D')

    @mock.patch.object(storage.WalletStorage, '_write')
    def test_electrum_seed_segwit(self, mock_write):
        # Electrum-native 'segwit' seed -> zprv/zpub keystore -> p2wpkh wallet.
        seed_words = 'bitter grass shiver impose acquire brush forget axis eager alone wine silver'
        self.assertEqual(bitcoin.seed_type(seed_words), 'segwit')
        ks = keystore.from_seed(seed_words, '', False)
        self._check_seeded_keystore_sanity(ks)
        self.assertTrue(isinstance(ks, keystore.BIP32_KeyStore))
        self.assertEqual(ks.xprv, 'zprvAZswDvNeJeha8qZ8g7efN3FXYVJLaEUsE9TW6qXDEbVe74AZ75c2sZFZXPNFzxnhChDQ89oC8C5AjWwHmH1HeRKE1c4kKBQAmjUDdKDUZw2')
        self.assertEqual(ks.xpub, 'zpub6nsHdRuY92FsMKdbn9BfjBCG6X8pyhCibNP6uDvpnw2cyrVhecvHRMa3Ne8kdJZxjxgwnpbHLkcR4bfnhHy6auHPJyDTQ3kianeuVLdkCYQ')
        w = self._create_standard_wallet(ks)
        self.assertEqual(w.txin_type, 'p2wpkh')
        self.assertEqual(w.get_receiving_addresses()[0], 'bc1q3g5tmkmlvxryhh843v4dz026avatc0zzr6h3af')
        self.assertEqual(w.get_change_addresses()[0], 'bc1qdy94n2q5qcp0kg7v9yzwe6wvfkhnvyzje7nx2p')

    @mock.patch.object(storage.WalletStorage, '_write')
    def test_electrum_seed_old(self, mock_write):
        # Legacy 'old' (pre-BIP32) Electrum seed -> master public key keystore.
        seed_words = 'powerful random nobody notice nothing important anyway look away hidden message over'
        self.assertEqual(bitcoin.seed_type(seed_words), 'old')
        ks = keystore.from_seed(seed_words, '', False)
        self._check_seeded_keystore_sanity(ks)
        self.assertTrue(isinstance(ks, keystore.Old_KeyStore))
        self.assertEqual(ks.mpk, 'e9d4b7866dd1e91c862aebf62a49548c7dbf7bcc6e4b7b8c9da820c7737968df9c09d5a3e271dc814a29981f81b3faaf2737b551ef5dcc6189cf0f8252c442b3')
        w = self._create_standard_wallet(ks)
        self.assertEqual(w.txin_type, 'p2pkh')
        self.assertEqual(w.get_receiving_addresses()[0], '1FJEEB8ihPMbzs2SkLmr37dHyRFzakqUmo')
        self.assertEqual(w.get_change_addresses()[0], '1KRW8pH6HFHZh889VDq6fEKvmrsmApwNfe')

    @mock.patch.object(storage.WalletStorage, '_write')
    def test_electrum_seed_2fa(self, mock_write):
        # 2FA seed: two user keystores plus a TrustedCoin-derived third xpub,
        # assembled into a 2-of-3 p2sh multisig wallet.
        seed_words = 'kiss live scene rude gate step hip quarter bunker oxygen motor glove'
        self.assertEqual(bitcoin.seed_type(seed_words), '2fa')
        xprv1, xpub1, xprv2, xpub2 = trustedcoin.TrustedCoinPlugin.xkeys_from_seed(seed_words, '')
        ks1 = keystore.from_xprv(xprv1)
        self.assertTrue(isinstance(ks1, keystore.BIP32_KeyStore))
        self.assertEqual(ks1.xprv, 'xprv9uraXy9F3HP7i8QDqwNTBiD8Jf4bPD4Epif8cS8qbUbgeidUesyZpKmzfcSeHutsGfFnjgih7kzwTB5UQVRNB5LoXaNc8pFusKYx3KVVvYR')
        self.assertEqual(ks1.xpub, 'xpub68qvwUg8sewQvcUgwxuTYr9rrgu5nfn6BwajQpYT9p8fXWxdCRHpN86UWruWJAD1ede8Sv8ERrTa22Gyc4SBfm7zFpcyoVWVBKCVwnw6s1J')
        self.assertEqual(ks1.xpub, xpub1)
        ks2 = keystore.from_xprv(xprv2)
        self.assertTrue(isinstance(ks2, keystore.BIP32_KeyStore))
        self.assertEqual(ks2.xprv, 'xprv9uraXy9F3HP7kKSiRAvLV7Nrjj7YzspDys7dvGLLu4tLZT49CEBxPWp88dHhVxvZ69SHrPQMUCWjj4Ka2z9kNvs1HAeEf3extGGeSWqEVqf')
        self.assertEqual(ks2.xpub, 'xpub68qvwUg8sewQxoXBXCTLrFKbHkx3QLY5M63EiejxTQRKSFPHjmWCwK8byvZMM2wZNYA3SmxXoma3M1zxhGESHZwtB7SwrxRgKXAG8dCD2eS')
        self.assertEqual(ks2.xpub, xpub2)
        # The third cosigner xpub is derived from TrustedCoin's signing xpub
        # and the user id computed from the two user xpubs.
        long_user_id, short_id = trustedcoin.get_user_id(
            {'x1/': {'xpub': xpub1},
             'x2/': {'xpub': xpub2}})
        xpub3 = trustedcoin.make_xpub(trustedcoin.signing_xpub, long_user_id)
        ks3 = keystore.from_xpub(xpub3)
        self._check_xpub_keystore_sanity(ks3)
        self.assertTrue(isinstance(ks3, keystore.BIP32_KeyStore))
        w = self._create_multisig_wallet(ks1, ks2, ks3)
        self.assertEqual(w.txin_type, 'p2sh')
        self.assertEqual(w.get_receiving_addresses()[0], '35L8XmCDoEBKeaWRjvmZvoZvhp8BXMMMPV')
        self.assertEqual(w.get_change_addresses()[0], '3PeZEcumRqHSPNN43hd4yskGEBdzXgY8Cy')

    @mock.patch.object(storage.WalletStorage, '_write')
    def test_bip39_seed_bip44_standard(self, mock_write):
        # BIP39 seed at the BIP44 derivation path -> p2pkh wallet.
        seed_words = 'treat dwarf wealth gasp brass outside high rent blood crowd make initial'
        self.assertEqual(keystore.bip39_is_checksum_valid(seed_words), (True, True))
        ks = keystore.from_bip39_seed(seed_words, '', "m/44'/0'/0'")
        self.assertTrue(isinstance(ks, keystore.BIP32_KeyStore))
        self.assertEqual(ks.xprv, 'xprv9zGLcNEb3cHUKizLVBz6RYeE9bEZAVPjH2pD1DEzCnPcsemWc3d3xTao8sfhfUmDLMq6e3RcEMEvJG1Et8dvfL8DV4h7mwm9J6AJsW9WXQD')
        self.assertEqual(ks.xpub, 'xpub6DFh1smUsyqmYD4obDX6ngaxhd53Zx7aeFjoobebm7vbkT6f9awJWFuGzBT9FQJEWFBL7UyhMXtYzRcwDuVbcxtv9Ce2W9eMm4KXLdvdbjv')
        w = self._create_standard_wallet(ks)
        self.assertEqual(w.txin_type, 'p2pkh')
        self.assertEqual(w.get_receiving_addresses()[0], '16j7Dqk3Z9DdTdBtHcCVLaNQy9MTgywUUo')
        self.assertEqual(w.get_change_addresses()[0], '1GG5bVeWgAp5XW7JLCphse14QaC4qiHyWn')

    @mock.patch.object(storage.WalletStorage, '_write')
    def test_bip39_seed_bip49_p2sh_segwit(self, mock_write):
        # Same BIP39 seed at the BIP49 path -> p2wpkh-p2sh wallet.
        seed_words = 'treat dwarf wealth gasp brass outside high rent blood crowd make initial'
        self.assertEqual(keystore.bip39_is_checksum_valid(seed_words), (True, True))
        ks = keystore.from_bip39_seed(seed_words, '', "m/49'/0'/0'")
        self.assertTrue(isinstance(ks, keystore.BIP32_KeyStore))
        self.assertEqual(ks.xprv, 'yprvAJEYHeNEPcyBoQYM7sGCxDiNCTX65u4ANgZuSGTrKN5YCC9MP84SBayrgaMyZV7zvkHrr3HVPTK853s2SPk4EttPazBZBmz6QfDkXeE8Zr7')
        self.assertEqual(ks.xpub, 'ypub6XDth9u8DzXV1tcpDtoDKMf6kVMaVMn1juVWEesTshcX4zUVvfNgjPJLXrD9N7AdTLnbHFL64KmBn3SNaTe69iZYbYCqLCCNPZKbLz9niQ4')
        w = self._create_standard_wallet(ks)
        self.assertEqual(w.txin_type, 'p2wpkh-p2sh')
        self.assertEqual(w.get_receiving_addresses()[0], '35ohQTdNykjkF1Mn9nAVEFjupyAtsPAK1W')
        self.assertEqual(w.get_change_addresses()[0], '3KaBTcviBLEJajTEMstsA2GWjYoPzPK7Y7')

    @mock.patch.object(storage.WalletStorage, '_write')
    def test_bip39_seed_bip84_native_segwit(self, mock_write):
        # test case from bip84
        seed_words = 'abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about'
        self.assertEqual(keystore.bip39_is_checksum_valid(seed_words), (True, True))
        ks = keystore.from_bip39_seed(seed_words, '', "m/84'/0'/0'")
        self.assertTrue(isinstance(ks, keystore.BIP32_KeyStore))
        self.assertEqual(ks.xprv, 'zprvAdG4iTXWBoARxkkzNpNh8r6Qag3irQB8PzEMkAFeTRXxHpbF9z4QgEvBRmfvqWvGp42t42nvgGpNgYSJA9iefm1yYNZKEm7z6qUWCroSQnE')
        self.assertEqual(ks.xpub, 'zpub6rFR7y4Q2AijBEqTUquhVz398htDFrtymD9xYYfG1m4wAcvPhXNfE3EfH1r1ADqtfSdVCToUG868RvUUkgDKf31mGDtKsAYz2oz2AGutZYs')
        w = self._create_standard_wallet(ks)
        self.assertEqual(w.txin_type, 'p2wpkh')
        self.assertEqual(w.get_receiving_addresses()[0], 'bc1qcr8te4kr609gcawutmrza0j4xv80jy8z306fyu')
        self.assertEqual(w.get_change_addresses()[0], 'bc1q8c6fshw2dlwun7ekn9qwf37cu2rn755upcp6el')

    @mock.patch.object(storage.WalletStorage, '_write')
    def test_electrum_multisig_seed_standard(self, mock_write):
        # Electrum 'standard' seed + a cosigner xpub -> 2-of-2 p2sh multisig.
        seed_words = 'blast uniform dragon fiscal ensure vast young utility dinosaur abandon rookie sure'
        self.assertEqual(bitcoin.seed_type(seed_words), 'standard')
        ks1 = keystore.from_seed(seed_words, '', True)
        self._check_seeded_keystore_sanity(ks1)
        self.assertTrue(isinstance(ks1, keystore.BIP32_KeyStore))
        self.assertEqual(ks1.xprv, 'xprv9s21ZrQH143K3t9vo23J3hajRbzvkRLJ6Y1zFrUFAfU3t8oooMPfb7f87cn5KntgqZs5nipZkCiBFo5ZtaSD2eDo7j7CMuFV8Zu6GYLTpY6')
        self.assertEqual(ks1.xpub, 'xpub661MyMwAqRbcGNEPu3aJQqXTydqR9t49Tkwb4Esrj112kw8xLthv8uybxvaki4Ygt9xiwZUQGeFTG7T2TUzR3eA4Zp3aq5RXsABHFBUrq4c')
        # electrum seed: ghost into match ivory badge robot record tackle radar elbow traffic loud
        ks2 = keystore.from_xpub('xpub661MyMwAqRbcGfCPEkkyo5WmcrhTq8mi3xuBS7VEZ3LYvsgY1cCFDbenT33bdD12axvrmXhuX3xkAbKci3yZY9ZEk8vhLic7KNhLjqdh5ec')
        self._check_xpub_keystore_sanity(ks2)
        self.assertTrue(isinstance(ks2, keystore.BIP32_KeyStore))
        w = self._create_multisig_wallet(ks1, ks2)
        self.assertEqual(w.txin_type, 'p2sh')
        self.assertEqual(w.get_receiving_addresses()[0], '32ji3QkAgXNz6oFoRfakyD3ys1XXiERQYN')
        self.assertEqual(w.get_change_addresses()[0], '36XWwEHrrVCLnhjK5MrVVGmUHghr9oWTN1')

    @mock.patch.object(storage.WalletStorage, '_write')
    def test_electrum_multisig_seed_segwit(self, mock_write):
        # Electrum 'segwit' seed + a cosigner Zpub -> 2-of-2 p2wsh multisig.
        seed_words = 'snow nest raise royal more walk demise rotate smooth spirit canyon gun'
        self.assertEqual(bitcoin.seed_type(seed_words), 'segwit')
        ks1 = keystore.from_seed(seed_words, '', True)
        self._check_seeded_keystore_sanity(ks1)
        self.assertTrue(isinstance(ks1, keystore.BIP32_KeyStore))
        self.assertEqual(ks1.xprv, 'ZprvAjxLRqPiDfPDxXrm8JvcoCGRAW6xUtktucG6AMtdzaEbTEJN8qcECvujfhtDU3jLJ9g3Dr3Gz5m1ypfMs8iSUh62gWyHZ73bYLRWyeHf6y4')
        self.assertEqual(ks1.xpub, 'Zpub6xwgqLvc42wXB1wEELTdALD9iXwStMUkGqBgxkJFYumaL2dWgNvUkjEDWyDFZD3fZuDWDzd1KQJ4NwVHS7hs6H6QkpNYSShfNiUZsgMdtNg')
        # electrum seed: hedgehog sunset update estate number jungle amount piano friend donate upper wool
        ks2 = keystore.from_xpub('Zpub6y4oYeETXAbzLNg45wcFDGwEG3vpgsyMJybiAfi2pJtNF3i3fJVxK2BeZJaw7VeKZm192QHvXP3uHDNpNmNDbQft9FiMzkKUhNXQafUMYUY')
        self._check_xpub_keystore_sanity(ks2)
        self.assertTrue(isinstance(ks2, keystore.BIP32_KeyStore))
        w = self._create_multisig_wallet(ks1, ks2)
        self.assertEqual(w.txin_type, 'p2wsh')
        self.assertEqual(w.get_receiving_addresses()[0], 'bc1qvzezdcv6vs5h45ugkavp896e0nde5c5lg5h0fwe2xyfhnpkxq6gq7pnwlc')
        self.assertEqual(w.get_change_addresses()[0], 'bc1qxqf840dqswcmu7a8v82fj6ej0msx08flvuy6kngr7axstjcaq6us9hrehd')

    @mock.patch.object(storage.WalletStorage, '_write')
    def test_bip39_multisig_seed_bip45_standard(self, mock_write):
        # BIP39 seed at the BIP45 path + cosigner xpub -> 2-of-2 p2sh multisig.
        seed_words = 'treat dwarf wealth gasp brass outside high rent blood crowd make initial'
        self.assertEqual(keystore.bip39_is_checksum_valid(seed_words), (True, True))
        ks1 = keystore.from_bip39_seed(seed_words, '', "m/45'/0")
        self.assertTrue(isinstance(ks1, keystore.BIP32_KeyStore))
        self.assertEqual(ks1.xprv, 'xprv9vyEFyXf7pYVv4eDU3hhuCEAHPHNGuxX73nwtYdpbLcqwJCPwFKknAK8pHWuHHBirCzAPDZ7UJHrYdhLfn1NkGp9rk3rVz2aEqrT93qKRD9')
        self.assertEqual(ks1.xpub, 'xpub69xafV4YxC6o8Yiga5EiGLAtqR7rgNgNUGiYgw3S9g9pp6XYUne1KxdcfYtxwmA3eBrzMFuYcNQKfqsXCygCo4GxQFHfywxpUbKNfYvGJka')
        # bip39 seed: tray machine cook badge night page project uncover ritual toward person enact
        # der: m/45'/0
        ks2 = keystore.from_xpub('xpub6B26nSWddbWv7J3qQn9FbwPPQktSBdPQfLfHhRK4375QoZq8fvM8rQey1koGSTxC5xVoMzNMaBETMUmCqmXzjc8HyAbN7LqrvE4ovGRwNGg')
        self._check_xpub_keystore_sanity(ks2)
        self.assertTrue(isinstance(ks2, keystore.BIP32_KeyStore))
        w = self._create_multisig_wallet(ks1, ks2)
        self.assertEqual(w.txin_type, 'p2sh')
        self.assertEqual(w.get_receiving_addresses()[0], '3JPTQ2nitVxXBJ1yhMeDwH6q417UifE3bN')
        self.assertEqual(w.get_change_addresses()[0], '3FGyDuxgUDn2pSZe5xAJH1yUwSdhzDMyEE')

    @mock.patch.object(storage.WalletStorage, '_write')
    def test_bip39_multisig_seed_p2sh_segwit(self, mock_write):
        # bip39 seed: pulse mixture jazz invite dune enrich minor weapon mosquito flight fly vapor
        # der: m/49'/0'/0'
        # NOTE: there is currently no bip43 standard derivation path for p2wsh-p2sh
        ks1 = keystore.from_xprv('YprvAUXFReVvDjrPerocC3FxVH748sJUTvYjkAhtKop5VnnzVzMEHr1CHrYQKZwfJn1As3X4LYMav6upxd5nDiLb6SCjRZrBH76EFvyQAG4cn79')
        self.assertTrue(isinstance(ks1, keystore.BIP32_KeyStore))
        self.assertEqual(ks1.xpub, 'Ypub6hWbqA2p47QgsLt5J4nxrR3ngu8xsPGb7PdV8CDh48KyNngNqPKSqertAqYhQ4umELu1UsZUCYfj9XPA6AdSMZWDZQobwF7EJ8uNrECaZg1')
        # bip39 seed: slab mixture skin evoke harsh tattoo rare crew sphere extend balcony frost
        # der: m/49'/0'/0'
        ks2 = keystore.from_xpub('Ypub6iNDhL4WWq5kFZcdFqHHwX4YTH4rYGp8xbndpRrY7WNZFFRfogSrL7wRTajmVHgR46AT1cqUG1mrcRd7h1WXwBsgX2QvT3zFbBCDiSDLkau')
        self._check_xpub_keystore_sanity(ks2)
        self.assertTrue(isinstance(ks2, keystore.BIP32_KeyStore))
        w = self._create_multisig_wallet(ks1, ks2)
        self.assertEqual(w.txin_type, 'p2wsh-p2sh')
        self.assertEqual(w.get_receiving_addresses()[0], '35LeC45QgCVeRor1tJD6LiDgPbybBXisns')
        self.assertEqual(w.get_change_addresses()[0], '39RhtDchc6igmx5tyoimhojFL1ZbQBrXa6')
| |
"""
:codeauthor: Rahul Handay <rahulha@saltstack.com>
"""
import salt.states.iptables as iptables
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
class IptablesTestCase(TestCase, LoaderModuleMockMixin):
    """
    Validate the iptables state
    """

    # NOTE: these tests drive each state function through its branches by
    # giving the patched __salt__ functions a MagicMock whose side_effect
    # list is consumed one element per call — the order of the assertions
    # below therefore matters and must match the side_effect sequences.

    def setup_loader_modules(self):
        # Minimal loader setup: no extra module globals needed.
        return {iptables: {}}

    def test_chain_present(self):
        """
        Test to verify the chain is exist.
        """
        ret = {"name": "salt", "changes": {}, "result": True, "comment": ""}
        # check_chain answers: already-present, then absent for the
        # test=True, create-success, and create-failure branches.
        mock = MagicMock(side_effect=[True, False, False, False])
        with patch.dict(iptables.__salt__, {"iptables.check_chain": mock}):
            ret.update(
                {
                    "comment": (
                        "iptables salt chain is already exist in filter table for ipv4"
                    )
                }
            )
            self.assertDictEqual(iptables.chain_present("salt"), ret)
            with patch.dict(iptables.__opts__, {"test": True}):
                ret.update(
                    {
                        "comment": (
                            "iptables salt chain in filter"
                            " table needs to be set for ipv4"
                        ),
                        "result": None,
                    }
                )
                self.assertDictEqual(iptables.chain_present("salt"), ret)
            with patch.dict(iptables.__opts__, {"test": False}):
                # new_chain: True -> created; "" (falsy) -> failure path.
                mock = MagicMock(side_effect=[True, ""])
                with patch.dict(iptables.__salt__, {"iptables.new_chain": mock}):
                    ret.update(
                        {
                            "result": True,
                            "comment": (
                                "iptables salt chain in filter"
                                " table create success for ipv4"
                            ),
                            "changes": {"locale": "salt"},
                        }
                    )
                    self.assertDictEqual(iptables.chain_present("salt"), ret)
                    ret.update(
                        {
                            "changes": {},
                            "result": False,
                            "comment": (
                                "Failed to create salt chain in filter table: for ipv4"
                            ),
                        }
                    )
                    self.assertDictEqual(iptables.chain_present("salt"), ret)

    def test_chain_absent(self):
        """
        Test to verify the chain is absent.
        """
        ret = {"name": "salt", "changes": {}, "result": True, "comment": ""}
        # check_chain answers: already-absent, then present for the
        # test=True, delete-success, and flush-failure branches.
        mock = MagicMock(side_effect=[False, True, True, True])
        with patch.dict(iptables.__salt__, {"iptables.check_chain": mock}):
            ret.update(
                {
                    "comment": (
                        "iptables salt chain is already absent in filter table for ipv4"
                    )
                }
            )
            self.assertDictEqual(iptables.chain_absent("salt"), ret)
            with patch.dict(iptables.__opts__, {"test": True}):
                ret.update(
                    {
                        "comment": (
                            "iptables salt chain in filter"
                            " table needs to be removed ipv4"
                        ),
                        "result": None,
                    }
                )
                self.assertDictEqual(iptables.chain_absent("salt"), ret)
            with patch.dict(iptables.__opts__, {"test": False}):
                # flush: False (no error) -> proceed to delete; "a" -> failure.
                mock = MagicMock(side_effect=[False, "a"])
                with patch.dict(iptables.__salt__, {"iptables.flush": mock}):
                    mock = MagicMock(return_value=True)
                    with patch.dict(iptables.__salt__, {"iptables.delete_chain": mock}):
                        ret.update(
                            {
                                "changes": {"locale": "salt"},
                                "comment": (
                                    "iptables salt chain in filter"
                                    " table delete success for ipv4"
                                ),
                                "result": True,
                            }
                        )
                        self.assertDictEqual(iptables.chain_absent("salt"), ret)
                        ret.update(
                            {
                                "changes": {},
                                "result": False,
                                "comment": (
                                    "Failed to flush salt chain in filter table: a for ipv4"
                                ),
                            }
                        )
                        self.assertDictEqual(iptables.chain_absent("salt"), ret)

    def test_append(self):
        """
        Test to append a rule to a chain
        """
        ret = {"name": "salt", "changes": {}, "result": None, "comment": ""}
        # Empty rules list short-circuits before any __salt__ call.
        self.assertDictEqual(iptables.append("salt", rules=[]), ret)
        mock = MagicMock(return_value=[])
        with patch.object(iptables, "_STATE_INTERNAL_KEYWORDS", mock):
            # build_rule always renders the rule as "a".
            mock = MagicMock(return_value="a")
            with patch.dict(iptables.__salt__, {"iptables.build_rule": mock}):
                # check results drive: already-set, test=True, append
                # success/failure, then the save branch (not set, then set).
                mock = MagicMock(side_effect=[True, False, False, False, False, True])
                with patch.dict(iptables.__salt__, {"iptables.check": mock}):
                    ret.update(
                        {
                            "comment": (
                                "iptables rule for salt already set (a) for ipv4"
                            ),
                            "result": True,
                        }
                    )
                    self.assertDictEqual(
                        iptables.append("salt", table="", chain=""), ret
                    )
                    with patch.dict(iptables.__opts__, {"test": True}):
                        ret.update(
                            {
                                "result": None,
                                "comment": (
                                    "iptables rule for salt"
                                    " needs to be set (a) for ipv4"
                                ),
                            }
                        )
                        self.assertDictEqual(
                            iptables.append("salt", table="", chain=""), ret
                        )
                    with patch.dict(iptables.__opts__, {"test": False}):
                        mock = MagicMock(side_effect=[True, False, True, True])
                        with patch.dict(iptables.__salt__, {"iptables.append": mock}):
                            ret.update(
                                {
                                    "changes": {"locale": "salt"},
                                    "result": True,
                                    "comment": (
                                        "Set iptables rule for salt to: a for ipv4"
                                    ),
                                }
                            )
                            self.assertDictEqual(
                                iptables.append("salt", table="", chain=""), ret
                            )
                            ret.update(
                                {
                                    "changes": {},
                                    "result": False,
                                    "comment": (
                                        "Failed to set iptables"
                                        " rule for salt.\nAttempted rule was"
                                        " a for ipv4"
                                    ),
                                }
                            )
                            self.assertDictEqual(
                                iptables.append("salt", table="", chain=""), ret
                            )
                            # save=... branch: rule is appended and then
                            # persisted via iptables.save.
                            mock_save = MagicMock(
                                side_effect=['Wrote 1 lines to "/tmp/iptables"', ""]
                            )
                            with patch.dict(
                                iptables.__salt__, {"iptables.save": mock_save}
                            ):
                                mock_get_saved_rules = MagicMock(side_effect=[""])
                                with patch.dict(
                                    iptables.__salt__,
                                    {"iptables.get_saved_rules": mock_get_saved_rules},
                                ):
                                    mock = MagicMock(side_effect=[""])
                                    with patch.dict(
                                        iptables.__salt__, {"iptables.get_rules": mock}
                                    ):
                                        ret.update(
                                            {
                                                "changes": {"locale": "salt"},
                                                "result": True,
                                                "comment": "Set and saved iptables rule"
                                                ' salt for ipv4\na\nWrote 1 lines to "/tmp/iptables"',
                                            }
                                        )
                                        self.assertDictEqual(
                                            iptables.append(
                                                "salt",
                                                table="",
                                                chain="",
                                                save="/tmp/iptables",
                                            ),
                                            ret,
                                        )
                                        ret.update(
                                            {
                                                "changes": {},
                                                "result": True,
                                                "comment": "iptables rule for salt already set (a) for ipv4",
                                            }
                                        )
                                        self.assertDictEqual(
                                            iptables.append(
                                                "salt",
                                                table="",
                                                chain="",
                                                save="/tmp/iptables",
                                            ),
                                            ret,
                                        )
                                        # The custom save path must be passed
                                        # through to both helper calls.
                                        self.assertEqual(
                                            mock_get_saved_rules.mock_calls[0][2][
                                                "conf_file"
                                            ],
                                            "/tmp/iptables",
                                        )
                                        self.assertEqual(
                                            mock_save.mock_calls[0][2]["filename"],
                                            "/tmp/iptables",
                                        )

    def test_insert(self):
        """
        Test to insert a rule into a chain
        """
        ret = {"name": "salt", "changes": {}, "result": None, "comment": ""}
        # Empty rules list short-circuits before any __salt__ call.
        self.assertDictEqual(iptables.insert("salt", rules=[]), ret)
        mock = MagicMock(return_value=[])
        with patch.object(iptables, "_STATE_INTERNAL_KEYWORDS", mock):
            mock = MagicMock(return_value="a")
            with patch.dict(iptables.__salt__, {"iptables.build_rule": mock}):
                # Same branch sequence as test_append, for insert.
                mock = MagicMock(side_effect=[True, False, False, False, False, True])
                with patch.dict(iptables.__salt__, {"iptables.check": mock}):
                    ret.update(
                        {
                            "comment": (
                                "iptables rule for salt already set for ipv4 (a)"
                            ),
                            "result": True,
                        }
                    )
                    self.assertDictEqual(
                        iptables.insert("salt", table="", chain=""), ret
                    )
                    with patch.dict(iptables.__opts__, {"test": True}):
                        ret.update(
                            {
                                "result": None,
                                "comment": (
                                    "iptables rule for salt"
                                    " needs to be set for ipv4 (a)"
                                ),
                            }
                        )
                        self.assertDictEqual(
                            iptables.insert("salt", table="", chain=""), ret
                        )
                    with patch.dict(iptables.__opts__, {"test": False}):
                        mock = MagicMock(side_effect=[False, True, False, True])
                        with patch.dict(iptables.__salt__, {"iptables.insert": mock}):
                            ret.update(
                                {
                                    "changes": {"locale": "salt"},
                                    "result": True,
                                    "comment": (
                                        "Set iptables rule for salt to: a for ipv4"
                                    ),
                                }
                            )
                            self.assertDictEqual(
                                iptables.insert(
                                    "salt", table="", chain="", position=""
                                ),
                                ret,
                            )
                            ret.update(
                                {
                                    "changes": {},
                                    "result": False,
                                    "comment": (
                                        "Failed to set iptables"
                                        " rule for salt.\nAttempted rule was a"
                                    ),
                                }
                            )
                            self.assertDictEqual(
                                iptables.insert(
                                    "salt", table="", chain="", position=""
                                ),
                                ret,
                            )
                            # save=... branch, mirroring test_append.
                            mock_save = MagicMock(
                                side_effect=['Wrote 1 lines to "/tmp/iptables"', ""]
                            )
                            with patch.dict(
                                iptables.__salt__, {"iptables.save": mock_save}
                            ):
                                mock_get_saved_rules = MagicMock(side_effect=[""])
                                with patch.dict(
                                    iptables.__salt__,
                                    {"iptables.get_saved_rules": mock_get_saved_rules},
                                ):
                                    mock = MagicMock(side_effect=[""])
                                    with patch.dict(
                                        iptables.__salt__, {"iptables.get_rules": mock}
                                    ):
                                        ret.update(
                                            {
                                                "changes": {"locale": "salt"},
                                                "result": True,
                                                "comment": "Set and saved iptables rule"
                                                ' salt for ipv4\na\nWrote 1 lines to "/tmp/iptables"',
                                            }
                                        )
                                        self.assertDictEqual(
                                            iptables.insert(
                                                "salt",
                                                table="",
                                                chain="",
                                                position="",
                                                save="/tmp/iptables",
                                            ),
                                            ret,
                                        )
                                        ret.update(
                                            {
                                                "changes": {},
                                                "result": True,
                                                "comment": "iptables rule for salt already set for ipv4 (a)",
                                            }
                                        )
                                        self.assertDictEqual(
                                            iptables.insert(
                                                "salt",
                                                table="",
                                                chain="",
                                                position="",
                                                save="/tmp/iptables",
                                            ),
                                            ret,
                                        )
                                        self.assertEqual(
                                            mock_get_saved_rules.mock_calls[0][2][
                                                "conf_file"
                                            ],
                                            "/tmp/iptables",
                                        )
                                        self.assertEqual(
                                            mock_save.mock_calls[0][2]["filename"],
                                            "/tmp/iptables",
                                        )

    def test_delete(self):
        """
        Test to delete a rule to a chain
        """
        ret = {"name": "salt", "changes": {}, "result": None, "comment": ""}
        # Empty rules list short-circuits before any __salt__ call.
        self.assertDictEqual(iptables.delete("salt", rules=[]), ret)
        mock = MagicMock(return_value=[])
        with patch.object(iptables, "_STATE_INTERNAL_KEYWORDS", mock):
            mock = MagicMock(return_value="a")
            with patch.dict(iptables.__salt__, {"iptables.build_rule": mock}):
                # Inverted sequence vs append/insert: delete acts when the
                # rule IS present.
                mock = MagicMock(side_effect=[False, True, True, True, True, False])
                with patch.dict(iptables.__salt__, {"iptables.check": mock}):
                    ret.update(
                        {
                            "comment": (
                                "iptables rule for salt already absent for ipv4 (a)"
                            ),
                            "result": True,
                        }
                    )
                    self.assertDictEqual(
                        iptables.delete("salt", table="", chain=""), ret
                    )
                    with patch.dict(iptables.__opts__, {"test": True}):
                        ret.update(
                            {
                                "result": None,
                                "comment": (
                                    "iptables rule for salt needs"
                                    " to be deleted for ipv4 (a)"
                                ),
                            }
                        )
                        self.assertDictEqual(
                            iptables.delete("salt", table="", chain=""), ret
                        )
                    with patch.dict(iptables.__opts__, {"test": False}):
                        mock = MagicMock(side_effect=[False, True, False, False])
                        with patch.dict(iptables.__salt__, {"iptables.delete": mock}):
                            ret.update(
                                {
                                    "result": True,
                                    "changes": {"locale": "salt"},
                                    "comment": "Delete iptables rule for salt a",
                                }
                            )
                            self.assertDictEqual(
                                iptables.delete(
                                    "salt", table="", chain="", position=""
                                ),
                                ret,
                            )
                            ret.update(
                                {
                                    "result": False,
                                    "changes": {},
                                    "comment": (
                                        "Failed to delete iptables"
                                        " rule for salt.\nAttempted rule was a"
                                    ),
                                }
                            )
                            self.assertDictEqual(
                                iptables.delete(
                                    "salt", table="", chain="", position=""
                                ),
                                ret,
                            )
                            # save=... branch for delete.
                            mock_save = MagicMock(
                                side_effect=['Wrote 1 lines to "/tmp/iptables"', ""]
                            )
                            with patch.dict(
                                iptables.__salt__, {"iptables.save": mock_save}
                            ):
                                mock = MagicMock(side_effect=[True, False])
                                with patch.dict(
                                    iptables.__salt__, {"iptables.check": mock}
                                ):
                                    mock = MagicMock(side_effect=[""])
                                    with patch.dict(
                                        iptables.__salt__, {"iptables.get_rules": mock}
                                    ):
                                        ret.update(
                                            {
                                                "changes": {"locale": "salt"},
                                                "result": True,
                                                "comment": "Deleted and saved iptables rule"
                                                ' salt for ipv4\na\nWrote 1 lines to "/tmp/iptables"',
                                            }
                                        )
                                        self.assertDictEqual(
                                            iptables.delete(
                                                "salt",
                                                table="",
                                                chain="",
                                                save="/tmp/iptables",
                                            ),
                                            ret,
                                        )
                                        ret.update(
                                            {
                                                "changes": {},
                                                "result": True,
                                                "comment": "iptables rule for salt already absent for ipv4 (a)",
                                            }
                                        )
                                        self.assertDictEqual(
                                            iptables.delete(
                                                "salt",
                                                table="",
                                                chain="",
                                                save="/tmp/iptables",
                                            ),
                                            ret,
                                        )
                                        self.assertEqual(
                                            mock_save.mock_calls[0][2]["filename"],
                                            "/tmp/iptables",
                                        )

    def test_set_policy(self):
        """
        Test to sets the default policy for iptables firewall tables
        """
        ret = {"name": "salt", "changes": {}, "result": True, "comment": ""}
        mock = MagicMock(return_value=[])
        with patch.object(iptables, "_STATE_INTERNAL_KEYWORDS", mock):
            # Current policy is always reported as "stack".
            mock = MagicMock(return_value="stack")
            with patch.dict(iptables.__salt__, {"iptables.get_policy": mock}):
                ret.update(
                    {
                        "comment": (
                            "iptables default policy for chain"
                            " on table for ipv4 already set to stack"
                        )
                    }
                )
                self.assertDictEqual(
                    iptables.set_policy("salt", table="", chain="", policy="stack"), ret
                )
                with patch.dict(iptables.__opts__, {"test": True}):
                    ret.update(
                        {
                            "comment": (
                                "iptables default policy for chain"
                                " on table for ipv4 needs to be set"
                                " to sal"
                            ),
                            "result": None,
                        }
                    )
                    self.assertDictEqual(
                        iptables.set_policy("salt", table="", chain="", policy="sal"),
                        ret,
                    )
                with patch.dict(iptables.__opts__, {"test": False}):
                    # set_policy: False (no error) -> success; True -> failure.
                    mock = MagicMock(side_effect=[False, True])
                    with patch.dict(iptables.__salt__, {"iptables.set_policy": mock}):
                        ret.update(
                            {
                                "changes": {"locale": "salt"},
                                "comment": "Set default policy for to sal family ipv4",
                                "result": True,
                            }
                        )
                        self.assertDictEqual(
                            iptables.set_policy(
                                "salt", table="", chain="", policy="sal"
                            ),
                            ret,
                        )
                        ret.update(
                            {
                                "comment": "Failed to set iptables default policy",
                                "result": False,
                                "changes": {},
                            }
                        )
                        self.assertDictEqual(
                            iptables.set_policy(
                                "salt", table="", chain="", policy="sal"
                            ),
                            ret,
                        )

    def test_flush(self):
        """
        Test to flush current iptables state
        """
        ret = {"name": "salt", "changes": {}, "result": None, "comment": ""}
        mock = MagicMock(return_value=[])
        with patch.object(iptables, "_STATE_INTERNAL_KEYWORDS", mock):
            with patch.dict(iptables.__opts__, {"test": True}):
                ret.update(
                    {
                        "comment": (
                            "iptables rules in salt table filter"
                            " chain ipv4 family needs to be flushed"
                        )
                    }
                )
                self.assertDictEqual(iptables.flush("salt"), ret)
            with patch.dict(iptables.__opts__, {"test": False}):
                # flush: False (no error) -> success; True -> failure.
                mock = MagicMock(side_effect=[False, True])
                with patch.dict(iptables.__salt__, {"iptables.flush": mock}):
                    ret.update(
                        {
                            "changes": {"locale": "salt"},
                            "comment": (
                                "Flush iptables rules in table chain ipv4 family"
                            ),
                            "result": True,
                        }
                    )
                    self.assertDictEqual(
                        iptables.flush("salt", table="", chain=""), ret
                    )
                    ret.update(
                        {
                            "changes": {},
                            "comment": "Failed to flush iptables rules",
                            "result": False,
                        }
                    )
                    self.assertDictEqual(
                        iptables.flush("salt", table="", chain=""), ret
                    )

    def test_mod_aggregate(self):
        """
        Test to mod_aggregate function
        """
        # Non-append funs pass through unchanged; "append" with empty
        # chunks also returns the low chunk unchanged.
        self.assertDictEqual(
            iptables.mod_aggregate({"fun": "salt"}, [], []), {"fun": "salt"}
        )
        self.assertDictEqual(
            iptables.mod_aggregate({"fun": "append"}, [], []), {"fun": "append"}
        )
| |
"""
Create an author. This is the information we have
0 Blank - this is just the id
1 ORCID
2 acronym
3 Last Name
4 First Name
5 Middle Initial
6 Email Address
7 Department
8 Institution
9 Street Address
10 City
11 State / Region
12 ZIP / Postcode
13 Country
14 Order in the list
15 Contribution
16
17 Department
18 Institution
19 Street Address
20 City
21 State / Region
22 ZIP / Postcode
23 Country
"""
import os
import sys
import argparse
class Address:
    """
    The address of an institution
    """

    def __init__(self, verbose=False):
        """
        Create a new address

        :param verbose: add some additional output
        :type verbose: bool
        """
        self.department = None
        self.institution = None
        self.street = None
        self.city = None
        self.state = None
        self.zip = None
        self.country = None
        self.verbose = verbose

    def __eq__(self, other):
        """
        Two addresses are equal if they have the same street, city, state, and zip.
        (Department, institution, and country are deliberately not compared.)

        :param other: The other address
        :type other: Address
        :return: If they are equal
        :rtype: bool
        """
        if isinstance(other, Address):
            return (self.street, self.city, self.state, self.zip) \
                == (other.street, other.city, other.state, other.zip)
        return NotImplemented

    def __cmp__(self, other):
        """
        Compare whether two addresses are the same.

        :param other: The other Address
        :type other: Address
        :return: An int, zero if they are the same
        :rtype: int
        """
        if isinstance(other, Address):
            # Fixed: previously called bare `__eq__(other)` (a NameError).
            return 0 if self.__eq__(other) else 1
        return NotImplemented

    def __ne__(self, other):
        """
        Are these not equal?

        :param other: The other Address
        :type other: Address
        :return: If they are not equal
        :rtype: bool
        """
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        """
        Hash over the same fields __eq__ compares, so equal addresses share
        a hash value. (Previously this hashed the bound __str__ method —
        an identity-based hash that broke the hash/eq contract.)

        :rtype: int
        """
        return hash((self.street, self.city, self.state, self.zip))

    def __str__(self):
        """
        The to string function: comma-separated non-empty fields, in
        department .. country order.

        :rtype: str
        """
        toreturn = ""
        if self.department:
            toreturn = "{}, ".format(self.department)
        if self.institution:
            toreturn += "{}, ".format(self.institution)
        if self.street:
            toreturn += "{}, ".format(self.street)
        if self.city:
            toreturn += "{}, ".format(self.city)
        if self.state:
            toreturn += "{}, ".format(self.state)
        if self.zip:
            toreturn += "{}, ".format(self.zip)
        if self.country:
            toreturn += "{}".format(self.country)
        return toreturn

    def get_address(self):
        """
        Get the address as a single formatted string.

        :rtype: str
        """
        return self.__str__()

    def is_valid(self):
        """
        Determines if this is a valid address. We need at least institution, street, city, and country.

        :return: whether it is valid
        :rtype : bool
        """
        if self.verbose:
            # Report every missing required field, not just the first one.
            if not self.institution:
                sys.stderr.write("Invalid address. No institution found\n")
            if not self.street:
                sys.stderr.write("Invalid address. No street found\n")
            if not self.city:
                sys.stderr.write("Invalid address. No city found\n")
            if not self.country:
                sys.stderr.write("Invalid address. No country found\n")
        if (self.institution and self.street and self.city and self.country):
            return True
        return False
class Author:
    """
    An author is hopefully a person.
    """

    def __init__(self, abbreviation, verbose=False):
        """
        Create a new author.

        :param abbreviation: a two to three letter acronym for the author. This must be unique among all authors
        :type abbreviation: str
        :param verbose: print more output
        :type verbose: bool
        """
        self.abbreviation = abbreviation
        self.orcid = None
        self.lastname = None
        self.lastnamelower = None
        self.firstname = None
        self.firstnamelower = None
        self.middleinitial = None
        self.email = None
        self.primaryaddress = Address()
        self.secondaryaddress = Address()
        self.contribution = None
        self.order = 100  # this is high by default but we overwrite it if we have a value
        self.verbose = verbose

    def __eq__(self, other):
        """
        Two authors are equal if they have the same abbreviation.

        :param other: The other author
        :type other: Author
        :return: If they are equal
        :rtype: bool
        """
        if isinstance(other, Author):
            return self.abbreviation == other.abbreviation
        else:
            return NotImplemented

    def __cmp__(self, other):
        """
        Compare two authors: primarily by their requested order, with a
        case-insensitive name comparison as the tie breaker.

        :param other: The other author
        :type other: Author
        :return: An int greater than zero if we are bigger, less than zero if we are smaller, zero if they are the same
        :rtype: int
        """
        if not isinstance(other, Author):
            return NotImplemented
        if self.order and other.order:
            if self.order < other.order:
                return -10
            elif self.order > other.order:
                return 10
            # equal orders fall through to the name comparison below
        elif self.order:
            return -10
        elif other.order:
            return 10
        # Bug fix: str objects have no __cmp__ method, so compute the
        # comparison sign portably from the lower-cased string forms.
        mine = self.__str__().lower()
        theirs = other.__str__().lower()
        return (mine > theirs) - (mine < theirs)

    def __ne__(self, other):
        """
        Are these not equal?

        :param other: The other author
        :type other: Author
        :return: If they are not equal
        :rtype: bool
        """
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        """
        The hash is based on the author's unique abbreviation, matching
        the field used by __eq__.

        :rtype: int
        """
        return hash(self.abbreviation)

    def __str__(self):
        """
        "Lastname, Firstname M." rendering of the author's name.

        :rtype: str
        """
        toreturn = "{}, {}".format(self.lastname, self.firstname)
        if self.middleinitial:
            toreturn = "{} {}.".format(toreturn, self.middleinitial)
        return toreturn

    def get_name(self):
        """
        Get the author's name.

        :return: the formatted name
        :rtype: str
        """
        return self.__str__()

    def get_primary_address(self):
        """
        Return the primary address as a string, or None if it is not valid.

        :return: the formatted address or None
        :rtype: str
        """
        # Bug fix: Address.is_valid() takes no arguments; propagate our
        # verbosity flag onto the address instead of passing it in.
        self.primaryaddress.verbose = self.verbose
        if not self.primaryaddress.is_valid():
            if self.verbose:
                sys.stderr.write("Primary address not valid\n")
            return None
        return self.primaryaddress.__str__()

    def get_secondary_address(self):
        """
        Return the secondary address as a string, or None if it is not valid.

        :return: the formatted address or None
        :rtype: str
        """
        self.secondaryaddress.verbose = self.verbose
        if not self.secondaryaddress.is_valid():
            if self.verbose:
                # Bug fix: this message previously said "Primary"
                sys.stderr.write("Secondary address not valid\n")
            return None
        return self.secondaryaddress.__str__()

    def is_valid(self):
        """
        Valid authors have firstname, lastname, a valid primary address, and contribution.

        :return: whether the author record is complete
        :rtype: bool
        """
        self.primaryaddress.verbose = self.verbose
        self.secondaryaddress.verbose = self.verbose
        if self.verbose:
            if not self.firstname:
                sys.stderr.write("No first name for {}\n".format(self.abbreviation))
            if not self.lastname:
                sys.stderr.write("No last name for {}\n".format(self.abbreviation))
            if not self.contribution:
                sys.stderr.write("No contribution for {}\n".format(self.abbreviation))
        if (self.firstname and self.lastname and self.contribution and self.primaryaddress.is_valid()):
            return True
        return False
| |
from pyowm.webapi25.location import Location
from pyowm.abstractions.decorators import deprecated
from pkg_resources import resource_stream
"""
Module containing a registry with lookup methods for OWM-provided city IDs
"""
class CityIDRegistry:
    """Registry offering lookup of OWM-provided city IDs by city name.

    City data lives in four bundled text files partitioned by the first
    letter of the city name; every line holds comma-separated fields:
    toponym, city ID, two coordinate fields and a 2-char country code.
    """

    # matching policies usable by ids_for/locations_for
    MATCHINGS = {
        'exact': lambda city_name, toponym: city_name == toponym,
        'nocase': lambda city_name, toponym: city_name.lower() == toponym.lower(),
        'like': lambda city_name, toponym: city_name.lower() in toponym.lower()
    }

    def __init__(self, filepath_regex):
        """
        Initialise a registry that can be used to lookup info about cities.

        :param filepath_regex: Python format string that gives the path of
            the files that store the city IDs information,
            eg: ``folder1/folder2/%02d-%02d.txt``
        :type filepath_regex: str
        :returns: a *CityIDRegistry* instance
        """
        self._filepath_regex = filepath_regex

    @deprecated(will_be='removed', on_version=(3, 0, 0))
    def id_for(self, city_name):
        """
        Return the long ID of the first city whose toponym equals
        ``city_name`` (case insensitive), or ``None`` when nothing matches.

        .. deprecated:: 3.0.0
           Use :func:`ids_for` instead.

        :param city_name: the city name whose ID is looked up
        :type city_name: str
        :returns: a long or ``None`` if the lookup fails
        """
        matched = self._lookup_line_by_city_name(city_name)
        if matched is None:
            return None
        return int(matched.split(",")[1])

    def ids_for(self, city_name, country=None, matching='nocase'):
        """
        Return ``(ID, toponym, country)`` tuples for every city matching
        ``city_name`` according to the ``matching`` policy, optionally
        restricted to the cities of one ``country``.

        :param city_name: the city name to look up
        :param country: optional two character str restricting the search
            to one country; ``None`` means: search in all countries
        :param matching: str among `exact` (literal, case-sensitive
            matching), `nocase` (literal, case-insensitive matching) and
            `like` (case-insensitive substring matching); defaults to
            `nocase`
        :raises ValueError: if the value for `matching` is unknown or
            ``country`` is not a 2-char string
        :return: list of tuples
        """
        if not city_name:
            return []
        if matching not in self.MATCHINGS:
            raise ValueError("Unknown type of matching: "
                             "allowed values are %s" % ", ".join(self.MATCHINGS))
        if country is not None and len(country) != 2:
            raise ValueError("Country must be a 2-char string")
        return [(int(tokens[1]), tokens[0], tokens[4]) for tokens
                in self._filter_matching_lines(city_name, country, matching)]

    @deprecated(will_be='removed', on_version=(3, 0, 0))
    def location_for(self, city_name):
        """
        Return the *Location* of the first city whose toponym equals
        ``city_name`` (case insensitive), or ``None`` when nothing matches.

        :param city_name: the city name you want a *Location* for
        :type city_name: str
        :returns: a *Location* instance or ``None`` if the lookup fails

        .. deprecated:: 3.0.0
           Use :func:`locations_for` instead.
        """
        matched = self._lookup_line_by_city_name(city_name)
        if matched is None:
            return None
        parts = matched.split(",")
        return Location(parts[0], float(parts[3]), float(parts[2]),
                        int(parts[1]), parts[4])

    def locations_for(self, city_name, country=None, matching='nocase'):
        """
        Return `webapi25.location.Location` objects for every city matching
        ``city_name`` according to the ``matching`` policy, optionally
        restricted to the cities of one ``country``.

        :param city_name: the city name to look up
        :param country: optional two character str restricting the search
            to one country; ``None`` means: search in all countries
        :param matching: str among `exact` (literal, case-sensitive
            matching), `nocase` (literal, case-insensitive matching) and
            `like` (case-insensitive substring matching); defaults to
            `nocase`
        :raises ValueError: if the value for `matching` is unknown or
            ``country`` is not a 2-char string
        :return: list of `webapi25.location.Location` objects
        """
        if not city_name:
            return []
        if matching not in self.MATCHINGS:
            raise ValueError("Unknown type of matching: "
                             "allowed values are %s" % ", ".join(self.MATCHINGS))
        if country is not None and len(country) != 2:
            raise ValueError("Country must be a 2-char string")
        return [Location(tokens[0], float(tokens[3]), float(tokens[2]),
                         int(tokens[1]), tokens[4]) for tokens
                in self._filter_matching_lines(city_name, country, matching)]

    # helper functions

    def _filter_matching_lines(self, city_name, country, matching):
        """
        Collect the split tokens of every data line whose toponym (and,
        optionally, country) matches under the given matching policy.

        :param city_name: str
        :param country: str or `None`
        :param matching: str
        :return: list of token lists
        """
        # 'like' has to scan every data file; the other policies know the
        # single file that can contain the name
        if matching == 'like':
            raw_lines = self._get_all_lines()
        else:
            raw_lines = self._get_lines(self._assess_subfile_from(city_name))
        selected = []
        for raw in raw_lines:
            tokens = raw.strip().split(",")
            # city names occasionally contain an inner comma themselves:
            # re-join the first two tokens in that case
            if len(tokens) == 6:
                tokens = [tokens[0] + ',' + tokens[1]] + tokens[2:]
            if country is not None and tokens[4] != country:
                continue
            if self._city_name_matches(city_name, tokens[0], matching):
                selected.append(tokens)
        return selected

    def _city_name_matches(self, city_name, toponym, matching):
        # delegate to the configured matching predicate
        return self.MATCHINGS[matching](city_name, toponym)

    def _lookup_line_by_city_name(self, city_name):
        # case-insensitive exact lookup in the one relevant subfile
        subfile = self._assess_subfile_from(city_name)
        return self._match_line(city_name, self._get_lines(subfile))

    def _assess_subfile_from(self, city_name):
        # the data files are partitioned by initial letter:
        # a-f, g-l, m-r, s-z
        initial = ord(city_name.lower()[0])
        for low, high in ((97, 102), (103, 108), (109, 114), (115, 122)):
            if low <= initial <= high:
                return self._filepath_regex % (low, high)
        raise ValueError('Error: city name must start with a letter')

    def _get_lines(self, filename):
        # read a bundled data file, decoding bytes to text when needed
        with resource_stream(__name__, filename) as f:
            content = f.readlines()
        if type(content[0]) is bytes:
            content = map(lambda line: line.decode("utf-8"), content)
        return content

    def _get_all_lines(self):
        # concatenate the lines of all four available city ID files
        collected = list()
        for representative in ('a', 'g', 'm', 's'):
            collected.extend(
                self._get_lines(self._assess_subfile_from(representative)))
        return collected

    def _match_line(self, city_name, lines):
        """
        Return the first line whose toponym equals ``city_name``
        (case insensitive), stripped, or ``None`` when nothing matches.

        :param city_name: str
        :param lines: list of str
        :return: str
        """
        target = city_name.lower()
        for candidate in lines:
            if candidate.split(',')[0].lower() == target:
                return candidate.strip()
        return None

    def __repr__(self):
        return "<%s.%s - filepath_regex=%s>" % (
            __name__, self.__class__.__name__, self._filepath_regex)
| |
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Views for Groups.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from django import forms
from django import http
from django.utils.translation import ugettext
from soc.logic import cleaning
from soc.logic import dicts
from soc.logic.models import user as user_logic
from soc.views.helper import decorators
from soc.views.helper import lists as list_helper
from soc.views.helper import redirects
from soc.views.helper import responses
from soc.views.helper import widgets
from soc.views.models import presence
from soc.views.models import document as document_view
from soc.views.models.request import view as request_view
from soc.views.sitemap import sidebar
import soc.views.helper
class View(presence.View):
  """View methods for the Group model.

  Extends presence.View with group-specific views: creating a group from
  an accepted group application (applicant), listing a group's incoming
  requests and outstanding invites (listRequests), and listing the roles
  within a group (listRoles).
  """
  def __init__(self, params=None):
    """Defines the fields and methods required for the base View class
    to provide the user with list, public, create, edit and delete views.

    Params:
      params: a dict with params for this View
    """
    new_params = {}
    # these fields are either filled in programmatically (founder) or
    # managed elsewhere, so exclude them from the generated forms
    new_params['extra_dynaexclude'] = ['founder', 'home', 'tos',
        'member_template', 'status']
    # show the founder on the edit form, but never allow editing it
    new_params['edit_extra_dynaproperties'] = {
        'founded_by': forms.CharField(widget=widgets.ReadOnlyInput(),
            required=False),
        }
    #set the extra_django_patterns and include the one from params
    patterns = params.get('extra_django_patterns', [])
    patterns += [
        (r'^%(url_name)s/(?P<access_type>list_requests)/%(key_fields)s$',
        'soc.views.models.%(module_name)s.list_requests',
        'List of requests for %(name)s'),
        (r'^%(url_name)s/(?P<access_type>list_roles)/%(key_fields)s$',
        'soc.views.models.%(module_name)s.list_roles',
        'List of roles for %(name)s')]
    if params.get('group_applicant_url'):
      # add the applicant pattern
      patterns += [
          (r'^%(url_name)s/(?P<access_type>applicant)/%(key_fields)s$',
          'soc.views.models.%(module_name)s.applicant',
          "%(name)s Creation via Accepted Application"),]
    new_params['extra_django_patterns'] = patterns
    # TODO(tlarsen): Add support for Django style template lookup
    new_params['public_template'] = 'soc/group/public.html'
    new_params['list_row'] = 'soc/group/list/row.html'
    new_params['list_heading'] = 'soc/group/list/heading.html'
    # validate the phone number and normalize all contact/shipping
    # address fields to plain ASCII on the create form
    new_params['create_extra_dynaproperties'] = {
        'clean_phone': cleaning.clean_phone_number('phone'),
        'clean_contact_street': cleaning.clean_ascii_only('contact_street'),
        'clean_contact_city': cleaning.clean_ascii_only('contact_city'),
        'clean_contact_state': cleaning.clean_ascii_only('contact_state'),
        'clean_contact_postalcode': cleaning.clean_ascii_only(
            'contact_postalcode'),
        'clean_shipping_street': cleaning.clean_ascii_only('shipping_street'),
        'clean_shipping_city': cleaning.clean_ascii_only('shipping_city'),
        'clean_shipping_state': cleaning.clean_ascii_only('shipping_state'),
        'clean_shipping_postalcode': cleaning.clean_ascii_only(
            'shipping_postalcode'),
        }
    # mapping of role name -> role view, filled in via registerRole()
    new_params['role_views'] = {}
    params = dicts.merge(params, new_params, sub_merge=True)
    super(View, self).__init__(params=params)
  def _editGet(self, request, entity, form):
    """See base.View._editGet().
    """
    # fill in the founded_by with data from the entity
    form.fields['founded_by'].initial = entity.founder.name
    super(View, self)._editGet(request, entity, form)
  def _editPost(self, request, entity, fields):
    """See base.View._editPost().
    """
    if not entity:
      # only if we are creating a new entity we should fill in founder
      user = user_logic.logic.getForCurrentAccount()
      fields['founder'] = user
    super(View, self)._editPost(request, entity, fields)
  @decorators.merge_params
  @decorators.check_access
  def applicant(self, request, access_type,
                page_name=None, params=None, **kwargs):
    """Handles the creation of a group via an approved group application.

    Dispatches to applicantPost for POST requests and applicantGet
    otherwise.

    Args:
      request: the standard Django HTTP request object
      access_type : the name of the access type which should be checked
      page_name: the page name displayed in templates as page and header title
      params: a dict with params for this View
      kwargs: the Key Fields for the specified entity
    """
    # get the context for this webpage
    context = responses.getUniversalContext(request)
    responses.useJavaScript(context, params['js_uses_all'])
    context['page_name'] = page_name
    if request.method == 'POST':
      return self.applicantPost(request, context, params, **kwargs)
    else:
      # request.method == 'GET'
      return self.applicantGet(request, context, params, **kwargs)
  def applicantGet(self, request, context, params, **kwargs):
    """Handles the GET request concerning the creation of a group via an
    approved group application.

    Args:
      request: the standard Django HTTP request object
      context: dictionary containing the context for this view
      params: a dict with params for this View
      kwargs: the Key Fields for the specified entity
    """
    # find the application
    application_logic = params['application_logic']
    # NOTE(review): getFromKeyFields may return None; presumably the
    # check_access decorator on applicant() guarantees the application
    # exists -- confirm
    application = application_logic.logic.getFromKeyFields(kwargs)
    # extract the application fields
    field_names = application.properties().keys()
    fields = dict( [(i, getattr(application, i)) for i in field_names] )
    # create the form using the fields from the application as the initial value
    form = params['applicant_create_form'](initial=fields)
    # construct the appropriate response
    return super(View, self)._constructResponse(request, entity=None,
        context=context, form=form, params=params)
  def applicantPost(self, request, context, params, **kwargs):
    """Handles the POST request concerning the creation of a group via an
    approved group application.

    Args:
      request: the standard Django HTTP request object
      context: dictionary containing the context for this view
      params: a dict with params for this View
      kwargs: the Key Fields for the specified entity
    """
    # populate the form using the POST data
    form = params['applicant_create_form'](request.POST)
    if not form.is_valid():
      # return the invalid form response
      return self._constructResponse(request, entity=None, context=context,
          form=form, params=params)
    # collect the cleaned data from the valid form
    key_name, fields = soc.views.helper.forms.collectCleanedFields(form)
    # do post processing
    self._applicantPost(request, context, fields)
    if not key_name:
      key_name = self._logic.getKeyNameFromFields(fields)
    # create the group entity
    self._logic.updateOrCreateFromKeyName(fields, key_name)
    # redirect to notifications list to see the admin invite
    return http.HttpResponseRedirect('/notification/list')
  def _applicantPost(self, request, context, fields):
    """Performs any required processing on the entity to post its edit page.

    Fills in the founder and, when applicable, resolves the scope entity
    from the submitted scope_path.

    Args:
      request: the django request object
      context: the context for the webpage
      fields: the new field values
    """
    # fill in the founder of the group
    user = user_logic.logic.getForCurrentAccount()
    fields['founder'] = user
    # If scope_logic is not defined, this entity has no scope
    if not self._params['scope_logic']:
      return
    # If this entity is unscoped, do not try to retrieve a scope
    if 'scope_path' not in fields:
      return
    scope = self._params['scope_logic'].logic.getFromKeyName(
        fields['scope_path'])
    fields['scope'] = scope
  @decorators.merge_params
  @decorators.check_access
  def listRequests(self, request, access_type,
                   page_name=None, params=None, **kwargs):
    """Gives an overview of all the requests for a specific group.

    Renders three lists on one page: new incoming requests, outstanding
    (group-accepted) invites, and ignored requests.

    Args:
      request: the standard Django HTTP request object
      access_type : the name of the access type which should be checked
      page_name: the page name displayed in templates as page and header title
      params: a dict with params for this View
      kwargs: the Key Fields for the specified entity
    """
    # set the pagename to include the link_id
    page_name = '%s %s' % (page_name, kwargs['link_id'])
    # get the group from the request
    group_logic = params['logic']
    group_entity = group_logic.getFromKeyFields(kwargs)
    role_names = params['role_views'].keys()
    # list all incoming requests
    filter = {
        'scope': group_entity,
        'role': role_names,
        'status': 'new'
    }
    # create the list parameters
    inc_req_params = request_view.getParams()
    # define the list redirect action to the request processing page
    inc_req_params['list_action'] = (redirects.getProcessRequestRedirect, None)
    inc_req_params['list_description'] = ugettext(
        "An overview of the %(name)s's incoming requests." % params)
    # idx distinguishes the three lists rendered on this page
    inc_req_content = list_helper.getListContent(
        request, inc_req_params, filter, idx=0)
    # list all outstanding invites
    filter = {
        'scope': group_entity,
        'role': role_names,
        'status': 'group_accepted'
    }
    # create the list parameters
    out_inv_params = request_view.getParams()
    # define the list redirect action to the request processing page
    out_inv_params['list_action'] = (redirects.getProcessRequestRedirect, None)
    out_inv_params['list_description'] = ugettext(
        "An overview of the %(name)s's outstanding invites." % params)
    out_inv_content = list_helper.getListContent(
        request, out_inv_params, filter, idx=1)
    # list all ignored requests
    filter = {
        'scope': group_entity,
        'role': role_names,
        'status': 'ignored'
    }
    # create the list parameters
    ignored_params = request_view.getParams()
    # define the list redirect action to the request processing page
    ignored_params['list_action'] = (redirects.getProcessRequestRedirect, None)
    ignored_params['list_description'] = ugettext(
        "An overview of the %(name)s's ignored requests." % params)
    ignored_content = list_helper.getListContent(
        request, ignored_params, filter, idx=2)
    contents = [inc_req_content, out_inv_content, ignored_content]
    return self._list(request, params, contents, page_name)
  @decorators.merge_params
  @decorators.check_access
  def listRoles(self, request, access_type,
                page_name=None, params=None, **kwargs):
    """Gives an overview of all the roles in a specific group.

    One list is rendered per registered role view (see registerRole),
    each showing that role's active entities scoped to this group.

    Args:
      request: the standard Django HTTP request object
      access_type : the name of the access type which should be checked
      page_name: the page name displayed in templates as page and header title
      params: a dict with params for this View
      kwargs: the Key Fields for the specified entity
    """
    # set the pagename to include the link_id
    page_name = '%s %s' % (page_name, kwargs['link_id'])
    # get the group from the request
    group_logic = params['logic']
    group_entity = group_logic.getFromKeyFields(kwargs)
    # create the filter
    filter = {
        'scope' : group_entity,
        'status': 'active'
    }
    role_views = params['role_views']
    contents = []
    index = 0
    # for each role we create a separate list
    for role_name in role_views.keys():
      # create the list parameters
      list_params = role_views[role_name].getParams().copy()
      list_params['list_action'] = (redirects.getManageRedirect, list_params)
      list_params['list_description'] = ugettext(
          "An overview of the %s for this %s." % (
          list_params['name_plural'], params['name']))
      new_list_content = list_helper.getListContent(
          request, list_params, filter, idx=index)
      contents += [new_list_content]
      index += 1
    # call the _list method from base.View to show the list
    return self._list(request, params, contents, page_name)
  def registerRole(self, role_name, role_view):
    """Adds a role to the role_views param.

    Args:
      role_name: The name of the role that needs to be added
      role_view: The view that needs to be added to role_views.
    """
    role_views = self._params['role_views']
    role_views[role_name] = role_view
  def getExtraMenus(self, id, user, params=None):
    """Returns the extra menu's for this view.

    A menu item is generated for each group that the user has an active
    role for. The public page for each group is added as menu item,
    as well as all public documents for that group.

    Args:
      params: a dict with params for this View.
    """
    params = dicts.merge(params, self._params)
    # set fields to match every active role this user has
    fields = {'user': user,
              'status': 'active'}
    # get the role views and start filling group_entities
    role_views = self._params['role_views']
    role_descriptions = {}
    # collect, per group, every active role this user holds in it
    for role_name in role_views.keys():
      role_view = role_views[role_name]
      role_view_params = role_view.getParams()
      role_logic = role_view_params['logic']
      roles = role_logic.getForFields(fields)
      for role in roles:
        group_key_name = role.scope.key().id_or_name()
        existing_role_descriptions = role_descriptions.get(group_key_name)
        if existing_role_descriptions:
          # add this description to existing roles
          existing_roles = existing_role_descriptions['roles']
          existing_roles[role_name] = role
        else:
          # create a description of this role
          role_description = {'roles': {role_name: role},
                              'group': role.scope}
          # add the new entry to our dictionary
          role_descriptions[group_key_name] = role_description
    # get the document view params to add the group's documents to the menu
    doc_params = document_view.view.getParams()
    menus = []
    # for each role description in our collection
    for role_description in role_descriptions.itervalues():
      #start with an empty menu
      menu = {}
      # get the group for this role description
      group_entity = role_description['group']
      # set the menu header name
      menu['heading'] = group_entity.short_name
      # get the documents for this group entity
      doc_items = document_view.view.getMenusForScope(group_entity, params)
      doc_items = sidebar.getSidebarMenu(id, user, doc_items,
                                         params=doc_params)
      # get the group specific items
      group_items = self._getExtraMenuItems(role_description, params)
      group_items = sidebar.getSidebarMenu(id, user, group_items,
                                           params=self._params)
      # add the items together
      menu['items'] = doc_items + group_items
      menu['group'] = params['name_plural']
      # append this as a new menu
      menus.append(menu)
    return menus
  def _getExtraMenuItems(self, role_description, params=None):
    """Used to implement group instance specific needs for the side menu.

    Subclasses override this hook; the default adds no extra items.

    Args:
      role_description : dict containing all the roles which is a dict of
                         name and the role entity to which it belongs. Also
                         group contains the group entity to which these roles
                         belong.
      params: a dict with params for this View.
    """
    return []
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ClustersOperations(object):
"""ClustersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.eventhub.v2018_01_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        """Store the pipeline client, configuration and (de)serializers
        shared by every operation in this group.
        """
        # client that executes HTTP requests through the pipeline
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list_available_cluster_region(
        self,
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.AvailableClustersList"
        """List the quantity of available pre-provisioned Event Hubs Clusters, indexed by Azure region.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AvailableClustersList, or the result of cls(response)
        :rtype: ~azure.mgmt.eventhub.v2018_01_01_preview.models.AvailableClustersList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailableClustersList"]
        # map well-known error status codes to typed exceptions; callers
        # may extend or override this via the 'error_map' kwarg
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-01-01-preview"
        accept = "application/json"
        # Construct URL
        url = self.list_available_cluster_region.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # send the GET request synchronously through the client pipeline
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('AvailableClustersList', pipeline_response)
        # when a custom 'cls' callable was supplied, let it shape the result
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list_available_cluster_region.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.EventHub/availableClusterRegions'} # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.ClusterListResult"]
        """Lists the available Event Hubs Clusters within an ARM resource group.

        :param resource_group_name: Name of the resource group within the azure subscription.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ClusterListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.eventhub.v2018_01_01_preview.models.ClusterListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterListResult"]
        # map well-known error status codes to typed exceptions; callers
        # may extend or override this via the 'error_map' kwarg
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-01-01-preview"
        accept = "application/json"
        # Builds the GET request for either the first page (full URL with
        # path and query parameters) or a subsequent page (opaque next_link
        # returned by the service).
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Deserializes one page of results and returns the continuation
        # link (or None on the last page) plus an iterator over the items.
        def extract_data(pipeline_response):
            deserialized = self._deserialize('ClusterListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Runs one page request through the pipeline, raising a typed
        # error for any non-200 response.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        # ItemPaged lazily drives get_next/extract_data as the caller iterates
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters'} # type: ignore
def get(
    self,
    resource_group_name,  # type: str
    cluster_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.Cluster"
    """Gets the resource description of the specified Event Hubs Cluster.

    :param resource_group_name: Name of the resource group within the azure subscription.
    :type resource_group_name: str
    :param cluster_name: The name of the Event Hubs Cluster.
    :type cluster_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Cluster, or the result of cls(response)
    :rtype: ~azure.mgmt.eventhub.v2018_01_01_preview.models.Cluster
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Cluster"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-01-01-preview"
    accept = "application/json"

    # Expand the URL template with the validated path arguments.
    url_args = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=50, min_length=6),
    }
    url = self._client.format_url(self.get.metadata['url'], **url_args)  # type: ignore

    # Query string and headers.
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('Cluster', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}'}  # type: ignore
def _create_or_update_initial(
    self,
    resource_group_name,  # type: str
    cluster_name,  # type: str
    parameters,  # type: "_models.Cluster"
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.Cluster"]
    """Issue the initial PUT of the create-or-update long-running operation.

    Returns the deserialized Cluster on 200/201 and None on 202 (accepted,
    no body yet); ``begin_create_or_update`` polls the operation to completion.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.Cluster"]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    # Callers may extend/override the default status-code -> exception mapping.
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-01-01-preview"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=50, min_length=6),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the Cluster model as the PUT request body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'Cluster')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201, 202]:
        # Known codes raise a mapped exception; everything else raises a
        # generic HttpResponseError carrying the deserialized error body.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # 202 Accepted carries no resource body, so deserialized stays None.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('Cluster', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('Cluster', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}'}  # type: ignore
def begin_create_or_update(
    self,
    resource_group_name,  # type: str
    cluster_name,  # type: str
    parameters,  # type: "_models.Cluster"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.Cluster"]
    """Creates or updates an instance of an Event Hubs Cluster.

    :param resource_group_name: Name of the resource group within the azure subscription.
    :type resource_group_name: str
    :param cluster_name: The name of the Event Hubs Cluster.
    :type cluster_name: str
    :param parameters: Parameters for creating a eventhub cluster resource.
    :type parameters: ~azure.mgmt.eventhub.v2018_01_01_preview.models.Cluster
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either Cluster or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.eventhub.v2018_01_01_preview.models.Cluster]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Cluster"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial PUT. The cls lambda preserves the
        # raw pipeline response so the poller can read the LRO headers.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            cluster_name=cluster_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial request and must not be
    # forwarded to the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response once the operation completes.
        deserialized = self._deserialize('Cluster', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=50, min_length=6),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller; no initial request is made.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}'}  # type: ignore
def _update_initial(
    self,
    resource_group_name,  # type: str
    cluster_name,  # type: str
    parameters,  # type: "_models.Cluster"
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.Cluster"]
    """Issue the initial PATCH of the update long-running operation.

    Returns the deserialized Cluster on 200/201 and None on 202 (accepted,
    no body yet); ``begin_update`` polls the operation to completion.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.Cluster"]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    # Callers may extend/override the default status-code -> exception mapping.
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-01-01-preview"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=50, min_length=6),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the Cluster model as the PATCH request body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'Cluster')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201, 202]:
        # Known codes raise a mapped exception; everything else raises a
        # generic HttpResponseError carrying the deserialized error body.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # 202 Accepted carries no resource body, so deserialized stays None.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('Cluster', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('Cluster', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}'}  # type: ignore
def begin_update(
    self,
    resource_group_name,  # type: str
    cluster_name,  # type: str
    parameters,  # type: "_models.Cluster"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.Cluster"]
    """Modifies mutable properties on the Event Hubs Cluster. This operation is idempotent.

    :param resource_group_name: Name of the resource group within the azure subscription.
    :type resource_group_name: str
    :param cluster_name: The name of the Event Hubs Cluster.
    :type cluster_name: str
    :param parameters: The properties of the Event Hubs Cluster which should be updated.
    :type parameters: ~azure.mgmt.eventhub.v2018_01_01_preview.models.Cluster
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either Cluster or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.eventhub.v2018_01_01_preview.models.Cluster]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Cluster"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial PATCH. The cls lambda preserves
        # the raw pipeline response so the poller can read the LRO headers.
        raw_result = self._update_initial(
            resource_group_name=resource_group_name,
            cluster_name=cluster_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial request and must not be
    # forwarded to the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response once the operation completes.
        deserialized = self._deserialize('Cluster', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=50, min_length=6),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller; no initial request is made.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}'}  # type: ignore
def _delete_initial(
    self,
    resource_group_name,  # type: str
    cluster_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Issue the initial DELETE of the delete long-running operation."""
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-01-01-preview"
    accept = "application/json"

    # Expand the URL template with the validated path arguments.
    url_args = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=50, min_length=6),
    }
    url = self._client.format_url(self._delete_initial.metadata['url'], **url_args)  # type: ignore

    # Query string and headers.
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # DELETE has no response body; only invoke the custom callback if given.
    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}'}  # type: ignore
def begin_delete(
    self,
    resource_group_name,  # type: str
    cluster_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes an existing Event Hubs Cluster. This operation is idempotent.

    :param resource_group_name: Name of the resource group within the azure subscription.
    :type resource_group_name: str
    :param cluster_name: The name of the Event Hubs Cluster.
    :type cluster_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial DELETE. The cls lambda preserves
        # the raw pipeline response so the poller can read the LRO headers.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            cluster_name=cluster_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial request and must not be
    # forwarded to the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Delete returns no body; only invoke the custom callback if provided.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=50, min_length=6),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller; no initial request is made.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}'}  # type: ignore
def list_namespaces(
    self,
    resource_group_name,  # type: str
    cluster_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.EHNamespaceIdListResult"
    """List all Event Hubs Namespace IDs in an Event Hubs Dedicated Cluster.

    :param resource_group_name: Name of the resource group within the azure subscription.
    :type resource_group_name: str
    :param cluster_name: The name of the Event Hubs Cluster.
    :type cluster_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: EHNamespaceIdListResult, or the result of cls(response)
    :rtype: ~azure.mgmt.eventhub.v2018_01_01_preview.models.EHNamespaceIdListResult
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.EHNamespaceIdListResult"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-01-01-preview"
    accept = "application/json"

    # Expand the URL template with the validated path arguments.
    template_args = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=50, min_length=6),
    }
    url = self._client.format_url(self.list_namespaces.metadata['url'], **template_args)  # type: ignore

    # Query string and headers.
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('EHNamespaceIdListResult', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
list_namespaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}/namespaces'}  # type: ignore
| |
"""Generic lvsm CommandPrompt"""
import cmd
import logging
import shutil
import socket
import subprocess
import sys
import tempfile
import utils
import termcolor
import firewall
import lvs
logger = logging.getLogger('lvsm')
class CommandPrompt(cmd.Cmd):
"""
Generic Class for all command prompts used in lvsm. All prompts should
inherit from CommandPrompt and not from cmd.Cmd directly.
"""
settings = {'numeric': False,
'color': True,
'commands': False}
variables = ['numeric', 'color', 'commands']
doc_header = "Commands (type help <topic>):"
def __init__(self, config, rawprompt='', stdin=sys.stdin, stdout=sys.stdout):
# super(CommandPrompt, self).__init__()
cmd.Cmd.__init__(self)
self.config = config
# Build args dict to pass to director object
args = {'keepalived-mib': self.config['keepalived-mib'],
'snmp_community': self.config['snmp_community'],
'snmp_host': self.config['snmp_host'],
'snmp_user': self.config['snmp_user'],
'snmp_password': self.config['snmp_password'],
'cache_dir': self.config['cache_dir']
}
self.director = lvs.Director(self.config['director'],
self.config['ipvsadm'],
self.config['director_config'],
self.config['director_cmd'],
self.config['nodes'],
args)
self.rawprompt = rawprompt
# disable color if the terminal doesn't support it
if not sys.stdout.isatty():
self.settings['color'] = False
if self.settings['color']:
c = "red"
a = ["bold"]
else:
c = None
a = None
self.prompt = termcolor.colored(self.rawprompt, color=c,
attrs=a)
if logger.getEffectiveLevel() < 30:
self.settings['commands'] = True
def emptyline(self):
"""Override the default emptyline and return a blank line."""
pass
def postcmd(self, stop, line):
"""Hook method executed just after a command dispatch is finished."""
# check to see if the prompt should be colorized
if self.settings['color']:
self.prompt = termcolor.colored(self.rawprompt,
color="red",
attrs=["bold"])
else:
self.prompt = self.rawprompt
return stop
def print_topics(self, header, cmds, cmdlen, maxcol):
if cmds:
self.stdout.write("%s\n"%str(header))
if self.ruler:
self.stdout.write("%s\n"%str(self.ruler * len(header)))
for cmd in cmds:
self.stdout.write(" %s\n" % cmd)
self.stdout.write("\n")
def do_exit(self, line):
"""Exit from lvsm shell."""
modified = list()
if self.config['version_control'] in ['git', 'svn']:
import sourcecontrol
args = { 'git_remote': self.config['git_remote'],
'git_branch': self.config['git_branch'] }
scm = sourcecontrol.SourceControl(self.config['version_control'], args)
# check to see if the files have changed
if (self.config['director_config'] and
scm.modified(self.config['director_config'])):
modified.append(self.config['director_config'])
if (self.config['firewall_config'] and
scm.modified(self.config['firewall_config'])):
modified.append(self.config['firewall_config'])
# If any files are modified ask user if they want to quit
if modified:
print "The following config file(s) were not committed:"
for filename in modified:
print filename
print
while True:
answer = raw_input("Do you want to quit? (y/n) ")
if answer.lower() == "y":
print "goodbye."
sys.exit(0)
elif answer.lower() == "n":
break
if not modified:
print "goodbye."
sys.exit(0)
def do_quit(self, line):
"""Exit from lvsm shell."""
self.do_exit(line)
def do_end(self, line):
"""Return to previous context."""
return True
def do_set(self, line):
"""Set or display different variables."""
if not line:
print
print "Shell Settings"
print "=============="
for key, value in self.settings.items():
print str(key) + " : " + str(value)
print
else:
tokens = line.split()
if len(tokens) == 2:
if tokens[0] == "numeric":
if tokens[1] == "on":
self.settings['numeric'] = True
elif tokens[1] == "off":
self.settings['numeric'] = False
else:
print "*** Syntax: set numeric on|off"
elif tokens[0] == "color":
if tokens[1] == "on":
self.settings['color'] = True
self.prompt = termcolor.colored(self.rawprompt,
color="red",
attrs=["bold"])
elif tokens[1] == "off":
self.settings['color'] = False
self.prompt = self.rawprompt
else:
print "*** Syntax: set color on|off"
elif tokens[0] == "commands":
if tokens[1] == "on":
self.settings['commands'] = True
# logging.INFO = 20
if logger.getEffectiveLevel() > 20:
logger.setLevel(logging.INFO)
elif tokens[1] == "off":
# logging.INFO = 20
# logging.DEBUG = 10
if logger.getEffectiveLevel() >= 20:
logger.setLevel(logging.WARNING)
self.settings['commands'] = False
else:
logger.error("Running in DEBUG mode, cannot disable commands display.")
else:
print "*** Syntax: set numeric on|off"
else:
self.help_set()
else:
self.help_set()
def help_help(self):
print
print "show help"
def help_set(self):
print "Set or display different variables."
print ""
print "syntax: set [<variable> <value>]"
print ""
print "<variable> can be one of:"
print "\tcolor on|off Toggle color display ON/OFF"
print "\tcommands on|off Toggle running commands display ON/OFF"
print "\tnumeric on|off Toggle numeric ipvsadm display ON/OFF"
print ""
def complete_set(self, text, line, begidx, endidx):
"""Tab completion for the set command."""
if len(line) < 12:
if not text:
completions = self.variables[:]
else:
completions = [m for m in self.variables if m.startswith(text)]
else:
completions = []
return completions
class LivePrompt(CommandPrompt):
"""
Class for the live command prompt. This is the main landing point
and is called from __main__.py
"""
def __init__(self, config, rawprompt='', stdin=sys.stdin, stdout=sys.stdout):
# super(CommandPrompt, self).__init__()
CommandPrompt.__init__(self, config, rawprompt="lvsm(live)# ")
self.modules = ['director', 'firewall', 'nat', 'virtual', 'real']
self.protocols = ['tcp', 'udp', 'fwm']
self.firewall = firewall.Firewall(self.config['iptables'])
def do_configure(self, line):
"""Enter configuration level."""
commands = line.split()
# configshell = prompts.configure.ConfigurePrompt(self.config)
configshell = ConfigurePrompt(self.config)
if not line:
configshell.cmdloop()
else:
configshell.onecmd(' '.join(commands[0:]))
def do_virtual(self, line):
"""
\rVirtual IP level.
\rLevel providing information on virtual IPs
"""
commands = line.split()
# Check for the director before instantiating the right class
if self.config['director'] == 'ldirectord':
from lvsm.modules import ldirectordprompts
virtualshell = ldirectordprompts.VirtualPrompt(self.config)
elif self.config['director'] == 'keepalived':
from lvsm.modules import keepalivedprompts
virtualshell = keepalivedprompts.VirtualPrompt(self.config)
else:
virtualshell = VirtualPrompt(self.config)
if not line:
virtualshell.cmdloop()
else:
virtualshell.onecmd(' '.join(commands[0:]))
def do_real(self, line):
"""
\rReal server level.
\rProvides information on real servers.
"""
commands = line.split()
# Check for the director before instantiating the right class
if self.config['director'] == 'ldirectord':
from lvsm.modules import ldirectordprompts
realshell = ldirectordprompts.RealPrompt(self.config)
elif self.config['director'] == 'keepalived':
from lvsm.modules import keepalivedprompts
realshell = keepalivedprompts.RealPrompt(self.config)
else:
realshell = RealPrompt(self.config)
if not line:
realshell.cmdloop()
else:
realshell.onecmd(' '.join(commands[0:]))
def do_firewall(self, line):
"""
\rFirewall level.
\riptables information is available at this level.
"""
commands = line.split()
fwshell = FirewallPrompt(self.config)
if not line:
fwshell.cmdloop()
else:
fwshell.onecmd(' '.join(commands[0:]))
def do_restart(self, line):
"""Restart the director or firewall module."""
if line == "director":
if self.config['director_cmd']:
print "restaring director"
try:
subprocess.call(self.config['director_cmd'], shell=True)
except OSError as e:
logger.error("problem while restaring director - %s" % e.strerror)
else:
logger.error("'director_cmd' not defined in lvsm configuration!")
elif line == "firewall":
if self.config['firewall_cmd']:
print "restarting firewall"
try:
subprocess.call(self.config['firewall_cmd'], shell=True)
except OSError as e:
logger.error("problem restaring firewall - %s" % e.strerror)
else:
logger.error("'firewall_cmd' not defined in lvsm configuration!")
else:
print "syntax: restart firewall|director"
def do_version(self, line):
"""
\rDisplay version information about modules
"""
args = [self.config['ipvsadm'], '--version']
ipvsadm = utils.check_output(args)
header = ["", "Linux Virtual Server",
"===================="]
print '\n'.join(header)
print ipvsadm
print
header = ["Director",
"========"]
print '\n'.join(header)
if not self.config['director_bin'] :
director = 'director binary not defined. Unable to get version!'
else:
args = [self.config['director_bin'], '--version']
director = utils.check_output(args)
print director
print
args = [self.config['iptables'], '--version']
iptables = utils.check_output(args)
header = ["Packet Filtering",
"================"]
print '\n'.join(header)
print iptables
print
def help_configure(self):
print ""
print "The configuration level."
print "Items related to configuration of IPVS and iptables are available here."
print ""
def help_restart(self):
print "Restart the given module."
print ""
print "Module must be one of director or firewall."
print ""
print "syntax: restart director|firewall"
def complete_restart(self, text, line, begix, endidx):
"""Tab completion for restart command."""
if len(line) < 17:
if not text:
completions = self.modules[:]
else:
completions = [m for m in self.modules if m.startswith(text)]
else:
completions = []
return completions
class ConfigurePrompt(CommandPrompt):
"""
Configure prompt class. Handles commands for manipulating configuration
items in the various plugins.
"""
def __init__(self, config, rawprompt='', stdin=sys.stdin, stdout=sys.stdout):
CommandPrompt.__init__(self, config, rawprompt="lvsm(configure)# ")
# List of moduels used in autocomplete function
self.modules = ['director', 'firewall']
def svn_sync(self, filename, username, password):
"""Commit changed configs to svn and do update on remote node."""
# commit config locally
args = ['svn',
'commit',
'--username',
username,
'--password',
password,
filename]
svn_cmd = ('svn commit --username ' + username +
' --password ' + password + ' ' + filename)
logger.info('Running command : %s' % svn_cmd)
try:
result = subprocess.call(svn_cmd, shell=True)
except OSError as e:
logger.error("Problem with configuration sync - %s" % e.strerror)
# update config on all nodes
n = self.config['nodes']
if n != '':
nodes = n.replace(' ', '').split(',')
else:
nodes = None
try:
hostname = utils.check_output(['hostname', '-s'])
except (OSError, subprocess.CalledProcessError):
hostname = ''
if nodes is not None:
svn_cmd = ('svn update --username ' + username +
' --password ' + password + ' ' + filename)
for node in nodes:
if node != hostname:
args = 'ssh ' + node + ' ' + svn_cmd
logger.info('Running command : %s' % (' '.join(args)))
try:
subprocess.call(args, shell=True)
except OSError as e:
logger.error("Problem with configuration sync - %s" % e.strerror)
def complete_show(self, text, line, begidx, endidx):
"""Tab completion for the show command."""
if len(line) < 14:
if not text:
completions = self.modules[:]
else:
completions = [m for m in self.modules if m.startswith(text)]
else:
completions = []
return completions
def help_show(self):
""
print "Show configuration for an item. The configuration files are defined in lvsm.conf"
print ""
print "<module> can be one of the following"
print "\tdirector the IPVS director config file"
print "\tfirewall the iptables firewall config file"
print ""
def do_show(self, line):
"""Show director or firewall configuration."""
if line == "director" or line == "firewall":
configkey = line + "_config"
if not self.config[configkey]:
logger.error("'%s' not defined in configuration file!" % configkey)
else:
lines = utils.print_file(self.config[configkey])
utils.pager(self.config['pager'], lines)
else:
print "\nsyntax: show <module>\n"
def complete_edit(self, text, line, begidx, endidx):
"""Tab completion for the show command"""
if len(line) < 14:
if not text:
completions = self.modules[:]
else:
completions = [m for m in self.modules if m.startswith(text)]
else:
completions = []
return completions
def help_edit(self):
print ""
print "Edit the configuration of an item. The configuration files are defined in lvsm.conf"
print "syntax: edit <module>"
print ""
print "<module> can be one of the follwoing"
print "\tdirector the IPVS director config file"
print "\tfirewall the iptables firewall config file"
print ""
def do_edit(self, line):
"""Edit the configuration of an item."""
if line == "director":
key = line + "_config"
configfile = self.config[key]
if not configfile:
logger.error("'%s' not defined in config file!" % key)
else:
# make a temp copy of the config
try:
temp = tempfile.NamedTemporaryFile(prefix='keepalived.conf.')
shutil.copyfile(configfile, temp.name)
except IOError as e:
logger.error(e.strerror)
return
while True:
args = "vi " + temp.name
logger.info('Running command : %s' % args)
result = subprocess.call(args, shell=True)
if result != 0:
logger.error("Something happened during the edit of %s" % self.config[key])
try:
template = self.config['template_lang']
except KeyError:
template = None
# If parsing is disabled, skip the large if/else block
if self.config['parse_director_config'].lower() == 'no':
logger.warn('Director parsing disabled.')
logger.warn('To enable it, please activate the \'parse_director_config\' option in lvsm.conf')
shutil.copy(temp.name, configfile)
temp.close()
break
# Parse the config file and verify the changes
# If successful, copy changes back to original file
# If a template language is defined, run it against the config
# before parsing the configuration.
if template:
try:
output = tempfile.NamedTemporaryFile()
logger.info('Running command: %s ' % ' '.join(args))
args = [template, temp.name]
p = subprocess.Popen(args, stdout=output, stderr=subprocess.PIPE)
out, err = p.communicate()
ret = p.wait()
except OSError as e:
logger.error(e)
logger.error("Please fix the above error before editting the config file.")
break
except IOError as e:
logger.error(e)
break
if ret:
logger.error(err)
break
elif self.director.parse_config(output.name):
shutil.copyfile(temp.name, configfile)
temp.close()
break
else:
answer = raw_input("Found a syntax error in your config file, edit again? (y/n) ")
if answer.lower() == 'y':
pass
elif answer.lower() == 'n':
logger.warn("Changes were not saved due to syntax errors.")
break
elif self.director.parse_config(temp.name):
shutil.copyfile(temp.name, configfile)
temp.close()
break
else:
answer = raw_input("Found a syntax error in your config file, edit again? (y/n) ")
if answer.lower() == 'y':
pass
elif answer.lower() == 'n':
logger.warn("Changes were not saved due to syntax errors.")
break
elif line == "firewall":
key = line + "_config"
configfile = self.config[key]
if not configfile:
logger.error("'%s' not defined in config file!" % key)
else:
args = "vi " + configfile
logger.info(str(args))
result = subprocess.call(args, shell=True)
if result != 0:
logger.error("Something happened during the edit of %s" % self.config[key])
else:
print "syntax: edit <module>"
def help_sync(self):
print "Sync all configuration files across the cluster."
print ""
print "syntax: sync"
def do_sync(self, line):
"""Sync all configuration files across the cluster."""
if line:
print "*** Syntax: sync"
else:
if self.config['version_control'] in ['git', 'svn']:
import sourcecontrol
args = { 'git_remote': self.config['git_remote'],
'git_branch': self.config['git_branch'] }
scm = sourcecontrol.SourceControl(self.config['version_control'], args)
# Create a list of nodes to run the update command on
if self.config['nodes'] != '':
nodes = self.config['nodes'].replace(' ', '').split(',')
else:
nodes = None
hostname = socket.gethostname()
# simple variable, to show users that no mods were made
modified = False
# check to see if the files have changed
if (self.config['director_config'] and
scm.modified(self.config['director_config'])):
scm.commit(self.config['director_config'])
modified = True
for node in nodes:
if node != hostname:
scm.update(self.config['director_config'], node)
if (self.config['firewall_config'] and
scm.modified(self.config['firewall_config'])):
scm.commit(self.config['firewall_config'])
modified = True
for node in nodes:
if node != hostname:
scm.update(self.config['director_config'], node)
if not modified:
print "Configurations not modified. No sync necessary."
else:
logger.error("'version_control' not defined correctly in lvsm.conf")
class VirtualPrompt(CommandPrompt):
def __init__(self, config, rawprompt='', stdin=sys.stdin, stdout=sys.stdout):
# Change the word delimiters so that - or . don't cause a new match
try:
import readline
readline.set_completer_delims(' ')
except ImportError:
pass
# super(CommandPrompt, self).__init__()
CommandPrompt.__init__(self, config, rawprompt="lvsm(live)(virtual)# ")
self.modules = ['director', 'firewall', 'nat', 'virtual', 'real']
self.protocols = ['tcp', 'udp', 'fwm']
self.firewall = firewall.Firewall(self.config['iptables'])
def do_status(self,line):
"""
\rDisplay status of all virtual servers
"""
syntax = "*** Syntax: status"
numeric = self.settings['numeric']
color = self.settings['color']
if not line:
d = self.director.show(numeric, color)
d.append('')
utils.pager(self.config['pager'], d)
else:
print syntax
def do_show(self, line):
"""
\rShow status of a virtual server
\rSyntax: show tcp|udp|fwm <vip> <port>
"""
syntax = "*** Syntax: show tcp|udp|fwm <vip> <port>"
commands = line.split()
numeric = self.settings['numeric']
color = self.settings['color']
if len(commands) == 3 or len(commands) == 2:
protocol = commands[0]
vip = commands[1]
if len(commands) == 3:
port = commands[2]
else:
port = None
if protocol in self.protocols:
d = self.director.show_virtual(vip, port, protocol, numeric, color)
f = self.firewall.show_virtual(vip, port, protocol, numeric, color)
utils.pager(self.config['pager'], d + f)
else:
print syntax
else:
print syntax
def complete_show(self, text, line, begidx, endidx):
"""Tab completion for the show command"""
if len(line) < 8:
completions = [p for p in self.protocols if p.startswith(text)]
elif len(line.split()) == 2:
prot = line.split()[1]
virtuals = self.director.get_virtual(prot)
if not text:
completions = virtuals[:]
elif len(line.split()) == 3 and text:
prot = line.split()[1]
virtuals = self.director.get_virtual(prot)
completions = [p for p in virtuals if p.startswith(text)]
return completions
class RealPrompt(CommandPrompt):
def __init__(self, config, rawprompt='', stdin=sys.stdin, stdout=sys.stdout):
# Change the word delimiters so that - or . don't cause a new match
try:
import readline
readline.set_completer_delims(' ')
except ImportError:
pass
# super(CommandPrompt, self).__init__()
CommandPrompt.__init__(self, config, rawprompt="lvsm(live)(real)# ")
self.modules = ['director', 'firewall', 'nat', 'virtual', 'real']
self.protocols = ['tcp', 'udp', 'fwm']
self.firewall = firewall.Firewall(self.config['iptables'])
def do_show(self, line):
"""
\rShow information about a specific real server.
\rsyntax: show <server> [<port>]
"""
syntax = "*** Syntax: show <server> [<port>]"
commands = line.split()
numeric = self.settings['numeric']
color = self.settings['color']
if len(commands) == 2:
host = commands[0]
port = commands[1]
utils.pager(self.config['pager'], self.director.show_real(host, port, numeric, color))
elif len(commands) == 1:
host = commands[0]
port = None
utils.pager(self.config['pager'], self.director.show_real(host, port, numeric, color))
else:
print syntax
def complete_show(self, text, line, begidx, endidx):
"""Tab completion for show command"""
tokens = line.split()
reals = self.director.get_real(protocol='')
if len(tokens) == 1 and not text:
completions = reals[:]
elif len(tokens) == 2 and text:
completions = [r for r in reals if r.startswith(text)]
else:
completions = list()
return completions
# def do_disable(self, line):
# """
# \rDisable real server across VIPs.
# \rsyntax: disable <rip> <port>
# """
# syntax = "*** Syntax: disable <rip> <port>"
# commands = line.split()
# if len(commands) > 2 or len(commands) == 0:
# print syntax
# elif len(commands) <= 2:
# host = commands[0]
# if len(commands) == 1:
# port = ''
# elif len(commands) == 2:
# port = commands[1]
# else:
# print syntax
# return
# # ask for an optional reason for disabling
# reason = raw_input("Reason for disabling [default = None]: ")
# if not self.director.disable(host, port, reason=reason):
# logger.error("Could not disable %s" % host)
# else:
# print syntax
# def do_enable(self, line):
# """
# \rEnable real server across VIPs.
# \rsyntax: enable <rip> <port>
# """
# syntax = "*** Syntax: enable <rip> <port>"
# commands = line.split()
# if len(commands) > 2 or len(commands) == 0:
# print syntax
# elif len(commands) <= 2:
# host = commands[0]
# if len(commands) == 1:
# port = ''
# elif len(commands) == 2:
# port = commands[1]
# else:
# print syntax
# return
# if not self.director.enable(host, port):
# logger.error("Could not enable %s" % host)
# else:
# print syntax
# def complete_disable(self, text, line, begidx, endidx):
# """Tab completion for disable command."""
# servers = ['real', 'virtual']
# if (line.startswith("disable real") or
# line.startswith("disable virtual")):
# completions = []
# elif not text:
# completions = servers[:]
# else:
# completions = [s for s in servers if s.startswith(text)]
# return completions
# def complete_enable(self, text, line, begidx, endidx):
# """Tab completion for enable command."""
# if (line.startswith("enable real") or
# line.startswith("enable virtual")):
# completions = []
# elif not text:
# completions = servers[:]
# else:
# completions = [s for s in servers if s.startswith(text)]
# return completions
class FirewallPrompt(CommandPrompt):
"""Class handling shell prompt for firewall (iptables) related actions"""
def __init__(self, config, rawprompt='', stdin=sys.stdin, stdout=sys.stdout):
# super(CommandPrompt, self).__init__()
CommandPrompt.__init__(self, config, rawprompt="lvsm(live)(firewall)# ")
self.firewall = firewall.Firewall(self.config['iptables'])
def do_status(self, line):
"""
\rDisplay status of all packet filtering rules
"""
mangle = self.firewall.show_mangle(self.settings['numeric'], self.settings['color'])
ports = self.firewall.show(self.settings['numeric'], self.settings['color'])
nat = self.firewall.show_nat(self.settings['numeric'])
utils.pager(self.config['pager'], mangle + ports + nat + [''])
def do_show(self, line):
"""
\rShow the running status specific packet filter tables.
\rSyntax: show <table>
\r<table> can be one of the following
nat the NAT table.
fwm|mangle the mangle table.
filters the input filters table.
"""
if line == "nat":
output = self.firewall.show_nat(self.settings['numeric'])
elif line == "filters":
output = self.firewall.show(self.settings['numeric'], self.settings['color'])
elif line == "mangle" or line == "fwm":
output = self.firewall.show_mangle(self.settings['numeric'], self.settings['color'])
else:
print "*** Syntax: show nat|fwm|mangle|filters"
return
utils.pager(self.config['pager'], output + [''])
def complete_show(self, text, line, begidx, endidx):
"""Command completion for the show command"""
args = ['nat', 'filters']
if not text:
completions = args[:]
else:
completions = [s for s in args if s.startswith(text)]
return completions
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Proxy AMI-related calls from cloud controller to objectstore service."""
import base64
import binascii
import os
import shutil
import tarfile
import tempfile
import boto.s3.connection
import eventlet
from lxml import etree
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from nova.api.ec2 import ec2utils
import nova.cert.rpcapi
from nova.compute import arch
from nova import exception
from nova.i18n import _, _LE, _LI
from nova.image import glance
from nova import utils
# Module-level logger for the S3 image proxy.
LOG = logging.getLogger(__name__)
# Options controlling where images are decrypted locally and how the
# S3/objectstore endpoint is reached (host, port, credentials, SSL).
s3_opts = [
    cfg.StrOpt('image_decryption_dir',
               default='/tmp',
               help='Parent directory for tempdir used for image decryption'),
    cfg.StrOpt('s3_host',
               default='$my_ip',
               help='Hostname or IP for OpenStack to use when accessing '
                    'the S3 api'),
    cfg.IntOpt('s3_port',
               default=3333,
               min=1,
               max=65535,
               help='Port used when accessing the S3 api'),
    cfg.StrOpt('s3_access_key',
               default='notchecked',
               help='Access key to use for S3 server for images'),
    cfg.StrOpt('s3_secret_key',
               default='notchecked',
               help='Secret key to use for S3 server for images'),
    cfg.BoolOpt('s3_use_ssl',
                default=False,
                help='Whether to use SSL when talking to S3'),
    cfg.BoolOpt('s3_affix_tenant',
                default=False,
                help='Whether to affix the tenant id to the access key '
                     'when downloading from S3'),
]
CONF = cfg.CONF
CONF.register_opts(s3_opts)
# 'my_ip' is defined in nova.netconf and used as the default s3_host.
CONF.import_opt('my_ip', 'nova.netconf')
class S3ImageService(object):
    """Wraps an existing image service to support s3 based register.

    Metadata calls are delegated to the wrapped (glance) service after
    translating between EC2-style integer ids and glance uuids; register
    itself downloads, decrypts and untars the bundle from S3.
    """
    # translate our internal state to states valid by the EC2 API documentation
    image_state_map = {'downloading': 'pending',
                       'failed_download': 'failed',
                       'decrypting': 'pending',
                       'failed_decrypt': 'failed',
                       'untarring': 'pending',
                       'failed_untar': 'failed',
                       'uploading': 'pending',
                       'failed_upload': 'failed',
                       'available': 'available'}
    def __init__(self, service=None, *args, **kwargs):
        """Wrap *service*, defaulting to the configured glance service."""
        self.cert_rpcapi = nova.cert.rpcapi.CertAPI()
        self.service = service or glance.get_default_image_service()
        self.service.__init__(*args, **kwargs)
    def _translate_uuids_to_ids(self, context, images):
        """Apply _translate_uuid_to_id to every image in *images*."""
        return [self._translate_uuid_to_id(context, img) for img in images]
    def _translate_uuid_to_id(self, context, image):
        """Return a copy of *image* with glance uuids mapped to EC2 int ids."""
        image_copy = image.copy()
        try:
            image_uuid = image_copy['id']
        except KeyError:
            pass
        else:
            image_copy['id'] = ec2utils.glance_id_to_id(context, image_uuid)
        for prop in ['kernel_id', 'ramdisk_id']:
            try:
                image_uuid = image_copy['properties'][prop]
            except (KeyError, ValueError):
                pass
            else:
                image_id = ec2utils.glance_id_to_id(context, image_uuid)
                image_copy['properties'][prop] = image_id
        try:
            # Map our internal image states onto the EC2-visible ones.
            image_copy['properties']['image_state'] = self.image_state_map[
                image['properties']['image_state']]
        except (KeyError, ValueError):
            pass
        return image_copy
    def _translate_id_to_uuid(self, context, image):
        """Return a copy of *image* with EC2 int ids mapped to glance uuids."""
        image_copy = image.copy()
        try:
            image_id = image_copy['id']
        except KeyError:
            pass
        else:
            image_copy['id'] = ec2utils.id_to_glance_id(context, image_id)
        for prop in ['kernel_id', 'ramdisk_id']:
            try:
                image_id = image_copy['properties'][prop]
            except (KeyError, ValueError):
                pass
            else:
                image_uuid = ec2utils.id_to_glance_id(context, image_id)
                image_copy['properties'][prop] = image_uuid
        return image_copy
    def create(self, context, metadata, data=None):
        """Create an image.
        metadata['properties'] should contain image_location.
        """
        image = self._s3_create(context, metadata)
        return image
    def delete(self, context, image_id):
        """Delete the image identified by EC2 id *image_id*."""
        image_uuid = ec2utils.id_to_glance_id(context, image_id)
        self.service.delete(context, image_uuid)
    def update(self, context, image_id, metadata, data=None):
        """Update image metadata/data; ids are translated both ways."""
        image_uuid = ec2utils.id_to_glance_id(context, image_id)
        metadata = self._translate_id_to_uuid(context, metadata)
        image = self.service.update(context, image_uuid, metadata, data)
        return self._translate_uuid_to_id(context, image)
    def detail(self, context, **kwargs):
        """List images, translating uuids to EC2 ids."""
        # NOTE(bcwaldon): sort asc to make sure we assign lower ids
        # to older images
        kwargs.setdefault('sort_dir', 'asc')
        images = self.service.detail(context, **kwargs)
        return self._translate_uuids_to_ids(context, images)
    def show(self, context, image_id):
        """Return a single image by EC2 id, translated for EC2 callers."""
        image_uuid = ec2utils.id_to_glance_id(context, image_id)
        image = self.service.show(context, image_uuid)
        return self._translate_uuid_to_id(context, image)
    @staticmethod
    def _conn(context):
        """Build a boto S3 connection from the s3_* config options."""
        # NOTE(vish): access and secret keys for s3 server are not
        # checked in nova-objectstore
        access = CONF.s3_access_key
        if CONF.s3_affix_tenant:
            access = '%s:%s' % (access, context.project_id)
        secret = CONF.s3_secret_key
        calling = boto.s3.connection.OrdinaryCallingFormat()
        return boto.s3.connection.S3Connection(aws_access_key_id=access,
                                               aws_secret_access_key=secret,
                                               is_secure=CONF.s3_use_ssl,
                                               calling_format=calling,
                                               port=CONF.s3_port,
                                               host=CONF.s3_host)
    @staticmethod
    def _download_file(bucket, filename, local_dir):
        """Fetch *filename* from *bucket* into *local_dir*; return its path."""
        key = bucket.get_key(filename)
        local_filename = os.path.join(local_dir, os.path.basename(filename))
        key.get_contents_to_filename(local_filename)
        return local_filename
    def _s3_parse_manifest(self, context, metadata, manifest):
        """Parse an EC2 bundle manifest and create the queued glance image.

        Returns (parsed manifest element tree, translated image dict,
        glance image uuid).
        """
        manifest = etree.fromstring(manifest)
        image_format = 'ami'
        try:
            # A literal 'true' marks this image as its own kernel (aki).
            kernel_id = manifest.find('machine_configuration/kernel_id').text
            if kernel_id == 'true':
                image_format = 'aki'
                kernel_id = None
        except Exception:
            kernel_id = None
        try:
            ramdisk_id = manifest.find('machine_configuration/ramdisk_id').text
            if ramdisk_id == 'true':
                image_format = 'ari'
                ramdisk_id = None
        except Exception:
            ramdisk_id = None
        try:
            guestarch = manifest.find(
                'machine_configuration/architecture').text
        except Exception:
            guestarch = arch.X86_64
        if not arch.is_valid(guestarch):
            raise exception.InvalidArchitectureName(arch=guestarch)
        # NOTE(yamahata):
        # EC2 ec2-bundle-image --block-device-mapping accepts
        # <virtual name>=<device name> where
        # virtual name = {ami, root, swap, ephemeral<N>}
        # where N is no negative integer
        # device name = the device name seen by guest kernel.
        # They are converted into
        # block_device_mapping/mapping/{virtual, device}
        #
        # Do NOT confuse this with ec2-register's block device mapping
        # argument.
        mappings = []
        try:
            block_device_mapping = manifest.findall('machine_configuration/'
                                                    'block_device_mapping/'
                                                    'mapping')
            for bdm in block_device_mapping:
                mappings.append({'virtual': bdm.find('virtual').text,
                                 'device': bdm.find('device').text})
        except Exception:
            mappings = []
        properties = metadata['properties']
        properties['architecture'] = guestarch
        def _translate_dependent_image_id(image_key, image_id):
            # Store kernel/ramdisk references as glance uuids.
            image_uuid = ec2utils.ec2_id_to_glance_id(context, image_id)
            properties[image_key] = image_uuid
        if kernel_id:
            _translate_dependent_image_id('kernel_id', kernel_id)
        if ramdisk_id:
            _translate_dependent_image_id('ramdisk_id', ramdisk_id)
        if mappings:
            properties['mappings'] = mappings
        metadata.update({'disk_format': image_format,
                         'container_format': image_format,
                         'status': 'queued',
                         'is_public': False,
                         'properties': properties})
        metadata['properties']['image_state'] = 'pending'
        # TODO(bcwaldon): right now, this removes user-defined ids.
        # We need to re-enable this.
        metadata.pop('id', None)
        image = self.service.create(context, metadata)
        # extract the new uuid and generate an int id to present back to user
        image_uuid = image['id']
        image['id'] = ec2utils.glance_id_to_id(context, image_uuid)
        # return image_uuid so the caller can still make use of image_service
        return manifest, image, image_uuid
    def _s3_create(self, context, metadata):
        """Gets a manifest from s3 and makes an image."""
        image_path = tempfile.mkdtemp(dir=CONF.image_decryption_dir)
        image_location = metadata['properties']['image_location'].lstrip('/')
        bucket_name = image_location.split('/')[0]
        manifest_path = image_location[len(bucket_name) + 1:]
        bucket = self._conn(context).get_bucket(bucket_name)
        key = bucket.get_key(manifest_path)
        manifest = key.get_contents_as_string()
        manifest, image, image_uuid = self._s3_parse_manifest(context,
                                                              metadata,
                                                              manifest)
        def delayed_create():
            """This handles the fetching and decrypting of the part files."""
            log_vars = {'image_location': image_location,
                        'image_path': image_path}
            def _update_image_state(context, image_uuid, image_state):
                # Only update the image_state property, keep others intact.
                metadata = {'properties': {'image_state': image_state}}
                self.service.update(context, image_uuid, metadata,
                                    purge_props=False)
            def _update_image_data(context, image_uuid, image_data):
                metadata = {}
                self.service.update(context, image_uuid, metadata, image_data,
                                    purge_props=False)
            try:
                _update_image_state(context, image_uuid, 'downloading')
                try:
                    parts = []
                    # NOTE(review): getiterator() is deprecated in modern
                    # ElementTree/lxml in favor of iter().
                    elements = manifest.find('image').getiterator('filename')
                    for fn_element in elements:
                        part = self._download_file(bucket,
                                                   fn_element.text,
                                                   image_path)
                        parts.append(part)
                    # NOTE(vish): this may be suboptimal, should we use cat?
                    # NOTE(review): parts are concatenated in text mode;
                    # assumes Python 2 str semantics — verify if porting.
                    enc_filename = os.path.join(image_path, 'image.encrypted')
                    with open(enc_filename, 'w') as combined:
                        for filename in parts:
                            with open(filename) as part:
                                shutil.copyfileobj(part, combined)
                except Exception:
                    LOG.exception(_LE("Failed to download %(image_location)s "
                                      "to %(image_path)s"), log_vars)
                    _update_image_state(context, image_uuid, 'failed_download')
                    return
                _update_image_state(context, image_uuid, 'decrypting')
                try:
                    hex_key = manifest.find('image/ec2_encrypted_key').text
                    encrypted_key = binascii.a2b_hex(hex_key)
                    hex_iv = manifest.find('image/ec2_encrypted_iv').text
                    encrypted_iv = binascii.a2b_hex(hex_iv)
                    dec_filename = os.path.join(image_path, 'image.tar.gz')
                    self._decrypt_image(context, enc_filename, encrypted_key,
                                        encrypted_iv, dec_filename)
                except Exception:
                    LOG.exception(_LE("Failed to decrypt %(image_location)s "
                                      "to %(image_path)s"), log_vars)
                    _update_image_state(context, image_uuid, 'failed_decrypt')
                    return
                _update_image_state(context, image_uuid, 'untarring')
                try:
                    unz_filename = self._untarzip_image(image_path,
                                                        dec_filename)
                except Exception:
                    LOG.exception(_LE("Failed to untar %(image_location)s "
                                      "to %(image_path)s"), log_vars)
                    _update_image_state(context, image_uuid, 'failed_untar')
                    return
                _update_image_state(context, image_uuid, 'uploading')
                try:
                    with open(unz_filename) as image_file:
                        _update_image_data(context, image_uuid, image_file)
                except Exception:
                    LOG.exception(_LE("Failed to upload %(image_location)s "
                                      "to %(image_path)s"), log_vars)
                    _update_image_state(context, image_uuid, 'failed_upload')
                    return
                metadata = {'status': 'active',
                            'properties': {'image_state': 'available'}}
                self.service.update(context, image_uuid, metadata,
                                    purge_props=False)
                # Scratch dir is only removed on success; failure paths keep
                # it for debugging.
                shutil.rmtree(image_path)
            except exception.ImageNotFound:
                LOG.info(_LI("Image %s was deleted underneath us"), image_uuid)
                return
        # Run the heavy download/decrypt pipeline asynchronously.
        eventlet.spawn_n(delayed_create)
        return image
    def _decrypt_image(self, context, encrypted_filename, encrypted_key,
                       encrypted_iv, decrypted_filename):
        """Decrypt the bundle: recover key/iv via the cert service, then
        AES-128-CBC decrypt the payload with openssl."""
        elevated = context.elevated()
        try:
            key = self.cert_rpcapi.decrypt_text(elevated,
                    project_id=context.project_id,
                    text=base64.b64encode(encrypted_key))
        except Exception as exc:
            msg = _('Failed to decrypt private key: %s') % exc
            raise exception.NovaException(msg)
        try:
            iv = self.cert_rpcapi.decrypt_text(elevated,
                    project_id=context.project_id,
                    text=base64.b64encode(encrypted_iv))
        except Exception as exc:
            raise exception.NovaException(_('Failed to decrypt initialization '
                                            'vector: %s') % exc)
        try:
            utils.execute('openssl', 'enc',
                          '-d', '-aes-128-cbc',
                          '-in', '%s' % (encrypted_filename,),
                          '-K', '%s' % (key,),
                          '-iv', '%s' % (iv,),
                          '-out', '%s' % (decrypted_filename,))
        except processutils.ProcessExecutionError as exc:
            raise exception.NovaException(_('Failed to decrypt image file '
                                            '%(image_file)s: %(err)s') %
                                          {'image_file': encrypted_filename,
                                           'err': exc.stdout})
    @staticmethod
    def _test_for_malicious_tarball(path, filename):
        """Raises exception if extracting tarball would escape extract path."""
        tar_file = tarfile.open(filename, 'r|gz')
        for n in tar_file.getnames():
            if not os.path.abspath(os.path.join(path, n)).startswith(path):
                tar_file.close()
                raise exception.NovaException(_('Unsafe filenames in image'))
        tar_file.close()
    @staticmethod
    def _untarzip_image(path, filename):
        """Safely extract the tarball and return the first member's path."""
        S3ImageService._test_for_malicious_tarball(path, filename)
        tar_file = tarfile.open(filename, 'r|gz')
        tar_file.extractall(path)
        image_file = tar_file.getnames()[0]
        tar_file.close()
        return os.path.join(path, image_file)
| |
"""Test different accessory types: Sensors."""
from homeassistant.components.homekit import get_accessory
from homeassistant.components.homekit.const import (
DEVICE_CLASS_MOTION,
PROP_CELSIUS,
THRESHOLD_CO,
THRESHOLD_CO2,
)
from homeassistant.components.homekit.type_sensors import (
BINARY_SENSOR_SERVICE_MAP,
AirQualitySensor,
BinarySensor,
CarbonDioxideSensor,
CarbonMonoxideSensor,
HumiditySensor,
LightSensor,
TemperatureSensor,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_UNIT_OF_MEASUREMENT,
EVENT_HOMEASSISTANT_START,
PERCENTAGE,
STATE_HOME,
STATE_NOT_HOME,
STATE_OFF,
STATE_ON,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import CoreState
from homeassistant.helpers import entity_registry as er
async def test_temperature(hass, hk_driver):
    """Temperature accessory mirrors Home Assistant state changes."""
    entity_id = "sensor.temperature"
    hass.states.async_set(entity_id, None)
    await hass.async_block_till_done()

    accessory = TemperatureSensor(hass, hk_driver, "Temperature", entity_id, 2, None)
    await accessory.run()
    await hass.async_block_till_done()

    assert accessory.aid == 2
    assert accessory.category == 10  # Sensor
    assert accessory.char_temp.value == 0.0
    for prop, expected in PROP_CELSIUS.items():
        assert accessory.char_temp.properties[prop] == expected

    # (state, unit attribute, expected characteristic value)
    cases = (
        (STATE_UNKNOWN, TEMP_CELSIUS, 0.0),
        ("20", TEMP_CELSIUS, 20),
        ("75.2", TEMP_FAHRENHEIT, 24),  # converted to Celsius
    )
    for state, unit, expected in cases:
        hass.states.async_set(entity_id, state, {ATTR_UNIT_OF_MEASUREMENT: unit})
        await hass.async_block_till_done()
        assert accessory.char_temp.value == expected
async def test_humidity(hass, hk_driver):
    """Humidity accessory mirrors Home Assistant state changes."""
    entity_id = "sensor.humidity"
    hass.states.async_set(entity_id, None)
    await hass.async_block_till_done()

    accessory = HumiditySensor(hass, hk_driver, "Humidity", entity_id, 2, None)
    await accessory.run()
    await hass.async_block_till_done()

    assert accessory.aid == 2
    assert accessory.category == 10  # Sensor
    assert accessory.char_humidity.value == 0

    for state, expected in ((STATE_UNKNOWN, 0), ("20", 20)):
        hass.states.async_set(entity_id, state)
        await hass.async_block_till_done()
        assert accessory.char_humidity.value == expected
async def test_air_quality(hass, hk_driver):
    """Air quality accessory mirrors density and quality from state."""
    entity_id = "sensor.air_quality"
    hass.states.async_set(entity_id, None)
    await hass.async_block_till_done()

    accessory = AirQualitySensor(hass, hk_driver, "Air Quality", entity_id, 2, None)
    await accessory.run()
    await hass.async_block_till_done()

    assert accessory.aid == 2
    assert accessory.category == 10  # Sensor
    assert accessory.char_density.value == 0
    assert accessory.char_quality.value == 0

    # (state, expected density, expected quality bucket)
    for state, density, quality in (
        (STATE_UNKNOWN, 0, 0),
        ("34", 34, 1),
        ("200", 200, 5),
    ):
        hass.states.async_set(entity_id, state)
        await hass.async_block_till_done()
        assert accessory.char_density.value == density
        assert accessory.char_quality.value == quality
async def test_co(hass, hk_driver):
    """CO accessory tracks level, peak and the detected flag."""
    entity_id = "sensor.co"
    hass.states.async_set(entity_id, None)
    await hass.async_block_till_done()

    accessory = CarbonMonoxideSensor(hass, hk_driver, "CO", entity_id, 2, None)
    await accessory.run()
    await hass.async_block_till_done()

    assert accessory.aid == 2
    assert accessory.category == 10  # Sensor
    for char in (accessory.char_level, accessory.char_peak, accessory.char_detected):
        assert char.value == 0

    hass.states.async_set(entity_id, STATE_UNKNOWN)
    await hass.async_block_till_done()
    for char in (accessory.char_level, accessory.char_peak, accessory.char_detected):
        assert char.value == 0

    above, below = 32, 10
    assert above > THRESHOLD_CO
    assert below < THRESHOLD_CO
    # Peak latches at the highest value seen; detected follows the threshold.
    for value, peak, detected in ((above, above, 1), (below, above, 0)):
        hass.states.async_set(entity_id, str(value))
        await hass.async_block_till_done()
        assert accessory.char_level.value == value
        assert accessory.char_peak.value == peak
        assert accessory.char_detected.value == detected
async def test_co2(hass, hk_driver):
    """Test if accessory is updated after state change."""
    entity_id = "sensor.co2"

    hass.states.async_set(entity_id, None)
    await hass.async_block_till_done()

    acc = CarbonDioxideSensor(hass, hk_driver, "CO2", entity_id, 2, None)
    await acc.run()
    await hass.async_block_till_done()

    assert acc.aid == 2
    assert acc.category == 10  # Sensor
    assert acc.char_level.value == 0
    assert acc.char_peak.value == 0
    assert acc.char_detected.value == 0

    hass.states.async_set(entity_id, STATE_UNKNOWN)
    await hass.async_block_till_done()
    assert acc.char_level.value == 0
    assert acc.char_peak.value == 0
    assert acc.char_detected.value == 0

    # An above-threshold reading trips detection and records the peak; a
    # later below-threshold reading clears detection but keeps the peak.
    for reading, expected_peak, expected_detected in ((1100, 1100, 1), (800, 1100, 0)):
        # Guard: the chosen reading really is on the intended side of the threshold.
        assert (reading > THRESHOLD_CO2) == bool(expected_detected)
        hass.states.async_set(entity_id, str(reading))
        await hass.async_block_till_done()
        assert acc.char_level.value == reading
        assert acc.char_peak.value == expected_peak
        assert acc.char_detected.value == expected_detected
async def test_light(hass, hk_driver):
    """Test if accessory is updated after state change."""
    entity_id = "sensor.light"

    hass.states.async_set(entity_id, None)
    await hass.async_block_till_done()

    acc = LightSensor(hass, hk_driver, "Light", entity_id, 2, None)
    await acc.run()
    await hass.async_block_till_done()

    assert acc.aid == 2
    assert acc.category == 10  # Sensor
    assert acc.char_light.value == 0.0001

    # Unknown keeps the HomeKit minimum lux value; a numeric state is applied.
    for state, expected_lux in ((STATE_UNKNOWN, 0.0001), ("300", 300)):
        hass.states.async_set(entity_id, state)
        await hass.async_block_till_done()
        assert acc.char_light.value == expected_lux
async def test_binary(hass, hk_driver):
    """Test if accessory is updated after state change."""
    entity_id = "binary_sensor.opening"
    attrs = {ATTR_DEVICE_CLASS: "opening"}

    hass.states.async_set(entity_id, STATE_UNKNOWN, attrs)
    await hass.async_block_till_done()

    acc = BinarySensor(hass, hk_driver, "Window Opening", entity_id, 2, None)
    await acc.run()
    await hass.async_block_till_done()

    assert acc.aid == 2
    assert acc.category == 10  # Sensor
    assert acc.char_detected.value == 0

    # Any "on-like" state (on/home) reads as detected; "off-like" clears it.
    for state, expected in (
        (STATE_ON, 1),
        (STATE_OFF, 0),
        (STATE_HOME, 1),
        (STATE_NOT_HOME, 0),
    ):
        hass.states.async_set(entity_id, state, attrs)
        await hass.async_block_till_done()
        assert acc.char_detected.value == expected

    # Removing the entity must not flip the characteristic.
    hass.states.async_remove(entity_id)
    await hass.async_block_till_done()
    assert acc.char_detected.value == 0
async def test_motion_uses_bool(hass, hk_driver):
    """Test if accessory is updated after state change."""
    entity_id = "binary_sensor.motion"
    attrs = {ATTR_DEVICE_CLASS: DEVICE_CLASS_MOTION}

    hass.states.async_set(entity_id, STATE_UNKNOWN, attrs)
    await hass.async_block_till_done()

    acc = BinarySensor(hass, hk_driver, "Motion Sensor", entity_id, 2, None)
    await acc.run()
    await hass.async_block_till_done()

    assert acc.aid == 2
    assert acc.category == 10  # Sensor
    # Motion sensors expose a boolean characteristic, not 0/1.
    assert acc.char_detected.value is False

    for state, expected in (
        (STATE_ON, True),
        (STATE_OFF, False),
        (STATE_HOME, True),
        (STATE_NOT_HOME, False),
    ):
        hass.states.async_set(entity_id, state, attrs)
        await hass.async_block_till_done()
        assert acc.char_detected.value is expected

    # Removing the entity must not flip the characteristic.
    hass.states.async_remove(entity_id)
    await hass.async_block_till_done()
    assert acc.char_detected.value is False
async def test_binary_device_classes(hass, hk_driver):
    """Test if services and characteristics are assigned correctly."""
    entity_id = "binary_sensor.demo"

    for device_class, mapping in BINARY_SENSOR_SERVICE_MAP.items():
        service_name, char_name = mapping[0], mapping[1]

        hass.states.async_set(entity_id, STATE_OFF, {ATTR_DEVICE_CLASS: device_class})
        await hass.async_block_till_done()

        acc = BinarySensor(hass, hk_driver, "Binary Sensor", entity_id, 2, None)
        assert acc.get_service(service_name).display_name == service_name
        assert acc.char_detected.display_name == char_name
async def test_sensor_restore(hass, hk_driver, events):
    """Test setting up an entity from state in the event registry."""
    # Simulate a restart: registry entries exist before HA has fully started.
    hass.state = CoreState.not_running

    registry = er.async_get(hass)
    # Temperature sensor registered by device class only.
    registry.async_get_or_create(
        "sensor",
        "generic",
        "1234",
        suggested_object_id="temperature",
        device_class="temperature",
    )
    # Humidity sensor registered with device class and a percentage unit.
    registry.async_get_or_create(
        "sensor",
        "generic",
        "12345",
        suggested_object_id="humidity",
        device_class="humidity",
        unit_of_measurement=PERCENTAGE,
    )

    # Fire the start event so the restored registry entries become states.
    hass.bus.async_fire(EVENT_HOMEASSISTANT_START, {})
    await hass.async_block_till_done()

    # Both restored entities must map to a sensor accessory (category 10).
    acc = get_accessory(hass, hk_driver, hass.states.get("sensor.temperature"), 2, {})
    assert acc.category == 10
    acc = get_accessory(hass, hk_driver, hass.states.get("sensor.humidity"), 2, {})
    assert acc.category == 10
| |
# Copyright 2014 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import urllib2
import json
import django
django.setup()
from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest
from ndproj.ndprojdb import NDProjectsDB
from webservices.ndwsingest import IngestData
# from ndschema import PROJECT_SCHEMA, DATASET_SCHEMA, CHANNEL_SCHEMA
from ndlib.ndtype import READONLY_FALSE, REDIS, S3_TRUE
from nduser.models import Project
from nduser.models import Dataset
from nduser.models import Token
from nduser.models import Channel
from nduser.models import User
from webservices.ndwserror import NDWSError
from ndproj.nddataset import NDDataset
from ndproj.ndproject import NDProject
from ndproj.ndchannel import NDChannel
from ndproj.ndtoken import NDToken
import logging
logger = logging.getLogger('neurodata')
def autoIngest(webargs, post_data):
    """Create a dataset, project, token and channels from a posted JSON
    document and start an ingest task for each channel.

    The *_CREATED flags record exactly what this request created so that a
    later failure rolls back only those objects, never pre-existing ones.
    Returns an HttpResponse on success, or an HttpResponseBadRequest with a
    JSON-encoded message on any validation/creation failure.
    """
    # setting state values for error handling
    TOKEN_CREATED = False
    PROJECT_CREATED = False
    CHANNEL_CREATED = False
    DATASET_CREATED = False
    # BUG FIX: pr is read in the outer error handler below; without this it
    # would be unbound (NameError) when a failure occurs before the project
    # object is built.
    pr = None

    nd_dict = json.loads(post_data)
    try:
        dataset_dict = nd_dict['dataset']
        project_dict = nd_dict['project']
        channels = nd_dict['channels']
        metadata_dict = nd_dict['metadata']
    except Exception as e:
        logger.error("Missing requred fields of dataset, project, channels, metadata.")
        return HttpResponseBadRequest(json.dumps("Missing required fields of dataset, project, channels, metadata. Please check if one of them is not missing."), content_type="application/json")

    try:
        # Setting the user_ids to brain for now
        # creating ds object
        ds = NDDataset.fromJson(extractDatasetDict(dataset_dict))
        ds.user_id = 1

        # Reuse an identical existing dataset; reject a conflicting one.
        if Dataset.objects.filter(dataset_name=ds.dataset_name).exists():
            stored_ds = NDDataset.fromName(ds.dataset_name)
            if not compareModelObjects(stored_ds, ds):
                logger.error("Dataset {} already exists and is different then the chosen dataset".format(ds.dataset_name))
                return HttpResponseBadRequest(json.dumps("Dataset {} already exists and is different then the chosen dataset. Please choose a different dataset name".format(ds.dataset_name)), content_type="application/json")
        else:
            ds.create()
            DATASET_CREATED = True

        # extracting project and token
        pr, tk = extractProjectDict(project_dict)
        pr = NDProject.fromJson(ds.dataset_name, pr)
        pr.user_id = 1
        pr.kvengine = REDIS

        # Checking if the posted project already exists
        if Project.objects.filter(project_name=pr.project_name).exists():
            stored_pr = NDProject.fromName(pr.project_name)
            tk = NDToken.fromJson(pr.project_name, tk)
            tk.user_id = 1
            # Compare via the datasets: python compares sub-objects inside
            # other objects unreliably, so this is a best-effort check (see
            # original note).
            if compareModelObjects(stored_pr.datasetcfg, pr.datasetcfg):
                if Token.objects.filter(token_name=tk.token_name).exists():
                    stored_tk = NDToken.fromName(tk.token_name)
                    # An identical existing token is reused; a different one
                    # with the same name is a conflict.
                    if not compareModelObjects(stored_tk, tk):
                        if DATASET_CREATED:
                            ds.delete()
                        logger.error("Token {} already exists.".format(tk.token_name))
                        return HttpResponseBadRequest(json.dumps("Token {} already exists. Please choose a different token name.".format(tk.token_name)), content_type="application/json")
                else:
                    tk.create()
                    TOKEN_CREATED = True
            else:
                if DATASET_CREATED:
                    ds.delete()
                if TOKEN_CREATED:
                    tk.delete()
                logger.error("Project {} already exists.".format(pr.project_name))
                return HttpResponseBadRequest(json.dumps("Project {} already exists. Please choose a different project name".format(pr.project_name)), content_type="application/json")
        else:
            try:
                pr.create()
                tk = NDToken.fromJson(pr.project_name, tk)
                tk.user_id = 1
                tk.create()
                PROJECT_CREATED = True
                TOKEN_CREATED = True
            except Exception as e:
                if TOKEN_CREATED:
                    tk.delete()
                if PROJECT_CREATED:
                    pr.delete()
                if DATASET_CREATED:
                    ds.delete()
                logger.error("There was an error in creating the project {} database".format(pr.project_name))
                return HttpResponseBadRequest(json.dumps("There was an error in creating the project {} database".format(pr.project_name)), content_type="application/json")

        # BUG FIX: the original unconditionally called tk.create() again here,
        # attempting to create the token a second time on every path that had
        # already created (or matched) it above.

        ch_list = []
        for channel_name in channels:
            channel_dict = channels[channel_name]
            ch, data_url, file_format, file_type = extractChannelDict(channel_dict)
            ch = NDChannel.fromJson(pr.project_name, ch)
            ch_list.append((ch, data_url, file_format, file_type))

        channel_object_list = []
        # Iterating over channel list to store channels
        for (ch, data_url, file_format, file_type) in ch_list:
            ch.user_id = 1
            # Checking if the channel already exists or not
            if not Channel.objects.filter(channel_name=ch.channel_name, project=pr.project_name).exists():
                # Maintain a list of channel objects created during this
                # iteration and delete all even if one fails
                channel_object_list.append(ch)
                try:
                    ch.create()
                    CHANNEL_CREATED = True
                except Exception as e:
                    if TOKEN_CREATED:
                        tk.delete()
                    if CHANNEL_CREATED:
                        for ch_obj in channel_object_list:
                            ch_obj.delete()
                    if PROJECT_CREATED:
                        pr.delete()
                    if DATASET_CREATED:
                        ds.delete()
                    logger.error("There was an error creating in the channel {} table".format(ch.channel_name))
                    return HttpResponseBadRequest(json.dumps("There was an error in creating the channel {} table.".format(ch.channel_name)), content_type="application/json")
            else:
                logger.error("Channel {} already exists.".format(ch.channel_name))
                return HttpResponseBadRequest(json.dumps("Channel {} already exists. Please choose a different channel name.".format(ch.channel_name)), content_type="application/json")

            # A trailing slash on the posted data_url breaks auto-ingest;
            # strip it before handing off.
            if data_url.endswith('/'):
                data_url = data_url[:-1]

            # Queue the celery ingest task for this channel.
            # NOTE(review): the original indentation was ambiguous; per-channel
            # ingest matches the per-channel data_url -- confirm against history.
            from sd.tasks import ingest
            ingest.delay(tk.token_name, ch.channel_name, ch.resolution, data_url, file_format, file_type)

        # Posting to LIMS system
        # postMetadataDict(metadata_dict, pr.project_name)
    except Exception as e:
        # KL TODO Delete data from the LIMS systems
        if pr is not None and PROJECT_CREATED:
            pr.delete()
        logger.error("Error saving models. There was an error in the information posted")
        return HttpResponseBadRequest(json.dumps("FAILED. There was an error in the information you posted."), content_type="application/json")

    # BUG FIX: removed the unreachable second return that referenced the
    # undefined name return_dict.
    return HttpResponse(json.dumps("SUCCESS. The ingest process has now started."), content_type="application/json")
def createChannel(webargs, post_data):
    """Create a list of channels using a JSON file.

    webargs is expected to match "<token>/createChannel/"; post_data is a JSON
    document with a top-level 'channels' mapping.  Returns an HttpResponse on
    success and an HttpResponseBadRequest on failure.
    """
    # Get the token and load the project
    try:
        m = re.match("(\w+)/createChannel/$", webargs)
        token_name = m.group(1)
    except Exception, e:
        logger.error("Error in URL format")
        raise NDWSError("Error in the URL format")

    nd_dict = json.loads(post_data)
    try:
        channels = nd_dict['channels']
    except Exception, e:
        logger.error("Missing channels field. Ensure that 'Channel' field exists.")
        return HttpResponseBadRequest("Missing channels field. Ensure that 'Channel' field exists.")

    # Resolve token -> user -> project for ownership and foreign keys.
    tk = Token.objects.get(token_name=token_name)
    ur = User.objects.get(id=tk.user_id)
    pr = Project.objects.get(project_name=tk.project_id)

    ch_list = []
    # NOTE(review): extractChannelDict(..., channel_only=True) returns a plain
    # dict, yet the loops below use attribute access (ch.channel_name) and
    # ch.save() as if ch were a Channel model instance -- as written this looks
    # like it would raise AttributeError; confirm the intended return type.
    for channel_name, value in channels.iteritems():
        channel_dict = channels[channel_name]
        ch_list.append(extractChannelDict(channel_dict, channel_only=True))
    try:
        # First iterating over the channel list to check if all the channels don't exist
        for ch in ch_list:
            if Channel.objects.filter(channel_name = ch.channel_name, project = pr.project_name).exists():
                logger.error("Channel {} already exists for project {}. Specify a different channel name".format(ch.channel_name, pr.project_name))
                return HttpResponseBadRequest("Channel {} already exists for project {}. Specify a different channel name".format(ch.channel_name, pr.project_name), content_type="text/plain")
        # Iterating over channel list to store channels
        for ch in ch_list:
            ch.project_id = pr.project_name
            # Setting the user_ids based on token user_id
            ch.user_id = tk.user_id
            ch.save()
            # Create channel database using the ndproj interface
            pd = NDProjectsDB.getProjDB(pr)
            pd.newNDChannel(ch.channel_name)
    except Exception, e:
        logger.error("Error saving models")
        # return the bad request with failed message
        return HttpResponseBadRequest("Error saving models.", content_type="text/plain")
    # return the JSON file with success
    return HttpResponse("Success. The channels were created.", content_type="text/plain")
def deleteChannel(webargs, post_data):
    """Delete a list of channels using a JSON file.

    webargs must match "<token>/deleteChannel/"; post_data carries a JSON
    document with a 'channels' list of channel names.  Channels that do not
    exist, or that are read-only, are skipped.
    """
    # Get the token and load the project
    try:
        m = re.match("(\w+)/deleteChannel/$", webargs)
        token_name = m.group(1)
    except Exception as e:
        logger.error("Error in URL format")
        raise NDWSError("Error in URL format")

    nd_dict = json.loads(post_data)
    try:
        channels = nd_dict['channels']
    except Exception as e:
        logger.error("Missing requred fields.")
        return HttpResponseBadRequest("Missing requred fields.")

    # Resolve token -> user -> project for ownership checks.
    tk = Token.objects.get(token_name=token_name)
    ur = User.objects.get(id=tk.user_id)
    pr = Project.objects.get(project_name=tk.project_id)
    try:
        # Iterating over channel list to delete channels
        for channel_name in channels:
            # BUG FIX: the original wrapped Channel.objects.get() in an `if`,
            # which raises DoesNotExist for a missing channel (failing the
            # whole request) instead of skipping it, and queried twice.
            ch = Channel.objects.filter(channel_name=channel_name, project=pr.project_name).first()
            # Only delete channels that exist and are not read-only.
            if ch is not None and ch.readonly == READONLY_FALSE:
                # delete channel table using the ndproj interface
                # (classmethod-style call, consistent with createChannel)
                pd = NDProjectsDB.getProjDB(pr)
                pd.deleteNDChannel(ch.channel_name)
                ch.delete()
        return HttpResponse("Success. Channels deleted.")
    except Exception as e:
        logger.error("Error saving models. The channels were not deleted.")
        return HttpResponseBadRequest("Error saving models. The channels were not deleted.")
def postMetadataDict(metadata_dict, project_name):
    """Post metdata to the LIMS system"""
    # NOTE(review): this function is redefined later in this module with a
    # different LIMS URL path; the later definition shadows this one at import
    # time -- confirm which endpoint is the intended one.
    try:
        # Best-effort POST of the metadata JSON; failures are logged, not raised.
        url = 'http://{}/metadata/ocp/set/{}/'.format(settings.LIMS_SERVER, project_name)
        req = urllib2.Request(url, json.dumps(metadata_dict))
        req.add_header('Content-Type', 'application/json')
        response = urllib2.urlopen(req)
    except urllib2.URLError, e:
        logger.error("Failed URL {}".format(url))
        pass
def extractDatasetDict(ds_dict):
    """Generate a JSON dataset description from the posted JSON dict.

    Requires dataset_name, imagesize (x,y,z) and voxelres (x,y,z); offset,
    scaling and scalinglevels are optional.  Returns a JSON string.
    Raises NDWSError if a required field is missing.
    """
    ds = {}
    try:
        ds['dataset_name'] = ds_dict['dataset_name']
        ds['ximagesize'] = ds_dict['imagesize'][0]
        ds['yimagesize'] = ds_dict['imagesize'][1]
        ds['zimagesize'] = ds_dict['imagesize'][2]
        ds['xvoxelres'] = ds_dict['voxelres'][0]
        ds['yvoxelres'] = ds_dict['voxelres'][1]
        ds['zvoxelres'] = ds_dict['voxelres'][2]
    except Exception as e:
        logger.error("Missing required fields")
        raise NDWSError("Missing required fields")
    if 'offset' in ds_dict:
        ds['xoffset'] = ds_dict['offset'][0]
        ds['yoffset'] = ds_dict['offset'][1]
        ds['zoffset'] = ds_dict['offset'][2]
    if 'scaling' in ds_dict:
        ds['scalingoption'] = ds_dict['scaling']
    if 'scalinglevels' in ds_dict:
        ds['scalinglevels'] = ds_dict['scalinglevels']
    else:
        # BUG FIX: the original passed the undefined name `imagesize` here,
        # raising NameError whenever scalinglevels was omitted.
        ds['scalinglevels'] = computeScalingLevels(
            [ds['ximagesize'], ds['yimagesize'], ds['zimagesize']])
    return json.dumps(ds)
def computeScalingLevels(imagesize):
    """Dynamically decide the number of scaling levels.

    Halves the x/y dimensions until both are <= 1000 or either would drop
    to <= 500, counting the halvings.  The z dimension is ignored.
    """
    ximagesz, yimagesz, zimagesz = imagesize
    scalinglevels = 0
    # When both x and y dimensions are below 1000 or one is below 500 then stop.
    while (ximagesz > 1000 or yimagesz > 1000) and ximagesz > 500 and yimagesz > 500:
        # BUG FIX: use floor division so the halving stays integral under
        # Python 3 as well (plain / yields floats there).
        ximagesz //= 2
        yimagesz //= 2
        scalinglevels += 1
    return scalinglevels
def extractProjectDict(pr_dict):
    """Build JSON strings for a project and its token from a posted dict.

    The token name defaults to the project name when not supplied; 'public'
    is carried over when present.  Returns (project_json, token_json).
    Raises NDWSError if project_name is missing.
    """
    try:
        project_name = pr_dict['project_name']
    except Exception as e:
        logger.error("Missing required fields")
        raise NDWSError("Missing required fields")
    pr = {'project_name': project_name}
    tk = {'token_name': pr_dict.get('token_name', project_name)}
    if 'public' in pr_dict:
        tk['public'] = pr_dict['public']
    return json.dumps(pr), json.dumps(tk)
def extractChannelDict(ch_dict, channel_only=False):
    """Build a channel description from a posted channel dict.

    With channel_only=False (default) returns a 4-tuple
    (channel_json, data_url, file_format, file_type); with channel_only=True
    returns the raw channel dict.  Raises NDWSError when a required field
    (channel_name, datatype, channel_type, or the ingest fields) is missing.
    """
    ch = {}
    try:
        ch['channel_name'] = ch_dict['channel_name']
        ch['channel_datatype'] = ch_dict['datatype']
        ch['channel_type'] = ch_dict['channel_type']
        if 'timerange' in ch_dict:
            ch['starttime'] = ch_dict['timerange'][0]
            ch['endtime'] = ch_dict['timerange'][1]
        if not channel_only:
            data_url = ch_dict['data_url']
            file_format = ch_dict['file_format']
            file_type = ch_dict['file_type']
    except Exception as e:
        logger.error("Missing required fields")
        raise NDWSError("Missing required fields")
    # Optional fields, copied in the same order as before so the serialized
    # JSON key order is unchanged.
    for key in ('exceptions', 'resolution'):
        if key in ch_dict:
            ch[key] = ch_dict[key]
    if 'windowrange' in ch_dict:
        ch['startwindow'] = ch_dict['windowrange'][0]
        ch['endwindow'] = ch_dict['windowrange'][1]
    if 'readonly' in ch_dict:
        ch['readonly'] = ch_dict['readonly']
    if channel_only:
        return ch
    return (json.dumps(ch), data_url, file_format, file_type)
def createJson(dataset, project, channel_list, metadata=None, channel_only=False):
    """Genarate an ND JSON document from dataset/project/channel tuples.

    dataset and project are argument tuples for createDatasetDict /
    createProjectDict; channel_list maps channel names to createChannelDict
    argument tuples (the channel_only flag is appended to each).  Returns a
    pretty-printed, key-sorted JSON string.

    BUG FIX: metadata previously defaulted to a shared mutable dict ({});
    it now defaults to None and a fresh dict is used per call.
    """
    nd_dict = {}
    nd_dict['channels'] = {}
    if not channel_only:
        nd_dict['dataset'] = createDatasetDict(*dataset)
        nd_dict['project'] = createProjectDict(*project)
        nd_dict['metadata'] = {} if metadata is None else metadata
    for channel_name, value in channel_list.items():
        value = value + (channel_only,)
        nd_dict['channels'][channel_name] = createChannelDict(*value)
    return json.dumps(nd_dict, sort_keys=True, indent=4)
# def createDatasetDict(dataset_name, imagesize, voxelres, offset=[0,0,0], timerange=[0,0], scalinglevels=0, scaling=0):
# """Generate the dataset dictionary"""
def postMetadataDict(metadata_dict, project_name):
    """Post metdata to the LIMS system"""
    # NOTE(review): this redefines postMetadataDict from earlier in this
    # module with a different URL path ('/lims/' vs '/metadata/ocp/set/');
    # being defined later, THIS version wins at import time -- confirm which
    # endpoint is correct and delete the other.
    try:
        # Best-effort POST of the metadata JSON; failures are logged, not raised.
        url = 'http://{}/lims/{}/'.format(settings.LIMS_SERVER, project_name)
        req = urllib2.Request(url, json.dumps(metadata_dict))
        req.add_header('Content-Type', 'application/json')
        response = urllib2.urlopen(req)
    except urllib2.URLError, e:
        logger.error("Failed URL {}".format(url))
        pass
# def extractDatasetDict(ds_dict):
# """Generate a dataset object from the JSON flle"""
# ds = Dataset();
# try:
# ds.dataset_name = ds_dict['dataset_name']
# imagesize = [ds.ximagesize, ds.yimagesize, ds.zimagesize] = ds_dict['imagesize']
# [ds.xvoxelres, ds.yvoxelres, ds.zvoxelres] = ds_dict['voxelres']
# except Exception, e:
# logger.error("Missing required fields")
# raise NDWSError("Missing required fields")
# if 'offset' in ds_dict:
# [ds.xoffset, ds.yoffset, ds.zoffset] = ds_dict['offset']
# if 'scaling' in ds_dict:
# ds.scalingoption = ds_dict['scaling']
# if 'scalinglevels' in ds_dict:
# ds.scalinglevels = ds_dict['scalinglevels']
# else:
# ds.scalinglevels = computeScalingLevels(imagesize)
# return ds
def computeScalingLevels(imagesize):
    """Dynamically decide the number of scaling levels.

    Halves the x/y dimensions until both are <= 1000 or either would drop
    to <= 500, counting the halvings.  The z dimension is ignored.
    NOTE(review): duplicate of the identically-named function defined earlier
    in this module; keep one.
    """
    ximagesz, yimagesz, zimagesz = imagesize
    scalinglevels = 0
    # When both x and y dimensions are below 1000 or one is below 500 then stop.
    while (ximagesz > 1000 or yimagesz > 1000) and ximagesz > 500 and yimagesz > 500:
        # BUG FIX: floor division keeps the halving integral under Python 3.
        ximagesz //= 2
        yimagesz //= 2
        scalinglevels += 1
    return scalinglevels
def createJson(dataset, project, channel_list, metadata=None, channel_only=False):
    """Genarate an ND JSON document from dataset/project/channel tuples.

    NOTE(review): this redefines createJson from earlier in this module;
    being defined later, THIS version wins.  Unlike the earlier one it does
    NOT append channel_only to each channel tuple -- confirm which is intended.

    BUG FIX: metadata previously defaulted to a shared mutable dict ({});
    it now defaults to None and a fresh dict is used per call.
    """
    nd_dict = {}
    nd_dict['channels'] = {}
    if not channel_only:
        nd_dict['dataset'] = createDatasetDict(*dataset)
        nd_dict['project'] = createProjectDict(*project)
        nd_dict['metadata'] = {} if metadata is None else metadata
    for channel_name, value in channel_list.items():
        nd_dict['channels'][channel_name] = createChannelDict(*value)
    return json.dumps(nd_dict, sort_keys=True, indent=4)
def createDatasetDict(dataset_name, imagesize, voxelres, offset=(0, 0, 0), scalinglevels=0, scaling=0):
    """Generate the dataset dictionary.

    dataset format = (dataset_name, [ximagesz, yimagesz, zimagesz],
    [xvoxel, yvoxel, zvoxel], [xoffset, yoffset, zoffset], scalinglevels,
    scaling).  Passing None for offset/scalinglevels/scaling omits the field.

    BUG FIX: the offset default was a shared mutable list ([0,0,0]); it is
    now an immutable tuple, copied into a fresh list on use.
    """
    dataset_dict = {}
    dataset_dict['dataset_name'] = dataset_name
    dataset_dict['imagesize'] = imagesize
    dataset_dict['voxelres'] = voxelres
    if offset is not None:
        dataset_dict['offset'] = list(offset)
    if scalinglevels is not None:
        dataset_dict['scalinglevels'] = scalinglevels
    if scaling is not None:
        dataset_dict['scaling'] = scaling
    return dataset_dict
def createChannelDict(channel_name, datatype, channel_type, data_url, file_format, file_type, time_range=(0, 0), exceptions=0, resolution=0, windowrange=(0, 0), readonly=0, channel_only=False):
    """Genearte the channel dictionary.

    channel format = (channel_name, datatype, channel_type, data_url,
    file_format, file_type, time_range, exceptions, resolution, windowrange,
    readonly, channel_only).  Passing None for an optional field omits it;
    with channel_only=True the ingest fields (data_url/file_format/file_type)
    are left out.

    BUG FIXES: the body referenced the undefined name `timerange` (the
    parameter is time_range), raising NameError on every call; the mutable
    list defaults are now immutable tuples copied on use.
    """
    channel_dict = {}
    channel_dict['channel_name'] = channel_name
    channel_dict['datatype'] = datatype
    channel_dict['channel_type'] = channel_type
    if exceptions is not None:
        channel_dict['exceptions'] = exceptions
    if resolution is not None:
        channel_dict['resolution'] = resolution
    if time_range is not None:
        channel_dict['timerange'] = list(time_range)
    if windowrange is not None:
        channel_dict['windowrange'] = list(windowrange)
    if readonly is not None:
        channel_dict['readonly'] = readonly
    if not channel_only:
        channel_dict['data_url'] = data_url
        channel_dict['file_format'] = file_format
        channel_dict['file_type'] = file_type
    return channel_dict
def createProjectDict(project_name, token_name='', public=0):
    """Genarate the project dictionary.

    project format = (project_name, token_name, public).  An empty token
    name falls back to the project name; passing None for token_name or
    public omits that field.
    """
    project_dict = {'project_name': project_name}
    if token_name is not None:
        project_dict['token_name'] = token_name or project_name
    if public is not None:
        project_dict['public'] = public
    return project_dict
def compareModelObjects(obj1, obj2, excluded_keys=('_state',)):
    """Compare two model objects attribute-by-attribute.

    Returns True when every non-excluded key of obj1.__dict__ has an equal
    value in obj2.__dict__.  BUG FIXES: a key missing from obj2 now counts
    as "different" (previously raised KeyError); the excluded_keys default
    is an immutable tuple instead of a shared list.
    """
    _missing = object()  # sentinel: distinguishes "absent" from a stored None
    for key, value in obj1.__dict__.items():
        if key in excluded_keys:
            continue
        if obj2.__dict__.get(key, _missing) != value:
            return False
    return True
| |
# K-means clustering in 2d
# Code is from chapter 9 of
# https://github.com/ageron/handson-ml2
import superimport
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
# To plot pretty figures
#%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
# Larger default font sizes for axis labels and tick labels in every figure below.
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
# Dataset selection: exactly one branch is enabled by editing the literal
# 0/1 guards (a crude feature toggle); geron_data records which one ran so
# later plotting can adapt its annotations.
if 0:
    # Geron's five-blob dataset (from chapter 9 of handson-ml2).
    blob_centers = np.array(
        [[ 0.2, 2.3],
         [-1.5 , 2.3],
         [-2.8, 1.8],
         [-2.8, 2.8],
         [-2.8, 1.3]])
    blob_std = np.array([0.4, 0.3, 0.1, 0.1, 0.1])
    X, y = make_blobs(n_samples=2000, centers=blob_centers,
                      cluster_std=blob_std, random_state=7)
    geron_data = True
if 1:
    # two off-diagonal blobs
    X1, _ = make_blobs(n_samples=1000, centers=((4, -4), (0, 0)), random_state=42)
    # Shear/rotate the blobs with a fixed linear transform so they are elongated.
    X1 = X1.dot(np.array([[0.374, 0.95], [0.732, 0.598]]))
    # three spherical blobs
    blob_centers = np.array(
        [[ -4, 1],
         [-4 , 3],
         [-4, -2]])
    s = 0.5
    blob_std = np.array([s, s, s])
    X2, _ = make_blobs(n_samples=1000, centers=blob_centers,
                       cluster_std=blob_std, random_state=7)
    # Stack both groups into a single 2000x2 sample matrix.
    X = np.r_[X1, X2]
    geron_data= False
# Fit k-means for k = 1..9 once; every later analysis reuses these models.
kmeans_per_k = [KMeans(n_clusters=k, random_state=42).fit(X)
                for k in range(1, 10)]
# Inertia (distortion) for k = 2..9 -- note the [1:] skips the k=1 model,
# matching the range(2, 10) x-axis below.
inertias = [model.inertia_ for model in kmeans_per_k[1:]]

# Elbow plot: distortion vs number of clusters.
plt.figure(figsize=(8, 3))
plt.plot(range(2, 10), inertias, "bo-")
plt.xlabel("$k$", fontsize=14)
plt.ylabel("Distortion", fontsize=14)
if geron_data:
    # The elbow annotation only makes sense for Geron's dataset, where the
    # bend sits at k=4 (inertias[3] is for that k).
    plt.annotate('Elbow',
                 xy=(4, inertias[3]),
                 xytext=(0.55, 0.55),
                 textcoords='figure fraction',
                 fontsize=16,
                 arrowprops=dict(facecolor='black', shrink=0.1)
                 )
#plt.axis([1, 8.5, 0, 1300])
plt.tight_layout()
plt.savefig("../figures/kmeans_distortion_vs_k.pdf", dpi=300)
plt.show()
# Mean silhouette score for k = 2..9 (silhouette is undefined for k=1,
# hence the [1:] slice skipping the first model).
silhouette_scores = [silhouette_score(X, model.labels_)
                     for model in kmeans_per_k[1:]]

plt.figure(figsize=(8, 3))
plt.plot(range(2, 10), silhouette_scores, "bo-")
plt.xlabel("$k$", fontsize=14)
plt.ylabel("Silhouette score", fontsize=14)
#plt.axis([1.8, 8.5, 0.55, 0.7])
plt.tight_layout()
plt.savefig("../figures/kmeans_silhouette_vs_k.pdf", dpi=300)
plt.show()
##########
from sklearn.metrics import silhouette_samples
from matplotlib.ticker import FixedLocator, FixedFormatter

# Silhouette diagrams: one subplot per k in {3,4,5,6}, each showing the
# sorted per-sample silhouette coefficients grouped by cluster.
plt.figure(figsize=(11, 9))
for k in (3, 4, 5, 6):
    plt.subplot(2, 2, k - 2)

    y_pred = kmeans_per_k[k - 1].labels_
    silhouette_coefficients = silhouette_samples(X, y_pred)

    # Vertical gap (in samples) between consecutive cluster bands.
    padding = len(X) // 30
    pos = padding
    ticks = []
    cmap = cm.get_cmap("Pastel2")
    colors = [cmap(i) for i in range(k)]
    for i in range(k):
        # Sorted coefficients of cluster i drawn as one horizontal band.
        coeffs = silhouette_coefficients[y_pred == i]
        coeffs.sort()

        color = mpl.cm.Spectral(i / k)
        #color = colors[i]
        plt.fill_betweenx(np.arange(pos, pos + len(coeffs)), 0, coeffs,
                          facecolor=color, edgecolor=color, alpha=0.7)
        #cmap = "Pastel2"
        #plt.fill_betweenx(np.arange(pos, pos + len(coeffs)), 0, coeffs,
        #                  cmap=cmap, alpha=0.7)
        # Tick at the middle of the band, labeled with the cluster index.
        ticks.append(pos + len(coeffs) // 2)
        pos += len(coeffs) + padding
    plt.gca().yaxis.set_major_locator(FixedLocator(ticks))
    plt.gca().yaxis.set_major_formatter(FixedFormatter(range(k)))
    # Label only the outer edges of the 2x2 grid.
    if k in (3, 5):
        plt.ylabel("Cluster")

    if k in (5, 6):
        plt.gca().set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
        plt.xlabel("Silhouette Coefficient")
    else:
        plt.tick_params(labelbottom=False)

    # Dashed red line marks the mean silhouette score for this k
    # (silhouette_scores starts at k=2, hence the k-2 offset).
    score = silhouette_scores[k - 2]
    plt.axvline(x=score, color="red", linestyle="--")
    plt.title("$k={}, score={:0.2f}$".format(k, score), fontsize=16)

plt.tight_layout()
plt.savefig("../figures/kmeans_silhouette_diagram.pdf", dpi=300)
plt.show()
##########
def plot_data(X):
    """Scatter the raw 2-D samples as small black dots on the current axes."""
    xs, ys = X[:, 0], X[:, 1]
    plt.plot(xs, ys, 'k.', markersize=2)
def plot_centroids(centroids, weights=None, circle_color='w', cross_color='k'):
    """Overlay cluster centroids as a circle with a cross on the current axes.

    When weights are given, centroids whose weight is below one tenth of the
    maximum weight are hidden.
    """
    if weights is not None:
        keep = weights > weights.max() / 10
        centroids = centroids[keep]
    xs, ys = centroids[:, 0], centroids[:, 1]
    # Circle underneath, cross on top (zorder 10 then 11).
    plt.scatter(xs, ys,
                marker='o', s=30, linewidths=8,
                color=circle_color, zorder=10, alpha=0.9)
    plt.scatter(xs, ys,
                marker='x', s=50, linewidths=50,
                color=cross_color, zorder=11, alpha=1)
def plot_decision_boundaries(clusterer, X, K, resolution=1000):
    """Draw the clusterer's Voronoi regions over the data on the current axes.

    Rasterizes the bounding box of X on a resolution x resolution grid,
    predicts the cluster of each grid point, and shows filled regions with
    black boundary contours, the data points, and the centroids.
    K is used only for the title (and the commented-out color experiments).
    """
    # Pad the data's bounding box slightly so points don't sit on the edge.
    mins = X.min(axis=0) - 0.1
    maxs = X.max(axis=0) + 0.1
    xx, yy = np.meshgrid(np.linspace(mins[0], maxs[0], resolution),
                         np.linspace(mins[1], maxs[1], resolution))
    # Predict a cluster label for every grid point, then reshape to the grid.
    Z = clusterer.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)

    print(np.unique(Z))
    #cmap = [mpl.cm.Spectral( (i / K)) for i in range(K)]
    cmap ="Pastel2"
    #cmap = mpl.cm.Spectral(K)
    plt.contourf(Z, extent=(mins[0], maxs[0], mins[1], maxs[1]),cmap=cmap)
    ##cmap = cm.get_cmap("Pastel2")
    #colors = [cmap(i) for i in range(K)]
    #print(colors)
    #plt.contourf(Z, extent=(mins[0], maxs[0], mins[1], maxs[1]),colors=colors)
    # Black contour lines mark the region boundaries.
    plt.contour(Z, extent=(mins[0], maxs[0], mins[1], maxs[1]),
                linewidths=1, colors='k')
    plot_data(X)
    plot_centroids(clusterer.cluster_centers_)
    #K, D = clusterer.cluster_centers_.shape
    plt.title(f'K={K}')
# Voronoi/decision-boundary view of the k=3..6 clusterings in a 2x2 grid.
plt.figure(figsize=(11, 9))
for k in (3, 4, 5, 6):
    plt.subplot(2, 2, k - 2)
    plot_decision_boundaries(kmeans_per_k[k-1], X, k)
plt.tight_layout()
plt.savefig("../figures/kmeans_silhouette_voronoi.pdf", dpi=300)
plt.show()

# Assign a few new points with the k=3 model and print their cluster labels.
X_new = np.array([[0, 2], [3, 2], [-3, 3], [-3, 2.5]])
clusterer = kmeans_per_k[3-1]
Z = clusterer.predict(X_new)
print(Z)
| |
import base64
import os
import sys
import time
from optparse import OptionParser
import internal
from internal.connect import Connection
from internal.listener import ConnectionListener, StatsListener
from internal.exception import NotConnectedException
def get_commands():
    """
    Return a list of commands available on a \link StompCLI \endlink (the command line interface
    to stomp.py)
    """
    # Public CLI commands are every StompCLI attribute that is not private
    # ('_'), not a listener callback ('on_'), and not the connection 'c'.
    return [
        name
        for name in dir(StompCLI)
        if not name.startswith('_') and not name.startswith('on_') and name != 'c'
    ]
class StompCLI(ConnectionListener):
"""
A command line interface to the stomp.py client. See \link stomp::internal::connect::Connection \endlink
for more information on establishing a connection to a stomp server.
"""
    def __init__(self, host='localhost', port=61613, user='', passcode=''):
        # Open a STOMP connection, register ourselves as the listener for
        # broker callbacks, and start the receiver loop.
        self.conn = Connection([(host, port)], user, passcode)
        self.conn.set_listener('', self)
        self.conn.start()
        # Names of the CLI commands exposed by this class.
        self.__commands = get_commands()
        # Id of the currently open transaction, or None when not in one.
        self.transaction_id = None
def __print_async(self, frame_type, headers, body):
"""
Utility function to print a message and setup the command prompt
for the next input
"""
print("\r \r", end='')
print(frame_type)
for header_key in headers.keys():
print('%s: %s' % (header_key, headers[header_key]))
print('')
print(body)
print('> ', end='')
sys.stdout.flush()
    def on_connecting(self, host_and_port):
        """
        \see ConnectionListener::on_connecting
        """
        # Socket is up: perform the STOMP CONNECT handshake, blocking until done.
        self.conn.connect(wait=True)
    def on_disconnected(self):
        """
        \see ConnectionListener::on_disconnected
        """
        # Inform the user; no automatic reconnect is attempted here.
        print("lost connection")
def on_message(self, headers, body):
"""
\see ConnectionListener::on_message
Special case: if the header 'filename' is present, the content is written out
as a file
"""
if 'filename' in headers:
content = base64.b64decode(body.encode())
if os.path.exists(headers['filename']):
fname = '%s.%s' % (headers['filename'], int(time.time()))
else:
fname = headers['filename']
f = open(fname, 'wb')
f.write(content)
f.close()
self.__print_async("MESSAGE", headers, "Saved file: %s" % fname)
else:
self.__print_async("MESSAGE", headers, body)
def on_error(self, headers, body):
"""
\see ConnectionListener::on_error
"""
self.__print_async("ERROR", headers, body)
def on_receipt(self, headers, body):
"""
\see ConnectionListener::on_receipt
"""
self.__print_async("RECEIPT", headers, body)
def on_connected(self, headers, body):
"""
\see ConnectionListener::on_connected
"""
self.__print_async("CONNECTED", headers, body)
def ack(self, args):
'''
Usage:
ack <message-id>
Required Parameters:
message-id - the id of the message being acknowledged
Description:
The command 'ack' is used to acknowledge consumption of a message from a subscription using client
acknowledgment. When a client has issued a 'subscribe' with the ack flag set to client, any messages
received from that destination will not be considered to have been consumed (by the server) until
the message has been acknowledged.
'''
if len(args) < 2:
print("Expecting: ack <message-id>")
elif not self.transaction_id:
self.conn.ack(headers = { 'message-id' : args[1] })
else:
self.conn.ack(headers = { 'message-id' : args[1] }, transaction=self.transaction_id)
def abort(self, args):
'''
Usage:
abort
Description:
Roll back a transaction in progress.
'''
if not self.transaction_id:
print("Not currently in a transaction")
else:
self.conn.abort(transaction = self.transaction_id)
self.transaction_id = None
def begin(self, args):
'''
Usage:
begin
Description:
Start a transaction. Transactions in this case apply to sending and acknowledging -
any messages sent or acknowledged during a transaction will be handled atomically based on the
transaction.
'''
if self.transaction_id:
print("Currently in a transaction (%s)" % self.transaction_id)
else:
self.transaction_id = self.conn.begin()
print('Transaction id: %s' % self.transaction_id)
def commit(self, args):
'''
Usage:
commit
Description:
Commit a transaction in progress.
'''
if not self.transaction_id:
print("Not currently in a transaction")
else:
print('Committing %s' % self.transaction_id)
self.conn.commit(transaction=self.transaction_id)
self.transaction_id = None
def disconnect(self, args):
'''
Usage:
disconnect
Description:
Gracefully disconnect from the server.
'''
try:
self.conn.disconnect()
except NotConnectedException:
pass # ignore if no longer connected
def send(self, args):
'''
Usage:
send <destination> <message>
Required Parameters:
destination - where to send the message
message - the content to send
Description:
Sends a message to a destination in the messaging system.
'''
if len(args) < 3:
print('Expecting: send <destination> <message>')
elif not self.transaction_id:
self.conn.send(destination=args[1], message=' '.join(args[2:]))
else:
self.conn.send(destination=args[1], message=' '.join(args[2:]), transaction=self.transaction_id)
def sendfile(self, args):
'''
Usage:
sendfile <destination> <filename>
Required Parameters:
destination - where to send the message
filename - the file to send
Description:
Sends a file to a destination in the messaging system.
'''
if len(args) < 3:
print('Expecting: sendfile <destination> <filename>')
elif not os.path.exists(args[2]):
print('File %s does not exist' % args[2])
else:
s = open(args[2], mode='rb').read()
msg = base64.b64encode(s).decode()
if not self.transaction_id:
self.conn.send(destination=args[1], message=msg, filename=args[2])
else:
self.conn.send(destination=args[1], message=msg, filename=args[2], transaction=self.transaction_id)
def subscribe(self, args):
'''
Usage:
subscribe <destination> [ack]
Required Parameters:
destination - the name to subscribe to
Optional Parameters:
ack - how to handle acknowledgements for a message; either automatically (auto) or manually (client)
Description:
Register to listen to a given destination. Like send, the subscribe command requires a destination
header indicating which destination to subscribe to. The ack parameter is optional, and defaults to
auto.
'''
if len(args) < 2:
print('Expecting: subscribe <destination> [ack]')
elif len(args) > 2:
print('Subscribing to "%s" with acknowledge set to "%s"' % (args[1], args[2]))
self.conn.subscribe(destination=args[1], ack=args[2])
else:
print('Subscribing to "%s" with auto acknowledge' % args[1])
self.conn.subscribe(destination=args[1], ack='auto')
def unsubscribe(self, args):
'''
Usage:
unsubscribe <destination>
Required Parameters:
destination - the name to unsubscribe from
Description:
Remove an existing subscription - so that the client no longer receive messages from that destination.
'''
if len(args) < 2:
print('Expecting: unsubscribe <destination>')
else:
print('Unsubscribing from "%s"' % args[1])
self.conn.unsubscribe(destination=args[1])
def stats(self, args):
'''
Usage:
stats [on|off]
Description:
Record statistics on messages sent, received, errors, etc. If no argument (on|off) is specified,
dump the current statistics.
'''
if len(args) < 2:
stats = self.conn.get_listener('stats')
if stats:
print(stats)
else:
print('No stats available')
elif args[1] == 'on':
self.conn.set_listener('stats', StatsListener())
elif args[1] == 'off':
self.conn.remove_listener('stats')
else:
print('Expecting: stats [on|off]')
def run(self, args):
'''
Usage:
run <filename>
Description:
Execute commands in a specified file
'''
if len(args) == 1:
print("Expecting: run <filename>")
elif not os.path.exists(args[1]):
print("File %s was not found" % args[1])
else:
filecommands = open(args[1]).read().split('\n')
for x in range(len(filecommands)):
split = filecommands[x].split()
if len(split) < 1:
continue
elif split[0] in self.__commands:
getattr(self, split[0])(split)
else:
print('Unrecognized command "%s" at line %s' % (split[0], x))
break
def help(self, args):
'''
Usage:
help [command]
Description:
Display info on a specified command, or a list of available commands
'''
if len(args) == 1:
print('Usage: help <command>, where command is one of the following:')
print(' ')
for f in self.__commands:
print('%s ' % f, end='')
print('')
return
elif not hasattr(self, args[1]):
print('There is no command "%s"' % args[1])
return
func = getattr(self, args[1])
if hasattr(func, '__doc__') and getattr(func, '__doc__') is not None:
print(func.__doc__)
else:
print('There is no help for command "%s"' % args[1])
man = help
def version(self, args):
print('Stomp.py Version %s.%s' % internal.__version__)
ver = version
def quit(self, args):
pass
exit = quit
def main():
    """Parse command-line options and run the interactive STOMP shell.

    Connects a StompCLI to the given host/port, then either executes a
    command file (-F) or reads commands from stdin until quit/exit/
    disconnect, EOF, or Ctrl-C. Always disconnects on the way out.
    """
    commands = get_commands()
    parser = OptionParser()
    parser.add_option('-H', '--host', type = 'string', dest = 'host', default = 'localhost',
                      help = 'Hostname or IP to connect to. Defaults to localhost if not specified.')
    parser.add_option('-P', '--port', type = int, dest = 'port', default = 61613,
                      help = 'Port providing stomp protocol connections. Defaults to 61613 if not specified.')
    parser.add_option('-U', '--user', type = 'string', dest = 'user', default = None,
                      help = 'Username for the connection')
    parser.add_option('-W', '--password', type = 'string', dest = 'password', default = None,
                      help = 'Password for the connection')
    parser.add_option('-F', '--file', type = 'string', dest = 'filename',
                      help = 'File containing commands to be executed, instead of prompting from the command prompt.')
    (options, args) = parser.parse_args()
    st = StompCLI(options.host, options.port, options.user, options.password)
    try:
        if not options.filename:
            # If the readline module is available, make command input easier
            try:
                import readline
                def stomp_completer(text, state):
                    # readline calls this with state = 0, 1, 2, ... until
                    # None is returned. NOTE(review): slicing commands by
                    # state assumes matching commands are contiguous in
                    # the list -- verify completion with multiple matches.
                    for command in commands[state:]:
                        if command.startswith(text):
                            return "%s " % command
                    return None
                readline.parse_and_bind("tab: complete")
                readline.set_completer(stomp_completer)
                readline.set_completer_delims("")
            except ImportError:
                pass # ignore unavailable readline module
            # Interactive read-eval loop.
            while True:
                line = input("\r> ")
                if not line or line.lstrip().rstrip() == '':
                    continue
                line = line.lstrip().rstrip()
                if line.startswith('quit') or line.startswith('exit') or line.startswith('disconnect'):
                    break
                split = line.split()
                command = split[0]
                if command in commands:
                    getattr(st, command)(split)
                else:
                    print('Unrecognized command')
        else:
            # Batch mode: delegate to the CLI's own 'run' command.
            st.run(['run', options.filename])
    except EOFError:
        pass
    except KeyboardInterrupt:
        pass
    finally:
        # Always attempt a clean disconnect, even after errors.
        st.disconnect(None)
#
# command line testing
#
# Entry point when invoked as a script.
if __name__ == '__main__':
    main()
| |
from __future__ import with_statement
from datetime import datetime, tzinfo
try:
import pytz
except ImportError:
pytz = None
from django.template import Node
from django.template import TemplateSyntaxError, Library
from note import timezone
register = Library()
# HACK: datetime is an old-style class, create a new-style equivalent
# so we can define additional attributes.
class datetimeobject(datetime, object):
    """``datetime`` subclass that can carry extra instance attributes.

    Used by ``do_timezone`` to attach ``convert_to_local_time`` to a
    converted value.
    """
    pass
@register.filter
def timetag(value):
    """Identity filter: return *value* unchanged."""
    return value
# Template filters
@register.filter
def localtime(value):
    """
    Converts a datetime to local time in the active time zone.
    This only makes sense within a {% localtime off %} block.
    """
    # Delegates to do_timezone, which handles naive datetimes and errors.
    return do_timezone(value, timezone.get_current_timezone())
@register.filter
def utc(value):
    """
    Converts a datetime to UTC.
    """
    # Delegates to do_timezone, which handles naive datetimes and errors.
    return do_timezone(value, timezone.utc)
@register.filter('timezone')
def do_timezone(value, arg):
    """
    Converts a datetime to local time in a given time zone.
    The argument must be an instance of a tzinfo subclass or a time zone name.
    If it is a time zone name, pytz is required.
    Naive datetimes are assumed to be in local time in the default time zone.

    Returns '' (empty string) for any non-datetime input or conversion
    failure, per the template-filter contract of never raising.
    """
    if not isinstance(value, datetime):
        return ''
    # Obtain a timezone-aware datetime
    try:
        if timezone.is_naive(value):
            default_timezone = timezone.get_default_timezone()
            value = timezone.make_aware(value, default_timezone)
    # Filters must never raise exceptions, and pytz' exceptions inherit
    # Exception directly, not a specific subclass. So catch everything.
    except Exception:
        return ''
    # Obtain a tzinfo instance
    if isinstance(arg, tzinfo):
        tz = arg
    # NOTE: ``basestring`` exists only on Python 2; on Python 3 this
    # branch would raise NameError instead of matching.
    elif isinstance(arg, basestring) and pytz is not None:
        try:
            tz = pytz.timezone(arg)
        except pytz.UnknownTimeZoneError:
            return ''
    else:
        return ''
    # Convert and prevent further conversion
    result = value.astimezone(tz)
    if hasattr(tz, 'normalize'):
        # available for pytz time zones
        result = tz.normalize(result)
    # HACK: the convert_to_local_time flag will prevent
    # automatic conversion of the value to local time.
    result = datetimeobject(result.year, result.month, result.day,
                            result.hour, result.minute, result.second,
                            result.microsecond, result.tzinfo)
    result.convert_to_local_time = False
    return result
# Template tags
class LocalTimeNode(Node):
    """
    Template node class used by ``localtime_tag``.
    """
    def __init__(self, nodelist, use_tz):
        self.nodelist = nodelist
        # NOTE(review): use_tz is stored but render() does not apply it;
        # confirm whether time-zone (de)activation was intentionally omitted.
        self.use_tz = use_tz
    def render(self, context):
        # Render the enclosed block verbatim.
        output = self.nodelist.render(context)
        return output
class TimezoneNode(Node):
    """
    Template node class used by ``timezone_tag``.
    """
    def __init__(self, nodelist, tz):
        self.nodelist = nodelist
        # tz is an unresolved FilterExpression; resolved against the
        # context at render time.
        self.tz = tz
    def render(self, context):
        # Activate the requested time zone only for this block's render.
        with timezone.override(self.tz.resolve(context)):
            output = self.nodelist.render(context)
        return output
class GetCurrentTimezoneNode(Node):
    """
    Template node class used by ``get_current_timezone_tag``.
    """
    def __init__(self, variable):
        # Name of the context variable to store the time zone name in.
        self.variable = variable
    def render(self, context):
        # Side effect only: sets the variable and renders nothing.
        context[self.variable] = timezone.get_current_timezone_name()
        return ''
@register.tag('localtime')
def localtime_tag(parser, token):
    """
    Forces or prevents conversion of datetime objects to local time,
    regardless of the value of ``settings.USE_TZ``.

    With no argument, conversion is forced on; otherwise the single
    argument must be ``on`` or ``off``.

    Sample usage::

        {% localtime off %}{{ value_in_utc }}{% endlocaltime %}
    """
    bits = token.split_contents()
    if len(bits) == 1:
        use_tz = True
    elif len(bits) == 2 and bits[1] in ('on', 'off'):
        use_tz = (bits[1] == 'on')
    else:
        raise TemplateSyntaxError("%r argument should be 'on' or 'off'" %
                                  bits[0])
    body = parser.parse(('endlocaltime',))
    parser.delete_first_token()
    return LocalTimeNode(body, use_tz)
@register.tag('timezone')
def timezone_tag(parser, token):
    """
    Enables a given time zone just for this block.

    The single argument may be a ``tzinfo`` instance, a time zone name
    (pytz required), or ``None`` to use the default time zone.

    Sample usage::

        {% timezone "Europe/Paris" %}
            It is {{ now }} in Paris.
        {% endtimezone %}
    """
    bits = token.split_contents()
    if len(bits) != 2:
        raise TemplateSyntaxError("'%s' takes one argument (timezone)" %
                                  bits[0])
    tz_expr = parser.compile_filter(bits[1])
    body = parser.parse(('endtimezone',))
    parser.delete_first_token()
    return TimezoneNode(body, tz_expr)
@register.tag("get_current_timezone")
def get_current_timezone_tag(parser, token):
    """
    Stores the name of the current time zone in the context.

    Usage::

        {% get_current_timezone as TIME_ZONE %}

    Fetches the currently active time zone and puts its name into the
    ``TIME_ZONE`` context variable.
    """
    args = token.contents.split()
    if len(args) == 3 and args[1] == 'as':
        return GetCurrentTimezoneNode(args[2])
    raise TemplateSyntaxError("'get_current_timezone' requires "
                              "'as variable' (got %r)" % args)
| |
from pytest import fixture, raises
from hoomd.conftest import pickling_check
from hoomd.pytest.dummy import DummyOperation, DummySimulation
from hoomd.operation import Operation
from hoomd.data.syncedlist import SyncedList, _PartialIsInstance
@fixture
def op_list():
    """Provide three independent DummyOperation instances."""
    return [DummyOperation() for _ in range(3)]
def test_init(op_list):
    """SyncedList construction: validation, conversion, iterable init."""
    def validate(x):
        return isinstance(x, DummyOperation)
    # Test automatic to_synced_list function generation
    synced_list = SyncedList(validation=validate)
    assert synced_list._validate == validate
    op = DummyOperation()
    # Default conversion is the identity.
    assert synced_list._to_synced_list_conversion(op) is op
    # Test specified to_synced_list
    def cpp_identity(x):
        return x._cpp_obj
    synced_list = SyncedList(validation=validate, to_synced_list=cpp_identity)
    assert synced_list._to_synced_list_conversion == cpp_identity
    op._cpp_obj = 2
    assert synced_list._to_synced_list_conversion(op) == 2
    # Test full initialization
    synced_list = SyncedList(validation=validate,
                             to_synced_list=cpp_identity,
                             iterable=op_list)
    assert len(synced_list._list) == 3
    assert all(op._added for op in synced_list)
@fixture
def synced_list_empty():
    """An empty SyncedList that accepts any Operation instance."""
    return SyncedList(_PartialIsInstance(Operation))
@fixture
def synced_list(synced_list_empty, op_list):
    """A SyncedList pre-populated with the three ops from ``op_list``."""
    synced_list_empty.extend(op_list)
    return synced_list_empty
def test_contains(synced_list_empty, op_list):
    """Membership reflects the backing list; foreign ops are excluded.

    Removes the leftover debug ``print`` of object ids that the original
    test emitted on every run.
    """
    for op in op_list:
        synced_list_empty._list.append(op)
        assert op in synced_list_empty
    new_op = DummyOperation()
    assert new_op not in synced_list_empty
def test_len(synced_list_empty, op_list):
    """len() tracks direct mutation of the backing ``_list``."""
    synced_list_empty._list.extend(op_list)
    assert len(synced_list_empty) == 3
    del synced_list_empty._list[1]
    assert len(synced_list_empty) == 2
def test_iter(synced_list, op_list):
    """Iteration yields the backing list's items, in order, by identity."""
    for actual, expected in zip(synced_list, synced_list._list):
        assert actual is expected
def test_getitem(synced_list):
    """Indexing and slicing mirror the backing list."""
    assert all([op is synced_list[i] for i, op in enumerate(synced_list)])
    assert synced_list[:] == synced_list._list
    assert synced_list[1:] == synced_list._list[1:]
def test_synced(synced_list):
    """_sync flips the _synced flag from False to True."""
    assert not synced_list._synced
    synced_list._sync(None, [])
    assert synced_list._synced
def test_attach_value(synced_list):
    """_attach_value only attaches ops once the list itself is synced."""
    op = DummyOperation()
    synced_list._attach_value(op)
    # Before sync: the op is added to the list but not attached.
    assert not op._attached
    assert op._added
    synced_list._sync(DummySimulation(), [])
    op = DummyOperation()
    synced_list._attach_value(op)
    # After sync: newly attached ops are attached immediately.
    assert op._attached
    assert op._added
def test_validate_or_error(synced_list):
    """_validate_or_error raises ValueError for non-Operation values."""
    with raises(ValueError):
        synced_list._validate_or_error(3)
    with raises(ValueError):
        synced_list._validate_or_error(None)
    with raises(ValueError):
        synced_list._validate_or_error("hello")
    # A valid Operation passes and returns truthy.
    assert synced_list._validate_or_error(DummyOperation())
def test_syncing(synced_list, op_list):
    """_sync populates the given sync list and attaches every member."""
    sync_list = []
    synced_list._sync(None, sync_list)
    assert len(sync_list) == 3
    assert all([op is op2 for op, op2 in zip(synced_list, sync_list)])
    assert all([op._attached for op in synced_list])
def test_unsync(synced_list, op_list):
    """_unsync detaches all members and drops the _synced_list attribute."""
    sync_list = []
    synced_list._sync(None, sync_list)
    synced_list._unsync()
    assert all([not op._attached for op in synced_list])
    assert not hasattr(synced_list, "_synced_list")
def test_delitem(synced_list):
    """del removes by index and slice, both before and after syncing."""
    old_op = synced_list[2]
    del synced_list[2]
    assert len(synced_list) == 2
    assert old_op not in synced_list
    assert not old_op._added
    synced_list.append(old_op)
    # Slice deletion removes (and un-adds) every op in the slice.
    old_ops = synced_list[1:]
    del synced_list[1:]
    assert len(synced_list) == 1
    assert all(old_op not in synced_list for old_op in old_ops)
    assert all(not old_op._added for old_op in old_ops)
    synced_list.extend(old_ops)
    # Test when attached: deletions must also detach and update sync_list.
    sync_list = []
    synced_list._sync(None, sync_list)
    old_op = synced_list[1]
    del synced_list[1]
    assert len(synced_list) == 2
    assert len(sync_list) == 2
    assert old_op not in synced_list
    assert all(old_op is not op for op in sync_list)
    assert not old_op._attached
    assert not old_op._added
    old_ops = synced_list[1:]
    del synced_list[1:]
    assert len(synced_list) == 1
    assert all(old_op not in synced_list for old_op in old_ops)
    assert all(not (old_op._added or old_op._attached) for old_op in old_ops)
def test_setitem(synced_list, op_list):
    """Index assignment bounds-checks and swaps add/attach state."""
    with raises(IndexError):
        synced_list[3]
    with raises(IndexError):
        synced_list[-4]
    new_op = DummyOperation()
    synced_list[1] = new_op
    assert new_op is synced_list[1]
    assert new_op._added
    # Check when attached
    sync_list = []
    # need a non-None dummy simulation; 1 works
    synced_list._sync(1, sync_list)
    new_op = DummyOperation()
    old_op = synced_list[1]
    synced_list[1] = new_op
    # The replaced op is fully released; the new one takes its slot.
    assert not (old_op._attached or old_op._added)
    assert new_op._attached and new_op._added
    assert sync_list[1] is new_op
def test_synced_iter(synced_list):
    """_synced_iter walks the synced list when sync state is present.

    Compares full lists instead of the original ``zip`` pairing: zip
    truncates to the shorter iterable, so a missing or extra yielded
    element would have gone undetected.
    """
    synced_list._simulation = None
    synced_list._synced_list = [1, 2, 3]
    assert list(synced_list._synced_iter()) == [1, 2, 3]
class OpInt(int):
    """Used to test SyncedList where item equality checks are needed."""
    def _attach(self):
        """Mark as attached (creates the attribute _attached checks)."""
        self._cpp_obj = None
    @property
    def _attached(self):
        # Attached iff _attach has run and _detach has not.
        return hasattr(self, '_cpp_obj')
    def _detach(self):
        """Undo _attach."""
        del self._cpp_obj
    def _add(self, simulation):
        """Mark as added to *simulation*."""
        self._simulation = simulation
    def _remove(self):
        """Undo _add."""
        del self._simulation
    @property
    def _added(self):
        # Added iff _add has run and _remove has not.
        return hasattr(self, '_simulation')
@fixture
def int_synced_list():
    """A SyncedList of OpInt values whose validation accepts any int.

    The original fixture declared a ``synced_list_empty`` dependency it
    never used; that fixture has no side effects, so the dependency is
    dropped.
    """
    return SyncedList(_PartialIsInstance(int),
                      iterable=[OpInt(i) for i in [1, 2, 3]])
def test_sync(int_synced_list):
    """append works before and after syncing; sync_list stays in step."""
    int_synced_list.append(OpInt(4))
    assert len(int_synced_list) == 4
    assert int_synced_list[-1] == 4
    # Test attached
    sync_list = []
    int_synced_list._sync(None, sync_list)
    int_synced_list.append(OpInt(5))
    assert len(int_synced_list) == 5
    assert len(sync_list) == 5
    assert int_synced_list[-1] == 5
def test_insert(int_synced_list):
    """insert places a new member at the requested index, synced or not.

    Uses the ``index`` variable consistently (the original passed the
    literal ``1`` in the first insert despite defining ``index``).
    """
    index = 1
    int_synced_list.insert(index, OpInt(4))
    assert len(int_synced_list) == 4
    assert int_synced_list[index] == 4
    # Test attached
    sync_list = []
    int_synced_list._sync(None, sync_list)
    int_synced_list.insert(index, OpInt(5))
    assert len(int_synced_list) == 5
    assert len(sync_list) == 5
    assert int_synced_list[index] == 5
def test_extend(int_synced_list):
    """extend appends all items, before and after syncing."""
    oplist = [OpInt(i) for i in range(4, 7)]
    int_synced_list.extend(oplist)
    assert len(int_synced_list) == 6
    assert int_synced_list[3:] == oplist
    # Test attached
    oplist = [OpInt(i) for i in range(7, 10)]
    sync_list = []
    int_synced_list._sync(None, sync_list)
    int_synced_list.extend(oplist)
    assert len(int_synced_list) == 9
    assert len(sync_list) == 9
    assert sync_list[6:] == oplist
    assert int_synced_list[6:] == oplist
def test_clear(int_synced_list):
    """clear empties both the list and, when synced, the sync list."""
    int_synced_list.clear()
    assert len(int_synced_list) == 0
    oplist = [OpInt(i) for i in range(1, 4)]
    int_synced_list.extend(oplist)
    # Test attached
    sync_list = []
    int_synced_list._sync(None, sync_list)
    int_synced_list.clear()
    assert len(int_synced_list) == 0
    assert len(sync_list) == 0
    # Clearing while synced must also detach every removed member.
    assert all([not op._attached for op in oplist])
def test_remove(int_synced_list):
    """remove drops one member by equality, before and after syncing."""
    int_synced_list.clear()
    oplist = [OpInt(i) for i in range(1, 4)]
    int_synced_list.extend(oplist)
    int_synced_list.remove(oplist[1])
    assert len(int_synced_list) == 2
    assert oplist[1] not in int_synced_list
    # Test attached
    sync_list = []
    int_synced_list._sync(None, sync_list)
    int_synced_list.remove(oplist[0])
    assert len(int_synced_list) == 1
    assert len(sync_list) == 1
    assert not oplist[0]._attached
    assert oplist[0] not in int_synced_list
    assert oplist[0] not in sync_list
def test_without_attaching():
    """With attach_members=False, members are never added or attached."""
    synced_list = SyncedList(_PartialIsInstance(int),
                             iterable=[OpInt(i) for i in [1, 2, 3]],
                             attach_members=False)
    synced_list.append(OpInt(4))
    assert len(synced_list) == 4
    assert synced_list[-1] == 4
    # Test attached
    sync_list = []
    synced_list._sync(None, sync_list)
    synced_list.append(OpInt(5))
    assert len(synced_list) == 5
    assert len(sync_list) == 5
    assert synced_list[-1] == 5
    # Even after _sync, member lifecycle hooks must not have fired.
    assert all(not op._added for op in synced_list)
    assert all(not op._attached for op in synced_list)
def test_pickling(synced_list):
    """A populated SyncedList survives a pickle round-trip."""
    pickling_check(synced_list)
| |
# -*- encoding: utf-8 -*-
from __future__ import print_function
import os
import signal
import time
from HPOlibConfigSpace import configuration_space
from autosklearn.data.abstract_data_manager import AbstractDataManager
from autosklearn.data.competition_data_manager import CompetitionDataManager
from autosklearn.evaluation import CVEvaluator, HoldoutEvaluator, \
NestedCVEvaluator, TestEvaluator, get_new_run_num
from autosklearn.util.pipeline import get_configuration_space
from autosklearn.util import Backend
def store_and_or_load_data(dataset_info, outputdir):
    """Load a cached data manager from *outputdir*, or build and cache one.

    :param dataset_info: dataset identifier/path understood by
        CompetitionDataManager
    :param outputdir: directory the Backend uses for persistence
    :return: the (possibly freshly created) data manager
    """
    backend = Backend(None, outputdir)
    try:
        D = backend.load_datamanager()
    except IOError:
        D = None
        # Datamanager probably doesn't exist
    if D is None:
        # Cache miss: build from raw competition data and persist it.
        D = CompetitionDataManager(dataset_info, encode_labels=True)
        backend.save_datamanager(D)
    return D
# signal handler seem to work only if they are globally defined
# to give it access to the evaluator class, the evaluator name has to
# be a global name. It's not the cleanest solution, but works for now.
evaluator = None
def signal_handler(signum, frame):
    """Abort training on a signal: flush evaluator results, then exit.

    NOTE(review): assumes ``evaluator`` has been assigned by one of the
    make_mode_* functions before the signal arrives -- if it is still
    None this raises AttributeError. finish_up() is called here without
    arguments, unlike the holdout/cv paths; confirm that call form.
    """
    print('Received signal %s. Aborting Training!' % str(signum))
    global evaluator
    evaluator.finish_up()
    exit(0)
def empty_signal_handler(signum, frame):
    """Signal handler installed once shutdown has already begun.

    Acknowledges the signal without aborting again, so a second SIGTERM
    during finish-up does not re-enter ``signal_handler``.

    :param signum: number of the received signal
    :param frame: current stack frame (unused)
    """
    # Typo fix in the user-facing message: "alread" -> "already".
    print('Received Signal %s, but already finishing up!' % str(signum))
signal.signal(15, signal_handler)
def _get_base_dict():
return {
'with_predictions': True,
'output_y_test': True,
}
def make_mode_holdout(data, seed, configuration, num_run, output_dir):
    """Fit and evaluate *configuration* on a single holdout split."""
    # Bind the module-level ``evaluator`` so signal_handler can reach it.
    global evaluator
    evaluator = HoldoutEvaluator(data, output_dir, configuration,
                                 seed=seed,
                                 num_run=num_run,
                                 all_scoring_functions=False,
                                 **_get_base_dict())
    loss, opt_pred, valid_pred, test_pred = evaluator.fit_predict_and_loss()
    evaluator.finish_up(loss, opt_pred, valid_pred, test_pred)
def make_mode_holdout_iterative_fit(data, seed, configuration, num_run,
                                    output_dir):
    """Holdout evaluation using the evaluator's iterative fitting loop."""
    global evaluator
    evaluator = HoldoutEvaluator(data, output_dir, configuration,
                                 seed=seed,
                                 num_run=num_run,
                                 all_scoring_functions=False,
                                 **_get_base_dict())
    evaluator.iterative_fit()
    # From here on, further SIGTERMs must not re-trigger the abort path.
    signal.signal(15, empty_signal_handler)
    # NOTE(review): finish_up() is called without the loss/prediction
    # arguments the other modes pass -- confirm the evaluator allows this.
    evaluator.finish_up()
def make_mode_test(data, seed, configuration, metric, output_dir):
    """Evaluate *configuration* on the test set and print a ParamILS line."""
    global evaluator
    evaluator = TestEvaluator(data, output_dir,
                              configuration,
                              seed=seed,
                              all_scoring_functions=True,
                              with_predictions=True)
    evaluator.fit()
    # Disable the abort handler once fitting is done.
    signal.signal(15, empty_signal_handler)
    scores, _, _, _ = evaluator.predict()
    duration = time.time() - evaluator.starttime
    score = scores[metric]
    # Report every computed metric in the additional info field.
    additional_run_info = ';'.join(['%s: %s' % (m_, value)
                                    for m_, value in scores.items()])
    additional_run_info += ';' + 'duration: ' + str(duration)
    print('Result for ParamILS: %s, %f, 1, %f, %d, %s' %
          ('SAT', abs(duration), score, evaluator.seed,
           additional_run_info))
def make_mode_cv(data, seed, configuration, num_run, folds, output_dir):
    """Fit and evaluate *configuration* with *folds*-fold cross-validation."""
    global evaluator
    evaluator = CVEvaluator(data, output_dir, configuration,
                            cv_folds=folds,
                            seed=seed,
                            num_run=num_run,
                            all_scoring_functions=False,
                            **_get_base_dict())
    loss, opt_pred, valid_pred, test_pred = evaluator.fit_predict_and_loss()
    evaluator.finish_up(loss, opt_pred, valid_pred, test_pred)
def make_mode_partial_cv(data, seed, configuration, num_run, metric, fold,
                         folds, output_dir):
    """Evaluate a single CV *fold* and print a ParamILS result line."""
    global evaluator
    evaluator = CVEvaluator(data, output_dir, configuration,
                            cv_folds=folds,
                            seed=seed,
                            num_run=num_run,
                            all_scoring_functions=False,
                            **_get_base_dict())
    loss, opt_pred, valid_pred, test_pred = \
        evaluator.partial_fit_predict_and_loss(fold)
    duration = time.time() - evaluator.starttime
    additional_run_info = 'duration: ' + str(duration)
    print(metric, loss, additional_run_info)
    print('Result for ParamILS: %s, %f, 1, %f, %d, %s' %
          ('SAT', abs(duration), loss, evaluator.seed,
           additional_run_info))
def make_mode_nested_cv(data, seed, configuration, num_run, inner_folds,
                        outer_folds, output_dir):
    """Fit and evaluate with nested cross-validation (inner x outer folds)."""
    global evaluator
    evaluator = NestedCVEvaluator(data, output_dir, configuration,
                                  inner_cv_folds=inner_folds,
                                  outer_cv_folds=outer_folds,
                                  seed=seed,
                                  all_scoring_functions=False,
                                  num_run=num_run,
                                  **_get_base_dict())
    loss, opt_pred, valid_pred, test_pred = evaluator.fit_predict_and_loss()
    evaluator.finish_up(loss, opt_pred, valid_pred, test_pred)
def main(dataset_info, mode, seed, params,
         mode_args=None, output_dir=None):
    """This command line interface has three different operation modes:

    * CV: useful for the Tweakathon
    * 1/3 test split: useful to evaluate a configuration
    * cv on 2/3 train split: useful to optimize hyperparameters in a training
      mode before testing a configuration on the 1/3 test split.

    It must by no means be used for the Auto part of the competition!

    :param dataset_info: dataset path/identifier or an AbstractDataManager
    :param mode: one of 'holdout', 'holdout-iterative-fit', 'test', 'cv',
        'partial-cv', 'nested-cv'
    :param seed: random seed (stringly typed input accepted); defaults to 1
    :param params: dict of hyperparameter strings, or None
    :param mode_args: per-mode extras, e.g. {'folds': ...}
    :param output_dir: where results are written; defaults to the cwd
    """
    if mode_args is None:
        mode_args = {}
    if output_dir is None:
        output_dir = os.getcwd()
    # Accept either a ready data manager or something to load/build one from.
    if not isinstance(dataset_info, AbstractDataManager):
        D = store_and_or_load_data(dataset_info=dataset_info,
                                   outputdir=output_dir)
    else:
        D = dataset_info
    metric = D.info['metric']
    num_run = None
    if mode != 'test':
        num_run = get_new_run_num()
    if params is not None:
        # Hyperparameters arrive as strings; coerce to int, then float,
        # and fall back to the raw string (e.g. categorical choices).
        for key in params:
            try:
                params[key] = int(params[key])
            except Exception:
                try:
                    params[key] = float(params[key])
                except Exception:
                    pass
        cs = get_configuration_space(D.info)
        configuration = configuration_space.Configuration(cs, params)
    else:
        configuration = None
    if seed is not None:
        # Tolerate seeds passed as float-looking strings (e.g. "3.0").
        seed = int(float(seed))
    else:
        seed = 1
    global evaluator
    # Dispatch to the requested evaluation mode.
    if mode == 'holdout':
        make_mode_holdout(D, seed, configuration, num_run, output_dir)
    elif mode == 'holdout-iterative-fit':
        make_mode_holdout_iterative_fit(D, seed, configuration, num_run,
                                        output_dir)
    elif mode == 'test':
        make_mode_test(D, seed, configuration, metric, output_dir)
    elif mode == 'cv':
        make_mode_cv(D, seed, configuration, num_run, mode_args['folds'],
                     output_dir)
    elif mode == 'partial-cv':
        make_mode_partial_cv(D, seed, configuration, num_run,
                             metric, mode_args['fold'], mode_args['folds'],
                             output_dir)
    elif mode == 'nested-cv':
        make_mode_nested_cv(D, seed, configuration, num_run,
                            mode_args['inner_folds'], mode_args['outer_folds'],
                            output_dir)
    else:
        raise ValueError('Must choose a legal mode.')
| |
# Copyright 2012 Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for Windows Server 2012 OpenStack Cinder volume driver
"""
import os
import shutil
import tempfile
from oslo.config import cfg
import mox
from cinder import test
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.tests.windows import db_fakes
from cinder.volume import configuration as conf
from cinder.volume.drivers.windows import constants
from cinder.volume.drivers.windows import vhdutils
from cinder.volume.drivers.windows import windows
from cinder.volume.drivers.windows import windows_utils
CONF = cfg.CONF
class TestWindowsDriver(test.TestCase):
    def __init__(self, method):
        # Delegates straight to TestCase; kept only for explicitness.
        super(TestWindowsDriver, self).__init__(method)
    def setUp(self):
        """Create a temp LUN directory and a driver wired to stubbed utils."""
        self.lun_path_tempdir = tempfile.mkdtemp()
        # Remove the temp dir even if the test fails.
        self.addCleanup(shutil.rmtree, self.lun_path_tempdir)
        super(TestWindowsDriver, self).setUp()
        self.flags(
            windows_iscsi_lun_path=self.lun_path_tempdir,
        )
        self._setup_stubs()
        configuration = conf.Configuration(None)
        configuration.append_config_values(windows.windows_opts)
        self._driver = windows.WindowsDriver(configuration=configuration)
        self._driver.do_setup({})
    def _setup_stubs(self):
        # Neutralize WindowsUtils.__init__ so constructing the driver does
        # not touch real WMI/OS facilities.
        # NOTE(review): this rebinds the class attribute globally and is
        # never restored -- it leaks into other tests in the same process.
        def fake_wutils__init__(self):
            pass
        windows_utils.WindowsUtils.__init__ = fake_wutils__init__
def fake_local_path(self, volume):
return os.path.join(CONF.windows_iscsi_lun_path,
str(volume['name']) + ".vhd")
    def test_check_for_setup_errors(self):
        """Driver setup check delegates to WindowsUtils."""
        drv = self._driver
        self.mox.StubOutWithMock(windows_utils.WindowsUtils,
                                 'check_for_setup_error')
        # Record the expected call, then replay and exercise the driver.
        windows_utils.WindowsUtils.check_for_setup_error()
        self.mox.ReplayAll()
        drv.check_for_setup_error()
    def test_create_volume(self):
        """create_volume forwards path, name and size to WindowsUtils."""
        drv = self._driver
        vol = db_fakes.get_fake_volume_info()
        self.stubs.Set(drv, 'local_path', self.fake_local_path)
        self.mox.StubOutWithMock(windows_utils.WindowsUtils,
                                 'create_volume')
        windows_utils.WindowsUtils.create_volume(self.fake_local_path(vol),
                                                 vol['name'], vol['size'])
        self.mox.ReplayAll()
        drv.create_volume(vol)
    def test_delete_volume(self):
        """delete_volume simple test case."""
        drv = self._driver
        vol = db_fakes.get_fake_volume_info()
        # local_path is mocked (not stubbed) so the expected call order
        # is verified by mox.
        self.mox.StubOutWithMock(drv, 'local_path')
        drv.local_path(vol).AndReturn(self.fake_local_path(vol))
        self.mox.StubOutWithMock(windows_utils.WindowsUtils,
                                 'delete_volume')
        windows_utils.WindowsUtils.delete_volume(vol['name'],
                                                 self.fake_local_path(vol))
        self.mox.ReplayAll()
        drv.delete_volume(vol)
def test_create_snapshot(self):
drv = self._driver
self.mox.StubOutWithMock(windows_utils.WindowsUtils,
'create_snapshot')
volume = db_fakes.get_fake_volume_info()
snapshot = db_fakes.get_fake_snapshot_info()
self.stubs.Set(drv, 'local_path', self.fake_local_path(snapshot))
windows_utils.WindowsUtils.create_snapshot(volume['name'],
snapshot['name'])
self.mox.ReplayAll()
drv.create_snapshot(snapshot)
    def test_create_volume_from_snapshot(self):
        """create_volume_from_snapshot delegates to WindowsUtils."""
        drv = self._driver
        snapshot = db_fakes.get_fake_snapshot_info()
        volume = db_fakes.get_fake_volume_info()
        self.mox.StubOutWithMock(windows_utils.WindowsUtils,
                                 'create_volume_from_snapshot')
        # NOTE(review): the whole ``volume`` dict is recorded as the first
        # argument -- confirm the driver passes the same object and not
        # e.g. volume['name'].
        windows_utils.WindowsUtils.\
            create_volume_from_snapshot(volume, snapshot['name'])
        self.mox.ReplayAll()
        drv.create_volume_from_snapshot(volume, snapshot)
    def test_delete_snapshot(self):
        """delete_snapshot forwards the snapshot name to WindowsUtils."""
        drv = self._driver
        snapshot = db_fakes.get_fake_snapshot_info()
        self.mox.StubOutWithMock(windows_utils.WindowsUtils,
                                 'delete_snapshot')
        windows_utils.WindowsUtils.delete_snapshot(snapshot['name'])
        self.mox.ReplayAll()
        drv.delete_snapshot(snapshot)
    def test_create_export(self):
        """create_export builds an iSCSI target and returns its name."""
        drv = self._driver
        volume = db_fakes.get_fake_volume_info()
        initiator_name = "%s%s" % (CONF.iscsi_target_prefix, volume['name'])
        self.mox.StubOutWithMock(windows_utils.WindowsUtils,
                                 'create_iscsi_target')
        windows_utils.WindowsUtils.create_iscsi_target(initiator_name,
                                                       mox.IgnoreArg())
        self.mox.StubOutWithMock(windows_utils.WindowsUtils,
                                 'add_disk_to_target')
        windows_utils.WindowsUtils.add_disk_to_target(volume['name'],
                                                      initiator_name)
        self.mox.ReplayAll()
        export_info = drv.create_export(None, volume)
        # The provider location reported back must be the target name.
        self.assertEqual(export_info['provider_location'], initiator_name)
def test_initialize_connection(self):
    """initialize_connection associates the initiator and queries host info."""
    drv = self._driver
    volume = db_fakes.get_fake_volume_info()
    initiator_name = "%s%s" % (CONF.iscsi_target_prefix, volume['name'])
    connector = db_fakes.get_fake_connector_info()
    self.mox.StubOutWithMock(windows_utils.WindowsUtils,
                             'associate_initiator_with_iscsi_target')
    # NOTE(review): the recorded call passes the provider location first
    # and the derived initiator name second -- confirm the argument order
    # against the driver implementation.
    windows_utils.WindowsUtils.associate_initiator_with_iscsi_target(
        volume['provider_location'], initiator_name, )
    self.mox.StubOutWithMock(windows_utils.WindowsUtils,
                             'get_host_information')
    windows_utils.WindowsUtils.get_host_information(
        volume, volume['provider_location'])
    self.mox.ReplayAll()
    drv.initialize_connection(volume, connector)
def test_terminate_connection(self):
    """Terminating a connection removes the initiator from the target."""
    driver = self._driver
    fake_volume = db_fakes.get_fake_volume_info()
    target = "%s%s" % (CONF.iscsi_target_prefix, fake_volume['name'])
    fake_connector = db_fakes.get_fake_connector_info()
    self.mox.StubOutWithMock(windows_utils.WindowsUtils,
                             'delete_iscsi_target')
    windows_utils.WindowsUtils.delete_iscsi_target(
        target, fake_volume['provider_location'])
    self.mox.ReplayAll()
    driver.terminate_connection(fake_volume, fake_connector)
def test_ensure_export(self):
    """ensure_export recreates the target and re-adds the volume disk."""
    drv = self._driver
    volume = db_fakes.get_fake_volume_info()
    initiator_name = "%s%s" % (CONF.iscsi_target_prefix, volume['name'])
    self.mox.StubOutWithMock(windows_utils.WindowsUtils,
                             'create_iscsi_target')
    # Unlike test_create_export, the second argument is expected to be
    # exactly True here (the "ensure" flag).
    windows_utils.WindowsUtils.create_iscsi_target(initiator_name, True)
    self.mox.StubOutWithMock(windows_utils.WindowsUtils,
                             'add_disk_to_target')
    windows_utils.WindowsUtils.add_disk_to_target(volume['name'],
                                                  initiator_name)
    self.mox.ReplayAll()
    drv.ensure_export(None, volume)
def test_remove_export(self):
    """Removing an export tears down the matching iSCSI target."""
    driver = self._driver
    fake_volume = db_fakes.get_fake_volume_info()
    iscsi_target = fake_volume['provider_location']
    self.mox.StubOutWithMock(windows_utils.WindowsUtils,
                             'remove_iscsi_target')
    windows_utils.WindowsUtils.remove_iscsi_target(iscsi_target)
    self.mox.ReplayAll()
    driver.remove_export(None, fake_volume)
def test_copy_image_to_volume(self):
    """resize_image common case usage.

    Expected sequence: fetch the image into a temp VHD, take the disk
    offline, convert the temp VHD onto the volume path, resize it to the
    volume size, bring the disk back online, and clean up the temp file.
    """
    drv = self._driver
    volume = db_fakes.get_fake_volume_info()
    # Force a fixed-size VHD so the convert expectation is deterministic.
    fake_get_supported_type = lambda x: constants.VHD_TYPE_FIXED
    self.stubs.Set(drv, 'local_path', self.fake_local_path)
    self.stubs.Set(windows_utils.WindowsUtils, 'get_supported_vhd_type',
                   fake_get_supported_type)
    self.mox.StubOutWithMock(os, 'makedirs')
    self.mox.StubOutWithMock(os, 'unlink')
    self.mox.StubOutWithMock(image_utils, 'create_temporary_file')
    self.mox.StubOutWithMock(image_utils, 'fetch_to_vhd')
    self.mox.StubOutWithMock(vhdutils.VHDUtils, 'convert_vhd')
    self.mox.StubOutWithMock(vhdutils.VHDUtils, 'resize_vhd')
    self.mox.StubOutWithMock(windows_utils.WindowsUtils,
                             'change_disk_status')
    fake_temp_path = r'C:\fake\temp\file'
    # Mirror the driver's own conditional: the makedirs expectation is only
    # recorded when the conversion directory does not exist yet.
    if (CONF.image_conversion_dir and not
            os.path.exists(CONF.image_conversion_dir)):
        os.makedirs(CONF.image_conversion_dir)
    image_utils.create_temporary_file(suffix='.vhd').AndReturn(
        fake_temp_path)
    fake_volume_path = self.fake_local_path(volume)
    image_utils.fetch_to_vhd(None, None, None,
                             fake_temp_path,
                             mox.IgnoreArg())
    # Disk is toggled offline before conversion and back online afterwards.
    windows_utils.WindowsUtils.change_disk_status(volume['name'],
                                                  mox.IsA(bool))
    os.unlink(mox.IsA(str))
    vhdutils.VHDUtils.convert_vhd(fake_temp_path,
                                  fake_volume_path,
                                  constants.VHD_TYPE_FIXED)
    # Volume sizes are in GB; the VHD is resized to size << 30 bytes.
    vhdutils.VHDUtils.resize_vhd(fake_volume_path,
                                 volume['size'] << 30)
    windows_utils.WindowsUtils.change_disk_status(volume['name'],
                                                  mox.IsA(bool))
    os.unlink(mox.IsA(str))
    self.mox.ReplayAll()
    drv.copy_image_to_volume(None, volume, None, None)
def _test_copy_volume_to_image(self, supported_format):
    """Exercise copy_volume_to_image for the given VHD flavour.

    Args:
        supported_format: 'vhd' or 'vhdx'; when 'vhdx' the upload path
            has its trailing 'x' stripped before conversion/upload.
    """
    drv = self._driver
    vol = db_fakes.get_fake_volume_info()
    image_meta = db_fakes.get_fake_image_meta()
    fake_get_supported_format = lambda x: supported_format
    self.stubs.Set(drv, 'local_path', self.fake_local_path)
    self.stubs.Set(windows_utils.WindowsUtils, 'get_supported_format',
                   fake_get_supported_format)
    self.mox.StubOutWithMock(fileutils, 'delete_if_exists')
    self.mox.StubOutWithMock(image_utils, 'upload_volume')
    self.mox.StubOutWithMock(windows_utils.WindowsUtils, 'copy_vhd_disk')
    self.mox.StubOutWithMock(vhdutils.VHDUtils, 'convert_vhd')
    # Temp copy is named <image id>.<format> under the conversion dir.
    temp_vhd_path = os.path.join(CONF.image_conversion_dir,
                                 str(image_meta['id']) + "." +
                                 supported_format)
    upload_image = temp_vhd_path
    windows_utils.WindowsUtils.copy_vhd_disk(self.fake_local_path(vol),
                                             temp_vhd_path)
    if supported_format == 'vhdx':
        # Glance stores 'vpc' (VHD): drop the trailing 'x' for the target.
        upload_image = upload_image[:-1]
    # NOTE(review): source indentation was lost; convert_vhd is assumed to
    # be expected unconditionally (for both vhd and vhdx) -- confirm.
    vhdutils.VHDUtils.convert_vhd(temp_vhd_path, upload_image,
                                  constants.VHD_TYPE_DYNAMIC)
    # Upload always uses the 'vpc' disk format.
    image_utils.upload_volume(None, None, image_meta, upload_image, 'vpc')
    fileutils.delete_if_exists(temp_vhd_path)
    fileutils.delete_if_exists(upload_image)
    self.mox.ReplayAll()
    drv.copy_volume_to_image(None, vol, None, image_meta)
def test_copy_volume_to_image_using_vhd(self):
    # Plain VHD: uploaded under its original name.
    self._test_copy_volume_to_image('vhd')
def test_copy_volume_to_image_using_vhdx(self):
    # VHDX: expects an extra conversion to a '.vhd' file before upload.
    self._test_copy_volume_to_image('vhdx')
def test_create_cloned_volume(self):
    """Cloning creates a new volume and copies the source VHD onto it."""
    drv = self._driver
    volume = db_fakes.get_fake_volume_info()
    volume_cloned = db_fakes.get_fake_volume_info_cloned()
    self.mox.StubOutWithMock(windows_utils.WindowsUtils,
                             'create_volume')
    self.stubs.Set(drv, 'local_path', self.fake_local_path)
    # The exact create_volume arguments are not interesting here.
    windows_utils.WindowsUtils.create_volume(mox.IgnoreArg(),
                                             mox.IgnoreArg(),
                                             mox.IgnoreArg())
    self.mox.StubOutWithMock(windows_utils.WindowsUtils,
                             'copy_vhd_disk')
    # Source volume's disk is copied over the new volume's path.
    windows_utils.WindowsUtils.copy_vhd_disk(self.fake_local_path(
        volume_cloned), self.fake_local_path(volume))
    self.mox.ReplayAll()
    drv.create_cloned_volume(volume, volume_cloned)
def test_extend_volume(self):
    """extend_volume converts the GB growth into the MB WindowsUtils uses."""
    drv = self._driver
    volume = db_fakes.get_fake_volume_info()
    # 1 GB of growth expressed in MB, the unit WindowsUtils.extend expects.
    TEST_VOLUME_ADDITIONAL_SIZE_MB = 1024
    TEST_VOLUME_ADDITIONAL_SIZE_GB = 1
    self.mox.StubOutWithMock(windows_utils.WindowsUtils, 'extend')
    windows_utils.WindowsUtils.extend(volume['name'],
                                      TEST_VOLUME_ADDITIONAL_SIZE_MB)
    new_size = volume['size'] + TEST_VOLUME_ADDITIONAL_SIZE_GB
    self.mox.ReplayAll()
    drv.extend_volume(volume, new_size)
| |
#!/usr/bin/env python2.7
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tool for interacting with the Google Consumer Surveys API.
To run, generate a client secret using https://console.developers.google.com/
under the APIs and Auth Tab for your project. Then download the JSON object
and save it as client_secrets.json
For more instructions on how to obtain the local files necessary for OAuth
authorization, please see https://github.com/google/consumer-surveys
Download and install the python Google Oauth Library:
https://code.google.com/p/google-api-python-client/downloads/list
Or install it with PIP:
$ pip install google-api-python-client
To create a survey:
$ ./example_client.py create --owner_email <email1> <email2> \
--client_secrets_file <file>
To set the number of desired responses on a survey:
$ ./example_client.py set_num_responses --survey_id <id> \
--client_secrets_file <file>
To start the survey:
$ ./example_client.py start --survey_id <id> --client_secrets_file <file>
To download survey results:
$ ./example_client.py fetch --survey_id <id> --results_file=~/my_results.xls \
--client_secrets_file <file>
Alternatively, to download survey results with a Service Account:
$ ./example_client.py fetch --survey_id <id> --results_file=~/my_results.xls \
--service_account <email> --service_account_secrets_file <file>
"""
import argparse
import httplib2
import json
import os
from apiclient.discovery import build_from_document
import googleapiclient
from oauth2client import client
from oauth2client import clientsecrets
from oauth2client import tools
from oauth2client import file as oauth_file
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import OAuth2Credentials
from oauth2client.client import SignedJwtAssertionCredentials
# Default locations for the two kinds of credential files.
_SERVICE_ACCOUNT_SECRETS = 'robot_account_secret.json'
_OAUTH_CLIENT_SECRETS = 'client_secrets.json'
# Cache file where the OAuth flow stores tokens between runs.
OAUTH2_STORAGE = 'oauth2.dat'
# Scopes needed to create/read surveys and identify the caller.
SCOPES = [
    'https://www.googleapis.com/auth/consumersurveys',
    'https://www.googleapis.com/auth/consumersurveys.readonly',
    'https://www.googleapis.com/auth/userinfo.email',
]
# Constants that enumerate the various operations that the client allows.
# Create a new survey.
_CREATE = 'create'
# Set the desired response count of an existing survey.
_SET_RESPONSE_COUNT = 'set_response_count'
# List the surveys that this user owns.
_LIST = 'list'
# Fetch the results of an existing survey.
_FETCH = 'fetch'
# Start an existing survey (previous comment was a copy-paste of _FETCH's).
_START = 'start'
_OPERATIONS = [
    _CREATE,
    _SET_RESPONSE_COUNT,
    _START,
    _FETCH,
    _LIST,
]
# Usage text shown by argparse.
_DESCRIPTION = """
You must choose one of the following operations:
- create: To create a new survey.
- set_response_count: Set the number of desired responses for a given survey.
- start: Start the given survey.
- fetch: Fetch the results in .xls format for a given survey.
- list: List the surveys that are owned by this user.
For a full list of available flags, use the --help flag.
"""
def main():
    """Parse flags, authorize, build the API client, dispatch the operation."""
    parser = argparse.ArgumentParser(
        usage=_DESCRIPTION,
    )
    parser.add_argument('operation', choices=_OPERATIONS,
                        help='The operation to perform.')
    parser.add_argument('--survey_id',
                        help='Survey ID to operate on.')
    parser.add_argument('--owner_emails',
                        nargs='+',
                        help='List of survey owners (space separated) for a '
                        'new survey.')
    parser.add_argument('--results_file',
                        default='results.xls',
                        help='filename to store excel results.')
    # Service Account flags.
    parser.add_argument('--service_account',
                        help='Service account email to use. Make sure that '
                        '--service_account_secrets_file is set correctly '
                        '.')
    parser.add_argument('--service_account_secrets_file',
                        default=_SERVICE_ACCOUNT_SECRETS,
                        help='Path to the Service Account secrets JSON file.')
    # OAuth2 client ID flags.
    parser.add_argument('--client_secrets_file',
                        default=_OAUTH_CLIENT_SECRETS,
                        help='Path to the OAuth client secrets JSON file.')
    # Arguments required by tools.run_flow
    parser.add_argument('--logging_level',
                        default='INFO',
                        help='default logging level to use.')
    parser.add_argument('--auth_host_name',
                        default='localhost',
                        help='Hostname for redirects during the OAuth flow.')
    parser.add_argument('--auth_host_port',
                        default=[8080],
                        help='Port for redirects during the OAuth flow.')
    parser.add_argument('--noauth_local_webserver',
                        action='store_true',
                        default=False,
                        help='Run a local webserver to handle redirects.')
    args = parser.parse_args()
    try:
        auth_http = setup_auth(args)
    # Python 2 exception syntax -- this script targets python2.7 (shebang).
    except clientsecrets.InvalidClientSecretsError, e:
        print ('Unable to setup authorization with the given credentials. %s'
               % e)
        return
    # Load the local copy of the discovery document
    f = file(os.path.join(
        os.path.dirname(__file__),
        "consumersurveys_v2beta_discovery.json"), "r")
    discovery_file = f.read()
    f.close()
    # Construct a service from the local documents
    try:
        cs = build_from_document(service=discovery_file, http=auth_http)
    except ValueError, e:
        print 'Error parsing discovery file "%s": %s' % (f.name, e)
        return
    # Operation dispatch; required per-operation flags are validated here.
    if args.operation == _CREATE:
        if not args.owner_emails:
            parser.error('--owner_emails is required for this operation.')
        survey = create_survey(cs, args.owner_emails)
        if not survey:
            parser.exit(status=1, message='Failed to create survey.\n')
        # NOTE(review): 'Successully' is a typo in this user-facing message.
        print 'Successully created survey with id %s\n' % survey['surveyUrlId']
        print 'Once started, survey results will be visible here:'
        print ('https://www.google.com/insights/consumersurveys/view'
               '?survey=%s\n' % survey['surveyUrlId'])
    if args.operation == _START:
        if not args.survey_id:
            parser.error('--survey_id is required for this operation.')
        start_survey(cs, args.survey_id)
        print 'You can view results for the survey here:'
        print ('https://www.google.com/insights/consumersurveys/view'
               '?survey=%s\n' % args.survey_id)
    if args.operation == _FETCH:
        if not args.survey_id:
            parser.error('--survey_id is required for this operation.')
        get_survey_results(
            cs,
            args.survey_id,
            args.results_file)
        print 'You can also view the survey results here:'
        print ('https://www.google.com/insights/consumersurveys/view'
               '?survey=%s\n' % args.survey_id)
    if args.operation == _SET_RESPONSE_COUNT:
        if not args.survey_id:
            parser.error('--survey_id is required for this operation.')
        # The new response count is hard-coded to 120 in this example.
        update_survey_response_count(cs, args.survey_id, 120)
    if args.operation == _LIST:
        list_surveys(cs)
def get_survey(cs, survey_id):
    """Fetch a single survey resource by its URL id."""
    request = cs.surveys().get(surveyUrlId=survey_id)
    return request.execute()
def list_surveys(cs):
    """Prints the surveys that are owned by the given user.

    Args:
        cs: The Consumer Surveys Service used to send the HTTP requests.
    """
    results = cs.surveys().list().execute()
    # NOTE(review): execute() normally returns a plain dict; attribute
    # access here (results.resources / s.surveyUrlId) may need to be
    # results['resources'] and s['surveyUrlId'] -- confirm against the API.
    for s in results.resources:
        print '%s' % s.surveyUrlId
def start_survey(cs, survey_id):
    """Move the survey into the 'running' state (sends it for review).

    Args:
        cs: The Consumer Surveys service used to send the HTTP requests.
        survey_id: URL id of the survey to start.

    Returns:
        The updated survey resource returned by the API.
    """
    running_state = {'state': 'running'}
    request = cs.surveys().update(surveyUrlId=survey_id, body=running_state)
    return request.execute()
def get_survey_results(cs, survey_id, result_file):
    """Writes the survey results into a xls file.

    Args:
        cs: The Consumer survey service used to send the HTTP requests.
        survey_id: The survey id for which we are downloading the
            survey results for.
        result_file: The file name which we write the survey results to.
    """
    # NOTE(review): the handle is never closed; a with-block would be safer.
    f = open(result_file, 'wb')
    # get_media returns the raw spreadsheet bytes for the survey.
    f.write(cs.results().get_media(surveyUrlId=survey_id).execute())
    print 'Successfully wrote survey %s results to %s\n' % (survey_id,
                                                            result_file)
def create_survey(cs, owner_emails):
    """Creates a new survey using a json object containing necessary
    survey fields.

    Args:
        cs: The consumer survey service used to send the HTTP requests.
        owner_emails: The list of owners that will be in the newly created
            survey.

    Returns:
        A dictionary containing the created survey, or None on HTTP error.
    """
    # Hard-coded demo survey: one open numeric question, US audience,
    # 100 desired responses.
    body_def = {
        'title': 'Phone purchase survey',
        'description': 'What phones do people buy and how much do they pay?',
        'owners': owner_emails,
        'wantedResponseCount': 100,
        'audience': {
            'country': 'US',
        },
        'questions': [
            {
                'lowValueLabel': '1',
                'openTextPlaceholder': 'enter amount here',
                'question': 'How much did you pay for your last phone?',
                'singleLineResponse': True,
                'type': 'openNumericQuestion',
                'unitOfMeasurementLabel': '$',
                'unitsPosition': 'before',
            }
        ]
    }
    try:
        survey = cs.surveys().insert(body=body_def).execute()
    # Python 2 exception syntax -- this script targets python2.7.
    except googleapiclient.errors.HttpError, e:
        print 'Error creating the survey: %s\n' % e
        return None
    return survey
def update_survey_response_count(cs, survey_id, new_response_count):
    """Update the desired number of responses for a survey.

    Args:
        cs: The Consumer Surveys service used to send the HTTP requests.
        survey_id: URL id of the survey to update.
        new_response_count: Integer giving the new desired response count.

    Returns:
        The updated survey resource returned by the API.
    """
    payload = {'wantedResponseCount': new_response_count}
    request = cs.surveys().update(surveyUrlId=survey_id, body=payload)
    return request.execute()
def setup_auth(args):
    """Set up an authenticated httplib2.Http client.

    Args:
        args: Parsed ArgumentParser namespace; also forwarded to
            tools.run_flow for the interactive OAuth flow.

    Returns:
        An httplib2.Http instance with credentials attached.
    """
    # Perform OAuth 2.0 authorization.
    if args.service_account:
        # Service accounts authenticate with a signed JWT assertion built
        # from the private key in the downloaded secrets file.
        client_email = args.service_account
        with open(args.service_account_secrets_file) as f:
            private_key = json.loads(f.read())['private_key']
        credentials = client.SignedJwtAssertionCredentials(client_email,
                                                           private_key,
                                                           scope=SCOPES)
    else:
        # Three-legged OAuth: reuse cached tokens from OAUTH2_STORAGE when
        # still valid, otherwise run the interactive browser flow.
        flow = flow_from_clientsecrets(args.client_secrets_file, scope=SCOPES)
        storage = oauth_file.Storage(OAUTH2_STORAGE)
        credentials = storage.get()
        if credentials is None or credentials.invalid:
            credentials = tools.run_flow(flow, storage, args)
    http = httplib2.Http()
    return credentials.authorize(http)
# Script entry point.
if __name__ == '__main__':
    main()
| |
import logging
from PySide import QtGui, QtCore
from PySide.QtGui import QMenu, QMenuBar, QToolBar, QAction
from juma.core import signals, app
from Menu import MenuManager
from juma.qt.IconCache import getIcon
class ToolBarItem(object):
    """A single toolbar entry wrapping a QAction (or QWidgetAction).

    Built from keyword options: label/priority/shortcut/icon, an optional
    app command ('command' + 'command_args'), an action group id, an
    embedded 'widget', or a 'menu_link' to reuse an existing menu action.
    """

    def __init__(self, name, **option):
        option = option or {}
        self.name = name.lower()
        self.label = option.get('label', name)
        self.priority = option.get('priority', 0)
        self.shortcut = option.get('shortcut', False)
        # Optional app command dispatched via app.doCommand on activation.
        self.cmd = option.get('command', None)
        self.cmdArgs = option.get('command_args', None)
        # Items sharing a group id join a mutually-exclusive QActionGroup.
        self.groupId = option.get('group', None)
        iconName = option.get('icon', None)
        self.icon = iconName and getIcon(iconName) or None
        self.parent = None
        self.owner = None
        self.onClick = None
        self.signal = None
        self.itemType = False
        widget = option.get('widget', None)
        menuLink = option.get('menu_link')
        if widget:
            # Arbitrary widget embedded directly in the toolbar.
            self.qtAction = QtGui.QWidgetAction(None)
            self.qtAction.setDefaultWidget(widget)
        elif menuLink:
            # Reuse an existing menu entry's QAction so state stays in sync.
            m = MenuManager.get().find(menuLink)
            if m and hasattr(m, 'qtaction'):
                self.qtAction = m.qtaction
            else:
                logging.error('not valid menu link:' + menuLink)
                self.qtAction = QtGui.QAction(self.label, None)
        else:
            # Plain action; 'check' items are checkable toggles.
            self.itemType = option.get('type', False)
            self.onClick = option.get('on_click', None)
            self.signal = None
            self.qtAction = QtGui.QAction(
                self.label, None,
                checkable=self.itemType == 'check',
                triggered=self.handleEvent,
                shortcut=self.shortcut
            )
        if self.icon:
            self.qtAction.setIcon(self.icon)

    def setEnabled(self, enabled=True):
        self.qtAction.setEnabled(enabled)

    def getAction(self):
        # The underlying QAction (for direct Qt manipulation).
        return self.qtAction

    def getValue(self):
        # Checked state for toggle items; plain actions always read True.
        if self.itemType in ('check', 'radio'):
            return self.qtAction.isChecked()
        return True

    def setValue(self, value):
        if self.itemType in ('check', 'radio'):
            self.qtAction.setChecked(value and True or False)

    def getOwner(self):
        # Fall back to the containing toolbar's owner when none is set.
        if self.owner: return self.owner
        if self.parent: return self.parent.getOwner()
        return None

    def handleEvent(self):
        """Dispatch activation: owner.onTool, signal, on_click, app command."""
        value = self.getValue()
        owner = self.getOwner()
        if owner:
            owner.onTool(self)
        if self.signal:
            self.signal(value)
        if self.onClick != None:
            self.onClick(value)
        if self.cmd:
            args = self.cmdArgs or {}
            app.doCommand(self.cmd, **args)

    def trigger(self):
        # Programmatically activate, as if clicked.
        if self.qtAction:
            self.qtAction.trigger()
class ToolBarNode(object):
    """Wraps a QToolBar: manages its ToolBarItems, action groups, icon size."""

    def __init__(self, name, qtToolbar, **option):
        self.name = name or ''
        assert isinstance(qtToolbar, QToolBar)
        self.qtToolbar = qtToolbar
        # name -> ToolBarItem
        self.items = {}
        # group id -> QActionGroup (for mutually exclusive items)
        self.groups = {}
        self.owner = None
        # NOTE(review): nothing in this module ever sets '_icon_size', so
        # this guard is always true -- confirm whether it is set elsewhere.
        if not hasattr(qtToolbar, '_icon_size'):
            iconSize = option.get('icon_size', 16)
            qtToolbar.setIconSize(QtCore.QSize(iconSize, iconSize))

    def affirmGroup(self, id):
        # Lazily create (and cache) the QActionGroup for `id`.
        group = self.groups.get(id, None)
        if not group:
            group = QtGui.QActionGroup(self.qtToolbar)
            self.groups[id] = group
        return group

    def addTools(self, dataList):
        """Add a list of tool specs; the string '----' inserts a separator."""
        for data in dataList:
            if data == '----':
                self.addTool(data)
            elif isinstance(data, dict):
                name = data.get('name', None)
                if name:
                    self.addTool(**data)

    def addTool(self, name, **option):
        """Create a ToolBarItem (or separator for '----'); returns the item."""
        if name == '----':
            self.qtToolbar.addSeparator()
            return
        item = ToolBarItem(name, **option)
        self.items[name] = item
        self.qtToolbar.addAction(item.qtAction)
        item.parent = self
        if item.groupId:
            group = self.affirmGroup(item.groupId)
            group.addAction(item.qtAction)
        return item

    def addWidget(self, widget):
        return self.qtToolbar.addWidget(widget)

    def getQtToolBar(self):
        return self.qtToolbar

    def addSeparator(self):
        self.qtToolbar.addSeparator()

    def getTool(self, name):
        return self.items.get(name, None)

    def removeTool(self, name):
        # Remove both the Qt action and our registry entry.
        tool = self.getTool(name)
        if tool:
            self.qtToolbar.removeAction(tool.qtAction)
            del self.items[name]

    def enableTool(self, name, enabled=True):
        tool = self.getTool(name)
        if tool:
            tool.setEnabled(enabled)

    def setEnabled(self, enabled=True):
        self.qtToolbar.setEnabled(enabled)

    def setValue(self, value):
        # No-op: kept so toolbars and items share the same interface.
        pass

    def getOwner(self):
        return self.owner
class ToolBarManager(object):
    """Singleton registry mapping toolbar names to ToolBarNode instances."""

    _singleton = None

    @staticmethod
    def get():
        """Return the process-wide ToolBarManager instance."""
        return ToolBarManager._singleton

    def __init__(self):
        assert not ToolBarManager._singleton
        ToolBarManager._singleton = self
        # name -> ToolBarNode
        self.toolbars = {}

    def addToolBar(self, name, toolbar, owner, **option):
        """Wrap a QToolBar in a ToolBarNode and register it under `name`."""
        tb = ToolBarNode(name, toolbar, **option)
        tb.owner = owner
        if name:
            self.toolbars[name] = tb
        return tb

    def find(self, path):
        """Resolve 'toolbar' or 'toolbar/tool' paths; None when not found."""
        blobs = path.split('/')
        l = len(blobs)
        if l < 1 or l > 2:
            logging.error('invalid toolbar path' + path)
            return None
        toolbar = self.toolbars.get(blobs[0])
        if l == 2:
            return toolbar and toolbar.getTool(blobs[1]) or None
        return toolbar

    def addTool(self, path, option=None, owner=None):
        """Add a tool at 'toolbar/item'; returns the new item or None.

        Args:
            path: 'toolbar_name/tool_name'.
            option: Optional dict of ToolBarItem keyword options.
            owner: Object receiving onTool() callbacks for this tool.

        BUG FIX: the default was previously a mutable dict literal ({}),
        which is shared across all calls; use None and normalize inside.
        """
        option = option or {}
        blobs = path.split('/')
        if len(blobs) != 2:
            logging.error('invalid toolbar item path' + path)
            return None
        toolbar = self.find(blobs[0])
        if toolbar:
            tool = toolbar.addTool(blobs[1], **option)
            if tool:
                tool.owner = owner
            return tool
        logging.error('toolbar not found:' + blobs[0])
        return None

    def enableTool(self, path, enabled=True):
        """Enable/disable the tool (or whole toolbar) at `path`."""
        tool = self.find(path)
        if tool:
            tool.setEnabled(enabled)
        else:
            logging.error('toolbar/tool not found:' + path)
# Instantiate the module-level singleton on import.
ToolBarManager()
| |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fedjax.legacy.training.federated_experiment."""
import glob
import os.path
from absl.testing import absltest
from fedjax.core import client_datasets
from fedjax.core import client_samplers
from fedjax.core import federated_algorithm
from fedjax.core import in_memory_federated_data
from fedjax.core import metrics
from fedjax.core import models
from fedjax.training import federated_experiment
import jax
import numpy as np
import numpy.testing as npt
class FakeClientSampler(client_samplers.ClientSampler):
  """Sequentially creates single client samples.

  The client id for a round is ``round_num + base``; the client's dataset
  holds that id as feature 'x' and its parity as label 'y'.
  """

  def __init__(self, base=0):
    self._base = base
    self._round_num = 0

  def set_round_num(self, round_num):
    self._round_num = round_num

  def sample(self):
    cid = self._base + self._round_num
    features = np.array([cid], dtype=np.int32)
    labels = np.array([cid], dtype=np.int32) % 2
    dataset = client_datasets.ClientDataset({'x': features, 'y': labels})
    self._round_num += 1
    return [(cid, dataset, None)]
def fake_algorithm():
  """Counts the number of clients and sums up the 'x' feature."""

  def init():
    # State is the pair (num_clients, sum_values).
    return 0, 0

  def apply(state, clients):
    count, total = state
    for _, client_dataset, _ in clients:
      count += 1
      for value in client_dataset.all_examples()['x']:
        total += value
    return (count, total), None

  return federated_algorithm.FederatedAlgorithm(init, apply)
class FakeEvaluationFn(federated_experiment.EvaluationFn):
  """Evaluation fn that asserts on the state it receives per round.

  `expected_state` maps round number -> the state expected at that round;
  calls return a metrics dict echoing the round number.
  """

  def __init__(self, test, expected_state):
    self._test = test
    self._expected_state = expected_state

  def __call__(self, state, round_num):
    # Fails the owning test if the state for this round is unexpected.
    self._test.assertEqual(state, self._expected_state[round_num])
    return {'round_num': round_num}
class FakeTrainClientsEvaluationFn(federated_experiment.TrainClientsEvaluationFn
                                  ):
  """Train-clients eval fn asserting on both state and sampled client ids."""

  def __init__(self, test, expected_state, expected_client_ids):
    self._test = test
    self._expected_state = expected_state
    # round number -> list of client ids expected to be trained that round.
    self._expected_client_ids = expected_client_ids

  def __call__(self, state, round_num, train_clients):
    self._test.assertEqual(state, self._expected_state[round_num])
    # train_clients is a list of (client_id, dataset, rng) tuples.
    client_ids = [i[0] for i in train_clients]
    self._test.assertCountEqual(client_ids,
                                self._expected_client_ids[round_num])
    return {'round_num': round_num}
class RunFederatedExperimentTest(absltest.TestCase):
  """End-to-end tests of run_federated_experiment with the fake algorithm."""

  def test_no_eval(self):
    config = federated_experiment.FederatedExperimentConfig(
        root_dir=self.create_tempdir(), num_rounds=5)
    client_sampler = FakeClientSampler()
    algorithm = fake_algorithm()
    state = federated_experiment.run_federated_experiment(
        algorithm=algorithm,
        init_state=algorithm.init(),
        client_sampler=client_sampler,
        config=config)
    # 5 clients whose ids sum to 15 (consistent with ids 1..5).
    self.assertEqual(state, (5, 15))

  def test_checkpoint(self):
    with self.subTest('checkpoint init'):
      config = federated_experiment.FederatedExperimentConfig(
          root_dir=self.create_tempdir(), num_rounds=5, checkpoint_frequency=3)
      client_sampler = FakeClientSampler()
      algorithm = fake_algorithm()
      state = federated_experiment.run_federated_experiment(
          algorithm=algorithm,
          init_state=(1, -1),
          client_sampler=client_sampler,
          config=config)
      # Starting from (1, -1): 5 more clients and +15 to the sum.
      self.assertEqual(state, (6, 14))
      # Only the round-3 checkpoint is expected to remain on disk.
      self.assertCountEqual(
          glob.glob(os.path.join(config.root_dir, 'checkpoint_*')),
          [os.path.join(config.root_dir, 'checkpoint_00000003')])
    with self.subTest('checkpoint restore'):
      # Restored state is (4, 5). FakeSampler produces clients [5, 6].
      state = federated_experiment.run_federated_experiment(
          algorithm=algorithm,
          init_state=None,
          client_sampler=FakeClientSampler(1),
          config=config)
      self.assertEqual(state, (6, 16))
      self.assertCountEqual(
          glob.glob(os.path.join(config.root_dir, 'checkpoint_*')),
          [os.path.join(config.root_dir, 'checkpoint_00000004')])

  def test_periodic_eval_fn_map(self):
    config = federated_experiment.FederatedExperimentConfig(
        root_dir=self.create_tempdir(), num_rounds=5, eval_frequency=3)
    client_sampler = FakeClientSampler()
    algorithm = fake_algorithm()
    # Periodic evals are expected only at rounds 1 and 3 (eval_frequency=3).
    expected_state = {1: (1, 1), 3: (3, 6)}
    expected_client_ids = {1: [1], 3: [3]}
    state = federated_experiment.run_federated_experiment(
        algorithm=algorithm,
        init_state=algorithm.init(),
        client_sampler=client_sampler,
        config=config,
        periodic_eval_fn_map={
            'test_eval':
                FakeEvaluationFn(self, expected_state),
            'train_eval':
                FakeTrainClientsEvaluationFn(self, expected_state,
                                             expected_client_ids)
        })
    self.assertEqual(state, (5, 15))
    # Each periodic eval fn gets its own output path under root_dir.
    self.assertCountEqual(
        glob.glob(os.path.join(config.root_dir, '*eval*')), [
            os.path.join(config.root_dir, 'test_eval'),
            os.path.join(config.root_dir, 'train_eval')
        ])

  def test_final_eval_fn_map(self):
    config = federated_experiment.FederatedExperimentConfig(
        root_dir=self.create_tempdir(), num_rounds=5, eval_frequency=3)
    client_sampler = FakeClientSampler()
    algorithm = fake_algorithm()
    # The final evaluation only sees the terminal state after round 5.
    expected_state = {5: (5, 15)}
    state = federated_experiment.run_federated_experiment(
        algorithm=algorithm,
        init_state=algorithm.init(),
        client_sampler=client_sampler,
        config=config,
        final_eval_fn_map={
            'final_eval': FakeEvaluationFn(self, expected_state)
        })
    self.assertEqual(state, (5, 15))
    # Final eval results land in a single TSV file.
    self.assertCountEqual(
        glob.glob(os.path.join(config.root_dir, 'final_eval.tsv')),
        [os.path.join(config.root_dir, 'final_eval.tsv')])
def fake_model():
  """Model stub whose prediction is one_hot(x % 3); params are ignored."""

  def apply_for_eval(params, example):
    del params
    return jax.nn.one_hot(example['x'] % 3, 3)

  eval_metrics = {'accuracy': metrics.Accuracy()}
  # Only the eval path is exercised; train-side callables stay None.
  return models.Model(
      init=None,
      apply_for_train=None,
      apply_for_eval=apply_for_eval,
      train_loss=None,
      eval_metrics=eval_metrics)
class FakeState:
  """Minimal algorithm-state stand-in exposing only `params`."""
  # fake_model ignores params, so None suffices.
  params = None
class EvaluationFnsTest(absltest.TestCase):
  """Tests for the bundled model-evaluation function helpers."""

  def test_model_sample_clients_evaluation_fn(self):
    eval_fn = federated_experiment.ModelSampleClientsEvaluationFn(
        FakeClientSampler(), fake_model(),
        client_datasets.PaddedBatchHParams(batch_size=4))
    state = FakeState()
    # For round r the sampled client has x == r and y == r % 2, while the
    # fake model predicts r % 3, so accuracy is 1 exactly when
    # r % 3 == r % 2 (true for r = 1 and r = 6 below).
    npt.assert_equal(eval_fn(state, 1), {'accuracy': np.array(1.)})
    npt.assert_equal(eval_fn(state, 2), {'accuracy': np.array(0.)})
    npt.assert_equal(eval_fn(state, 3), {'accuracy': np.array(0.)})
    npt.assert_equal(eval_fn(state, 4), {'accuracy': np.array(0.)})
    npt.assert_equal(eval_fn(state, 5), {'accuracy': np.array(0.)})
    npt.assert_equal(eval_fn(state, 6), {'accuracy': np.array(1.)})

  def test_model_full_evaluation_fn(self):
    sampler = FakeClientSampler()
    sampler.set_round_num(1)
    # Clients 1..4; only client 1 satisfies r % 3 == r % 2 => accuracy 1/4.
    clients = [sampler.sample()[0] for _ in range(4)]
    fd = in_memory_federated_data.InMemoryFederatedData(
        dict((k, v.all_examples()) for k, v, _ in clients))
    eval_fn = federated_experiment.ModelFullEvaluationFn(
        fd, fake_model(), client_datasets.PaddedBatchHParams(batch_size=4))
    state = FakeState()
    # Full-dataset evaluation is independent of the round number.
    npt.assert_equal(eval_fn(state, 1), {'accuracy': np.array(0.25)})
    npt.assert_equal(eval_fn(state, 100), {'accuracy': np.array(0.25)})

  def test_model_train_clients_evaluation_fn(self):
    sampler = FakeClientSampler()
    sampler.set_round_num(1)
    clients = [sampler.sample()[0] for _ in range(4)]
    eval_fn = federated_experiment.ModelTrainClientsEvaluationFn(
        fake_model(), client_datasets.PaddedBatchHParams(batch_size=4))
    state = FakeState()
    # Evaluates exactly the supplied train clients regardless of round.
    npt.assert_equal(eval_fn(state, 1, clients), {'accuracy': np.array(0.25)})
    npt.assert_equal(eval_fn(state, 100, clients), {'accuracy': np.array(0.25)})
# Standard absltest entry point.
if __name__ == '__main__':
  absltest.main()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# from ceilometer project
# https://github.com/openstack/ceilometer : ceilometer/openstack/common/setup.py
# Copyright 2011 OpenStack Foundation.
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import email
import os
import re
import subprocess
import sys
from setuptools.command import sdist
def parse_mailmap(mailmap='.mailmap'):
    """Parse a git .mailmap file into {alias_email: canonical_email}.

    A missing file yields an empty mapping; lines without two <email>
    tokens (including commented lines) are skipped.
    """
    mapping = {}
    if not os.path.exists(mailmap):
        return mapping
    pattern = re.compile(r'[^#]*?(<.+>).*(<.+>).*')
    with open(mailmap, 'r') as fp:
        for line in fp:
            match = pattern.match(line)
            if match is None:
                continue
            canonical_email, alias = match.groups()
            mapping[alias] = canonical_email
    return mapping
def _parse_git_mailmap(git_dir, mailmap='.mailmap'):
    """Parse the .mailmap sitting in the repo root (one level above .git)."""
    repo_root = os.path.dirname(git_dir)
    return parse_mailmap(os.path.join(repo_root, mailmap))
def canonicalize_emails(changelog, mapping):
    """Takes in a string and an email alias mapping and replaces all
    instances of the aliases in the string with their real email.

    Args:
        changelog: The text to rewrite.
        mapping: dict of {alias_email: canonical_email}, as produced by
            parse_mailmap().

    Returns:
        The changelog text with every alias replaced.
    """
    # Use items() instead of the Python-2-only iteritems() so this helper
    # works under both Python 2 and Python 3.
    for alias, email_address in mapping.items():
        changelog = changelog.replace(alias, email_address)
    return changelog
# Get requirements from the first file that exists
def get_reqs_from_files(requirements_files):
    """Return the lines of the first existing file in requirements_files.

    Returns an empty list when none of the candidate files exist.
    """
    for candidate in requirements_files:
        if not os.path.exists(candidate):
            continue
        with open(candidate, 'r') as fil:
            return fil.read().split('\n')
    return []
def parse_requirements(requirements_files=['requirements.txt',
                                           'tools/pip-requires']):
    """Translate pip requirements lines into install_requires entries.

    NOTE(review): the default list is a shared mutable; it is never
    mutated here, but a None default would be safer.
    """
    requirements = []
    for line in get_reqs_from_files(requirements_files):
        # For the requirements list, we need to inject only the portion
        # after egg= so that distutils knows the package it's looking for
        # such as:
        # -e git://github.com/openstack/nova/master#egg=nova
        if re.match(r'\s*-e\s+', line):
            requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
                                line))
        # such as:
        # http://github.com/openstack/nova/zipball/master#egg=nova
        elif re.match(r'\s*https?:', line):
            requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
                                line))
        # -f lines are for index locations, and don't get used here
        elif re.match(r'\s*-f\s+', line):
            pass
        # argparse is part of the standard library starting with 2.7
        # adding it to the requirements list screws distro installs
        elif line == 'argparse' and sys.version_info >= (2, 7):
            pass
        else:
            requirements.append(line)
    return requirements
def parse_dependency_links(requirements_files=None):
    """Build a dependency_links list from pip-style requirements files.

    Args:
        requirements_files: paths to search; the first one that exists
            wins.  Defaults to ('requirements.txt', 'tools/pip-requires').

    Returns:
        List of alternate package locations (URLs).
    """
    # A None sentinel replaces the original mutable list default
    # (shared-mutable-default pitfall); behaviour is unchanged.
    if requirements_files is None:
        requirements_files = ('requirements.txt', 'tools/pip-requires')
    dependency_links = []
    # dependency_links inject alternate locations to find packages listed
    # in requirements
    for line in get_reqs_from_files(requirements_files):
        # skip comments and blank lines
        if re.match(r'(\s*#)|(\s*$)', line):
            continue
        # lines with -e or -f need the whole line, minus the flag
        if re.match(r'\s*-[ef]\s+', line):
            dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
        # lines that are only urls can go in unmolested
        elif re.match(r'\s*https?:', line):
            dependency_links.append(line)
    return dependency_links
def _run_shell_command(cmd, throw_on_error=False):
if os.name == 'nt':
output = subprocess.Popen(["cmd.exe", "/C", cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
else:
output = subprocess.Popen(["/bin/sh", "-c", cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = output.communicate()
if output.returncode and throw_on_error:
raise Exception("%s returned %d" % cmd, output.returncode)
if len(out) == 0:
return None
if len(out[0].strip()) == 0:
return None
return out[0].strip()
def _get_git_directory():
    """Walk upward from this file's directory looking for '.git'.

    Returns:
        Path of the nearest enclosing .git directory, or None once the
        filesystem root is reached without finding one.
    """
    current = os.path.dirname(__file__)
    while True:
        candidate = os.path.join(current, '.git')
        if os.path.exists(candidate):
            return candidate
        current, tail = os.path.split(current)
        if not tail:
            # os.path.split yields an empty tail at the root: give up.
            return None
def write_git_changelog():
    """Write a changelog based on the git changelog.

    Honors SKIP_WRITE_GIT_CHANGELOG; outside a git checkout an empty
    ChangeLog placeholder is created instead.
    """
    new_changelog = 'ChangeLog'
    if os.getenv('SKIP_WRITE_GIT_CHANGELOG'):
        return
    git_dir = _get_git_directory()
    if not git_dir:
        # Not inside a git checkout: leave an empty placeholder file.
        open(new_changelog, 'w').close()
        return
    log_text = _run_shell_command('git --git-dir=%s log' % git_dir)
    mailmap = _parse_git_mailmap(git_dir)
    with open(new_changelog, "w") as changelog_file:
        changelog_file.write(canonicalize_emails(log_text, mailmap))
def generate_authors():
    """Create AUTHORS file using git commits.

    Honors SKIP_GENERATE_AUTHORS; outside a git checkout an empty
    AUTHORS file is created.  The jenkins review account is filtered
    out, Co-authored-by entries are included, and AUTHORS.in (if any)
    is appended.
    """
    jenkins_email = 'jenkins@review.(openstack|stackforge).org'
    old_authors = 'AUTHORS.in'
    new_authors = 'AUTHORS'
    if os.getenv('SKIP_GENERATE_AUTHORS'):
        return
    git_dir = _get_git_directory()
    if not git_dir:
        open(new_authors, 'w').close()
        return
    # don't include jenkins email address in AUTHORS file
    git_log_cmd = ("git --git-dir=" + git_dir +
                   " log --format='%aN <%aE>' | sort -u | "
                   "egrep -v '" + jenkins_email + "'")
    changelog = _run_shell_command(git_log_cmd)
    signed_cmd = ("git --git-dir=" + git_dir +
                  " log | grep -i Co-authored-by: | sort -u")
    signed_entries = _run_shell_command(signed_cmd)
    if signed_entries:
        co_authors = "\n".join(
            entry.split(":", 1)[1].strip()
            for entry in signed_entries.split("\n") if entry)
        changelog = "\n".join((changelog, co_authors))
    mailmap = _parse_git_mailmap(git_dir)
    with open(new_authors, 'w') as new_authors_fh:
        new_authors_fh.write(canonicalize_emails(changelog, mailmap))
        if os.path.exists(old_authors):
            with open(old_authors, "r") as old_authors_fh:
                new_authors_fh.write('\n' + old_authors_fh.read())
_rst_template = """%(heading)s
%(underline)s
.. automodule:: %(module)s
:members:
:undoc-members:
:show-inheritance:
"""
def get_cmdclass():
    """Return dict of commands to run from setup.py."""
    cmdclass = dict()

    def _find_modules(arg, dirname, files):
        # os.path.walk visitor: records the dotted module name of every
        # .py file (except __init__.py) under dirname into the dict.
        for filename in files:
            if filename.endswith('.py') and filename != '__init__.py':
                arg["%s.%s" % (dirname.replace('/', '.'),
                               filename[:-3])] = True

    class LocalSDist(sdist.sdist):
        """Builds the ChangeLog and Authors files from VC first."""

        def run(self):
            write_git_changelog()
            generate_authors()
            # sdist.sdist is an old style class, can't use super()
            sdist.sdist.run(self)

    cmdclass['sdist'] = LocalSDist

    # If Sphinx is installed on the box running setup.py,
    # enable setup.py to build the documentation, otherwise,
    # just ignore it
    try:
        from sphinx.setup_command import BuildDoc

        class LocalBuildDoc(BuildDoc):
            # Build both HTML pages and man pages by default.
            builders = ['html', 'man']

            def generate_autoindex(self):
                # NOTE: Python 2 only -- uses print statements and
                # os.path.walk, neither of which exists on Python 3.
                print "**Autodocumenting from %s" % os.path.abspath(os.curdir)
                modules = {}
                option_dict = self.distribution.get_option_dict('build_sphinx')
                source_dir = os.path.join(option_dict['source_dir'][1], 'api')
                if not os.path.exists(source_dir):
                    os.makedirs(source_dir)
                # Only walk top-level packages; sub-packages are reached
                # through the directory walk itself.
                for pkg in self.distribution.packages:
                    if '.' not in pkg:
                        os.path.walk(pkg, _find_modules, modules)
                module_list = modules.keys()
                module_list.sort()
                autoindex_filename = os.path.join(source_dir, 'autoindex.rst')
                with open(autoindex_filename, 'w') as autoindex:
                    autoindex.write(""".. toctree::
   :maxdepth: 1

""")
                    # One stub .rst page per module, plus a toctree entry.
                    for module in module_list:
                        output_filename = os.path.join(source_dir,
                                                       "%s.rst" % module)
                        heading = "The :mod:`%s` Module" % module
                        underline = "=" * len(heading)
                        values = dict(module=module, heading=heading,
                                      underline=underline)
                        print "Generating %s" % output_filename
                        with open(output_filename, 'w') as output_file:
                            output_file.write(_rst_template % values)
                        autoindex.write(" %s.rst\n" % module)

            def run(self):
                if not os.getenv('SPHINX_DEBUG'):
                    self.generate_autoindex()
                # Run every configured builder over the same sources.
                for builder in self.builders:
                    self.builder = builder
                    self.finalize_options()
                    self.project = self.distribution.get_name()
                    self.version = self.distribution.get_version()
                    self.release = self.distribution.get_version()
                    BuildDoc.run(self)

        class LocalBuildLatex(LocalBuildDoc):
            builders = ['latex']

        cmdclass['build_sphinx'] = LocalBuildDoc
        cmdclass['build_sphinx_latex'] = LocalBuildLatex
    except ImportError:
        pass
    return cmdclass
def _get_revno(git_dir):
    """Return the number of commits since the most recent tag.

    We use git-describe to find this out, but if there are no
    tags then we fall back to counting commits since the beginning
    of time.
    """
    base_cmd = "git --git-dir=%s " % git_dir
    describe = _run_shell_command(base_cmd + "describe --always")
    if "-" not in describe:
        # No tag anywhere in history: count every commit instead.
        revisions = _run_shell_command(
            base_cmd + "rev-list --abbrev-commit HEAD")
        return len(revisions.splitlines())
    # describe output is <tag>-<count>-g<sha>; the middle field is the count.
    return describe.rsplit("-", 2)[-2]
def _get_version_from_git(pre_version):
    """Return a version which is equal to the tag that's on the current
    revision if there is one, or tag plus number of additional revisions
    if the current revision has no tag.

    Returns None when not running inside a git checkout.
    """
    git_dir = _get_git_directory()
    if not git_dir:
        return None
    git_prefix = "git --git-dir=" + git_dir + " "
    if not pre_version:
        return _run_shell_command(
            git_prefix + "describe --always").replace('-', '.')
    try:
        # An exact tag match means we are sitting on a release.
        return _run_shell_command(
            git_prefix + "describe --exact-match",
            throw_on_error=True).replace('-', '.')
    except Exception:
        # Untagged revision: synthesise a pre-release version from the
        # revision count and the short sha.
        sha = _run_shell_command(git_prefix + "log -n1 --pretty=format:%h")
        return "%s.a%s.g%s" % (pre_version, _get_revno(git_dir), sha)
def _get_version_from_pkg_info(package_name):
"""Get the version from PKG-INFO file if we can."""
try:
pkg_info_file = open('PKG-INFO', 'r')
except (IOError, OSError):
return None
try:
pkg_info = email.message_from_file(pkg_info_file)
except email.MessageError:
return None
# Check to make sure we're in our own dir
if pkg_info.get('Name', None) != package_name:
return None
return pkg_info.get('Version', None)
def get_version(package_name, pre_version=None):
    """Get the version of the project. First, try getting it from PKG-INFO, if
    it exists. If it does, that means we're in a distribution tarball or that
    install has happened. Otherwise, if there is no PKG-INFO file, pull the
    version from git.

    We do not support setup.py version sanity in git archive tarballs, nor do
    we support packagers directly sucking our git repo into theirs. We expect
    that a source tarball be made from our git repo - or that if someone wants
    to make a source tarball from a fork of our repo with additional tags in it
    that they understand and desire the results of doing that.
    """
    # 'or' keeps the original short-circuit order: environment override,
    # then PKG-INFO, then git metadata.
    version = (os.environ.get("OSLO_PACKAGE_VERSION", None)
               or _get_version_from_pkg_info(package_name)
               or _get_version_from_git(pre_version))
    if version:
        return version
    raise Exception("Versioning for this project requires either an sdist"
                    " tarball, or access to an upstream git repository.")
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Automated bot for testing recipes.
"""
from __future__ import absolute_import, print_function
import argparse
from argparse import RawDescriptionHelpFormatter as RawDescriptionHelp
import glob
import mmap
import os
import re
import sys
__version__ = '0.1.0'
TEMPLATE = """\"\"\"
Do not edit this file.
Just rerun the program to regenerate tests.
\"\"\"
from __future__ import absolute_import
import os
import shutil
import subprocess
import tempfile
CONFIG_TEMPLATE = \"\"\"pakit:
command:
timeout: 120
defaults:
repo: stable
log:
enabled: true
file: {0}
level: debug
paths:
link: {1}
prefix: {2}
recipes: {3}
source: {4}
recipe:
update_interval: 86400
uris:
- uri: test_recipes
{5}
\"\"\"
def write_config(tail):
\"\"\"
Write config for a test execution to path.
Args:
tail: Vaild yaml to affix to the end of CONFIG_TEMPLATE.
Returns:
Path to temp directory.
\"\"\"
root = tempfile.mkdtemp()
recipe_d = os.path.join(root, 'recipe')
os.mkdir(recipe_d)
os.symlink('ROOT_RECS',
os.path.join(recipe_d, 'test_recipes'))
with open(os.path.join(root, 'pakit.yml'), 'w') as fout:
fout.write(CONFIG_TEMPLATE.format(
os.path.join(root, 'main.log'),
os.path.join(root, 'link'),
os.path.join(root, 'prefix'),
recipe_d,
os.path.join(root, 'source'),
tail
))
return root
def delete_it(path):
\"\"\"
File or folder, it is deleted.
Args:
path: path to a file or dir
\"\"\"
try:
shutil.rmtree(path)
except OSError:
try:
os.remove(path)
except OSError:
pass
class RecipeTest(object):
def setup_method(self, method):
recipe = type(self).__name__.replace('Test_', '').split('::')[0]
repo = method.__name__.replace('test_', '')
self.temp_d = write_config(recipe + ':\\n repo: ' + repo)
self.args = ['pakit', '--conf',
os.path.join(self.temp_d, 'pakit.yml'), 'install', recipe]
self.new_env = os.environ.copy()
new_path = os.environ['PATH'] + ':' + os.path.join(self.temp_d,
'link', 'bin')
self.new_env.update({'PATH': new_path})
def teardown_method(self, _):
delete_it(self.temp_d)
"""
def create_args_parser():
    """
    Create the program argument parser.

    Returns:
        An argparse parser object.
    """
    prog_name = os.path.basename(os.path.dirname(sys.argv[0]))
    description = """
    This script will (re)generate tests for recipes.
    It will OVERWRITE existing tests.
    """
    # Drop the trailing newline + closing indentation of the literal.
    description = description[0:-5]
    parser = argparse.ArgumentParser(prog=prog_name, description=description,
                                     formatter_class=RawDescriptionHelp)
    parser.add_argument('-v', '--version', action='version',
                        version='pakit_tests {0}'.format(__version__))
    parser.add_argument('recipes_root', help='the folder containing recipes')
    parser.add_argument('output', nargs='?', default='tests/test_recipes.py',
                        help="""relative path from recipes root to test file,
                        default: tests/test_recipes.py""")
    return parser
def extract_repo_names(text):
    """
    Given a string, extract all keys from the string.

    Returns:
        List of keys in the string.
    """
    # findall with a single group returns exactly the group(1) values
    # the original collected via finditer.
    return re.findall(r'\'(\w+)\':', text)
def extract_repo_block(text):
    """
    Given a string, extract ONLY the repos dictionary block.

    Returns:
        A string containing only required block.
    """
    pattern = re.compile(r'(self.repos\s*=\s*{.*?})', re.DOTALL)
    return pattern.search(text).group(1)
def format_lines(recipes):
"""
Transform the dictionary to lines to write.
Returns:
Lines to write to test file.
"""
lines = []
class_line = '\nclass Test_{0}(RecipeTest):'
repo_line = """ def test_{0}(self):
assert subprocess.call(self.args, cwd=self.temp_d,
env=self.new_env) == 0
"""
for recipe in sorted(recipes):
repo_name = recipes[recipe]
lines.append(class_line.format(recipe))
for repo_name in recipes[recipe]:
lines.extend(repo_line.format(repo_name).split('\n'))
return lines[0:-1]
def scan_recipes(recipe_d):
    """
    Scan the recipe directory and return relevant data.

    Returns a dict mapping the recipe module name (file name without
    '.py') to the repo keys found in its ``self.repos`` block, for every
    module that defines a ``class X(Recipe)``.
    """
    data = {}
    matcher = re.compile(r'class\s+\S+\(Recipe\)')
    candidates = [fname for fname in glob.glob(os.path.join(recipe_d, '*.py'))]
    for fname in candidates:
        short_name = os.path.basename(fname)[0:-3]
        # mmap lets the regex scan the file without slurping it into a
        # Python string.  NOTE(review): on Python 3 a str pattern cannot
        # search an mmap (bytes-like object); this appears to target
        # Python 2 -- confirm before porting.
        with open(fname, 'r+') as fin:
            text = mmap.mmap(fin.fileno(), 0)
            if matcher.search(text) is not None:
                data[short_name] = extract_repo_names(extract_repo_block(text))
    return data
def write_file(root, test_file):
    """
    Write the generated recipe test module to *test_file*.
    """
    try:
        os.makedirs(os.path.dirname(test_file))
    except OSError:
        # Directory already exists; nothing to do.
        pass
    body = '\n'.join(format_lines(scan_recipes(root)))
    contents = TEMPLATE.replace('ROOT_RECS', root) + body
    with open(test_file, 'w') as fout:
        fout.write(contents)
def main(argv=None):
    """
    The main entry point for this program.

    Args:
        argv: A list of program options, if None use sys.argv.
    """
    if argv is None:
        argv = sys.argv
    parser = create_args_parser()
    args = parser.parse_args(argv[1:])
    root = os.path.abspath(args.recipes_root)
    # BUG FIX: the original if/else had two identical branches.  An
    # absolute output path is now used verbatim; a relative one is
    # resolved against the recipes root.  (os.path.join discards root
    # when the second argument is absolute, so observable behaviour is
    # unchanged -- the intent is just stated correctly now.)
    if os.path.isabs(args.output):
        test_file = args.output
    else:
        test_file = os.path.join(root, args.output)
    print('Scanning recipes under: ' + root)
    print('Writing tests to: ' + test_file)
    write_file(root, test_file)
# Script entry point.
if __name__ == "__main__":
    main()  # pragma: no cover
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from babelfish import Language, language_converters
import pytest
from vcr import VCR
from subliminal.exceptions import AuthenticationError, ConfigurationError
from subliminal.providers.addic7ed import Addic7edProvider, Addic7edSubtitle
# Shared VCR instance: replays recorded HTTP interactions from per-test
# YAML cassettes; requests are matched on every component including the
# body, so request construction in the provider must not change.
vcr = VCR(path_transformer=lambda path: path + '.yaml',
          match_on=['method', 'scheme', 'host', 'port', 'path', 'query', 'body'],
          cassette_library_dir=os.path.join('tests', 'cassettes', 'addic7ed'))
# Converter tests: exercise the registered 'addic7ed' language converter
# in both directions (babelfish alpha3 codes <-> addic7ed display names).
@pytest.mark.converter
def test_converter_convert_alpha3_country_script():
    assert language_converters['addic7ed'].convert('srp', None, 'Cyrl') == 'Serbian (Cyrillic)'


@pytest.mark.converter
def test_converter_convert_alpha3_country():
    assert language_converters['addic7ed'].convert('por', 'BR') == 'Portuguese (Brazilian)'


@pytest.mark.converter
def test_converter_convert_alpha3():
    assert language_converters['addic7ed'].convert('eus') == 'Euskera'


@pytest.mark.converter
def test_converter_convert_alpha3_name_converter():
    # Per the test name, this language resolves via the name converter.
    assert language_converters['addic7ed'].convert('fra') == 'French'


@pytest.mark.converter
def test_converter_reverse():
    assert language_converters['addic7ed'].reverse('Chinese (Traditional)') == ('zho',)


@pytest.mark.converter
def test_converter_reverse_name_converter():
    assert language_converters['addic7ed'].reverse('English') == ('eng', None, None)
# Addic7edSubtitle.get_matches: verify which properties of a known video
# fixture are reported as matching a candidate subtitle.
def test_get_matches_with_release_group(episodes):
    subtitle = Addic7edSubtitle(Language('eng'), True, None, 'The Big Bang Theory', 7, 5, 'The Workplace Proximity',
                                2007, 'DIMENSION', None)
    matches = subtitle.get_matches(episodes['bbt_s07e05'])
    assert matches == {'series', 'season', 'episode', 'title', 'year', 'release_group'}


def test_get_matches_with_resolution_and_release_group(episodes):
    # '720PDIMENSION' carries both a resolution and a release group.
    subtitle = Addic7edSubtitle(Language('heb'), True, None, 'The Big Bang Theory', 7, 5, 'The Workplace Proximity',
                                2007, '720PDIMENSION', None)
    matches = subtitle.get_matches(episodes['bbt_s07e05'])
    assert matches == {'series', 'season', 'episode', 'title', 'year', 'release_group', 'resolution'}


def test_get_matches_with_format_and_release_group(episodes):
    subtitle = Addic7edSubtitle(Language('eng'), True, None, 'Game of Thrones', 3, 10, 'Mhysa', None, 'WEB-DL-NTb',
                                None)
    matches = subtitle.get_matches(episodes['got_s03e10'])
    assert matches == {'series', 'season', 'episode', 'title', 'year', 'release_group', 'format'}


def test_get_matches_no_match(episodes):
    # Subtitle for an entirely different show: nothing should match.
    subtitle = Addic7edSubtitle(Language('eng'), True, None, 'The Big Bang Theory', 7, 5, 'The Workplace Proximity',
                                2007, 'DIMENSION', None)
    matches = subtitle.get_matches(episodes['got_s03e10'])
    assert matches == set()
# Credentials must be supplied in pairs: a lone username or password is
# a configuration error.
def test_configuration_error_no_username():
    with pytest.raises(ConfigurationError):
        Addic7edProvider(password='subliminal')


def test_configuration_error_no_password():
    with pytest.raises(ConfigurationError):
        Addic7edProvider(username='subliminal')
# Authentication round-trips, replayed from VCR cassettes.
@pytest.mark.integration
@vcr.use_cassette
def test_login():
    provider = Addic7edProvider('subliminal', 'subliminal')
    assert provider.logged_in is False
    provider.initialize()
    assert provider.logged_in is True
    # panel.php serves 200 only while a session is active.
    r = provider.session.get(provider.server_url + 'panel.php', allow_redirects=False)
    assert r.status_code == 200


@pytest.mark.integration
@vcr.use_cassette
def test_login_bad_password():
    provider = Addic7edProvider('subliminal', 'lanimilbus')
    with pytest.raises(AuthenticationError):
        provider.initialize()


@pytest.mark.integration
@vcr.use_cassette
def test_logout():
    provider = Addic7edProvider('subliminal', 'subliminal')
    provider.initialize()
    provider.terminate()
    assert provider.logged_in is False
    # After logout the panel redirects (302) instead of serving the page.
    r = provider.session.get(provider.server_url + 'panel.php', allow_redirects=False)
    assert r.status_code == 302
# _search_show_id: resolve a series name (optionally with a year) to the
# provider's numeric show id via the site search.
@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id():
    with Addic7edProvider() as provider:
        show_id = provider._search_show_id('The Big Bang Theory')
    assert show_id == 126


@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id_incomplete():
    # A truncated title must not be matched.
    with Addic7edProvider() as provider:
        show_id = provider._search_show_id('The Big Bang')
    assert show_id is None


@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id_no_year():
    with Addic7edProvider() as provider:
        show_id = provider._search_show_id('Dallas')
    assert show_id == 802


@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id_year():
    # The 2012 remake has an id distinct from the original series.
    with Addic7edProvider() as provider:
        show_id = provider._search_show_id('Dallas', 2012)
    assert show_id == 2559


@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id_error():
    with Addic7edProvider() as provider:
        show_id = provider._search_show_id('The Big How I Met Your Mother')
    assert show_id is None
# _get_show_ids: the scraped {lowercased series name: id} mapping.  All
# variants below replay the same shared cassette.
@pytest.mark.integration
@vcr.use_cassette('test_get_show_ids')
def test_get_show_ids():
    with Addic7edProvider() as provider:
        show_ids = provider._get_show_ids()
    assert 'the big bang theory' in show_ids
    assert show_ids['the big bang theory'] == 126


@pytest.mark.integration
@vcr.use_cassette('test_get_show_ids')
def test_get_show_ids_no_year():
    with Addic7edProvider() as provider:
        show_ids = provider._get_show_ids()
    assert 'dallas' in show_ids
    assert show_ids['dallas'] == 802


@pytest.mark.integration
@vcr.use_cassette('test_get_show_ids')
def test_get_show_ids_year():
    # Year-disambiguated shows are keyed as 'name (year)'.
    with Addic7edProvider() as provider:
        show_ids = provider._get_show_ids()
    assert 'dallas (2012)' in show_ids
    assert show_ids['dallas (2012)'] == 2559


@pytest.mark.integration
@vcr.use_cassette('test_get_show_ids')
def test_get_show_ids_country():
    # Country-disambiguated shows are keyed as 'name (cc)'.
    with Addic7edProvider() as provider:
        show_ids = provider._get_show_ids()
    assert 'being human (us)' in show_ids
    assert show_ids['being human (us)'] == 1317


@pytest.mark.integration
@vcr.use_cassette('test_get_show_ids')
def test_get_show_ids_quoted():
    # Note the key has no apostrophe ("marvels", not "marvel's").
    with Addic7edProvider() as provider:
        show_ids = provider._get_show_ids()
    assert 'marvels agents of s.h.i.e.l.d.' in show_ids
    assert show_ids['marvels agents of s.h.i.e.l.d.'] == 4010
# get_show_id: the public resolver combining name, country and year.
@pytest.mark.integration
@vcr.use_cassette
def test_get_show_id_with_quotes_and_mixed_case():
    with Addic7edProvider() as provider:
        show_id = provider.get_show_id('Marvel\'s Agents of S.H.I.E.L.D.')
    assert show_id == 4010


@pytest.mark.integration
@vcr.use_cassette
def test_get_show_id_with_country():
    with Addic7edProvider() as provider:
        show_id = provider.get_show_id('Being Human', country_code='US')
    assert show_id == 1317


@pytest.mark.integration
@vcr.use_cassette
def test_get_show_id_with_year():
    with Addic7edProvider() as provider:
        show_id = provider.get_show_id('Dallas', year=2012)
    assert show_id == 2559


@pytest.mark.integration
@vcr.use_cassette
def test_get_show_id():
    with Addic7edProvider() as provider:
        show_id = provider.get_show_id('Dallas')
    assert show_id == 802
# query: list all subtitles for a series/season, with and without year.
@pytest.mark.integration
@vcr.use_cassette
def test_query(episodes):
    video = episodes['bbt_s07e05']
    with Addic7edProvider() as provider:
        subtitles = provider.query(video.series, video.season, video.year)
    assert len(subtitles) == 474
    for subtitle in subtitles:
        assert subtitle.series == video.series
        assert subtitle.season == video.season
        assert subtitle.year is None


@pytest.mark.integration
@vcr.use_cassette
def test_query_wrong_series(episodes):
    # A truncated series name resolves to no show, hence no subtitles.
    video = episodes['bbt_s07e05']
    with Addic7edProvider() as provider:
        subtitles = provider.query(video.series[:12], video.season, video.year)
    assert len(subtitles) == 0


@pytest.mark.integration
@vcr.use_cassette
def test_query_parsing(episodes):
    # Spot-check every parsed field of one known subtitle entry.
    video = episodes['got_s03e10']
    with Addic7edProvider() as provider:
        subtitles = provider.query(video.series, video.season)
    subtitle = [s for s in subtitles if s.download_link == 'updated/1/76311/1'][0]
    assert subtitle.language == Language('eng')
    assert subtitle.hearing_impaired is True
    assert subtitle.page_link == 'http://www.addic7ed.com/serie/Game_of_Thrones/3/10/Mhysa'
    assert subtitle.series == video.series
    assert subtitle.season == video.season
    assert subtitle.episode == video.episode
    assert subtitle.title == video.title
    assert subtitle.year == video.year
    assert subtitle.version == 'EVOLVE'


@pytest.mark.integration
@vcr.use_cassette
def test_query_year(episodes):
    video = episodes['dallas_2012_s01e03']
    with Addic7edProvider() as provider:
        subtitles = provider.query(video.series, video.season, video.year)
    assert len(subtitles) == 123
    for subtitle in subtitles:
        assert subtitle.series == video.series
        assert subtitle.season == video.season
        assert subtitle.year == video.year


@pytest.mark.integration
@vcr.use_cassette
def test_query_no_year(episodes):
    video = episodes['dallas_s01e03']
    with Addic7edProvider() as provider:
        subtitles = provider.query(video.series, video.season)
    assert len(subtitles) == 7
    for subtitle in subtitles:
        assert subtitle.series == video.series
        assert subtitle.season == video.season
        assert subtitle.year == video.year
# End-to-end listing and download against cassette-recorded responses.
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles(episodes):
    video = episodes['bbt_s07e05']
    languages = {Language('deu'), Language('fra')}
    expected_subtitles = {'updated/8/80254/1', 'updated/11/80254/5'}
    with Addic7edProvider() as provider:
        subtitles = provider.list_subtitles(video, languages)
    assert {subtitle.download_link for subtitle in subtitles} == expected_subtitles
    assert {subtitle.language for subtitle in subtitles} == languages


@pytest.mark.integration
@vcr.use_cassette
def test_download_subtitle(episodes):
    video = episodes['bbt_s07e05']
    languages = {Language('fra')}
    with Addic7edProvider() as provider:
        subtitles = provider.list_subtitles(video, languages)
        provider.download_subtitle(subtitles[0])
    assert subtitles[0].content is not None
    assert subtitles[0].is_valid() is True
| |
#!/usr/bin/env python3
import gen_universe
import json
import logging
import os
import re
from enum import Enum
from http import HTTPStatus
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.error import URLError, HTTPError
from urllib.parse import parse_qsl, urlparse
from urllib.request import Request, urlopen
# Binds to all available interfaces
HOST_NAME = ''
# Gets the port number from the $PORT_UNIVERSECONVERTER environment
# variable (required; KeyError at import time if unset).
PORT_NUMBER = int(os.environ['PORT_UNIVERSECONVERTER'])
# Upstream repo size cap in MiB (default 20); see MAX_BYTES below.
MAX_REPO_SIZE = int(os.environ.get('MAX_REPO_SIZE', '20'))

# Constants
MAX_TIMEOUT = 60  # seconds allowed for fetching the upstream repo
MAX_BYTES = MAX_REPO_SIZE * 1024 * 1024  # the same cap, in bytes

# HTTP header and query-parameter names used throughout the handler.
header_user_agent = 'User-Agent'
header_accept = 'Accept'
header_content_type = 'Content-Type'
header_content_length = 'Content-Length'
param_charset = 'charset'
default_charset = 'utf-8'
json_key_packages = 'packages'
param_url = 'url'
url_path = '/transform'
def run_server(server_class=HTTPServer):
    """Runs a builtin python server using the given server_class.

    :param server_class: server
    :type server_class: HTTPServer
    :return: None
    """
    # BUG FIX: the module-global `logger` is only bound under the
    # __main__ guard, so calling run_server() from an import raised
    # NameError.  Resolve the same logger locally instead.
    log = logging.getLogger(__name__)
    server_address = (HOST_NAME, PORT_NUMBER)
    httpd = server_class(server_address, Handler)
    log.warning('Server Starts on port - %s', PORT_NUMBER)
    try:
        # Blocks until interrupted.
        httpd.serve_forever()
    except KeyboardInterrupt:
        httpd.server_close()
    log.warning('Server Stops on port - %s', PORT_NUMBER)
class Handler(BaseHTTPRequestHandler):
    def do_GET(s):
        """
        Respond to the GET request. The expected format of this request is:
        http://<host>:<port>/transform?url=<url> with `User-Agent`
        and `Accept` headers
        """
        # Path and required-header validation first; any problem is a 400.
        errors = _validate_request(s)
        if errors:
            s.send_error(HTTPStatus.BAD_REQUEST, explain=errors)
            return
        query = dict(parse_qsl(urlparse(s.path).query))
        if param_url not in query:
            s.send_error(HTTPStatus.BAD_REQUEST,
                         explain=ErrorResponse.PARAM_NOT_PRESENT.to_msg(param_url))
            return
        logging.debug(">>>>>>>>>")
        user_agent = s.headers.get(header_user_agent)
        accept = s.headers.get(header_accept)
        decoded_url = query.get(param_url)
        try:
            # Fetch, filter and validate the upstream repo.
            json_response = handle(decoded_url, user_agent, accept)
        except Exception as e:
            # Any fetch/parse/validation failure is reported to the
            # client as a 400 with the error text.
            s.send_error(HTTPStatus.BAD_REQUEST, explain=str(e))
            return
        s.send_response(HTTPStatus.OK)
        content_header = gen_universe.format_universe_repo_content_type(
            _get_repo_version(accept))
        s.send_header(header_content_type, content_header)
        s.send_header(header_content_length, len(json_response))
        s.end_headers()
        s.wfile.write(json_response.encode())
def handle(decoded_url, user_agent, accept):
    """Returns the requested json data. May raise an error instead, if it fails.

    :param decoded_url: The url to be fetched from
    :type decoded_url: str
    :param user_agent: User-Agent header value
    :type user_agent: str
    :param accept: Accept header value
    :return Requested json data
    :rtype str (a valid json object)
    """
    logger.debug('Url : %s\n\tUser-Agent : %s\n\tAccept : %s',
                 decoded_url, user_agent, accept)
    # Both versions are encoded in the incoming headers: the requested
    # universe repo version in Accept, the client DC/OS version in
    # User-Agent.
    repo_version = _get_repo_version(accept)
    dcos_version = _get_dcos_version(user_agent)
    logger.debug('Version %s\nDC/OS %s', repo_version, dcos_version)
    # Forward the original headers to the upstream repo.
    req = Request(decoded_url)
    req.add_header(header_user_agent, user_agent)
    req.add_header(header_accept, accept)
    try:
        with urlopen(req, timeout=MAX_TIMEOUT) as res:
            charset = res.info().get_param(param_charset) or default_charset
            # Refuse unbounded or oversized responses before reading.
            if header_content_length not in res.headers:
                raise ValueError(ErrorResponse.ENDPOINT_HEADER_MISS.to_msg())
            if int(res.headers.get(header_content_length)) > MAX_BYTES:
                raise ValueError(ErrorResponse.MAX_SIZE.to_msg())
            raw_data = res.read()
            packages = json.loads(raw_data.decode(charset)).get(json_key_packages)
    except (HTTPError, URLError) as error:
        logger.info("Request protocol error %s", decoded_url)
        logger.exception(error)
        raise error
    return render_json(packages, dcos_version, repo_version)
def render_json(packages, dcos_version, repo_version):
    """Returns the json

    :param packages: package dictionary
    :type packages: dict
    :param dcos_version: version of dcos
    :type dcos_version: str
    :param repo_version: version of universe repo
    :type repo_version: str
    :return filtered json data based on parameters
    :rtype str
    """
    # Drop/downgrade packages the client's DC/OS release cannot use.
    processed_packages = gen_universe.filter_and_downgrade_packages_by_version(
        packages,
        dcos_version
    )
    packages_dict = {json_key_packages: processed_packages}
    # The result must validate against the requested repo schema version.
    errors = gen_universe.validate_repo_with_schema(
        packages_dict,
        repo_version
    )
    if len(errors) != 0:
        logger.error(errors)
        raise ValueError(ErrorResponse.VALIDATION_ERROR.to_msg(errors))
    return json.dumps(packages_dict)
def _validate_request(s):
    """
    :param s: The in built base http request handler
    :type s: BaseHTTPRequestHandler
    :return Error message (if any)
    :rtype String or None
    """
    if urlparse(s.path).path != url_path:
        return ErrorResponse.INVALID_PATH.to_msg(s.path)
    # Both headers are mandatory; report the first one that is missing.
    for required in (header_user_agent, header_accept):
        if required not in s.headers:
            return ErrorResponse.HEADER_NOT_PRESENT.to_msg(required)
    return None
def _get_repo_version(accept_header):
"""Returns the version of the universe repo parsed.
:param accept_header: String
:return repo version as a string or raises Error
:rtype str or raises an Error
"""
result = re.findall(r'\bversion=v\d', accept_header)
if result is None or len(result) is 0:
raise ValueError(ErrorResponse.UNABLE_PARSE.to_msg(accept_header))
result.sort(reverse=True)
return str(result[0].split('=')[1])
def _get_dcos_version(user_agent_header):
"""Parses the version of dcos from the specified header.
:param user_agent_header: String
:return dcos version as a string or raises an Error
:rtype str or raises an Error
"""
result = re.search(r'\bdcos/\b\d\.\d{1,2}', user_agent_header)
if result is None:
raise ValueError(ErrorResponse.UNABLE_PARSE.to_msg(user_agent_header))
return str(result.group().split('/')[1])
class ErrorResponse(Enum):
    """User-facing error message templates; '{}' slots are filled by to_msg."""
    INVALID_PATH = 'URL Path {} is invalid. Expected path /transform'
    HEADER_NOT_PRESENT = 'Header {} is missing'
    PARAM_NOT_PRESENT = 'Request parameter {} is missing'
    UNABLE_PARSE = 'Unable to parse header {}'
    VALIDATION_ERROR = 'Validation errors during processing {}'
    MAX_SIZE = 'Endpoint response exceeds maximum content size'
    ENDPOINT_HEADER_MISS = 'Endpoint doesn\'t return Content-Length header'

    def to_msg(self, *args):
        """Format this template with *args*.

        BUG FIX: the original called .format(args), interpolating the
        whole tuple (e.g. "Header ('Accept',) is missing"); the
        arguments must be unpacked.
        """
        return self.value.format(*args)
if __name__ == '__main__':
    # Configure the module logger from $LOGLEVEL (default INFO) before
    # starting the blocking HTTP server loop.
    logger = logging.getLogger(__name__)
    logging.basicConfig(
        level=os.environ.get("LOGLEVEL", "INFO"),
        format='%(asctime)s [%(levelname)s] %(message)s'
    )
    run_server()
| |
import warnings
import neuroml.arraymorph as am
import neuroml
import numpy as np
import neuroml.writers as writers
import neuroml.loaders as loaders
try:
import unittest2 as unittest
except ImportError:
import unittest
class TestObjectBuiltMorphology(unittest.TestCase):
    def setUp(self):
        """
        Testing a complex hand-built morphology (from neuroml objects
        rather than arrays)
        """
        # Soma: a 50-unit-long, 50-diameter segment along x.
        p = neuroml.Point3DWithDiam(x=0, y=0, z=0, diameter=50)
        d = neuroml.Point3DWithDiam(x=50, y=0, z=0, diameter=50)
        soma = neuroml.Segment(proximal=p, distal=d)
        soma.name = "Soma"
        soma.id = 0

        # now make an axon with 100 compartments:
        parent = neuroml.SegmentParent(segments=soma.id)
        parent_segment = soma
        axon_segments = []
        seg_id = 1
        for i in range(100):
            # Each compartment starts where its parent ends and extends
            # 10 units further along x with a 0.1 diameter.
            p = neuroml.Point3DWithDiam(
                x=parent_segment.distal.x,
                y=parent_segment.distal.y,
                z=parent_segment.distal.z,
                diameter=0.1,
            )
            d = neuroml.Point3DWithDiam(
                x=parent_segment.distal.x + 10,
                y=parent_segment.distal.y,
                z=parent_segment.distal.z,
                diameter=0.1,
            )
            axon_segment = neuroml.Segment(proximal=p, distal=d, parent=parent)
            axon_segment.id = seg_id
            axon_segment.name = "axon_segment_" + str(axon_segment.id)

            # now reset everything:
            parent = neuroml.SegmentParent(segments=axon_segment.id)
            parent_segment = axon_segment
            seg_id += 1

            axon_segments.append(axon_segment)

        test_morphology = am.ArrayMorphology()
        test_morphology.segments.append(soma)
        test_morphology.segments += axon_segments
        test_morphology.id = "TestMorphology"

        self.test_morphology = test_morphology

    def test_valid_morphology_ids(self):
        # Ids assigned sequentially from 0 are considered valid.
        morphology = self.test_morphology
        self.assertTrue(morphology.valid_ids)

    def test_invalid_morphology_ids(self):
        # Breaking the id sequence must invalidate the morphology.
        morphology = self.test_morphology
        morphology.segments[0].id = 5
        self.assertFalse(morphology.valid_ids)

    def test_num_segments(self):
        # Soma + 100 axon compartments.
        num_segments = len(self.test_morphology.segments)
        self.assertEqual(num_segments, 101)

    def test_segments_ids_ok(self):
        self.assertEqual(self.test_morphology.segments[30].id, 30)

    def test_soma_still_located_at_zero(self):
        self.assertEqual(self.test_morphology.segments[0].name, "Soma")
        self.assertEqual(self.test_morphology.segments[0].id, 0)

    def test_segment_vertices_ok(self):
        # First axon compartment starts at the soma's distal x (50).
        self.assertEqual(self.test_morphology.segments[1].proximal.x, 50.0)

    def test_axon_names_ok(self):
        self.assertEqual(self.test_morphology.segments[32].name, "axon_segment_32")

    def test_segment_instance(self):
        # Items from the array-backed store materialise as Segment objects.
        seg = self.test_morphology.segments[47]
        self.assertIsInstance(seg, neuroml.nml.nml.Segment)
class TestArrayMorphology(unittest.TestCase):
    """Tests for the array-backed (vertices + connectivity) ArrayMorphology."""

    def setUp(self):
        # A 100-segment unbranched chain along the x axis with linearly
        # tapering diameter.
        num_segments = 100
        num_vertices = num_segments + 1
        x = np.linspace(0, 10, num_vertices)
        y = np.zeros(num_vertices)
        z = np.zeros(num_vertices)
        d = np.linspace(1, 0.01, num_vertices)
        connectivity = range(-1, num_segments)
        vertices = np.array([x, y, z, d]).T
        self.complex_vertices = vertices
        physical_mask = np.zeros(num_vertices)
        # Mark two vertices as non-physical (floating); segments built on
        # them are skipped when iterating segments.
        physical_mask[2] = 1
        physical_mask[20] = 1
        self.complex_morphology = am.ArrayMorphology(
            vertices=vertices,
            connectivity=connectivity,
            physical_mask=physical_mask,
            id="test_arraymorph",
        )
        # A minimal 3-segment morphology used by most tests.
        self.valid_vertices = [
            [0, 0, 0, 0.1],
            [1, 0, 0, 0.2],
            [2, 0, 0, 0.3],
            [3, 0, 0, 0.4],
        ]
        self.valid_connectivity = [-1, 0, 1, 2]
        self.optimized_morphology = am.ArrayMorphology(
            vertices=self.valid_vertices,
            connectivity=self.valid_connectivity,
            id="test_arraymorph",
        )
        proximal_point = neuroml.Point3DWithDiam(
            x=0.1,
            y=0.2,
            z=0.3,
            diameter=1.1,
        )
        distal_point = neuroml.Point3DWithDiam(
            x=0.0,
            y=0.0,
            z=0.0,
            diameter=1.1,
        )
        soma = neuroml.Segment(
            proximal=proximal_point,
            distal=distal_point,
        )
        # A one-segment morphology built from a neuroml Segment object.
        self.small_morphology = am.ArrayMorphology()
        self.small_morphology.segments.append(soma)

    def test_single_segment_morphology_instantiation(self):
        print(self.small_morphology.connectivity)
        seg = self.small_morphology.segments[0]
        self.assertIsInstance(seg, neuroml.nml.nml.Segment)

    def test_single_segment_morphology_length(self):
        self.assertEqual(len(self.small_morphology.segments), 1)

    def test_index_error(self):
        """
        There is no segments[1] for a one-segment morphology
        """
        self.assertRaises(IndexError, self.small_morphology.segments.__getitem__, 1)

    def test_single_floating_segment(self):
        """
        Vertices 2 and 20 are flagged non-physical in setUp, so floating
        segments are skipped; segment 3 therefore maps to vertices 4 and 5.
        """
        seg = self.complex_morphology.segments[3]
        seg_proximal_x = seg.proximal.x
        seg_distal_x = seg.distal.x
        equivalent_proximal_vertex = self.complex_vertices[5][0]
        equivalent_distal_vertex = self.complex_vertices[4][0]
        self.assertEqual(seg_proximal_x, equivalent_proximal_vertex)
        self.assertEqual(seg_distal_x, equivalent_distal_vertex)

    def test_double_floating_segment(self):
        """
        NOTE(review): exact duplicate of test_single_floating_segment;
        presumably meant to exercise the second masked vertex (index 20).
        """
        seg = self.complex_morphology.segments[3]
        seg_proximal_x = seg.proximal.x
        seg_distal_x = seg.distal.x
        equivalent_proximal_vertex = self.complex_vertices[5][0]
        equivalent_distal_vertex = self.complex_vertices[4][0]
        self.assertEqual(seg_proximal_x, equivalent_proximal_vertex)
        self.assertEqual(seg_distal_x, equivalent_distal_vertex)

    def test_segments_len(self):
        # 100 segments minus the two skipped by the physical mask.
        num_segments = 98
        len_segment_list = len(self.complex_morphology.segments)
        self.assertEqual(num_segments, len_segment_list)

    def test_add_segment_len(self):
        """
        Add a neuroml.Segment() object, the segments proximal
        and distal vertices should be used. The internal connectivity
        should be passed.
        """
        proximal_point = neuroml.Point3DWithDiam(
            x=0.1,
            y=0.2,
            z=0.3,
            diameter=1.1,
        )
        distal_point = neuroml.Point3DWithDiam(
            x=0.0,
            y=0.0,
            z=0.0,
            diameter=1.1,
        )
        seg = neuroml.Segment(proximal=proximal_point, distal=distal_point)
        num_segments = len(self.complex_morphology.segments)
        self.complex_morphology.segments.append(seg)
        len_segment_list = len(self.complex_morphology.segments)
        self.assertEqual(num_segments + 1, len_segment_list)
        # restore the shared fixtures mutated above
        self.setUp()

    def test_add_segment_vertices_added(self):
        proximal_point = neuroml.Point3DWithDiam(
            x=0.1,
            y=0.2,
            z=0.3,
            diameter=0.1,
        )
        distal_point = neuroml.Point3DWithDiam(x=0.0, y=0.0, z=0.0, diameter=0.1)
        seg = neuroml.Segment(proximal=proximal_point, distal=distal_point)
        self.optimized_morphology.segments.append(seg)
        true_vertices = self.optimized_morphology.vertices
        # the appended segment contributes its distal then proximal vertex
        expected_vertices = np.array(
            [
                [0, 0, 0, 0.1],
                [1, 0, 0, 0.2],
                [2, 0, 0, 0.3],
                [3, 0, 0, 0.4],
                [0, 0, 0, 0.1],
                [0.1, 0.2, 0.3, 0.1],
            ]
        )
        arrays_equal = np.array_equal(true_vertices, expected_vertices)
        self.assertTrue(arrays_equal)
        # restore the shared fixtures mutated above
        self.setUp()

    def test_add_segment_connectivity_valid(self):
        # Placeholder (was misspelled "tes_..." and therefore never
        # collected by the unittest runner).
        pass

    def test_num_vertices(self):
        """
        Morphology with one segment
        """
        self.assertEqual(self.optimized_morphology.num_vertices, 4)

    def test_valid_morphology(self):
        """
        Construction should raise AssertionError when vertices and
        connectivity are inconsistent.
        """
        # We're using vertices with inconsistent dimensions here, which Numpy
        # does not like.
        # Ignore the VisibleDeprecationWarning that numpy throws.
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore", "Creating an ndarray from ragged nested sequences"
            )
            vertices = [[0, 0, 0], [1, 1]]
            connectivity = [-1, 0]
            self.assertRaises(
                AssertionError, am.ArrayMorphology, vertices, connectivity
            )
        vertices = [[0, 0, 0], [1, 1, 1]]
        connectivity = [-1, 0, 0]
        self.assertRaises(AssertionError, am.ArrayMorphology, vertices, connectivity)
        vertices = [[0, 0, 0], [1, 1, 1]]
        connectivity = []
        self.assertRaises(AssertionError, am.ArrayMorphology, vertices, connectivity)

    def test_root_index(self):
        self.assertEqual(self.optimized_morphology.root_index, 0)

    def test_physical_indeces(self):
        physical_indices = self.optimized_morphology.physical_indices
        self.assertTrue(np.array_equal(physical_indices, [0, 1, 2, 3]))

    def test_children(self):
        # NOTE(review): assertTrue's second argument is the failure message,
        # not an expected value — this only checks children(1) is truthy.
        # Probably intended as an equality check; left as-is pending review.
        self.assertTrue(self.optimized_morphology.children(1), 2)

    def test_to_root(self):
        new_morphology = am.ArrayMorphology(
            self.optimized_morphology.vertices, self.optimized_morphology.connectivity
        )
        new_morphology.to_root(2)
        new_connectivity = new_morphology.connectivity
        self.assertTrue(np.array_equal(new_connectivity, [1, 2, -1, 2]))

    def test_to_neuroml_morphology(self):
        neuroml_morphology = self.optimized_morphology.to_neuroml_morphology(id="Test")
        self.assertEqual(neuroml_morphology.id, "Test")
        self.assertEqual(len(neuroml_morphology.segments), 3)

    def test_pop(self):
        new_morphology = am.ArrayMorphology(
            self.optimized_morphology.vertices, self.optimized_morphology.connectivity
        )
        new_morphology.pop(1)
        new_connectivity = new_morphology.connectivity
        self.assertTrue(np.array_equal(new_connectivity, [-1, 0, 1]))

    def test_segment_getter(self):
        segment = self.optimized_morphology.segments[0]
        self.assertIsInstance(segment, neuroml.Segment)
        self.assertEqual(segment.proximal.diameter, 0.2)
        self.assertEqual(segment.distal.diameter, 0.1)

    def test_segmentlist_getter(self):
        segment = self.optimized_morphology.segments[1]
        segment_again = self.optimized_morphology.segments[1]
        self.assertEqual(segment, segment_again)

    def test_segmentlist_setter(self):
        p = neuroml.Point3DWithDiam(x=0.9, y=0.0, z=0.0, diameter=0.1)
        d = neuroml.Point3DWithDiam(x=0.0, y=0.0, z=0.0, diameter=0.1)
        new_segment = neuroml.Segment(proximal=p, distal=d)
        self.optimized_morphology.segments[2] = new_segment
        self.assertEqual(self.optimized_morphology.segments[2], new_segment)

    def test_segmentlist_setter_by_inference(self):
        p = neuroml.Point3DWithDiam(x=0.9, y=0.0, z=0.0, diameter=0.1)
        d = neuroml.Point3DWithDiam(x=0.0, y=0.0, z=0.0, diameter=0.1)
        new_segment = neuroml.Segment(proximal=p, distal=d)
        self.optimized_morphology.segments[2] = new_segment
        self.assertEqual(self.optimized_morphology.segments[2].proximal.x, 0.9)

    def test_instantiation(self):
        """
        Test an arraymorph can be instantiated with default parameters
        """
        am.ArrayMorphology()

    def test_parents(self):
        """
        A segment by default uses its vertex index as its ID,
        as a consequence the first segment has index = 1
        """
        test_segment_1 = self.optimized_morphology.segments[0]
        test_segment_2 = self.optimized_morphology.segments[1]
        self.assertEqual(test_segment_1.id, 1)
        self.assertEqual(test_segment_2.id, 2)
        self.assertEqual(test_segment_2.parent.segments, 1)
        self.assertIsNone(test_segment_1.parent)

    def test_valid_morphology_ids(self):
        morphology = self.optimized_morphology
        self.assertTrue(morphology.valid_ids)

    def test_invalid_morphology_ids(self):
        morphology = self.optimized_morphology
        morphology.segments[0].id = 5
        self.assertFalse(morphology.valid_ids)

    def test_large_arraymorph(self):
        """
        This will generate a morphology which will be difficult to
        generate without the optimized internal representation.
        The morphology has one million segments.
        """
        num_segments = int(1e6)
        num_vertices = num_segments + 1
        x = np.linspace(0, 10, num_vertices)
        y = np.zeros(num_vertices)
        z = np.zeros(num_vertices)
        d = np.linspace(1, 0.01, num_vertices)
        vertices = np.array([x, y, z, d]).T
        connectivity = range(-1, num_segments)
        big_arraymorph = am.ArrayMorphology(
            vertices=vertices, connectivity=connectivity
        )
        self.assertIsInstance(big_arraymorph.segments[3], neuroml.Segment)
        self.assertEqual(big_arraymorph.segments[0].distal.diameter, 1.0)
        # following test not as obvious as it seems - first execution of getter does not have the same result as second
        self.assertEqual(big_arraymorph.segments[2333], big_arraymorph.segments[2333])
        self.assertEqual(big_arraymorph.segments[0].distal.diameter, 1.0)
        self.assertEqual(big_arraymorph.segments[num_segments - 1].proximal.x, 10.0)
        self.assertEqual(big_arraymorph.segments[0].distal.x, 0.0)
        self.assertEqual(
            big_arraymorph.segments[num_segments - 1].proximal.diameter, 0.01
        )
| |
# oracle/cx_oracle.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: oracle+cx_oracle
:name: cx-Oracle
:dbapi: cx_oracle
:connectstring: oracle+cx_oracle://user:pass@host:port/dbname[?key=value&key=value...]
:url: http://cx-oracle.sourceforge.net/
Additional Connect Arguments
----------------------------
When connecting with ``dbname`` present, the host, port, and dbname tokens are
converted to a TNS name using
the cx_oracle ``makedsn()`` function. Otherwise, the host token is taken
directly as a TNS name.
Additional arguments which may be specified either as query string arguments
on the URL, or as keyword arguments to :func:`.create_engine()` are:
* ``allow_twophase`` - enable two-phase transactions. Defaults to ``True``.
* ``arraysize`` - set the cx_oracle.arraysize value on cursors, defaulted
to 50. This setting is significant with cx_Oracle as the contents of LOB
objects are only readable within a "live" row (e.g. within a batch of
50 rows).
* ``auto_convert_lobs`` - defaults to True; See :ref:`cx_oracle_lob`.
* ``auto_setinputsizes`` - the cx_oracle.setinputsizes() call is issued for
all bind parameters. This is required for LOB datatypes but can be
disabled to reduce overhead. Defaults to ``True``. Specific types
can be excluded from this process using the ``exclude_setinputsizes``
parameter.
* ``coerce_to_unicode`` - see :ref:`cx_oracle_unicode` for detail.
* ``coerce_to_decimal`` - see :ref:`cx_oracle_numeric` for detail.
* ``exclude_setinputsizes`` - a tuple or list of string DBAPI type names to
be excluded from the "auto setinputsizes" feature. The type names here
must match DBAPI types that are found in the "cx_Oracle" module namespace,
such as cx_Oracle.UNICODE, cx_Oracle.NCLOB, etc. Defaults to
``(STRING, UNICODE)``.
.. versionadded:: 0.8 specific DBAPI types can be excluded from the
auto_setinputsizes feature via the exclude_setinputsizes attribute.
* ``mode`` - This is given the string value of SYSDBA or SYSOPER, or alternatively
an integer value. This value is only available as a URL query string
argument.
* ``threaded`` - enable multithreaded access to cx_oracle connections. Defaults
to ``True``. Note that this is the opposite default of the cx_Oracle DBAPI
itself.
.. _cx_oracle_unicode:
Unicode
-------
The cx_Oracle DBAPI as of version 5 fully supports unicode, and has the ability
to return string results as Python unicode objects natively.
When used in Python 3, cx_Oracle returns all strings as Python unicode objects
(that is, plain ``str`` in Python 3). In Python 2, it will return as Python
unicode those column values that are of type ``NVARCHAR`` or ``NCLOB``. For
column values that are of type ``VARCHAR`` or other non-unicode string types,
it will return values as Python strings (e.g. bytestrings).
The cx_Oracle SQLAlchemy dialect presents two different options for the use case of
returning ``VARCHAR`` column values as Python unicode objects under Python 2:
* the cx_Oracle DBAPI has the ability to coerce all string results to Python
unicode objects unconditionally using output type handlers. This has
the advantage that the unicode conversion is global to all statements
at the cx_Oracle driver level, meaning it works with raw textual SQL
statements that have no typing information associated. However, this system
has been observed to incur significant performance overhead, not only because
it takes effect for all string values unconditionally, but also because cx_Oracle under
Python 2 seems to use a pure-Python function call in order to do the
decode operation, which under cPython can be orders of magnitude slower
than doing it using C functions alone.
* SQLAlchemy has unicode-decoding services built in, and when using SQLAlchemy's
C extensions, these functions do not use any Python function calls and
are very fast. The disadvantage to this approach is that the unicode
conversion only takes effect for statements where the :class:`.Unicode` type
or :class:`.String` type with ``convert_unicode=True`` is explicitly
associated with the result column. This is the case for any ORM or Core
query or SQL expression as well as for a :func:`.text` construct that specifies
output column types, so in the vast majority of cases this is not an issue.
However, when sending a completely raw string to :meth:`.Connection.execute`,
this typing information isn't present, unless the string is handled
within a :func:`.text` construct that adds typing information.
As of version 0.9.2 of SQLAlchemy, the default approach is to use SQLAlchemy's
typing system. This keeps cx_Oracle's expensive Python 2 approach
disabled unless the user explicitly wants it. Under Python 3, SQLAlchemy detects
that cx_Oracle is returning unicode objects natively and cx_Oracle's system
is used.
To re-enable cx_Oracle's output type handler under Python 2, the
``coerce_to_unicode=True`` flag (new in 0.9.4) can be passed to
:func:`.create_engine`::
engine = create_engine("oracle+cx_oracle://dsn", coerce_to_unicode=True)
Alternatively, to run a pure string SQL statement and get ``VARCHAR`` results
as Python unicode under Python 2 without using cx_Oracle's native handlers,
the :func:`.text` feature can be used::
from sqlalchemy import text, Unicode
result = conn.execute(text("select username from user").columns(username=Unicode))
.. versionchanged:: 0.9.2 cx_Oracle's outputtypehandlers are no longer used for
unicode results of non-unicode datatypes in Python 2, after they were identified as a major
performance bottleneck. SQLAlchemy's own unicode facilities are used
instead.
.. versionadded:: 0.9.4 Added the ``coerce_to_unicode`` flag, to re-enable
cx_Oracle's outputtypehandler and revert to pre-0.9.2 behavior.
.. _cx_oracle_returning:
RETURNING Support
-----------------
The cx_oracle DBAPI supports a limited subset of Oracle's already limited RETURNING support.
Typically, results can only be guaranteed for at most one column being returned;
this is the typical case when SQLAlchemy uses RETURNING to get just the value of a
primary-key-associated sequence value. Additional column expressions will
cause problems in a non-determinative way, due to cx_oracle's lack of support for
the OCI_DATA_AT_EXEC API which is required for more complex RETURNING scenarios.
For this reason, stability may be enhanced by disabling RETURNING support completely;
SQLAlchemy otherwise will use RETURNING to fetch newly sequence-generated
primary keys. As illustrated in :ref:`oracle_returning`::
engine = create_engine("oracle://scott:tiger@dsn", implicit_returning=False)
.. seealso::
http://docs.oracle.com/cd/B10501_01/appdev.920/a96584/oci05bnd.htm#420693 - OCI documentation for RETURNING
http://sourceforge.net/mailarchive/message.php?msg_id=31338136 - cx_oracle developer commentary
.. _cx_oracle_lob:
LOB Objects
-----------
cx_oracle returns oracle LOBs using the cx_oracle.LOB object. SQLAlchemy converts
these to strings so that the interface of the Binary type is consistent with that of
other backends, and so that the linkage to a live cursor is not needed in scenarios
like result.fetchmany() and result.fetchall(). This means that by default, LOB
objects are fully fetched unconditionally by SQLAlchemy, and the linkage to a live
cursor is broken.
To disable this processing, pass ``auto_convert_lobs=False`` to :func:`.create_engine()`.
Two Phase Transaction Support
-----------------------------
Two Phase transactions are implemented using XA transactions, and are known
to work in a rudimental fashion with recent versions of cx_Oracle
as of SQLAlchemy 0.8.0b2, 0.7.10. However, the mechanism is not yet
considered to be robust and should still be regarded as experimental.
In particular, the cx_Oracle DBAPI as recently as 5.1.2 has a bug regarding
two phase which prevents
a particular DBAPI connection from being consistently usable in both
prepared transactions as well as traditional DBAPI usage patterns; therefore
once a particular connection is used via :meth:`.Connection.begin_prepared`,
all subsequent usages of the underlying DBAPI connection must be within
the context of prepared transactions.
The default behavior of :class:`.Engine` is to maintain a pool of DBAPI
connections. Therefore, due to the above glitch, a DBAPI connection that has
been used in a two-phase operation, and is then returned to the pool, will
not be usable in a non-two-phase context. To avoid this situation,
the application can make one of several choices:
* Disable connection pooling using :class:`.NullPool`
* Ensure that the particular :class:`.Engine` in use is only used
for two-phase operations. A :class:`.Engine` bound to an ORM
:class:`.Session` which includes ``twophase=True`` will consistently
use the two-phase transaction style.
* For ad-hoc two-phase operations without disabling pooling, the DBAPI
connection in use can be evicted from the connection pool using the
:meth:`.Connection.detach` method.
.. versionchanged:: 0.8.0b2,0.7.10
Support for cx_oracle prepared transactions has been implemented
and tested.
.. _cx_oracle_numeric:
Precision Numerics
------------------
The SQLAlchemy dialect goes through a lot of steps to ensure
that decimal numbers are sent and received with full accuracy.
An "outputtypehandler" callable is associated with each
cx_oracle connection object which detects numeric types and
receives them as string values, instead of receiving a Python
``float`` directly, which is then passed to the Python
``Decimal`` constructor. The :class:`.Numeric` and
:class:`.Float` types under the cx_oracle dialect are aware of
this behavior, and will coerce the ``Decimal`` to ``float`` if
the ``asdecimal`` flag is ``False`` (default on :class:`.Float`,
optional on :class:`.Numeric`).
Because the handler coerces to ``Decimal`` in all cases first,
the feature can detract significantly from performance.
If precision numerics aren't required, the decimal handling
can be disabled by passing the flag ``coerce_to_decimal=False``
to :func:`.create_engine`::
engine = create_engine("oracle+cx_oracle://dsn", coerce_to_decimal=False)
.. versionadded:: 0.7.6
Add the ``coerce_to_decimal`` flag.
Another alternative to performance is to use the
`cdecimal <http://pypi.python.org/pypi/cdecimal/>`_ library;
see :class:`.Numeric` for additional notes.
The handler attempts to use the "precision" and "scale"
attributes of the result set column to best determine if
subsequent incoming values should be received as ``Decimal`` as
opposed to int (in which case no processing is added). There are
several scenarios where OCI_ does not provide unambiguous data
as to the numeric type, including some situations where
individual rows may return a combination of floating point and
integer values. Certain values for "precision" and "scale" have
been observed to determine this scenario. When it occurs, the
outputtypehandler receives as string and then passes off to a
processing function which detects, for each returned value, if a
decimal point is present, and if so converts to ``Decimal``,
otherwise to int. The intention is that simple int-based
statements like "SELECT my_seq.nextval() FROM DUAL" continue to
return ints and not ``Decimal`` objects, and that any kind of
floating point value is received as a string so that there is no
floating point loss of precision.
The "decimal point is present" logic itself is also sensitive to
locale. Under OCI_, this is controlled by the NLS_LANG
environment variable. Upon first connection, the dialect runs a
test to determine the current "decimal" character, which can be
a comma "," for European locales. From that point forward the
outputtypehandler uses that character to represent a decimal
point. Note that cx_oracle 5.0.3 or greater is required
when dealing with numerics with locale settings that don't use
a period "." as the decimal character.
.. versionchanged:: 0.6.6
The outputtypehandler supports the case where the locale uses a
comma "," character to represent a decimal point.
.. _OCI: http://www.oracle.com/technetwork/database/features/oci/index.html
"""
from __future__ import absolute_import
from .base import OracleCompiler, OracleDialect, OracleExecutionContext
from . import base as oracle
from ...engine import result as _result
from sqlalchemy import types as sqltypes, util, exc, processors
import random
import collections
import decimal
import re
class _OracleNumeric(sqltypes.Numeric):
    """Numeric type that cooperates with the connection-wide cx_oracle
    output type handler installed by the dialect."""

    def bind_processor(self, dialect):
        # cx_oracle accepts Decimal objects and floats
        return None

    def result_processor(self, dialect, coltype):
        # we apply a cx_oracle type handler to all connections
        # that converts floating point strings to Decimal().
        # However, in some subquery situations, Oracle doesn't
        # give us enough information to determine int or Decimal.
        # It could even be int/Decimal differently on each row,
        # regardless of the scale given for the originating type.
        # So we still need an old school isinstance() handler
        # here for decimals.
        if dialect.supports_native_decimal:
            if self.asdecimal:
                # render non-Decimal values through a fixed-scale format
                # string before handing them to Decimal()
                fstring = "%%.%df" % self._effective_decimal_return_scale

                def to_decimal(value):
                    if value is None:
                        return None
                    elif isinstance(value, decimal.Decimal):
                        # already converted by the output type handler
                        return value
                    else:
                        return decimal.Decimal(fstring % value)

                return to_decimal
            else:
                if self.precision is None and self.scale is None:
                    # fully generic numeric: force float
                    return processors.to_float
                elif not getattr(self, '_is_oracle_number', False) \
                        and self.scale is not None:
                    return processors.to_float
                else:
                    return None
        else:
            # cx_oracle 4 behavior, will assume
            # floats
            return super(_OracleNumeric, self).\
                result_processor(dialect, coltype)
class _OracleDate(sqltypes.Date):
    """DATE type; trims the time component from incoming values."""

    def bind_processor(self, dialect):
        # outgoing values need no conversion
        return None

    def result_processor(self, dialect, coltype):
        # reduce incoming values to plain dates via .date(), passing
        # None through untouched
        def process(value):
            return value.date() if value is not None else value

        return process
class _LOBMixin(object):
    """Mixin that eagerly reads cx_oracle LOB objects into strings."""

    def result_processor(self, dialect, coltype):
        if not dialect.auto_convert_lobs:
            # return the cx_oracle.LOB directly.
            return None

        # read the LOB immediately so the value stays usable after the
        # originating cursor row is no longer live
        def process(value):
            return value.read() if value is not None else value

        return process
class _NativeUnicodeMixin(object):
    # On Python 2 with a WITH_UNICODE build of cx_Oracle, every bind value
    # must be a unicode object; coerce plain strings on the way in.
    if util.py2k:
        def bind_processor(self, dialect):
            if dialect._cx_oracle_with_unicode:
                def process(value):
                    if value is None:
                        return value
                    else:
                        return unicode(value)
                return process
            else:
                # non-WITH_UNICODE build: defer to the base String behavior
                return super(_NativeUnicodeMixin, self).bind_processor(dialect)

    # we apply a connection output handler that returns
    # unicode in all cases, so the "native_unicode" flag
    # will be set for the default String.result_processor.
class _OracleChar(_NativeUnicodeMixin, sqltypes.CHAR):
    def get_dbapi_type(self, dbapi):
        # CHAR columns bind as cx_Oracle's fixed-width character type
        return dbapi.FIXED_CHAR
class _OracleNVarChar(_NativeUnicodeMixin, sqltypes.NVARCHAR):
    def get_dbapi_type(self, dbapi):
        # UNICODE is absent in WITH_UNICODE builds; fall back to STRING
        return getattr(dbapi, 'UNICODE', dbapi.STRING)
class _OracleText(_LOBMixin, sqltypes.Text):
    def get_dbapi_type(self, dbapi):
        # Text maps to CLOB; _LOBMixin handles reading the LOB eagerly
        return dbapi.CLOB
class _OracleLong(oracle.LONG):
    # a raw LONG is a text type, but does *not*
    # get the LobMixin with cx_oracle.
    def get_dbapi_type(self, dbapi):
        return dbapi.LONG_STRING
class _OracleString(_NativeUnicodeMixin, sqltypes.String):
    # plain VARCHAR handling; only the py2k unicode bind coercion from
    # _NativeUnicodeMixin applies
    pass
class _OracleUnicodeText(_LOBMixin, _NativeUnicodeMixin, sqltypes.UnicodeText):
    """NCLOB-backed unicode text; composes LOB reading with string decoding."""

    def get_dbapi_type(self, dbapi):
        return dbapi.NCLOB

    def result_processor(self, dialect, coltype):
        # when LOB conversion is disabled there is nothing to do at all
        lob_processor = _LOBMixin.result_processor(self, dialect, coltype)
        if lob_processor is None:
            return None

        string_processor = sqltypes.UnicodeText.result_processor(
            self, dialect, coltype)
        if string_processor is None:
            # no decode step required; reading the LOB suffices
            return lob_processor

        # read the LOB first, then apply the string conversion
        def process(value):
            return string_processor(lob_processor(value))

        return process
class _OracleInteger(sqltypes.Integer):
    def result_processor(self, dialect, coltype):
        # the connection-level numeric handler may deliver non-int values;
        # normalize anything non-None to int
        def to_int(val):
            return val if val is None else int(val)

        return to_int
class _OracleBinary(_LOBMixin, sqltypes.LargeBinary):
    def get_dbapi_type(self, dbapi):
        return dbapi.BLOB

    def bind_processor(self, dialect):
        # no bind-side conversion; values are passed through as-is
        return None
class _OracleInterval(oracle.INTERVAL):
    def get_dbapi_type(self, dbapi):
        # binds as cx_Oracle's native INTERVAL type
        return dbapi.INTERVAL
class _OracleRaw(oracle.RAW):
    # no cx_oracle-specific behavior needed beyond the base RAW type
    pass
class _OracleRowid(oracle.ROWID):
    def get_dbapi_type(self, dbapi):
        return dbapi.ROWID
class OracleCompiler_cx_oracle(OracleCompiler):
    def bindparam_string(self, name, **kw):
        # Bind names that were explicitly flagged quote=True, or that the
        # preparer says require quoting (and weren't flagged quote=False),
        # are rendered quoted; record the mapping so the execution context
        # can rewrite parameter dictionaries to match.
        quote = getattr(name, 'quote', None)
        needs_quoting = quote is True or (
            quote is not False
            and self.preparer._bindparam_requires_quotes(name)
        )
        if not needs_quoting:
            return OracleCompiler.bindparam_string(self, name, **kw)
        quoted_name = '"%s"' % name
        self._quoted_bind_names[name] = quoted_name
        return OracleCompiler.bindparam_string(self, quoted_name, **kw)
class OracleExecutionContext_cx_oracle(OracleExecutionContext):
    """Execution context handling quoted bind names, setinputsizes and
    OUT parameters for the cx_oracle DBAPI."""

    def pre_exec(self):
        # Rewrite parameter dict keys to use the quoted bind names the
        # compiler recorded, so they match the rendered statement.
        quoted_bind_names = \
            getattr(self.compiled, '_quoted_bind_names', None)
        if quoted_bind_names:
            if not self.dialect.supports_unicode_statements:
                # if DBAPI doesn't accept unicode statements,
                # keys in self.parameters would have been encoded
                # here. so convert names in quoted_bind_names
                # to encoded as well.
                quoted_bind_names = \
                    dict(
                        (fromname.encode(self.dialect.encoding),
                         toname.encode(self.dialect.encoding))
                        for fromname, toname in
                        quoted_bind_names.items()
                    )
            for param in self.parameters:
                for fromname, toname in quoted_bind_names.items():
                    param[toname] = param[fromname]
                    del param[fromname]

        if self.dialect.auto_setinputsizes:
            # cx_oracle really has issues when you setinputsizes
            # on String, including that outparams/RETURNING
            # breaks for varchars
            self.set_input_sizes(quoted_bind_names,
                                 exclude_types=self.dialect.exclude_setinputsizes
                                 )

        # if a single execute, check for outparams
        if len(self.compiled_parameters) == 1:
            for bindparam in self.compiled.binds.values():
                if bindparam.isoutparam:
                    dbtype = bindparam.type.dialect_impl(self.dialect).\
                        get_dbapi_type(self.dialect.dbapi)
                    if not hasattr(self, 'out_parameters'):
                        self.out_parameters = {}
                    if dbtype is None:
                        raise exc.InvalidRequestError(
                            "Cannot create out parameter for parameter "
                            "%r - its type %r is not supported by"
                            " cx_oracle" %
                            (bindparam.key, bindparam.type)
                        )
                    name = self.compiled.bind_names[bindparam]
                    # allocate a cursor variable to receive the OUT value,
                    # and bind it under the (possibly quoted) name
                    self.out_parameters[name] = self.cursor.var(dbtype)
                    self.parameters[0][quoted_bind_names.get(name, name)] = \
                        self.out_parameters[name]

    def create_cursor(self):
        c = self._dbapi_connection.cursor()
        if self.dialect.arraysize:
            # apply the dialect-level arraysize (default 50) to the cursor
            c.arraysize = self.dialect.arraysize

        return c

    def get_result_proxy(self):
        # RETURNING with OUT params: short-circuit to a fully buffered
        # proxy built from the out-parameter values.
        if hasattr(self, 'out_parameters') and self.compiled.returning:
            returning_params = dict(
                (k, v.getvalue())
                for k, v in self.out_parameters.items()
            )
            return ReturningResultProxy(self, returning_params)

        result = None
        if self.cursor.description is not None:
            # binary (LOB-typed) columns require column-buffered fetching
            for column in self.cursor.description:
                type_code = column[1]
                if type_code in self.dialect._cx_oracle_binary_types:
                    result = _result.BufferedColumnResultProxy(self)

        if result is None:
            result = _result.ResultProxy(self)

        if hasattr(self, 'out_parameters'):
            if self.compiled_parameters is not None and \
                    len(self.compiled_parameters) == 1:
                result.out_parameters = out_parameters = {}

                for bind, name in self.compiled.bind_names.items():
                    if name in self.out_parameters:
                        type = bind.type
                        impl_type = type.dialect_impl(self.dialect)
                        dbapi_type = impl_type.get_dbapi_type(self.dialect.dbapi)
                        # run the type's result processing over the OUT
                        # value when one is defined
                        result_processor = impl_type.\
                            result_processor(self.dialect,
                                             dbapi_type)
                        if result_processor is not None:
                            out_parameters[name] = \
                                result_processor(self.out_parameters[name].getvalue())
                        else:
                            out_parameters[name] = self.out_parameters[name].getvalue()
            else:
                # executemany case: expose raw OUT values
                result.out_parameters = dict(
                    (k, v.getvalue())
                    for k, v in self.out_parameters.items()
                )

        return result
class OracleExecutionContext_cx_oracle_with_unicode(OracleExecutionContext_cx_oracle):
    """Support WITH_UNICODE in Python 2.xx.

    WITH_UNICODE allows cx_Oracle's Python 3 unicode handling
    behavior under Python 2.x. This mode in some cases disallows
    and in other cases silently passes corrupted data when
    non-Python-unicode strings (a.k.a. plain old Python strings)
    are passed as arguments to connect(), the statement sent to execute(),
    or any of the bind parameter keys or values sent to execute().
    This optional context therefore ensures that all statements are
    passed as Python unicode objects.
    """

    def __init__(self, *arg, **kw):
        OracleExecutionContext_cx_oracle.__init__(self, *arg, **kw)
        # coerce the rendered statement itself to unicode
        self.statement = util.text_type(self.statement)

    def _execute_scalar(self, stmt):
        # coerce ad-hoc scalar statements (e.g. sequence fetches) too
        return super(OracleExecutionContext_cx_oracle_with_unicode, self).\
            _execute_scalar(util.text_type(stmt))
class ReturningResultProxy(_result.FullyBufferedResultProxy):
    """Result proxy which stuffs the _returning clause + outparams into the fetch."""

    def __init__(self, context, returning_params):
        # mapping of "ret_<n>" -> out-parameter value, captured by the
        # execution context before the cursor is discarded
        self._returning_params = returning_params
        super(ReturningResultProxy, self).__init__(context)

    def _cursor_description(self):
        # fabricate a cursor description: one column per RETURNING
        # expression, labelled ret_0, ret_1, ...
        returning = self.context.compiled.returning
        return [
            ("ret_%d" % i, None)
            for i, col in enumerate(returning)
        ]

    def _buffer_rows(self):
        # a single synthetic row assembled from the out-parameter values
        return collections.deque([tuple(self._returning_params["ret_%d" % i]
                                  for i, c in enumerate(self._returning_params))])
class OracleDialect_cx_oracle(OracleDialect):
    """Oracle dialect implemented on top of the cx_Oracle DBAPI.

    Handles cx_Oracle version differences (output type handlers exist
    only in version 5+), optional unicode / decimal coercion, LOB
    conversion, and two-phase transaction hooks.
    """

    execution_ctx_cls = OracleExecutionContext_cx_oracle
    statement_compiler = OracleCompiler_cx_oracle
    driver = "cx_oracle"

    # NOTE(review): the doubled "colspecs = colspecs =" assignment is
    # redundant (both targets are the same class attribute) but harmless.
    colspecs = colspecs = {
        sqltypes.Numeric: _OracleNumeric,
        sqltypes.Date: _OracleDate,  # generic type, assume datetime.date is desired
        sqltypes.LargeBinary: _OracleBinary,
        sqltypes.Boolean: oracle._OracleBoolean,
        sqltypes.Interval: _OracleInterval,
        oracle.INTERVAL: _OracleInterval,
        sqltypes.Text: _OracleText,
        sqltypes.String: _OracleString,
        sqltypes.UnicodeText: _OracleUnicodeText,
        sqltypes.CHAR: _OracleChar,

        # a raw LONG is a text type, but does *not*
        # get the LobMixin with cx_oracle.
        oracle.LONG: _OracleLong,

        # this is only needed for OUT parameters.
        # it would be nice if we could not use it otherwise.
        sqltypes.Integer: _OracleInteger,

        oracle.RAW: _OracleRaw,
        sqltypes.Unicode: _OracleNVarChar,
        sqltypes.NVARCHAR: _OracleNVarChar,
        oracle.ROWID: _OracleRowid,
    }

    # cx_Oracle's executemany() receives parameter sets as a list
    execute_sequence_format = list

    def __init__(self,
                 auto_setinputsizes=True,
                 exclude_setinputsizes=("STRING", "UNICODE"),
                 auto_convert_lobs=True,
                 threaded=True,
                 allow_twophase=True,
                 coerce_to_decimal=True,
                 coerce_to_unicode=False,
                 arraysize=50, **kwargs):
        """Configure cx_Oracle-specific behavior.

        :param auto_setinputsizes: drive cursor.setinputsizes() automatically.
        :param exclude_setinputsizes: names of cx_Oracle type objects to
            exclude from setinputsizes (resolved via getattr on the DBAPI).
        :param auto_convert_lobs: map LOB DBAPI types so LOB values are read
            into plain values.
        :param threaded: passed to cx_Oracle.connect().
        :param allow_twophase: passed to cx_Oracle.connect() as ``twophase``.
        :param coerce_to_decimal: return scaled NUMBER results as Decimal
            (effective only on cx_Oracle >= 5).
        :param coerce_to_unicode: return STRING/FIXED_CHAR results as unicode
            (effective only on cx_Oracle >= 5).
        :param arraysize: default cursor arraysize.
        """
        OracleDialect.__init__(self, **kwargs)
        self.threaded = threaded
        self.arraysize = arraysize
        self.allow_twophase = allow_twophase
        self.supports_timestamp = self.dbapi is None or \
            hasattr(self.dbapi, 'TIMESTAMP')
        self.auto_setinputsizes = auto_setinputsizes
        self.auto_convert_lobs = auto_convert_lobs

        # parse the cx_Oracle version into a comparable tuple;
        # (0, 0, 0) when the DBAPI exposes no version (e.g. mocks)
        if hasattr(self.dbapi, 'version'):
            self.cx_oracle_ver = tuple([int(x) for x in
                                        self.dbapi.version.split('.')])
        else:
            self.cx_oracle_ver = (0, 0, 0)

        def types(*names):
            # resolve DBAPI type-object names, dropping those not present
            return set(
                getattr(self.dbapi, name, None) for name in names
            ).difference([None])

        self.exclude_setinputsizes = types(*(exclude_setinputsizes or ()))
        self._cx_oracle_string_types = types("STRING", "UNICODE",
                                             "NCLOB", "CLOB")
        self._cx_oracle_unicode_types = types("UNICODE", "NCLOB")
        self._cx_oracle_binary_types = types("BFILE", "CLOB", "NCLOB", "BLOB")

        # features below require cx_Oracle 5.0+
        self.supports_unicode_binds = self.cx_oracle_ver >= (5, 0)
        self.coerce_to_unicode = (
            self.cx_oracle_ver >= (5, 0) and
            coerce_to_unicode
        )
        self.supports_native_decimal = (
            self.cx_oracle_ver >= (5, 0) and
            coerce_to_decimal
        )
        self._cx_oracle_native_nvarchar = self.cx_oracle_ver >= (5, 0)

        # NOTE(review): cx_oracle_ver is assigned (0, 0, 0) above when no
        # version is available, so this ``is None`` branch appears
        # unreachable as written -- confirm intent.
        if self.cx_oracle_ver is None:
            # this occurs in tests with mock DBAPIs
            self._cx_oracle_string_types = set()
            self._cx_oracle_with_unicode = False
        elif self.cx_oracle_ver >= (5,) and not hasattr(self.dbapi, 'UNICODE'):
            # cx_Oracle WITH_UNICODE mode. *only* python
            # unicode objects accepted for anything
            self.supports_unicode_statements = True
            self.supports_unicode_binds = True
            self._cx_oracle_with_unicode = True

            if util.py2k:
                # There's really no reason to run with WITH_UNICODE under Python 2.x.
                # Give the user a hint.
                util.warn(
                    "cx_Oracle is compiled under Python 2.xx using the "
                    "WITH_UNICODE flag. Consider recompiling cx_Oracle "
                    "without this flag, which is in no way necessary for full "
                    "support of Unicode. Otherwise, all string-holding bind "
                    "parameters must be explicitly typed using SQLAlchemy's "
                    "String type or one of its subtypes,"
                    "or otherwise be passed as Python unicode. "
                    "Plain Python strings passed as bind parameters will be "
                    "silently corrupted by cx_Oracle."
                )
                self.execution_ctx_cls = \
                    OracleExecutionContext_cx_oracle_with_unicode
        else:
            self._cx_oracle_with_unicode = False

        if self.cx_oracle_ver is None or \
                not self.auto_convert_lobs or \
                not hasattr(self.dbapi, 'CLOB'):
            self.dbapi_type_map = {}
        else:
            # only use this for LOB objects. using it for strings, dates
            # etc. leads to a little too much magic, reflection doesn't know if it should
            # expect encoded strings or unicodes, etc.
            self.dbapi_type_map = {
                self.dbapi.CLOB: oracle.CLOB(),
                self.dbapi.NCLOB: oracle.NCLOB(),
                self.dbapi.BLOB: oracle.BLOB(),
                self.dbapi.BINARY: oracle.RAW(),
            }

    @classmethod
    def dbapi(cls):
        """Import and return the cx_Oracle DBAPI module."""
        import cx_Oracle
        return cx_Oracle

    def initialize(self, connection):
        """Dialect initialization; disables unicode binds on Oracle 8."""
        super(OracleDialect_cx_oracle, self).initialize(connection)
        if self._is_oracle_8:
            self.supports_unicode_binds = False
        self._detect_decimal_char(connection)

    def _detect_decimal_char(self, connection):
        """detect if the decimal separator character is not '.', as
        is the case with European locale settings for NLS_LANG.

        cx_oracle itself uses similar logic when it formats Python
        Decimal objects to strings on the bind side (as of 5.0.3),
        as Oracle sends/receives string numerics only in the
        current locale.

        """
        if self.cx_oracle_ver < (5,):
            # no output type handlers before version 5
            return

        cx_Oracle = self.dbapi
        conn = connection.connection

        # override the output_type_handler that's
        # on the cx_oracle connection with a plain
        # one on the cursor

        def output_type_handler(cursor, name, defaultType,
                                size, precision, scale):
            return cursor.var(
                cx_Oracle.STRING,
                255, arraysize=cursor.arraysize)

        cursor = conn.cursor()
        cursor.outputtypehandler = output_type_handler
        cursor.execute("SELECT 0.1 FROM DUAL")
        val = cursor.fetchone()[0]
        cursor.close()
        # first character of "0<sep>1" in the session locale
        char = re.match(r"([\.,])", val).group(1)
        if char != '.':
            # wrap the converters to normalize the locale separator to '.'
            _detect_decimal = self._detect_decimal
            self._detect_decimal = \
                lambda value: _detect_decimal(value.replace(char, '.'))
            self._to_decimal = \
                lambda value: decimal.Decimal(value.replace(char, '.'))

    def _detect_decimal(self, value):
        """Convert a numeric string to Decimal if it has a fractional
        part, otherwise to int."""
        if "." in value:
            return decimal.Decimal(value)
        else:
            return int(value)

    # default string->Decimal converter; replaced in _detect_decimal_char()
    # when the session's decimal separator is not '.'
    _to_decimal = decimal.Decimal

    def on_connect(self):
        """Return a connect-event hook that installs the output type
        handler for decimal/unicode coercion; None for cx_Oracle < 5."""
        if self.cx_oracle_ver < (5,):
            # no output type handlers before version 5
            return

        cx_Oracle = self.dbapi

        def output_type_handler(cursor, name, defaultType,
                                size, precision, scale):
            # convert all NUMBER with precision + positive scale to Decimal
            # this almost allows "native decimal" mode.
            if self.supports_native_decimal and \
                    defaultType == cx_Oracle.NUMBER and \
                    precision and scale > 0:
                return cursor.var(
                    cx_Oracle.STRING,
                    255,
                    outconverter=self._to_decimal,
                    arraysize=cursor.arraysize)

            # if NUMBER with zero precision and 0 or neg scale, this appears
            # to indicate "ambiguous".  Use a slower converter that will
            # make a decision based on each value received - the type
            # may change from row to row (!).   This kills
            # off "native decimal" mode, handlers still needed.
            elif self.supports_native_decimal and \
                    defaultType == cx_Oracle.NUMBER \
                    and not precision and scale <= 0:
                return cursor.var(
                    cx_Oracle.STRING,
                    255,
                    outconverter=self._detect_decimal,
                    arraysize=cursor.arraysize)

            # allow all strings to come back natively as Unicode
            elif self.coerce_to_unicode and \
                    defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
                return cursor.var(util.text_type, size, cursor.arraysize)

        def on_connect(conn):
            conn.outputtypehandler = output_type_handler

        return on_connect

    def create_connect_args(self, url):
        """Translate a SQLAlchemy URL into cx_Oracle.connect() arguments.

        Returns ``([], opts)``: everything is passed as keyword args.
        """
        dialect_opts = dict(url.query)

        # URL query params may override dialect-level flags
        for opt in ('use_ansi', 'auto_setinputsizes', 'auto_convert_lobs',
                    'threaded', 'allow_twophase'):
            if opt in dialect_opts:
                util.coerce_kw_type(dialect_opts, opt, bool)
                setattr(self, opt, dialect_opts[opt])

        if url.database:
            # if we have a database, then we have a remote host
            port = url.port
            if port:
                port = int(port)
            else:
                port = 1521
            dsn = self.dbapi.makedsn(url.host, port, url.database)
        else:
            # we have a local tnsname
            dsn = url.host

        opts = dict(
            user=url.username,
            password=url.password,
            dsn=dsn,
            threaded=self.threaded,
            twophase=self.allow_twophase,
        )

        if util.py2k:
            if self._cx_oracle_with_unicode:
                # WITH_UNICODE builds accept only unicode strings
                for k, v in opts.items():
                    if isinstance(v, str):
                        opts[k] = unicode(v)
            else:
                for k, v in opts.items():
                    if isinstance(v, unicode):
                        opts[k] = str(v)

        if 'mode' in url.query:
            opts['mode'] = url.query['mode']
            if isinstance(opts['mode'], util.string_types):
                mode = opts['mode'].upper()
                if mode == 'SYSDBA':
                    opts['mode'] = self.dbapi.SYSDBA
                elif mode == 'SYSOPER':
                    opts['mode'] = self.dbapi.SYSOPER
                else:
                    util.coerce_kw_type(opts, 'mode', int)
        return ([], opts)

    def _get_server_version_info(self, connection):
        """Return the Oracle server version as a tuple of ints."""
        return tuple(
            int(x)
            for x in connection.connection.version.split('.')
        )

    def is_disconnect(self, e, connection, cursor):
        """Return True if the given DBAPI error indicates a dead connection."""
        error, = e.args
        if isinstance(e, self.dbapi.InterfaceError):
            return "not connected" in str(e)
        elif hasattr(error, 'code'):
            # ORA-00028: your session has been killed
            # ORA-03114: not connected to ORACLE
            # ORA-03113: end-of-file on communication channel
            # ORA-03135: connection lost contact
            # ORA-01033: ORACLE initialization or shutdown in progress
            # ORA-02396: exceeded maximum idle time, please connect again
            # TODO: Others ?
            return error.code in (28, 3114, 3113, 3135, 1033, 2396)
        else:
            return False

    def create_xid(self):
        """create a two-phase transaction ID.

        this id will be passed to do_begin_twophase(), do_rollback_twophase(),
        do_commit_twophase().  its format is unspecified."""

        id = random.randint(0, 2 ** 128)
        return (0x1234, "%032x" % id, "%032x" % 9)

    def do_executemany(self, cursor, statement, parameters, context=None):
        # cx_Oracle's executemany requires a mutable sequence of
        # parameter sets; coerce a tuple accordingly
        if isinstance(parameters, tuple):
            parameters = list(parameters)
        cursor.executemany(statement, parameters)

    def do_begin_twophase(self, connection, xid):
        """Begin a two-phase transaction with the given xid tuple."""
        connection.connection.begin(*xid)

    def do_prepare_twophase(self, connection, xid):
        # remember the prepare() outcome; commit consults it later
        result = connection.connection.prepare()
        connection.info['cx_oracle_prepared'] = result

    def do_rollback_twophase(self, connection, xid, is_prepared=True,
                             recover=False):
        self.do_rollback(connection.connection)

    def do_commit_twophase(self, connection, xid, is_prepared=True,
                           recover=False):
        if not is_prepared:
            self.do_commit(connection.connection)
        else:
            # commit only when prepare() reported pending work
            oci_prepared = connection.info['cx_oracle_prepared']
            if oci_prepared:
                self.do_commit(connection.connection)

    def do_recover_twophase(self, connection):
        connection.info.pop('cx_oracle_prepared', None)
dialect = OracleDialect_cx_oracle
| |
from django.db import models
from django.test import TestCase
from django.utils import timezone
from analytics.lib.counts import CountStat, COUNT_STATS, process_count_stat, \
zerver_count_user_by_realm, zerver_count_message_by_user, \
zerver_count_message_by_stream, zerver_count_stream_by_realm, \
do_fill_count_stat_at_hour, ZerverCountQuery
from analytics.models import BaseCount, InstallationCount, RealmCount, \
UserCount, StreamCount, FillState, get_fill_state, installation_epoch
from zerver.models import Realm, UserProfile, Message, Stream, Recipient, \
get_user_profile_by_email, get_client
from datetime import datetime, timedelta
from typing import Any, Type, Optional
from six import text_type
class AnalyticsTestCase(TestCase):
    """Shared fixtures for the analytics count tests.

    Provides a fixed reference time, a default realm, and lightweight
    factory helpers for users, streams, and messages.
    """
    MINUTE = timedelta(seconds=60)
    HOUR = MINUTE * 60
    DAY = HOUR * 24
    TIME_ZERO = datetime(2042, 3, 14).replace(tzinfo=timezone.utc)
    TIME_LAST_HOUR = TIME_ZERO - HOUR

    def setUp(self):
        # type: () -> None
        self.default_realm = Realm.objects.create(
            string_id='realmtest', name='Realm Test',
            domain='analytics.test', date_created=self.TIME_ZERO - 2*self.DAY)

    # Lightweight creation of users, streams, and messages
    def create_user(self, email, **kwargs):
        # type: (str, **Any) -> UserProfile
        """Create a UserProfile, filling unspecified fields with defaults."""
        defaults = {
            'date_joined': self.TIME_LAST_HOUR,
            'full_name': 'full_name',
            'short_name': 'short_name',
            'pointer': -1,
            'last_pointer_updater': 'seems unused?',
            'realm': self.default_realm,
            'api_key': '42'}
        for field, default in defaults.items():
            kwargs.setdefault(field, default)
        return UserProfile.objects.create(email=email, **kwargs)

    def create_stream(self, **kwargs):
        # type: (**Any) -> Stream
        """Create a Stream, filling unspecified fields with defaults."""
        defaults = {'name': 'stream name',
                    'realm': self.default_realm,
                    'date_created': self.TIME_LAST_HOUR}
        for field, default in defaults.items():
            kwargs.setdefault(field, default)
        return Stream.objects.create(**kwargs)

    def create_message(self, sender, recipient, **kwargs):
        # type: (UserProfile, Recipient, **Any) -> Message
        """Create a Message, filling unspecified fields with defaults."""
        defaults = {
            'sender': sender,
            'recipient': recipient,
            'subject': 'subject',
            'content': 'hi',
            'pub_date': self.TIME_LAST_HOUR,
            'sending_client': get_client("website")}
        for field, default in defaults.items():
            kwargs.setdefault(field, default)
        return Message.objects.create(**kwargs)

    # Note that this doesn't work for InstallationCount, since InstallationCount has no realm_id
    # kwargs should only ever be a UserProfile or Stream.
    def assertCountEquals(self, table, property, value, end_time = TIME_ZERO, interval = CountStat.HOUR,
                          realm = None, **kwargs):
        # type: (Type[BaseCount], text_type, int, datetime, str, Optional[Realm], **models.Model) -> None
        """Assert the single matching count row in `table` has `value`."""
        if realm is None:
            realm = self.default_realm
        queryset = table.objects.filter(
            realm=realm, property=property, interval=interval,
            end_time=end_time).filter(**kwargs)
        actual = queryset.values_list('value', flat=True)[0]
        self.assertEqual(actual, value)
# Tests management commands, backfilling, adding new stats, etc
class TestUpdateAnalyticsCounts(AnalyticsTestCase):
    """Tests of the basic write paths of do_fill_count_stat_at_hour."""

    def test_analytics_stat_write(self):
        # type: () -> None
        """Filling a stream stat writes one RealmCount row per stream."""
        # might change if we refactor count_query
        stat = CountStat('test_stat_write', zerver_count_stream_by_realm,
                         {'invite_only': False}, None, CountStat.HOUR, False)
        # add some stuff to zerver_*
        self.create_stream(name='stream1')
        self.create_stream(name='stream2')
        self.create_stream(name='stream3')
        # run do_pull_from_zerver
        do_fill_count_stat_at_hour(stat, self.TIME_ZERO)
        # check analytics_* values are correct
        self.assertCountEquals(RealmCount, 'test_stat_write', 3)

    def test_update_analytics_tables(self):
        # type: () -> None
        """Fills count messages, and never write rows earlier than asked."""
        stat = CountStat('test_messages_sent', zerver_count_message_by_user, {}, None, CountStat.HOUR, False)
        user1 = self.create_user('email1')
        user2 = self.create_user('email2')
        recipient = Recipient.objects.create(type_id=user2.id, type=Recipient.PERSONAL)
        self.create_message(user1, recipient)
        # run command
        do_fill_count_stat_at_hour(stat, self.TIME_ZERO)
        usercount_row = UserCount.objects.filter(realm=self.default_realm, interval=CountStat.HOUR,
                                                 property='test_messages_sent').values_list(
            'value', flat=True)[0]
        # Use assertEqual instead of a bare assert: it is not stripped
        # under `python -O` and reports the actual value on failure,
        # consistent with the rest of this file's self.assert* style.
        self.assertEqual(usercount_row, 1)
        # run command with date before message creation
        do_fill_count_stat_at_hour(stat, self.TIME_LAST_HOUR)
        # check no earlier rows created, old ones still there
        self.assertFalse(UserCount.objects.filter(end_time__lt=self.TIME_LAST_HOUR).exists())
        self.assertCountEquals(UserCount, 'test_messages_sent', 1, user=user1)
class TestProcessCountStat(AnalyticsTestCase):
    """Tests for process_count_stat's fill-state machine and for
    per-stat fill behavior around realm/user creation times."""

    def make_dummy_count_stat(self, current_time):
        # type: (datetime) -> CountStat
        """Build a CountStat whose query unconditionally inserts one
        fixed RealmCount row at `current_time`."""
        dummy_query = """INSERT INTO analytics_realmcount (realm_id, property, end_time, interval, value)
                         VALUES (1, 'test stat', '%(end_time)s','hour', 22)""" % {'end_time': current_time}
        count_stat = CountStat('test stat', ZerverCountQuery(Recipient, UserCount, dummy_query),
                               {}, None, CountStat.HOUR, False)
        return count_stat

    def assertFillStateEquals(self, end_time, state = FillState.DONE, property = None):
        # type: (datetime, int, Optional[text_type]) -> None
        """Assert the FillState row for `property` matches end_time/state.

        Defaults to the dummy stat's property when none is given.
        """
        count_stat = self.make_dummy_count_stat(end_time)
        if property is None:
            property = count_stat.property
        fill_state = get_fill_state(property)
        self.assertEqual(fill_state['end_time'], end_time)
        self.assertEqual(fill_state['state'], state)

    def test_process_stat(self):
        # type: () -> None
        """process_count_stat is idempotent and re-runs dirty fills."""
        # process new stat
        current_time = installation_epoch() + self.HOUR
        count_stat = self.make_dummy_count_stat(current_time)
        process_count_stat(count_stat, current_time)
        self.assertFillStateEquals(current_time)
        self.assertEqual(InstallationCount.objects.filter(property = count_stat.property,
                                                          interval = CountStat.HOUR).count(), 1)

        # dirty stat: a STARTED fill should be redone without duplicating rows
        FillState.objects.filter(property=count_stat.property).update(state=FillState.STARTED)
        process_count_stat(count_stat, current_time)
        self.assertFillStateEquals(current_time)
        self.assertEqual(InstallationCount.objects.filter(property = count_stat.property,
                                                          interval = CountStat.HOUR).count(), 1)

        # clean stat, no update
        process_count_stat(count_stat, current_time)
        self.assertFillStateEquals(current_time)
        self.assertEqual(InstallationCount.objects.filter(property = count_stat.property,
                                                          interval = CountStat.HOUR).count(), 1)

        # clean stat, with update
        current_time = current_time + self.HOUR
        count_stat = self.make_dummy_count_stat(current_time)
        process_count_stat(count_stat, current_time)
        self.assertFillStateEquals(current_time)
        self.assertEqual(InstallationCount.objects.filter(property = count_stat.property,
                                                          interval = CountStat.HOUR).count(), 2)

    # test users added in last hour
    def test_add_new_users(self):
        # type: () -> None
        """Only users joining within the filled hour are counted."""
        stat = CountStat('add_new_user_test', zerver_count_user_by_realm, {}, None, CountStat.HOUR, False)

        # add new users to realm in last hour
        self.create_user('email1')
        self.create_user('email2')

        # add a new user before an hour
        self.create_user('email3', date_joined=self.TIME_ZERO - 2*self.HOUR)

        # check if user added before the hour is not included
        do_fill_count_stat_at_hour(stat, self.TIME_ZERO)
        # do_update is writing the stat.property to all zerver tables
        self.assertCountEquals(RealmCount, 'add_new_user_test', 2)

    def test_count_before_realm_creation(self):
        # type: () -> None
        """No rows are written for hours before the realm existed."""
        stat = CountStat('test_active_humans', zerver_count_user_by_realm,
                         {'is_bot': False, 'is_active': True}, None, CountStat.HOUR, False)

        realm = Realm.objects.create(string_id='string_id', name='name', domain='domain',
                                     date_created=self.TIME_ZERO)
        self.create_user('email', realm=realm)

        # run count prior to realm creation
        do_fill_count_stat_at_hour(stat, self.TIME_LAST_HOUR)
        self.assertFalse(RealmCount.objects.filter(realm=realm).exists())

    def test_empty_counts_in_realm(self):
        # type: () -> None
        # test that rows with empty counts are returned if realm exists
        stat = CountStat('test_active_humans', zerver_count_user_by_realm,
                         {'is_bot': False, 'is_active': True}, None, CountStat.HOUR, False)
        do_fill_count_stat_at_hour(stat, self.TIME_ZERO)
        self.assertFalse(RealmCount.objects.filter(realm=self.default_realm).exists())

    def test_empty_message_aggregates(self):
        # type: () -> None
        # test that we write empty rows to realmcount in the event that we
        # have no messages and no users
        stat = COUNT_STATS['messages_sent']
        do_fill_count_stat_at_hour(stat, self.TIME_ZERO)
        self.assertFalse(RealmCount.objects.filter(realm=self.default_realm).exists())
class TestAggregates(AnalyticsTestCase):
    """Placeholder for aggregation tests; none implemented yet."""
    pass
class TestXByYQueries(AnalyticsTestCase):
    """Tests for queries that aggregate one zerver table by another."""

    def test_message_to_stream_aggregation(self):
        # type: () -> None
        """A message sent to a stream produces one StreamCount row."""
        stat = CountStat('test_messages_to_stream', zerver_count_message_by_stream, {}, None, CountStat.HOUR, False)
        # write some messages
        sender = self.create_user('email')
        stream = self.create_stream(date_created=self.TIME_ZERO - 2*self.HOUR)
        stream_recipient = Recipient(type_id=stream.id, type=Recipient.STREAM)
        stream_recipient.save()
        self.create_message(sender, recipient=stream_recipient)
        # run command
        do_fill_count_stat_at_hour(stat, self.TIME_ZERO)
        self.assertCountEquals(StreamCount, 'test_messages_to_stream', 1)
class TestCountStats(AnalyticsTestCase):
    """End-to-end checks of user-counting stats split by bot flag."""

    def test_human_and_bot_count_by_realm(self):
        # type: () -> None
        """Humans and bots are counted separately under their own stats."""
        human_stat = CountStat('test_active_humans', zerver_count_user_by_realm,
                               {'is_bot': False, 'is_active': True}, None,
                               CountStat.HOUR, False)
        bot_stat = CountStat('test_active_bots', zerver_count_user_by_realm,
                             {'is_bot': True, 'is_active': True}, None,
                             CountStat.HOUR, False)
        self.create_user('email1-bot', is_bot=True)
        self.create_user('email2-bot', is_bot=True)
        self.create_user('email3-human', is_bot=False)
        do_fill_count_stat_at_hour(human_stat, self.TIME_ZERO)
        do_fill_count_stat_at_hour(bot_stat, self.TIME_ZERO)
        self.assertCountEquals(RealmCount, 'test_active_humans', 1)
        self.assertCountEquals(RealmCount, 'test_active_bots', 2)
| |
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import (assert_equal, assert_raises, raises, assert_true,
assert_false)
from nose.tools import with_setup
try:
from nose.tools import assert_is, assert_is_instance
except ImportError:
from landlab.testing.tools import assert_is, assert_is_instance
from landlab import RasterModelGrid
from landlab import BAD_INDEX_VALUE as X
def setup_grid():
    """Create the shared test grid as the module-global ``rmg``.

    These tests use a grid that is 4x5 nodes::

     15------16------17------18------19
      |       |       |       |       |
      |       |       |       |       |
      |       |       |       |       |
     10------11------12------13------14
      |       |       |       |       |
      |       |       |       |       |
      |       |       |       |       |
      5-------6-------7-------8-------9
      |       |       |       |       |
      |       |       |       |       |
      |       |       |       |       |
      0-------1-------2-------3-------4
    """
    # The function-local `from landlab import RasterModelGrid` was
    # redundant (the module already imports it at top level) and has
    # been dropped; assign the global directly instead of
    # globals().update({...}).
    globals()['rmg'] = RasterModelGrid(4, 5, 1.)
def test_init_with_kwds_classic():
    """Classic constructor arguments set shape and spacing."""
    for (args, kwds), (nrows, ncols, spacing) in [
            (((), dict(num_rows=4, num_cols=5, dx=1.)), (4, 5, 1.)),
            (((3, 7, 2), {}), (3, 7, 2.))]:
        grid = RasterModelGrid(*args, **kwds)
        assert_equal(grid.number_of_node_rows, nrows)
        assert_equal(grid.number_of_node_columns, ncols)
        assert_equal(grid.dy, spacing)
        assert_equal(grid.dx, spacing)
def test_init_new_style():
    """Tuple-shape constructor, with explicit and default spacing."""
    for kwds, spacing in [(dict(spacing=2), 2.), ({}, 1.)]:
        grid = RasterModelGrid((4, 5), **kwds)
        assert_equal(grid.number_of_node_rows, 4)
        assert_equal(grid.number_of_node_columns, 5)
        assert_equal(grid.dy, spacing)
        assert_equal(grid.dx, spacing)
def test_spacing_is_float():
    """Grid spacing is stored as float regardless of input type."""
    for kwds, expected in [({}, 1.), (dict(spacing=2), 2.)]:
        grid = RasterModelGrid((4, 5), **kwds)
        for value in (grid.dy, grid.dx):
            assert_equal(value, expected)
            assert_is_instance(value, float)
@with_setup(setup_grid)
def test_grid_dimensions():
    """Test extent of grid with unit spacing."""
    nrows = rmg.number_of_node_rows
    ncols = rmg.number_of_node_columns
    assert_equal(rmg.extent[0], nrows - 1)
    assert_equal(rmg.extent[1], ncols - 1)
def test_grid_dimensions_non_unit_spacing():
    """Test extent of grid with non-unit spacing."""
    grid = RasterModelGrid((4, 5), spacing=2.)
    assert_equal(grid.extent[0], 6.)
    assert_equal(grid.extent[1], 8.)
@with_setup(setup_grid)
def test_nodes_around_point():
    """Nodes of the cell enclosing a point, for two nearby points."""
    assert_array_equal(rmg.nodes_around_point(2.1, 1.1),
                       np.array([7, 12, 13, 8]))
    assert_array_equal(rmg.nodes_around_point(2.1, .9),
                       np.array([2, 7, 8, 3]))
@with_setup(setup_grid)
def test_neighbor_list_with_scalar_arg():
    """Active neighbors of a single node; X marks inactive/missing."""
    assert_array_equal(rmg.active_neighbors_at_node[6], np.array([7, 11, 5, 1]))
    assert_array_equal(rmg.active_neighbors_at_node[-1], np.array([X, X, X, X]))
    assert_array_equal(rmg.active_neighbors_at_node[-2], np.array([X, X, X, 13]))
@with_setup(setup_grid)
def test_neighbor_list_with_array_arg():
    """Active neighbors fancy-indexed with a list of node IDs."""
    assert_array_equal(rmg.active_neighbors_at_node[[6, -1]],
                       np.array([[7, 11, 5, 1], [X, X, X, X]]))
@with_setup(setup_grid)
def test_neighbor_list_with_no_args():
    """Full active-neighbor table for every node of the 4x5 grid."""
    expected = np.array([
        [X, X, X, X], [X, 6, X, X], [X, 7, X, X], [X, 8, X, X],
        [X, X, X, X],
        [6, X, X, X], [7, 11, 5, 1], [8, 12, 6, 2], [9, 13, 7, 3],
        [X, X, 8, X],
        [11, X, X, X], [12, 16, 10, 6], [13, 17, 11, 7], [14, 18, 12, 8],
        [X, X, 13, X],
        [X, X, X, X], [X, X, X, 11], [X, X, X, 12], [X, X, X, 13],
        [X, X, X, X]])
    assert_array_equal(rmg.active_neighbors_at_node, expected)
@with_setup(setup_grid)
def test_node_x():
    """x-coordinates repeat 0..4 along each of the four rows."""
    assert_array_equal(rmg.node_x, np.array([0., 1., 2., 3., 4.,
                                             0., 1., 2., 3., 4.,
                                             0., 1., 2., 3., 4.,
                                             0., 1., 2., 3., 4.]))
@with_setup(setup_grid)
def test_node_y():
    """y-coordinates are constant within a row, 0..3 across rows."""
    assert_array_equal(rmg.node_y, np.array([0., 0., 0., 0., 0.,
                                             1., 1., 1., 1., 1.,
                                             2., 2., 2., 2., 2.,
                                             3., 3., 3., 3., 3.]))
@with_setup(setup_grid)
@raises(ValueError)
def test_node_x_is_immutable():
    """Writing into node_x raises ValueError (read-only array)."""
    rmg.node_x[0] = 0
@with_setup(setup_grid)
@raises(ValueError)
def test_node_y_is_immutable():
    """Writing into node_y raises ValueError (read-only array)."""
    rmg.node_y[0] = 0
@with_setup(setup_grid)
def test_node_axis_coordinates():
    """Axis 0 (and -2) is y; axis 1 (and -1) is x; views share bases."""
    assert_is(rmg.node_axis_coordinates(axis=0).base, rmg.node_y.base)
    assert_is(rmg.node_axis_coordinates(axis=1).base, rmg.node_x.base)
    assert_is(rmg.node_axis_coordinates(axis=-1).base, rmg.node_x.base)
    assert_is(rmg.node_axis_coordinates(axis=-2).base, rmg.node_y.base)
@with_setup(setup_grid)
def test_diagonal_list():
    """Diagonal neighbors: scalar, negative, list, and full-table forms."""
    assert_array_equal(rmg._get_diagonal_list(6), np.array([12, 10, 0, 2]))
    assert_array_equal(rmg._get_diagonal_list(-1), np.array([X, X, 13, X]))
    assert_array_equal(rmg._get_diagonal_list([6, -1]),
                       np.array([[12, 10, 0, 2], [X, X, 13, X]]))
    assert_array_equal(
        rmg._get_diagonal_list(),
        np.array([[6, X, X, X], [7, 5, X, X], [8, 6, X, X],
                  [9, 7, X, X], [X, 8, X, X],
                  [11, X, X, 1], [12, 10, 0, 2], [13, 11, 1, 3],
                  [14, 12, 2, 4], [X, 13, 3, X],
                  [16, X, X, 6], [17, 15, 5, 7], [18, 16, 6, 8],
                  [19, 17, 7, 9], [X, 18, 8, X],
                  [X, X, X, 11], [X, X, 10, 12], [X, X, 11, 13],
                  [X, X, 12, 14], [X, X, 13, X]]))
@with_setup(setup_grid)
def test_diagonal_list_boundary():
    """Corner node 0 has a single diagonal neighbor (node 6)."""
    assert_array_equal(rmg._get_diagonal_list(0), np.array([6, X, X, X]))
@with_setup(setup_grid)
def test_node_is_core():
    """Interior nodes are core; all perimeter nodes are not."""
    core_nodes = {6, 7, 8, 11, 12, 13}
    for node_id in range(20):
        if node_id in core_nodes:
            assert_true(rmg.node_is_core(node_id))
        else:
            assert_false(rmg.node_is_core(node_id))
@with_setup(setup_grid)
def test_get_interior_cells():
    """Core cells map to the six interior node IDs."""
    assert_array_equal(rmg.node_at_core_cell,
                       np.array([6, 7, 8, 11, 12, 13]))
@with_setup(setup_grid)
def test_active_links():
    """Count and IDs of the 17 active links on the 4x5 grid."""
    assert_equal(rmg.number_of_active_links, 17)
    assert_array_equal(rmg.active_links,
                       np.array([5, 6, 7,
                                 9, 10, 11, 12,
                                 14, 15, 16,
                                 18, 19, 20, 21,
                                 23, 24, 25]))
#@with_setup(setup_grid)
#def test_active_link_fromnode():
# assert_array_equal(rmg._activelink_fromnode,
# np.array([1, 2, 3, 6, 7, 8, 11, 12, 13,
# 5, 6, 7, 8, 10, 11, 12, 13]))
#
#
#@with_setup(setup_grid)
#def test_active_link_tonode():
# assert_array_equal(rmg._activelink_tonode,
# np.array([6, 7, 8, 11, 12, 13, 16, 17, 18,
# 6, 7, 8, 9, 11, 12, 13, 14]))
@with_setup(setup_grid)
def test_active_link_num_inlink():
    """Per-node count of incoming active links."""
    assert_array_equal(rmg._node_numactiveinlink,
                       np.array([0, 0, 0, 0, 0,
                                 0, 2, 2, 2, 1,
                                 0, 2, 2, 2, 1,
                                 0, 1, 1, 1, 0]))
@with_setup(setup_grid)
def test_active_link_num_outlink():
    """Per-node count of outgoing active links."""
    assert_array_equal(rmg._node_numactiveoutlink, np.array([0, 1, 1, 1, 0,
                                                             1, 2, 2, 2, 0,
                                                             1, 2, 2, 2, 0,
                                                             0, 0, 0, 0, 0]))
@with_setup(setup_grid)
def test_active_inlink_matrix():
    """Active inlink IDs per node, one row per direction; -1 = none."""
    assert_array_equal(rmg._node_active_inlink_matrix,
                       np.array([[-1, -1, -1, -1, -1,
                                  -1, 0, 1, 2, -1,
                                  -1, 3, 4, 5, -1,
                                  -1, 6, 7, 8, -1],
                                 [-1, -1, -1, -1, -1,
                                  -1, 9, 10, 11, 12,
                                  -1, 13, 14, 15, 16,
                                  -1, -1, -1, -1, -1]]))
@with_setup(setup_grid)
def test_active_outlink_matrix():
    """Active outlink IDs per node, one row per direction; -1 = none."""
    assert_array_equal(
        rmg._node_active_outlink_matrix,
        np.array([[-1, 0, 1, 2, -1,
                   -1, 3, 4, 5, -1,
                   -1, 6, 7, 8, -1,
                   -1, -1, -1, -1, -1],
                  [-1, -1, -1, -1, -1,
                   9, 10, 11, 12, -1,
                   13, 14, 15, 16, -1,
                   -1, -1, -1, -1, -1]]))
@with_setup(setup_grid)
def test__active_links_at_node_scalar_interior():
    """Active links touching interior node 6."""
    assert_array_equal(rmg._active_links_at_node([6]),
                       np.array([[5, 9, 14, 10]]).T)
@with_setup(setup_grid)
def test__active_links_at_node_scalar_boundary():
    """Boundary node 1 touches only one active link (5)."""
    assert_array_equal(rmg._active_links_at_node([1]),
                       np.array([[-1, -1, 5, -1]]).T)
@with_setup(setup_grid)
def test_active_node_with_array_arg():
    """Active links for a list of node IDs, columns per node."""
    assert_array_equal(rmg._active_links_at_node([6, 7]),
                       np.array([[5, 9, 14, 10],
                                 [6, 10, 15, 11]]).T)
@with_setup(setup_grid)
def test__active_links_at_node_with_no_args():
    """Full active-links-at-node table (4 directions x 20 nodes)."""
    assert_array_equal(
        rmg._active_links_at_node(),
        np.array([[-1, -1, -1, -1, -1, -1, 5, 6, 7, -1,
                   -1, 14, 15, 16, -1, -1, 23, 24, 25, -1],
                  [-1, -1, -1, -1, -1, -1, 9, 10, 11, 12,
                   -1, 18, 19, 20, 21, -1, -1, -1, -1, -1],
                  [-1, 5, 6, 7, -1, -1, 14, 15, 16, -1,
                   -1, 23, 24, 25, -1, -1, -1, -1, -1, -1],
                  [-1, -1, -1, -1, -1, 9, 10, 11, 12, -1,
                   18, 19, 20, 21, -1, -1, -1, -1, -1, -1]]))
@with_setup(setup_grid)
def test_node_at_link_tail():
    """Tail (from) node of every link, in link-ID order."""
    assert_array_equal(
        rmg.node_at_link_tail,
        np.array([0, 1, 2, 3,
                  0, 1, 2, 3, 4,
                  5, 6, 7, 8,
                  5, 6, 7, 8, 9,
                  10, 11, 12, 13,
                  10, 11, 12, 13, 14,
                  15, 16, 17, 18]))
@with_setup(setup_grid)
def test_node_at_link_head():
    """Head (to) node of every link, in link-ID order."""
    assert_array_equal(
        rmg.node_at_link_head,
        np.array([1, 2, 3, 4,
                  5, 6, 7, 8, 9,
                  6, 7, 8, 9,
                  10, 11, 12, 13, 14,
                  11, 12, 13, 14,
                  15, 16, 17, 18, 19,
                  16, 17, 18, 19]))
@with_setup(setup_grid)
def test_link_num_inlink():
    """Per-node count of incoming links (active or not)."""
    assert_array_equal(rmg._node_numinlink,
                       np.array([0, 1, 1, 1, 1,
                                 1, 2, 2, 2, 2,
                                 1, 2, 2, 2, 2,
                                 1, 2, 2, 2, 2]))
@with_setup(setup_grid)
def test_link_num_outlink():
    """Per-node count of outgoing links (active or not)."""
    assert_array_equal(rmg._node_numoutlink, np.array([2, 2, 2, 2, 1,
                                                       2, 2, 2, 2, 1,
                                                       2, 2, 2, 2, 1,
                                                       1, 1, 1, 1, 0]))
@with_setup(setup_grid)
def test__node_inlink_matrix():
    """Inlink IDs per node, one row per direction; -1 = none."""
    assert_array_equal(rmg._node_inlink_matrix,
                       np.array([[-1, -1, -1, -1, -1,
                                  4, 5, 6, 7, 8,
                                  13, 14, 15, 16, 17,
                                  22, 23, 24, 25, 26],
                                 [-1, 0, 1, 2, 3,
                                  -1, 9, 10, 11, 12,
                                  -1, 18, 19, 20, 21,
                                  -1, 27, 28, 29, 30]]))
@with_setup(setup_grid)
def test__node_outlink_matrix():
    """Outlink IDs per node, one row per direction; -1 = none."""
    assert_array_equal(rmg._node_outlink_matrix,
                       np.array([[4, 5, 6, 7, 8,
                                  13, 14, 15, 16, 17,
                                  22, 23, 24, 25, 26,
                                  -1, -1, -1, -1, -1],
                                 [0, 1, 2, 3, -1,
                                  9, 10, 11, 12, -1,
                                  18, 19, 20, 21, -1,
                                  27, 28, 29, 30, -1]]))
@with_setup(setup_grid)
def test_links_at_node_with_scalar_interior():
    """Links around interior node 6 (E, N, W, S ordering)."""
    assert_array_equal(rmg.links_at_node[6],
                       np.array([10, 14, 9, 5]))
@with_setup(setup_grid)
def test_links_at_node_with_scalar_boundary():
    """Links around boundary node 1; -1 marks the missing side."""
    assert_array_equal(rmg.links_at_node[1], np.array([1, 5, 0, -1]))
@with_setup(setup_grid)
def test_links_at_node_with_array_arg():
    """Links-at-node sliced for a range of node IDs."""
    assert_array_equal(rmg.links_at_node[6:8],
                       np.array([[10, 14, 9, 5], [11, 15, 10, 6]]))
@with_setup(setup_grid)
def test_links_at_node_with_no_args():
    """Full links-at-node table for all 20 nodes; -1 = missing side."""
    assert_array_equal(
        rmg.links_at_node,
        np.array([[0, 4, -1, -1],
                  [1, 5, 0, -1],
                  [2, 6, 1, -1],
                  [3, 7, 2, -1],
                  [-1, 8, 3, -1],
                  [9, 13, -1, 4],
                  [10, 14, 9, 5],
                  [11, 15, 10, 6],
                  [12, 16, 11, 7],
                  [-1, 17, 12, 8],
                  [18, 22, -1, 13],
                  [19, 23, 18, 14],
                  [20, 24, 19, 15],
                  [21, 25, 20, 16],
                  [-1, 26, 21, 17],
                  [27, -1, -1, 22],
                  [28, -1, 27, 23],
                  [29, -1, 28, 24],
                  [30, -1, 29, 25],
                  [-1, -1, 30, 26]]))
@with_setup(setup_grid)
def test_face_at_link():
    """Face ID for each link; X for links with no face (perimeter)."""
    assert_array_equal(rmg.face_at_link,
                       np.array([X, X, X, X,
                                 X, 0, 1, 2, X,
                                 3, 4, 5, 6,
                                 X, 7, 8, 9, X,
                                 10, 11, 12, 13,
                                 X, 14, 15, 16, X,
                                 X, X, X, X]))
@with_setup(setup_grid)
def test_grid_coords_to_node_id_with_scalar():
    """(row, col) = (3, 4) maps to the last node, 19."""
    assert_equal(rmg.grid_coords_to_node_id(3, 4), 19)
@with_setup(setup_grid)
def test_grid_coords_to_node_id_with_array():
    """Vectorized (rows, cols) -> node-ID conversion."""
    assert_array_equal(rmg.grid_coords_to_node_id((3, 2), (4, 1)),
                       np.array([19, 11]))
@with_setup(setup_grid)
def test_grid_coords_to_node_id_outside_of_grid():
    """Out-of-range grid coordinates raise ValueError."""
    assert_raises(ValueError, rmg.grid_coords_to_node_id, 5, 0)
@with_setup(setup_grid)
def test_create_diagonal_list():
    """Explicitly rebuilding the diagonal list yields the same table."""
    rmg._create_diagonal_list()

    assert_array_equal(
        rmg._get_diagonal_list(),
        np.array([[6, X, X, X], [7, 5, X, X], [8, 6, X, X],
                  [9, 7, X, X], [X, 8, X, X],
                  [11, X, X, 1], [12, 10, 0, 2], [13, 11, 1, 3],
                  [14, 12, 2, 4], [X, 13, 3, X],
                  [16, X, X, 6], [17, 15, 5, 7], [18, 16, 6, 8],
                  [19, 17, 7, 9], [X, 18, 8, X],
                  [X, X, X, 11], [X, X, 10, 12], [X, X, 11, 13],
                  [X, X, 12, 14], [X, X, 13, X]]))
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for nova data.
"""
from sqlalchemy import Column, Index, Integer, BigInteger, Enum, String, schema
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from sqlalchemy.orm import relationship, backref, object_mapper
from oslo.config import cfg
from nova.db.sqlalchemy import types
from nova.openstack.common.db.sqlalchemy import models
from nova.openstack.common import timeutils
# Global oslo.config configuration object shared across nova.
CONF = cfg.CONF
# Declarative base class that all model classes in this module extend.
BASE = declarative_base()
def MediumText():
    """Return a Text column type that becomes MEDIUMTEXT on MySQL."""
    generic = Text()
    return generic.with_variant(MEDIUMTEXT(), 'mysql')
class NovaBase(models.SoftDeleteMixin,
               models.TimestampMixin,
               models.ModelBase):
    """Common base for nova models: soft-delete + created/updated stamps."""

    # NOTE(review): explicitly set to None here -- presumably so the mixin
    # composition does not bind its own MetaData; confirm.
    metadata = None
class Service(BASE, NovaBase):
    """Represents a running service on a host."""

    __tablename__ = 'services'
    # host+topic and host+binary must each be unique among non-deleted rows
    __table_args__ = (
        schema.UniqueConstraint("host", "topic", "deleted",
                                name="uniq_services0host0topic0deleted"),
        schema.UniqueConstraint("host", "binary", "deleted",
                                name="uniq_services0host0binary0deleted")
        )

    id = Column(Integer, primary_key=True)
    host = Column(String(255))  # , ForeignKey('hosts.id'))
    binary = Column(String(255))
    topic = Column(String(255))
    # heartbeat counter; defaults to 0 for a freshly registered service
    report_count = Column(Integer, nullable=False, default=0)
    disabled = Column(Boolean, default=False)
    disabled_reason = Column(String(255))
class ComputeNode(BASE, NovaBase):
    """Represents a running compute service on a host."""

    __tablename__ = 'compute_nodes'
    __table_args__ = ()
    id = Column(Integer, primary_key=True)
    service_id = Column(Integer, ForeignKey('services.id'), nullable=False)
    # join is restricted to non-deleted Service rows (deleted == 0)
    service = relationship(Service,
                           backref=backref('compute_node'),
                           foreign_keys=service_id,
                           primaryjoin='and_('
                                'ComputeNode.service_id == Service.id,'
                                'ComputeNode.deleted == 0)')

    # raw capacity totals reported by the hypervisor
    vcpus = Column(Integer, nullable=False)
    memory_mb = Column(Integer, nullable=False)
    local_gb = Column(Integer, nullable=False)
    # currently allocated portions of the above
    vcpus_used = Column(Integer, nullable=False)
    memory_mb_used = Column(Integer, nullable=False)
    local_gb_used = Column(Integer, nullable=False)
    hypervisor_type = Column(MediumText(), nullable=False)
    hypervisor_version = Column(Integer, nullable=False)
    hypervisor_hostname = Column(String(255))
    hypervisor_qos = Column(String(255))

    # Free Ram, amount of activity (resize, migration, boot, etc) and
    # the number of running VM's are a good starting point for what's
    # important when making scheduling decisions.
    free_ram_mb = Column(Integer)
    free_disk_gb = Column(Integer)
    current_workload = Column(Integer)
    running_vms = Column(Integer)

    # Note(masumotok): Expected Strings example:
    #
    # '{"arch":"x86_64",
    #   "model":"Nehalem",
    #   "topology":{"sockets":1, "threads":2, "cores":3},
    #   "features":["tdtscp", "xtpr"]}'
    #
    # Points are "json translatable" and it must have all dictionary keys
    # above, since it is copied from <cpu> tag of getCapabilities()
    # (See libvirt.virtConnection).
    cpu_info = Column(MediumText(), nullable=False)
    disk_available_least = Column(Integer)
    host_ip = Column(types.IPAddress())
    supported_instances = Column(Text)

    # Note(yongli): json string PCI Stats
    # '{"vendor_id":"8086", "product_id":"1234", "count":3 }'
    pci_stats = Column(Text)
class ComputeNodeStat(BASE, NovaBase):
    """Stats related to the current workload of a compute host that are
    intended to aid in making scheduler decisions.
    """
    __tablename__ = 'compute_node_stats'
    __table_args__ = (
        Index('ix_compute_node_stats_compute_node_id', 'compute_node_id'),
        Index('compute_node_stats_node_id_and_deleted_idx',
              'compute_node_id', 'deleted')
    )
    id = Column(Integer, primary_key=True)
    # Free-form key/value pair describing one stat of the compute node.
    key = Column(String(255), nullable=False)
    value = Column(String(255))
    compute_node_id = Column(Integer, ForeignKey('compute_nodes.id'),
                             nullable=False)
    # Join condition excludes soft-deleted stats rows.
    primary_join = ('and_(ComputeNodeStat.compute_node_id == '
                    'ComputeNode.id, ComputeNodeStat.deleted == 0)')
    stats = relationship("ComputeNode", backref="stats",
                         primaryjoin=primary_join)
    def __str__(self):
        # Human-readable form, e.g. "{3: num_instances = 5}".
        return "{%d: %s = %s}" % (self.compute_node_id, self.key, self.value)
class Certificate(BASE, NovaBase):
    """Represents a x509 certificate."""
    __tablename__ = 'certificates'
    __table_args__ = (
        Index('certificates_project_id_deleted_idx', 'project_id', 'deleted'),
        Index('certificates_user_id_deleted_idx', 'user_id', 'deleted')
    )
    id = Column(Integer, primary_key=True)
    user_id = Column(String(255))
    project_id = Column(String(255))
    # Path/name of the certificate file on disk.
    file_name = Column(String(255))
class Instance(BASE, NovaBase):
    """Represents a guest VM."""
    __tablename__ = 'instances'
    __table_args__ = (
        Index('uuid', 'uuid', unique=True),
        Index('project_id', 'project_id'),
        Index('instances_host_deleted_idx',
              'host', 'deleted'),
        Index('instances_reservation_id_idx',
              'reservation_id'),
        Index('instances_terminated_at_launched_at_idx',
              'terminated_at', 'launched_at'),
        Index('instances_uuid_deleted_idx',
              'uuid', 'deleted'),
        Index('instances_task_state_updated_at_idx',
              'task_state', 'updated_at'),
        Index('instances_host_node_deleted_idx',
              'host', 'node', 'deleted'),
        Index('instances_host_deleted_cleaned_idx',
              'host', 'deleted', 'cleaned'),
    )
    # Non-persisted attribute used to carry injected files with the model.
    injected_files = []
    id = Column(Integer, primary_key=True, autoincrement=True)
    @property
    def name(self):
        """Return the instance's display name built from the
        ``instance_name_template`` config option.
        """
        try:
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for column in iter(object_mapper(self).columns):
                key = column.name
                # prevent recursion if someone specifies %(name)s
                # %(name)s will not be valid.
                if key == 'name':
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                # Template referenced an unknown key; fall back to the uuid.
                base_name = self.uuid
        return base_name
    def _extra_keys(self):
        # Expose the computed 'name' property when the model is iterated.
        return ['name']
    user_id = Column(String(255))
    project_id = Column(String(255))
    image_ref = Column(String(255))
    kernel_id = Column(String(255))
    ramdisk_id = Column(String(255))
    hostname = Column(String(255))
    launch_index = Column(Integer)
    key_name = Column(String(255))
    key_data = Column(MediumText())
    power_state = Column(Integer)
    vm_state = Column(String(255))
    task_state = Column(String(255))
    memory_mb = Column(Integer)
    vcpus = Column(Integer)
    root_gb = Column(Integer)
    ephemeral_gb = Column(Integer)
    # This is not related to hostname, above.  It refers
    #  to the nova node.
    host = Column(String(255))  # , ForeignKey('hosts.id'))
    # To identify the "ComputeNode" which the instance resides in.
    # This equals to ComputeNode.hypervisor_hostname.
    node = Column(String(255))
    # *not* flavorid, this is the internal primary_key
    instance_type_id = Column(Integer)
    user_data = Column(MediumText())
    reservation_id = Column(String(255))
    scheduled_at = Column(DateTime)
    launched_at = Column(DateTime)
    terminated_at = Column(DateTime)
    availability_zone = Column(String(255))
    # User editable field for display in user-facing UIs
    display_name = Column(String(255))
    display_description = Column(String(255))
    # To remember on which host an instance booted.
    # An instance may have moved to another host by live migration.
    launched_on = Column(MediumText())
    # NOTE(jdillaman): locked deprecated in favor of locked_by,
    # to be removed in Icehouse
    locked = Column(Boolean)
    locked_by = Column(Enum('owner', 'admin'))
    os_type = Column(String(255))
    architecture = Column(String(255))
    vm_mode = Column(String(255))
    uuid = Column(String(36))
    root_device_name = Column(String(255))
    default_ephemeral_device = Column(String(255))
    default_swap_device = Column(String(255))
    config_drive = Column(String(255))
    # User editable field meant to represent what ip should be used
    # to connect to the instance
    access_ip_v4 = Column(types.IPAddress())
    access_ip_v6 = Column(types.IPAddress())
    auto_disk_config = Column(Boolean())
    progress = Column(Integer)
    # EC2 instance_initiated_shutdown_terminate
    # True: -> 'terminate'
    # False: -> 'stop'
    # Note(maoy): currently Nova will always stop instead of terminate
    # no matter what the flag says. So we set the default to False.
    shutdown_terminate = Column(Boolean(), default=False)
    # EC2 disable_api_termination
    disable_terminate = Column(Boolean(), default=False)
    # OpenStack compute cell name.  This will only be set at the top of
    # the cells tree and it'll be a full cell name such as 'api!hop1!hop2'
    cell_name = Column(String(255))
    internal_id = Column(Integer)
    # Records whether an instance has been deleted from disk
    cleaned = Column(Integer, default=0)
class InstanceInfoCache(BASE, NovaBase):
    """
    Represents a cache of information about an instance
    """
    __tablename__ = 'instance_info_caches'
    __table_args__ = (
        schema.UniqueConstraint(
            "instance_uuid",
            name="uniq_instance_info_caches0instance_uuid"),)
    id = Column(Integer, primary_key=True, autoincrement=True)
    # text column used for storing a json object of network data for api
    network_info = Column(MediumText())
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'),
                           nullable=False)
    # One-to-one: each instance has at most one info cache (uselist=False).
    instance = relationship(Instance,
                            backref=backref('info_cache', uselist=False),
                            foreign_keys=instance_uuid,
                            primaryjoin=instance_uuid == Instance.uuid)
class InstanceTypes(BASE, NovaBase):
    """Represents possible flavors for instances.

    Note: instance_type and flavor are synonyms and the term instance_type is
    deprecated and in the process of being removed.
    """
    __tablename__ = "instance_types"
    __table_args__ = (
        schema.UniqueConstraint("flavorid", "deleted",
                                name="uniq_instance_types0flavorid0deleted"),
        schema.UniqueConstraint("name", "deleted",
                                name="uniq_instance_types0name0deleted")
    )
    # Internal only primary key/id
    id = Column(Integer, primary_key=True)
    name = Column(String(255))
    memory_mb = Column(Integer, nullable=False)
    vcpus = Column(Integer, nullable=False)
    root_gb = Column(Integer)
    ephemeral_gb = Column(Integer)
    # Public facing id will be renamed public_id
    flavorid = Column(String(255))
    swap = Column(Integer, nullable=False, default=0)
    rxtx_factor = Column(Float, default=1)
    vcpu_weight = Column(Integer)
    disabled = Column(Boolean, default=False)
    is_public = Column(Boolean, default=True)
class Volume(BASE, NovaBase):
    """Represents a block storage device that can be attached to a VM."""
    __tablename__ = 'volumes'
    __table_args__ = (
        Index('volumes_instance_uuid_idx', 'instance_uuid'),
    )
    id = Column(String(36), primary_key=True, nullable=False)
    # Overrides NovaBase's integer soft-delete marker with the row's uuid
    # string ("" means not deleted), matching the String(36) primary key.
    deleted = Column(String(36), default="")
    @property
    def name(self):
        """Return the display name derived from ``volume_name_template``."""
        return CONF.volume_name_template % self.id
    ec2_id = Column(String(255))
    user_id = Column(String(255))
    project_id = Column(String(255))
    snapshot_id = Column(String(36))
    host = Column(String(255))
    size = Column(Integer)
    availability_zone = Column(String(255))
    instance_uuid = Column(String(36))
    mountpoint = Column(String(255))
    attach_time = Column(DateTime)
    status = Column(String(255))  # TODO(vish): enum?
    attach_status = Column(String(255))  # TODO(vish): enum
    scheduled_at = Column(DateTime)
    launched_at = Column(DateTime)
    terminated_at = Column(DateTime)
    display_name = Column(String(255))
    display_description = Column(String(255))
    provider_location = Column(String(256))
    provider_auth = Column(String(256))
    volume_type_id = Column(Integer)
class Quota(BASE, NovaBase):
    """Represents a single quota override for a project.

    If there is no row for a given project id and resource, then the
    default for the quota class is used.  If there is no row for a
    given quota class and resource, then the default for the
    deployment is used. If the row is present but the hard limit is
    Null, then the resource is unlimited.
    """
    __tablename__ = 'quotas'
    __table_args__ = (
        schema.UniqueConstraint("project_id", "resource", "deleted",
                                name="uniq_quotas0project_id0resource0deleted"
                                ),
    )
    id = Column(Integer, primary_key=True)
    project_id = Column(String(255))
    # Name of the quota-limited resource (e.g. a resource key the quota
    # engine understands).
    resource = Column(String(255), nullable=False)
    hard_limit = Column(Integer)
class ProjectUserQuota(BASE, NovaBase):
    """Represents a single quota override for a user with in a project."""
    __tablename__ = 'project_user_quotas'
    # Constraint name is hoisted into a variable to stay within line limits.
    uniq_name = "uniq_project_user_quotas0user_id0project_id0resource0deleted"
    __table_args__ = (
        schema.UniqueConstraint("user_id", "project_id", "resource", "deleted",
                                name=uniq_name),
        Index('project_user_quotas_project_id_deleted_idx',
              'project_id', 'deleted'),
        Index('project_user_quotas_user_id_deleted_idx',
              'user_id', 'deleted')
    )
    id = Column(Integer, primary_key=True, nullable=False)
    project_id = Column(String(255), nullable=False)
    user_id = Column(String(255), nullable=False)
    resource = Column(String(255), nullable=False)
    hard_limit = Column(Integer)
class QuotaClass(BASE, NovaBase):
    """Represents a single quota override for a quota class.

    If there is no row for a given quota class and resource, then the
    default for the deployment is used.  If the row is present but the
    hard limit is Null, then the resource is unlimited.
    """
    __tablename__ = 'quota_classes'
    __table_args__ = (
        Index('ix_quota_classes_class_name', 'class_name'),
    )
    id = Column(Integer, primary_key=True)
    class_name = Column(String(255))
    resource = Column(String(255))
    hard_limit = Column(Integer)
class QuotaUsage(BASE, NovaBase):
    """Represents the current usage for a given resource."""
    __tablename__ = 'quota_usages'
    __table_args__ = (
        Index('ix_quota_usages_project_id', 'project_id'),
    )
    id = Column(Integer, primary_key=True)
    project_id = Column(String(255))
    user_id = Column(String(255))
    resource = Column(String(255), nullable=False)
    # in_use: confirmed consumption; reserved: pending reservations.
    in_use = Column(Integer, nullable=False)
    reserved = Column(Integer, nullable=False)
    @property
    def total(self):
        """Total usage counted against quota (confirmed + reserved)."""
        return self.in_use + self.reserved
    until_refresh = Column(Integer)
class Reservation(BASE, NovaBase):
    """Represents a resource reservation for quotas."""
    __tablename__ = 'reservations'
    __table_args__ = (
        Index('ix_reservations_project_id', 'project_id'),
        Index('reservations_uuid_idx', 'uuid'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    uuid = Column(String(36), nullable=False)
    usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False)
    project_id = Column(String(255))
    user_id = Column(String(255))
    resource = Column(String(255))
    # Signed change to apply to the usage when the reservation commits.
    delta = Column(Integer, nullable=False)
    # When the reservation lapses if neither committed nor rolled back.
    expire = Column(DateTime)
    # Soft-delete aware join back to the QuotaUsage row being reserved.
    usage = relationship(
        "QuotaUsage",
        foreign_keys=usage_id,
        primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,'
                    'QuotaUsage.deleted == 0)')
class Snapshot(BASE, NovaBase):
    """Represents a snapshot of a block storage device (volume)."""
    __tablename__ = 'snapshots'
    __table_args__ = ()
    id = Column(String(36), primary_key=True, nullable=False)
    # Overrides NovaBase's integer soft-delete marker with the row's uuid
    # string ("" means not deleted), matching the String(36) primary key.
    deleted = Column(String(36), default="")
    @property
    def name(self):
        """Return the display name derived from ``snapshot_name_template``."""
        return CONF.snapshot_name_template % self.id
    @property
    def volume_name(self):
        """Return the source volume's name via ``volume_name_template``."""
        return CONF.volume_name_template % self.volume_id
    user_id = Column(String(255))
    project_id = Column(String(255))
    volume_id = Column(String(36), nullable=False)
    status = Column(String(255))
    progress = Column(String(255))
    volume_size = Column(Integer)
    scheduled_at = Column(DateTime)
    display_name = Column(String(255))
    display_description = Column(String(255))
class BlockDeviceMapping(BASE, NovaBase):
    """Represents block device mapping that is defined by EC2."""
    __tablename__ = "block_device_mapping"
    __table_args__ = (
        Index('snapshot_id', 'snapshot_id'),
        Index('volume_id', 'volume_id'),
        Index('block_device_mapping_instance_uuid_device_name_idx',
              'instance_uuid', 'device_name'),
        Index('block_device_mapping_instance_uuid_volume_id_idx',
              'instance_uuid', 'volume_id'),
        Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'),
        #TODO(sshturm) Should be dropped. `virtual_name` was dropped
        #in 186 migration,
        #Duplicates `block_device_mapping_instance_uuid_device_name_idx` index.
        Index("block_device_mapping_instance_uuid_virtual_name"
              "_device_name_idx", 'instance_uuid', 'device_name'),
    )
    id = Column(Integer, primary_key=True, autoincrement=True)
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    # Soft-delete aware join to the owning instance.
    instance = relationship(Instance,
                            backref=backref('block_device_mapping'),
                            foreign_keys=instance_uuid,
                            primaryjoin='and_(BlockDeviceMapping.'
                                        'instance_uuid=='
                                        'Instance.uuid,'
                                        'BlockDeviceMapping.deleted=='
                                        '0)')
    source_type = Column(String(255))
    destination_type = Column(String(255))
    guest_format = Column(String(255))
    device_type = Column(String(255))
    disk_bus = Column(String(255))
    boot_index = Column(Integer)
    device_name = Column(String(255))
    # default=False for compatibility of the existing code.
    # With EC2 API,
    # default True for ami specified device.
    # default False for created with other timing.
    #TODO(sshturm) add default in db
    delete_on_termination = Column(Boolean, default=False)
    snapshot_id = Column(String(36))
    volume_id = Column(String(36))
    volume_size = Column(Integer)
    image_id = Column(String(36))
    # for no device to suppress devices.
    no_device = Column(Boolean)
    connection_info = Column(MediumText())
class IscsiTarget(BASE, NovaBase):
    """Represents an iscsi target for a given host."""
    __tablename__ = 'iscsi_targets'
    __table_args__ = (
        Index('iscsi_targets_volume_id_fkey', 'volume_id'),
        Index('iscsi_targets_host_idx', 'host'),
        Index('iscsi_targets_host_volume_id_deleted_idx', 'host', 'volume_id',
              'deleted')
    )
    id = Column(Integer, primary_key=True, nullable=False)
    target_num = Column(Integer)
    host = Column(String(255))
    volume_id = Column(String(36), ForeignKey('volumes.id'))
    # One-to-one (uselist=False) soft-delete aware join to the exported
    # volume.
    volume = relationship(Volume,
                          backref=backref('iscsi_target', uselist=False),
                          foreign_keys=volume_id,
                          primaryjoin='and_(IscsiTarget.volume_id==Volume.id,'
                                      'IscsiTarget.deleted==0)')
class SecurityGroupInstanceAssociation(BASE, NovaBase):
    """Join table linking instances to their security groups."""
    __tablename__ = 'security_group_instance_association'
    __table_args__ = (
        Index('security_group_instance_association_instance_uuid_idx',
              'instance_uuid'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    security_group_id = Column(Integer, ForeignKey('security_groups.id'))
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
class SecurityGroup(BASE, NovaBase):
    """Represents a security group."""
    __tablename__ = 'security_groups'
    __table_args__ = (
        Index('uniq_security_groups0project_id0name0deleted', 'project_id',
              'name', 'deleted'),
    )
    id = Column(Integer, primary_key=True)
    name = Column(String(255))
    description = Column(String(255))
    user_id = Column(String(255))
    project_id = Column(String(255))
    # Many-to-many via the association table; all three joins filter out
    # soft-deleted rows.
    instances = relationship(Instance,
                             secondary="security_group_instance_association",
                             primaryjoin='and_('
        'SecurityGroup.id == '
        'SecurityGroupInstanceAssociation.security_group_id,'
        'SecurityGroupInstanceAssociation.deleted == 0,'
        'SecurityGroup.deleted == 0)',
                             secondaryjoin='and_('
        'SecurityGroupInstanceAssociation.instance_uuid == Instance.uuid,'
        # (anthony) the condition below shouldn't be necessary now that the
        # association is being marked as deleted.  However, removing this
        # may cause existing deployments to choke, so I'm leaving it
        'Instance.deleted == 0)',
                             backref='security_groups')
class SecurityGroupIngressRule(BASE, NovaBase):
    """Represents a rule in a security group."""
    __tablename__ = 'security_group_rules'
    __table_args__ = ()
    id = Column(Integer, primary_key=True)
    parent_group_id = Column(Integer, ForeignKey('security_groups.id'))
    # The group that owns this rule; join filters soft-deleted rules.
    parent_group = relationship("SecurityGroup", backref="rules",
                                foreign_keys=parent_group_id,
                                primaryjoin='and_('
        'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,'
        'SecurityGroupIngressRule.deleted == 0)')
    protocol = Column(String(255))
    from_port = Column(Integer)
    to_port = Column(Integer)
    cidr = Column(types.CIDR())
    # Note: This is not the parent SecurityGroup. It's SecurityGroup we're
    # granting access for.
    group_id = Column(Integer, ForeignKey('security_groups.id'))
    grantee_group = relationship("SecurityGroup",
                                 foreign_keys=group_id,
                                 primaryjoin='and_('
        'SecurityGroupIngressRule.group_id == SecurityGroup.id,'
        'SecurityGroupIngressRule.deleted == 0)')
class SecurityGroupIngressDefaultRule(BASE, NovaBase):
    """Represents a default ingress rule applied to new security groups."""
    __tablename__ = 'security_group_default_rules'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, nullable=False)
    protocol = Column(String(5))  # "tcp", "udp" or "icmp"
    from_port = Column(Integer)
    to_port = Column(Integer)
    cidr = Column(types.CIDR())
class ProviderFirewallRule(BASE, NovaBase):
    """Represents a provider-level firewall rule applied to all instances."""
    __tablename__ = 'provider_fw_rules'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, nullable=False)
    protocol = Column(String(5))  # "tcp", "udp", or "icmp"
    from_port = Column(Integer)
    to_port = Column(Integer)
    cidr = Column(types.CIDR())
class KeyPair(BASE, NovaBase):
    """Represents a public key pair for ssh."""
    __tablename__ = 'key_pairs'
    __table_args__ = (
        schema.UniqueConstraint("user_id", "name", "deleted",
                                name="uniq_key_pairs0user_id0name0deleted"),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    name = Column(String(255))
    user_id = Column(String(255))
    fingerprint = Column(String(255))
    public_key = Column(MediumText())
class Migration(BASE, NovaBase):
    """Represents a running host-to-host migration."""
    __tablename__ = 'migrations'
    __table_args__ = (
        Index('migrations_instance_uuid_and_status_idx', 'instance_uuid',
              'status'),
        Index('migrations_by_host_nodes_and_status_idx', 'deleted',
              'source_compute', 'dest_compute', 'source_node', 'dest_node',
              'status'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    # NOTE(tr3buchet): the ____compute variables are instance['host']
    source_compute = Column(String(255))
    dest_compute = Column(String(255))
    # nodes are equivalent to a compute node's 'hypvervisor_hostname'
    source_node = Column(String(255))
    dest_node = Column(String(255))
    # NOTE(tr3buchet): dest_host, btw, is an ip address
    dest_host = Column(String(255))
    # Flavor ids before/after a resize-style migration.
    old_instance_type_id = Column(Integer())
    new_instance_type_id = Column(Integer())
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    #TODO(_cerberus_): enum
    status = Column(String(255))
    # Join ignores soft-deleted instances.
    instance = relationship("Instance", foreign_keys=instance_uuid,
                            primaryjoin='and_(Migration.instance_uuid == '
                                        'Instance.uuid, Instance.deleted == '
                                        '0)')
class Network(BASE, NovaBase):
    """Represents a network."""
    __tablename__ = 'networks'
    __table_args__ = (
        schema.UniqueConstraint("vlan", "deleted",
                                name="uniq_networks0vlan0deleted"),
        Index('networks_bridge_deleted_idx', 'bridge', 'deleted'),
        Index('networks_host_idx', 'host'),
        Index('networks_project_id_deleted_idx', 'project_id', 'deleted'),
        Index('networks_uuid_project_id_deleted_idx', 'uuid',
              'project_id', 'deleted'),
        Index('networks_vlan_deleted_idx', 'vlan', 'deleted'),
        Index('networks_cidr_v6_idx', 'cidr_v6')
    )
    id = Column(Integer, primary_key=True, nullable=False)
    label = Column(String(255))
    injected = Column(Boolean, default=False)
    cidr = Column(types.CIDR())
    cidr_v6 = Column(types.CIDR())
    multi_host = Column(Boolean, default=False)
    gateway_v6 = Column(types.IPAddress())
    netmask_v6 = Column(types.IPAddress())
    netmask = Column(types.IPAddress())
    bridge = Column(String(255))
    bridge_interface = Column(String(255))
    gateway = Column(types.IPAddress())
    broadcast = Column(types.IPAddress())
    dns1 = Column(types.IPAddress())
    dns2 = Column(types.IPAddress())
    vlan = Column(Integer)
    vpn_public_address = Column(types.IPAddress())
    vpn_public_port = Column(Integer)
    vpn_private_address = Column(types.IPAddress())
    dhcp_start = Column(types.IPAddress())
    rxtx_base = Column(Integer)
    project_id = Column(String(255))
    priority = Column(Integer)
    host = Column(String(255))  # , ForeignKey('hosts.id'))
    uuid = Column(String(36))
class VirtualInterface(BASE, NovaBase):
    """Represents a virtual interface on an instance."""
    __tablename__ = 'virtual_interfaces'
    __table_args__ = (
        schema.UniqueConstraint("address", "deleted",
                                name="uniq_virtual_interfaces0address0deleted"),
        Index('network_id', 'network_id'),
        Index('virtual_interfaces_instance_uuid_fkey', 'instance_uuid'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    # MAC address of the interface; unique among non-deleted rows.
    address = Column(String(255))
    network_id = Column(Integer)
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    uuid = Column(String(36))
# TODO(vish): can these both come from the same baseclass?
class FixedIp(BASE, NovaBase):
    """Represents a fixed ip for an instance."""
    __tablename__ = 'fixed_ips'
    __table_args__ = (
        schema.UniqueConstraint(
            "address", "deleted", name="uniq_fixed_ips0address0deleted"),
        Index('fixed_ips_virtual_interface_id_fkey', 'virtual_interface_id'),
        Index('network_id', 'network_id'),
        Index('address', 'address'),
        Index('fixed_ips_instance_uuid_fkey', 'instance_uuid'),
        Index('fixed_ips_host_idx', 'host'),
        Index('fixed_ips_network_id_host_deleted_idx', 'network_id', 'host',
              'deleted'),
        Index('fixed_ips_address_reserved_network_id_deleted_idx',
              'address', 'reserved', 'network_id', 'deleted'),
        Index('fixed_ips_deleted_allocated_idx', 'address', 'deleted',
              'allocated')
    )
    id = Column(Integer, primary_key=True)
    address = Column(types.IPAddress())
    network_id = Column(Integer)
    virtual_interface_id = Column(Integer)
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    # associated means that a fixed_ip has its instance_id column set
    # allocated means that a fixed_ip has its virtual_interface_id column set
    #TODO(sshturm) add default in db
    allocated = Column(Boolean, default=False)
    # leased means dhcp bridge has leased the ip
    #TODO(sshturm) add default in db
    leased = Column(Boolean, default=False)
    #TODO(sshturm) add default in db
    reserved = Column(Boolean, default=False)
    host = Column(String(255))
    # Both relationships filter soft-deleted rows on each side of the join.
    network = relationship(Network,
                           backref=backref('fixed_ips'),
                           foreign_keys=network_id,
                           primaryjoin='and_('
                                'FixedIp.network_id == Network.id,'
                                'FixedIp.deleted == 0,'
                                'Network.deleted == 0)')
    instance = relationship(Instance,
                            foreign_keys=instance_uuid,
                            primaryjoin='and_('
                                'FixedIp.instance_uuid == Instance.uuid,'
                                'FixedIp.deleted == 0,'
                                'Instance.deleted == 0)')
class FloatingIp(BASE, NovaBase):
    """Represents a floating ip that dynamically forwards to a fixed ip."""
    __tablename__ = 'floating_ips'
    __table_args__ = (
        schema.UniqueConstraint("address", "deleted",
                                name="uniq_floating_ips0address0deleted"),
        Index('fixed_ip_id', 'fixed_ip_id'),
        Index('floating_ips_host_idx', 'host'),
        Index('floating_ips_project_id_idx', 'project_id'),
        Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
              'pool', 'deleted', 'fixed_ip_id', 'project_id')
    )
    id = Column(Integer, primary_key=True)
    address = Column(types.IPAddress())
    fixed_ip_id = Column(Integer)
    project_id = Column(String(255))
    host = Column(String(255))  # , ForeignKey('hosts.id'))
    auto_assigned = Column(Boolean, default=False)
    #TODO(sshturm) add default in db
    pool = Column(String(255))
    interface = Column(String(255))
    # Soft-delete aware join to the fixed ip this floating ip forwards to.
    fixed_ip = relationship(FixedIp,
                            backref=backref('floating_ips'),
                            foreign_keys=fixed_ip_id,
                            primaryjoin='and_('
                                'FloatingIp.fixed_ip_id == FixedIp.id,'
                                'FloatingIp.deleted == 0,'
                                'FixedIp.deleted == 0)')
class DNSDomain(BASE, NovaBase):
    """Represents a DNS domain with availability zone or project info."""
    __tablename__ = 'dns_domains'
    __table_args__ = (
        Index('project_id', 'project_id'),
        Index('dns_domains_domain_deleted_idx', 'domain', 'deleted'),
    )
    # Overrides NovaBase's integer soft-delete marker with a Boolean.
    deleted = Column(Boolean, default=False)
    # The domain string itself is the primary key (no surrogate id).
    domain = Column(String(255), primary_key=True)
    scope = Column(String(255))
    availability_zone = Column(String(255))
    project_id = Column(String(255))
class ConsolePool(BASE, NovaBase):
    """Represents pool of consoles on the same physical node."""
    __tablename__ = 'console_pools'
    __table_args__ = (
        schema.UniqueConstraint(
            "host", "console_type", "compute_host", "deleted",
            name="uniq_console_pools0host0console_type0compute_host0deleted"),
    )
    id = Column(Integer, primary_key=True)
    address = Column(types.IPAddress())
    username = Column(String(255))
    password = Column(String(255))
    console_type = Column(String(255))
    public_hostname = Column(String(255))
    host = Column(String(255))
    compute_host = Column(String(255))
class Console(BASE, NovaBase):
    """Represents a console session for an instance."""
    __tablename__ = 'consoles'
    __table_args__ = (
        Index('consoles_instance_uuid_idx', 'instance_uuid'),
    )
    id = Column(Integer, primary_key=True)
    instance_name = Column(String(255))
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    password = Column(String(255))
    port = Column(Integer)
    pool_id = Column(Integer, ForeignKey('console_pools.id'))
    pool = relationship(ConsolePool, backref=backref('consoles'))
class InstanceMetadata(BASE, NovaBase):
    """Represents a user-provided metadata key/value pair for an instance."""
    __tablename__ = 'instance_metadata'
    __table_args__ = (
        Index('instance_metadata_instance_uuid_idx', 'instance_uuid'),
    )
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
    value = Column(String(255))
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    # Soft-delete aware join back to the owning instance.
    instance = relationship(Instance, backref="metadata",
                            foreign_keys=instance_uuid,
                            primaryjoin='and_('
                                        'InstanceMetadata.instance_uuid == '
                                        'Instance.uuid,'
                                        'InstanceMetadata.deleted == 0)')
class InstanceSystemMetadata(BASE, NovaBase):
    """Represents a system-owned metadata key/value pair for an instance."""
    __tablename__ = 'instance_system_metadata'
    __table_args__ = ()
    id = Column(Integer, primary_key=True)
    key = Column(String(255), nullable=False)
    value = Column(String(255))
    instance_uuid = Column(String(36),
                           ForeignKey('instances.uuid'),
                           nullable=False)
    # Join condition excludes soft-deleted metadata rows.
    primary_join = ('and_(InstanceSystemMetadata.instance_uuid == '
                    'Instance.uuid, InstanceSystemMetadata.deleted == 0)')
    instance = relationship(Instance, backref="system_metadata",
                            foreign_keys=instance_uuid,
                            primaryjoin=primary_join)
class InstanceTypeProjects(BASE, NovaBase):
    """Represent projects associated instance_types."""
    __tablename__ = "instance_type_projects"
    __table_args__ = (schema.UniqueConstraint(
        "instance_type_id", "project_id", "deleted",
        name="uniq_instance_type_projects0instance_type_id0project_id0deleted"
        ),
    )
    id = Column(Integer, primary_key=True)
    instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
                              nullable=False)
    project_id = Column(String(255))
    # Soft-delete aware join back to the flavor this access row grants.
    instance_type = relationship(InstanceTypes, backref="projects",
                 foreign_keys=instance_type_id,
                 primaryjoin='and_('
                 'InstanceTypeProjects.instance_type_id == InstanceTypes.id,'
                 'InstanceTypeProjects.deleted == 0)')
class InstanceTypeExtraSpecs(BASE, NovaBase):
    """Represents additional specs as key/value pairs for an instance_type."""
    __tablename__ = 'instance_type_extra_specs'
    __table_args__ = (
        Index('instance_type_extra_specs_instance_type_id_key_idx',
              'instance_type_id', 'key'),
        schema.UniqueConstraint(
              "instance_type_id", "key", "deleted",
              name=("uniq_instance_type_extra_specs0"
                    "instance_type_id0key0deleted")
        ),
    )
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
    value = Column(String(255))
    instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
                              nullable=False)
    # Soft-delete aware join back to the owning flavor.
    instance_type = relationship(InstanceTypes, backref="extra_specs",
                 foreign_keys=instance_type_id,
                 primaryjoin='and_('
                 'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,'
                 'InstanceTypeExtraSpecs.deleted == 0)')
class Cell(BASE, NovaBase):
    """Represents parent and child cells of this cell.  Cells can
    have multiple parents and children, so there could be any number
    of entries with is_parent=True or False
    """
    __tablename__ = 'cells'
    __table_args__ = (schema.UniqueConstraint(
        "name", "deleted", name="uniq_cells0name0deleted"
        ),
    )
    id = Column(Integer, primary_key=True)
    # Name here is the 'short name' of a cell.  For instance: 'child1'
    name = Column(String(255))
    api_url = Column(String(255))
    transport_url = Column(String(255), nullable=False)
    # Scheduling weight tuning for cell selection.
    weight_offset = Column(Float(), default=0.0)
    weight_scale = Column(Float(), default=1.0)
    is_parent = Column(Boolean())
class AggregateHost(BASE, NovaBase):
    """Represents a host that is member of an aggregate."""
    __tablename__ = 'aggregate_hosts'
    __table_args__ = (schema.UniqueConstraint(
        "host", "aggregate_id", "deleted",
        name="uniq_aggregate_hosts0host0aggregate_id0deleted"
        ),
    )
    id = Column(Integer, primary_key=True, autoincrement=True)
    host = Column(String(255))
    aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class AggregateMetadata(BASE, NovaBase):
    """Represents a metadata key/value pair for an aggregate."""
    __tablename__ = 'aggregate_metadata'
    __table_args__ = (
        schema.UniqueConstraint("aggregate_id", "key", "deleted",
            name="uniq_aggregate_metadata0aggregate_id0key0deleted"
            ),
        Index('aggregate_metadata_key_idx', 'key'),
    )
    id = Column(Integer, primary_key=True)
    key = Column(String(255), nullable=False)
    value = Column(String(255), nullable=False)
    aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class Aggregate(BASE, NovaBase):
    """Represents a cluster of hosts that exists in this zone."""
    __tablename__ = 'aggregates'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(255))
    # Private collections; exposed through the read-only properties below.
    # Both joins exclude soft-deleted rows on either side.
    _hosts = relationship(AggregateHost,
                          primaryjoin='and_('
                          'Aggregate.id == AggregateHost.aggregate_id,'
                          'AggregateHost.deleted == 0,'
                          'Aggregate.deleted == 0)')
    _metadata = relationship(AggregateMetadata,
                             primaryjoin='and_('
                             'Aggregate.id == AggregateMetadata.aggregate_id,'
                             'AggregateMetadata.deleted == 0,'
                             'Aggregate.deleted == 0)')
    def _extra_keys(self):
        # Expose the computed properties when the model is iterated.
        return ['hosts', 'metadetails', 'availability_zone']
    @property
    def hosts(self):
        """List of host names that are members of this aggregate."""
        return [h.host for h in self._hosts]
    @property
    def metadetails(self):
        """Aggregate metadata rows flattened into a plain dict."""
        return dict([(m.key, m.value) for m in self._metadata])
    @property
    def availability_zone(self):
        """The 'availability_zone' metadata value, or None if unset."""
        if 'availability_zone' not in self.metadetails:
            return None
        return self.metadetails['availability_zone']
class AgentBuild(BASE, NovaBase):
    """Represents an agent build."""
    __tablename__ = 'agent_builds'
    __table_args__ = (
        Index('agent_builds_hypervisor_os_arch_idx', 'hypervisor', 'os',
              'architecture'),
        schema.UniqueConstraint("hypervisor", "os", "architecture", "deleted",
                name="uniq_agent_builds0hypervisor0os0architecture0deleted"),
    )
    id = Column(Integer, primary_key=True)
    hypervisor = Column(String(255))
    os = Column(String(255))
    architecture = Column(String(255))
    version = Column(String(255))
    url = Column(String(255))
    md5hash = Column(String(255))
class BandwidthUsage(BASE, NovaBase):
    """Cache for instance bandwidth usage data pulled from the hypervisor."""
    __tablename__ = 'bw_usage_cache'
    __table_args__ = (
        Index('bw_usage_cache_uuid_start_period_idx', 'uuid',
              'start_period'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    uuid = Column(String(36))
    mac = Column(String(255))
    start_period = Column(DateTime, nullable=False)
    last_refreshed = Column(DateTime)
    # Cumulative bytes in/out plus the raw counters they were derived from.
    bw_in = Column(BigInteger)
    bw_out = Column(BigInteger)
    last_ctr_in = Column(BigInteger)
    last_ctr_out = Column(BigInteger)
class VolumeUsage(BASE, NovaBase):
    """Cache for volume usage data pulled from the hypervisor."""
    __tablename__ = 'volume_usage_cache'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, nullable=False)
    volume_id = Column(String(36), nullable=False)
    instance_uuid = Column(String(36))
    project_id = Column(String(36))
    user_id = Column(String(36))
    availability_zone = Column(String(255))
    # tot_* counters accumulate across attachments; curr_* counters track
    # the current attachment only.
    tot_last_refreshed = Column(DateTime)
    tot_reads = Column(BigInteger, default=0)
    tot_read_bytes = Column(BigInteger, default=0)
    tot_writes = Column(BigInteger, default=0)
    tot_write_bytes = Column(BigInteger, default=0)
    curr_last_refreshed = Column(DateTime)
    curr_reads = Column(BigInteger, default=0)
    curr_read_bytes = Column(BigInteger, default=0)
    curr_writes = Column(BigInteger, default=0)
    curr_write_bytes = Column(BigInteger, default=0)
class S3Image(BASE, NovaBase):
    """Compatibility layer for the S3 image service talking to Glance."""
    __tablename__ = 's3_images'
    __table_args__ = ()
    # Maps the integer id exposed by the S3 API to the Glance image uuid.
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    uuid = Column(String(36), nullable=False)
class VolumeIdMapping(BASE, NovaBase):
    """Compatibility layer for the EC2 volume service."""
    __tablename__ = 'volume_id_mappings'
    __table_args__ = ()
    # Maps the integer id used by the EC2 API to the volume uuid.
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    uuid = Column(String(36), nullable=False)
class SnapshotIdMapping(BASE, NovaBase):
    """Compatibility layer for the EC2 snapshot service."""
    __tablename__ = 'snapshot_id_mappings'
    __table_args__ = ()
    # Maps the integer id used by the EC2 API to the snapshot uuid.
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    uuid = Column(String(36), nullable=False)
class InstanceFault(BASE, NovaBase):
    """Fault/error information recorded against an instance."""
    __tablename__ = 'instance_faults'
    __table_args__ = (
        Index('instance_faults_host_idx', 'host'),
        Index('instance_faults_instance_uuid_deleted_created_at_idx',
              'instance_uuid', 'deleted', 'created_at')
    )
    id = Column(Integer, primary_key=True, nullable=False)
    instance_uuid = Column(String(36),
                           ForeignKey('instances.uuid'))
    # Numeric fault code plus a short message; `details` can hold large
    # payloads (e.g. tracebacks) since it is MediumText.
    code = Column(Integer(), nullable=False)
    message = Column(String(255))
    details = Column(MediumText())
    host = Column(String(255))
class InstanceAction(BASE, NovaBase):
    """Track client actions on an instance.
    The intention is that there will only be one of these per user request. A
    lookup by (instance_uuid, request_id) should always return a single result.
    """
    __tablename__ = 'instance_actions'
    __table_args__ = (
        Index('instance_uuid_idx', 'instance_uuid'),
        Index('request_id_idx', 'request_id')
    )
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    action = Column(String(255))
    instance_uuid = Column(String(36),
                           ForeignKey('instances.uuid'))
    request_id = Column(String(255))
    user_id = Column(String(255))
    project_id = Column(String(255))
    # start_time defaults to now; finish_time stays NULL until completion.
    start_time = Column(DateTime, default=timeutils.utcnow)
    finish_time = Column(DateTime)
    message = Column(String(255))
class InstanceActionEvent(BASE, NovaBase):
    """Track events that occur during an InstanceAction."""
    __tablename__ = 'instance_actions_events'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    event = Column(String(255))
    # Parent action this event belongs to.
    action_id = Column(Integer, ForeignKey('instance_actions.id'))
    start_time = Column(DateTime, default=timeutils.utcnow)
    finish_time = Column(DateTime)
    result = Column(String(255))
    # Full traceback text when the event failed.
    traceback = Column(Text)
class InstanceIdMapping(BASE, NovaBase):
    """Compatibility layer for the EC2 instance service."""
    __tablename__ = 'instance_id_mappings'
    __table_args__ = (
        Index('ix_instance_id_mappings_uuid', 'uuid'),
    )
    # Maps the integer id used by the EC2 API to the instance uuid.
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    uuid = Column(String(36), nullable=False)
class TaskLog(BASE, NovaBase):
    """Audit log for background periodic tasks."""
    __tablename__ = 'task_log'
    __table_args__ = (
        schema.UniqueConstraint(
            'task_name', 'host', 'period_beginning', 'period_ending',
            name="uniq_task_log0task_name0host0period_beginning0period_ending"
        ),
        Index('ix_task_log_period_beginning', 'period_beginning'),
        Index('ix_task_log_host', 'host'),
        Index('ix_task_log_period_ending', 'period_ending'),
    )
    id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
    # One row per (task, host, audit period) — enforced by the unique
    # constraint above.
    task_name = Column(String(255), nullable=False)
    state = Column(String(255), nullable=False)
    host = Column(String(255), nullable=False)
    period_beginning = Column(DateTime, default=timeutils.utcnow,
                              nullable=False)
    period_ending = Column(DateTime, default=timeutils.utcnow,
                           nullable=False)
    message = Column(String(255), nullable=False)
    # Work-item and error counters for the audited run.
    task_items = Column(Integer(), default=0)
    errors = Column(Integer(), default=0)
class InstanceGroupMember(BASE, NovaBase):
    """Represents the members for an instance group."""
    __tablename__ = 'instance_group_member'
    __table_args__ = (
        Index('instance_group_member_instance_idx', 'instance_id'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    instance_id = Column(String(255))
    # Owning group; rows are joined from InstanceGroup._members.
    group_id = Column(Integer, ForeignKey('instance_groups.id'),
                      nullable=False)
class InstanceGroupPolicy(BASE, NovaBase):
    """Represents the policy type for an instance group."""
    __tablename__ = 'instance_group_policy'
    __table_args__ = (
        Index('instance_group_policy_policy_idx', 'policy'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    policy = Column(String(255))
    # Owning group; rows are joined from InstanceGroup._policies.
    group_id = Column(Integer, ForeignKey('instance_groups.id'),
                      nullable=False)
class InstanceGroupMetadata(BASE, NovaBase):
    """Represents a key/value pair for an instance group."""
    __tablename__ = 'instance_group_metadata'
    __table_args__ = (
        Index('instance_group_metadata_key_idx', 'key'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    key = Column(String(255))
    value = Column(String(255))
    # Owning group; rows are joined from InstanceGroup._metadata.
    group_id = Column(Integer, ForeignKey('instance_groups.id'),
                      nullable=False)
class InstanceGroup(BASE, NovaBase):
    """Represents an instance group.
    A group will maintain a collection of instances and the relationship
    between them.
    """
    __tablename__ = 'instance_groups'
    __table_args__ = (
        schema.UniqueConstraint("uuid", "deleted",
                                name="uniq_instance_groups0uuid0deleted"),
    )
    id = Column(Integer, primary_key=True, autoincrement=True)
    user_id = Column(String(255))
    project_id = Column(String(255))
    uuid = Column(String(36), nullable=False)
    name = Column(String(255))
    # The primaryjoins below exclude soft-deleted rows on both sides
    # (deleted == 0), so the properties only ever see live records.
    _policies = relationship(InstanceGroupPolicy, primaryjoin='and_('
        'InstanceGroup.id == InstanceGroupPolicy.group_id,'
        'InstanceGroupPolicy.deleted == 0,'
        'InstanceGroup.deleted == 0)')
    _metadata = relationship(InstanceGroupMetadata, primaryjoin='and_('
        'InstanceGroup.id == InstanceGroupMetadata.group_id,'
        'InstanceGroupMetadata.deleted == 0,'
        'InstanceGroup.deleted == 0)')
    _members = relationship(InstanceGroupMember, primaryjoin='and_('
        'InstanceGroup.id == InstanceGroupMember.group_id,'
        'InstanceGroupMember.deleted == 0,'
        'InstanceGroup.deleted == 0)')
    @property
    def policies(self):
        # Flatten the related policy rows to their string values.
        return [p.policy for p in self._policies]
    @property
    def metadetails(self):
        # Expose the group's metadata rows as a plain {key: value} dict.
        return dict((m.key, m.value) for m in self._metadata)
    @property
    def members(self):
        # Instance ids of the group's live members.
        return [m.instance_id for m in self._members]
class PciDevice(BASE, NovaBase):
    """
    Represents a PCI host device that can be passed through to instances.
    """
    __tablename__ = 'pci_devices'
    __table_args__ = (
        Index('ix_pci_devices_compute_node_id_deleted',
              'compute_node_id', 'deleted'),
        Index('ix_pci_devices_instance_uuid_deleted',
              'instance_uuid', 'deleted'),
        schema.UniqueConstraint(
            "compute_node_id", "address", "deleted",
            name="uniq_pci_devices0compute_node_id0address0deleted")
    )
    id = Column(Integer, primary_key=True)
    compute_node_id = Column(Integer, ForeignKey('compute_nodes.id'),
                             nullable=False)
    # physical address of device domain:bus:slot.func (0000:09:01.1)
    address = Column(String(12), nullable=False)
    vendor_id = Column(String(4), nullable=False)
    product_id = Column(String(4), nullable=False)
    dev_type = Column(String(8), nullable=False)
    dev_id = Column(String(255))
    # label is abstract device name, that is used to unify devices with the
    # same functionality with different addresses or host.
    label = Column(String(255), nullable=False)
    # Lifecycle state of the device; valid values are managed by the callers
    # that write this table (not visible here).
    status = Column(String(36), nullable=False)
    extra_info = Column(Text)
    instance_uuid = Column(String(36))
    # Back-reference to the instance currently assigned this device; the join
    # filters out soft-deleted device rows (deleted == 0).
    instance = relationship(Instance, backref="pci_devices",
                            foreign_keys=instance_uuid,
                            primaryjoin='and_('
                                'PciDevice.instance_uuid == Instance.uuid,'
                                'PciDevice.deleted == 0)')
| |
from __future__ import print_function, absolute_import
import os
import subprocess
import sys
import tempfile
from .common import safe_mkdtemp, safe_rmtree
from .interpreter import PythonInterpreter, PythonCapability
from .tracer import TRACER
from pkg_resources import Distribution, PathMetadata
# Public API of this module.
# NOTE(review): EggInstaller and WheelInstaller are defined below but are not
# exported here — confirm whether that omission is intentional.
__all__ = (
  'Installer',
  'Packager'
)
def after_installation(function):
  """Decorator for installer methods that require a completed installation.

  Runs the (memoized) installation via ``self.run()`` before delegating to
  ``function`` and raises ``Installer.InstallFailure`` if the installation
  did not succeed.
  """
  import functools

  # functools.wraps preserves the wrapped method's __name__/__doc__, which
  # the bare wrapper previously clobbered.
  @functools.wraps(function)
  def function_wrapper(self, *args, **kw):
    self._installed = self.run()
    if not self._installed:
      raise Installer.InstallFailure('Failed to install %s' % self._source_dir)
    return function(self, *args, **kw)
  return function_wrapper
class InstallerBase(object):
  """Base class that runs a setup.py command for an unpacked source tree.

  Subclasses supply the setup.py command line (_setup_command) and any
  post-processing of the results (_postprocess).
  """
  SETUP_BOOTSTRAP_HEADER = "import sys"
  SETUP_BOOTSTRAP_MODULE = "sys.path.insert(0, %(path)r); import %(module)s"
  SETUP_BOOTSTRAP_FOOTER = """
__file__ = 'setup.py'
exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))
"""
  class Error(Exception): pass
  class InstallFailure(Error): pass
  class IncapableInterpreter(Error): pass
  def __init__(self, source_dir, strict=True, interpreter=None, install_dir=None):
    """
    Create an installer from an unpacked source distribution in source_dir.
    If strict=True, fail if any installation dependencies (e.g. distribute)
    are missing.
    """
    self._source_dir = source_dir
    self._install_tmp = install_dir or safe_mkdtemp()
    # Tri-state: None = not yet run, True/False = result of the install.
    self._installed = None
    self._strict = strict
    self._interpreter = interpreter or PythonInterpreter.get()
    # Fail fast (strict mode only) if the interpreter lacks the mixins.
    if not self._interpreter.satisfies(self.capability) and strict:
      raise self.IncapableInterpreter('Interpreter %s not capable of running %s' % (
          self._interpreter, self.__class__.__name__))
  def mixins(self):
    """Return a map from import name to requirement to load into setup script prior to invocation.
    May be subclassed.
    """
    return {}
  @property
  def install_tmp(self):
    return self._install_tmp
  def _setup_command(self):
    """the setup command-line to run, to be implemented by subclasses."""
    raise NotImplementedError
  def _postprocess(self):
    """a post-processing function to run following setup.py invocation."""
  @property
  def capability(self):
    """returns the PythonCapability necessary for the interpreter to run this installer."""
    return PythonCapability(self.mixins().values())
  @property
  def bootstrap_script(self):
    # Assemble the script piped to the interpreter's stdin: import sys, put
    # each mixin requirement on sys.path and import it, then exec setup.py.
    bootstrap_modules = []
    for module, requirement in self.mixins().items():
      path = self._interpreter.get_location(requirement)
      if not path:
        assert not self._strict  # This should be caught by validation
        continue
      bootstrap_modules.append(self.SETUP_BOOTSTRAP_MODULE % {'path': path, 'module': module})
    return '\n'.join(
        [self.SETUP_BOOTSTRAP_HEADER] + bootstrap_modules + [self.SETUP_BOOTSTRAP_FOOTER])
  def run(self):
    # Memoized: only the first call actually invokes setup.py.
    if self._installed is not None:
      return self._installed
    with TRACER.timed('Installing %s' % self._install_tmp, V=2):
      command = [self._interpreter.binary, '-']
      command.extend(self._setup_command())
      po = subprocess.Popen(command,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            env=self._interpreter.sanitized_environment(),
                            cwd=self._source_dir)
      so, se = po.communicate(self.bootstrap_script.encode('ascii'))
      self._installed = po.returncode == 0
    if not self._installed:
      name = os.path.basename(self._source_dir)
      print('**** Failed to install %s. stdout:\n%s' % (name, so.decode('utf-8')), file=sys.stderr)
      print('**** Failed to install %s. stderr:\n%s' % (name, se.decode('utf-8')), file=sys.stderr)
      return self._installed
    self._postprocess()
    return self._installed
  def cleanup(self):
    safe_rmtree(self._install_tmp)
class Installer(InstallerBase):
  """
  Install an unpacked distribution with a setup.py.
  Simple example:
  >>> from twitter.common.python.package import SourcePackage
  >>> from twitter.common.python.http import Web
  >>> tornado_tgz = SourcePackage(
  ...   'http://pypi.python.org/packages/source/t/tornado/tornado-2.3.tar.gz',
  ...   opener=Web())
  >>> tornado_installer = Installer(tornado_tgz.fetch())
  >>> tornado_installer.distribution()
  tornado 2.3 (/private/var/folders/Uh/UhXpeRIeFfGF7HoogOKC+++++TI/-Tmp-/tmpLLe_Ph/lib/python2.6/site-packages)
  You can then take that distribution and activate it:
  >>> tornado_distribution = tornado_installer.distribution()
  >>> tornado_distribution.activate()
  >>> import tornado
  Alternately you can use the EggInstaller to create an egg instead:
  >>> from twitter.common.python.installer import EggInstaller
  >>> EggInstaller(tornado_tgz.fetch()).bdist()
  '/var/folders/Uh/UhXpeRIeFfGF7HoogOKC+++++TI/-Tmp-/tmpufgZOO/tornado-2.3-py2.6.egg'
  """
  def __init__(self, source_dir, strict=True, interpreter=None):
    """
    Create an installer from an unpacked source distribution in source_dir.
    If strict=True, fail if any installation dependencies (e.g. setuptools)
    are missing.
    """
    super(Installer, self).__init__(source_dir, strict=strict, interpreter=interpreter)
    self._egg_info = None
    # Temp file that 'setup.py install --record' fills with installed paths.
    fd, self._install_record = tempfile.mkstemp()
    os.close(fd)
  def _setup_command(self):
    return ['install',
            '--root=%s' % self._install_tmp,
            '--prefix=',
            '--single-version-externally-managed',
            '--record', self._install_record]
  def _postprocess(self):
    # Parse the --record output to locate the .egg-info directory and write
    # an installed-files.txt manifest (paths relative to .egg-info).
    installed_files = []
    egg_info = None
    with open(self._install_record) as fp:
      installed_files = fp.read().splitlines()
      for line in installed_files:
        if line.endswith('.egg-info'):
          assert line.startswith('/'), 'Expect .egg-info to be within install_tmp!'
          egg_info = line
          break
    if not egg_info:
      self._installed = False
      return self._installed
    installed_files = [os.path.relpath(fn, egg_info) for fn in installed_files if fn != egg_info]
    # egg_info is recorded root-relative, so strip the leading '/' before
    # re-rooting it under install_tmp.
    self._egg_info = os.path.join(self._install_tmp, egg_info[1:])
    with open(os.path.join(self._egg_info, 'installed-files.txt'), 'w') as fp:
      fp.write('\n'.join(installed_files))
      fp.write('\n')
    return self._installed
  @after_installation
  def egg_info(self):
    return self._egg_info
  @after_installation
  def root(self):
    # Parent directory of the .egg-info dir, i.e. the site-packages root.
    egg_info = self.egg_info()
    assert egg_info
    return os.path.realpath(os.path.dirname(egg_info))
  @after_installation
  def distribution(self):
    base_dir = self.root()
    egg_info = self.egg_info()
    metadata = PathMetadata(base_dir, egg_info)
    return Distribution.from_location(base_dir, os.path.basename(egg_info), metadata=metadata)
class DistributionPackager(InstallerBase):
  """Base for installers whose setup.py run drops one artifact in install_tmp."""

  def mixins(self):
    # Layer the setuptools requirement on top of the base mixins.
    combined = dict(super(DistributionPackager, self).mixins())
    combined['setuptools'] = 'setuptools>=1'
    return combined

  def find_distribution(self):
    """Return the single distribution produced under install_tmp.

    Raises InstallFailure when zero or multiple artifacts are present.
    """
    produced = os.listdir(self.install_tmp)
    if not produced:
      raise self.InstallFailure('No distributions were produced!')
    if len(produced) > 1:
      raise self.InstallFailure(
          'Ambiguous source distributions found: %s' % (' '.join(produced)))
    return os.path.join(self.install_tmp, produced[0])
class Packager(DistributionPackager):
  """
  Create a source distribution from an unpacked setup.py-based project.
  """

  def _setup_command(self):
    # Build a gzipped source tarball directly into the staging directory.
    return ['sdist', '--formats=gztar', '--dist-dir=' + self._install_tmp]

  @after_installation
  def sdist(self):
    """Return the path of the generated source distribution."""
    return self.find_distribution()
class EggInstaller(DistributionPackager):
  """
  Create an egg distribution (bdist_egg) from an unpacked setup.py-based
  project.
  """
  def _setup_command(self):
    return ['bdist_egg', '--dist-dir=%s' % self._install_tmp]
  @after_installation
  def bdist(self):
    """Return the path of the built .egg file."""
    return self.find_distribution()
class WheelInstaller(DistributionPackager):
  """
  Create a wheel distribution (bdist_wheel) from an unpacked setup.py-based
  project.
  """
  # Requirements injected into the setup.py bootstrap so bdist_wheel exists.
  MIXINS = {
    'setuptools': 'setuptools>=2',
    'wheel': 'wheel>=0.17',
  }
  def mixins(self):
    mixins = super(WheelInstaller, self).mixins().copy()
    mixins.update(self.MIXINS)
    return mixins
  def _setup_command(self):
    return ['bdist_wheel', '--dist-dir=%s' % self._install_tmp]
  @after_installation
  def bdist(self):
    """Return the path of the built .whl file."""
    return self.find_distribution()
| |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_context import context as oslo_context
from oslo_log import log
from oslo_log import versionutils
from oslo_middleware import sizelimit
from oslo_serialization import jsonutils
from keystone.common import authorization
from keystone.common import tokenless_auth
from keystone.common import wsgi
from keystone.contrib.federation import constants as federation_constants
from keystone.contrib.federation import utils
from keystone import exception
from keystone.i18n import _, _LI, _LW
from keystone.models import token_model
from keystone.token.providers import common
# Global oslo.config handle and module-level logger.
CONF = cfg.CONF
LOG = log.getLogger(__name__)
# Header used to transmit the auth token
AUTH_TOKEN_HEADER = 'X-Auth-Token'
# Header used to transmit the subject token
SUBJECT_TOKEN_HEADER = 'X-Subject-Token'
# Environment variable used to pass the request context
CONTEXT_ENV = wsgi.CONTEXT_ENV
# Environment variable used to pass the request params
PARAMS_ENV = wsgi.PARAMS_ENV
class TokenAuthMiddleware(wsgi.Middleware):
    """Copy the token headers from the request into the request context.

    Stores the X-Auth-Token value under ``token_id`` and, when the header is
    present, the X-Subject-Token value under ``subject_token_id``.
    """

    def process_request(self, request):
        ctx = request.environ.get(CONTEXT_ENV, {})
        ctx['token_id'] = request.headers.get(AUTH_TOKEN_HEADER)
        if SUBJECT_TOKEN_HEADER in request.headers:
            ctx['subject_token_id'] = request.headers[SUBJECT_TOKEN_HEADER]
        request.environ[CONTEXT_ENV] = ctx
class AdminTokenAuthMiddleware(wsgi.Middleware):
    """A trivial filter that checks for a pre-defined admin token.

    Sets 'is_admin' to true in the context, expected to be checked by
    methods that are admin-only.
    """

    def process_request(self, request):
        ctx = request.environ.get(CONTEXT_ENV, {})
        supplied = request.headers.get(AUTH_TOKEN_HEADER)
        ctx['is_admin'] = (supplied == CONF.admin_token)
        request.environ[CONTEXT_ENV] = ctx
class PostParamsMiddleware(wsgi.Middleware):
    """Middleware to allow method arguments to be passed as POST parameters.

    Filters out the parameters `self`, `context` and anything beginning with
    an underscore.
    """

    def process_request(self, request):
        # Drop reserved names and private (underscore-prefixed) parameters.
        request.environ[PARAMS_ENV] = {
            key: value
            for key, value in request.params.items()
            if key not in ('self', 'context') and not key.startswith('_')
        }
class JsonBodyMiddleware(wsgi.Middleware):
    """Middleware to allow method arguments to be passed as serialized JSON.
    Accepting arguments as JSON is useful for accepting data that may be more
    complex than simple primitives.
    Filters out the parameters `self`, `context` and anything beginning with
    an underscore.
    """
    def process_request(self, request):
        # Abort early if we don't have any work to do
        params_json = request.body
        if not params_json:
            return
        # Reject unrecognized content types. Empty string indicates
        # the client did not explicitly set the header
        if request.content_type not in ('application/json', ''):
            e = exception.ValidationError(attribute='application/json',
                                          target='Content-Type header')
            return wsgi.render_exception(e, request=request)
        params_parsed = {}
        try:
            params_parsed = jsonutils.loads(params_json)
        except ValueError:
            e = exception.ValidationError(attribute='valid JSON',
                                          target='request body')
            return wsgi.render_exception(e, request=request)
        finally:
            # NOTE: runs even on the error return above; any falsy parsed
            # value (null, 0, [], "") is normalized to an empty dict here,
            # which also lets it pass the isinstance check below.
            if not params_parsed:
                params_parsed = {}
        if not isinstance(params_parsed, dict):
            e = exception.ValidationError(attribute='valid JSON object',
                                          target='request body')
            return wsgi.render_exception(e, request=request)
        # Strip reserved/private argument names before exposing the params.
        params = {}
        for k, v in params_parsed.items():
            if k in ('self', 'context'):
                continue
            if k.startswith('_'):
                continue
            params[k] = v
        request.environ[PARAMS_ENV] = params
class NormalizingFilter(wsgi.Middleware):
    """Middleware filter to handle URL normalization."""

    def process_request(self, request):
        """Strip a single trailing slash; rewrite an empty path to '/'."""
        path = request.environ['PATH_INFO']
        if len(path) > 1 and path.endswith('/'):
            request.environ['PATH_INFO'] = path[:-1]
        elif not path:
            request.environ['PATH_INFO'] = '/'
class RequestBodySizeLimiter(sizelimit.RequestBodySizeLimiter):
    """Deprecated shim; use oslo_middleware.sizelimit directly."""
    @versionutils.deprecated(
        versionutils.deprecated.KILO,
        in_favor_of='oslo_middleware.sizelimit.RequestBodySizeLimiter',
        remove_in=+1,
        what='keystone.middleware.RequestBodySizeLimiter')
    def __init__(self, *args, **kwargs):
        super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)
class AuthContextMiddleware(wsgi.Middleware):
    """Build the authentication context from the request auth token."""
    def _build_auth_context(self, request):
        # Build an auth context dict from a validated Keystone token; returns
        # an empty dict for the special admin token.
        token_id = request.headers.get(AUTH_TOKEN_HEADER).strip()
        if token_id == CONF.admin_token:
            # NOTE(gyee): no need to proceed any further as the special admin
            # token is being handled by AdminTokenAuthMiddleware. This code
            # will not be impacted even if AdminTokenAuthMiddleware is removed
            # from the pipeline as "is_admin" is default to "False". This code
            # is independent of AdminTokenAuthMiddleware.
            return {}
        context = {'token_id': token_id}
        context['environment'] = request.environ
        try:
            token_ref = token_model.KeystoneToken(
                token_id=token_id,
                token_data=self.token_provider_api.validate_token(token_id))
            # TODO(gyee): validate_token_bind should really be its own
            # middleware
            wsgi.validate_token_bind(context, token_ref)
            return authorization.token_to_auth_context(token_ref)
        except exception.TokenNotFound:
            # An invalid/expired token surfaces to the client as 401.
            LOG.warning(_LW('RBAC: Invalid token'))
            raise exception.Unauthorized()
    def _build_tokenless_auth_context(self, env):
        """Build the authentication context.
        The context is built from the attributes provided in the env,
        such as certificate and scope attributes.
        """
        tokenless_helper = tokenless_auth.TokenlessAuthHelper(env)
        (domain_id, project_id, trust_ref, unscoped) = (
            tokenless_helper.get_scope())
        user_ref = tokenless_helper.get_mapped_user(
            project_id,
            domain_id)
        # NOTE(gyee): if it is an ephemeral user, the
        # given X.509 SSL client cert does not need to map to
        # an existing user.
        if user_ref['type'] == utils.UserType.EPHEMERAL:
            auth_context = {}
            auth_context['group_ids'] = user_ref['group_ids']
            auth_context[federation_constants.IDENTITY_PROVIDER] = (
                user_ref[federation_constants.IDENTITY_PROVIDER])
            auth_context[federation_constants.PROTOCOL] = (
                user_ref[federation_constants.PROTOCOL])
            if domain_id and project_id:
                msg = _('Scoping to both domain and project is not allowed')
                raise ValueError(msg)
            if domain_id:
                auth_context['domain_id'] = domain_id
            if project_id:
                auth_context['project_id'] = project_id
            auth_context['roles'] = user_ref['roles']
        else:
            # it's the local user, so token data is needed.
            token_helper = common.V3TokenDataHelper()
            token_data = token_helper.get_token_data(
                user_id=user_ref['id'],
                method_names=[CONF.tokenless_auth.protocol],
                domain_id=domain_id,
                project_id=project_id)
            auth_context = {'user_id': user_ref['id']}
            auth_context['is_delegated_auth'] = False
            if domain_id:
                auth_context['domain_id'] = domain_id
            if project_id:
                auth_context['project_id'] = project_id
            auth_context['roles'] = [role['name'] for role
                                     in token_data['token']['roles']]
        return auth_context
    def _validate_trusted_issuer(self, env):
        """To further filter the certificates that are trusted.
        If the config option 'trusted_issuer' is absent or does
        not contain the trusted issuer DN, no certificates
        will be allowed in tokenless authorization.
        :param env: The env contains the client issuer's attributes
        :type env: dict
        :returns: True if client_issuer is trusted; otherwise False
        """
        client_issuer = env.get(CONF.tokenless_auth.issuer_attribute)
        if not client_issuer:
            msg = _LI('Cannot find client issuer in env by the '
                      'issuer attribute - %s.')
            LOG.info(msg, CONF.tokenless_auth.issuer_attribute)
            return False
        if client_issuer in CONF.tokenless_auth.trusted_issuer:
            return True
        msg = _LI('The client issuer %(client_issuer)s does not match with '
                  'the trusted issuer %(trusted_issuer)s')
        LOG.info(
            msg, {'client_issuer': client_issuer,
                  'trusted_issuer': CONF.tokenless_auth.trusted_issuer})
        return False
    def process_request(self, request):
        # The request context stores itself in thread-local memory for logging.
        oslo_context.RequestContext(
            request_id=request.environ.get('openstack.request_id'))
        if authorization.AUTH_CONTEXT_ENV in request.environ:
            msg = _LW('Auth context already exists in the request '
                      'environment; it will be used for authorization '
                      'instead of creating a new one.')
            LOG.warning(msg)
            return
        # NOTE(gyee): token takes precedence over SSL client certificates.
        # This will preserve backward compatibility with the existing
        # behavior. Tokenless authorization with X.509 SSL client
        # certificate is effectively disabled if no trusted issuers are
        # provided.
        if AUTH_TOKEN_HEADER in request.headers:
            auth_context = self._build_auth_context(request)
        elif self._validate_trusted_issuer(request.environ):
            auth_context = self._build_tokenless_auth_context(
                request.environ)
        else:
            LOG.debug('There is either no auth token in the request or '
                      'the certificate issuer is not trusted. No auth '
                      'context will be set.')
            return
        LOG.debug('RBAC: auth_context: %s', auth_context)
        request.environ[authorization.AUTH_CONTEXT_ENV] = auth_context
| |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <ben@emerose.org>
# Copyright 2007 Kenneth Loafman <kenneth@loafman.com>
# Copyright 2008 Michael Terry <mike@mterry.name>
# Copyright 2011 Canonical Ltd
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Log various messages depending on verbosity level"""
import os
import sys
import logging
import datetime
# Duplicity verbosity levels: lower values are more severe.  These are the
# values users pass on the command line (0 = errors only ... 9 = debug).
MIN = 0
ERROR = 0
WARNING = 2
NOTICE = 3
INFO = 5
DEBUG = 9
MAX = 9
# Module-level logger; configured by setup code defined elsewhere in this
# module (not visible in this chunk).
_logger = None
def DupToLoggerLevel(verb):
    """Convert duplicity level to the logging module's system, where higher is
    more severe"""
    # Reflect the 0..MAX range: duplicity 0 (errors) -> logging MAX+1.
    return (MAX + 1) - verb
def LoggerToDupLevel(verb):
    """Convert logging module level to duplicity's system, where lower is
    more severe"""
    # The mapping verb -> MAX - verb + 1 is its own inverse, so reuse it.
    return DupToLoggerLevel(verb)
def LevelName(level):
    """Return the duplicity level-name string for a logging-module level."""
    dup_level = LoggerToDupLevel(level)
    for threshold, name in ((9, "DEBUG"), (5, "INFO"),
                            (3, "NOTICE"), (1, "WARNING")):
        if dup_level >= threshold:
            return name
    return "ERROR"
def Log(s, verb_level, code=1, extra=None, force_print=False):
    """Write s to stderr if verbosity level low enough"""
    global _logger
    # controlLine is a terrible hack until duplicity depends on Python 2.5
    # and its logging 'extra' keyword that allows a custom record dictionary.
    if extra:
        _logger.controlLine = '%d %s' % (code, extra)
    else:
        _logger.controlLine = '%d' % (code)
    if not s:
        s = '' # If None is passed, standard logging would render it as 'None'
    if force_print:
        # Temporarily raise verbosity to the maximum so the message is always
        # emitted; the previous level is restored below.
        initial_level = _logger.getEffectiveLevel()
        _logger.setLevel(DupToLoggerLevel(MAX))
    # If all the backends kindly gave us unicode, we could enable this next
    # assert line. As it is, we'll attempt to convert s to unicode if we
    # are handed bytes. One day we should update the backends.
    #assert isinstance(s, unicode)
    if not isinstance(s, unicode):
        s = s.decode("utf8", "replace")
    _logger.log(DupToLoggerLevel(verb_level), s)
    # Clear the control line so stale codes never leak into later records.
    _logger.controlLine = None
    if force_print:
        _logger.setLevel(initial_level)
def Debug(s):
    """Log *s* at debug verbosity (level 9)."""
    Log(s, verb_level=DEBUG)
class InfoCode:
    """Enumeration class to hold info code values.
    These values should never change, as frontends rely upon them.
    Don't use 0 or negative numbers."""
    # Codes are emitted on the log control line consumed by frontends.
    generic = 1
    progress = 2
    collection_status = 3
    diff_file_new = 4
    diff_file_changed = 5
    diff_file_deleted = 6
    patch_file_writing = 7
    patch_file_patching = 8
    #file_list = 9 # 9 isn't used anymore. It corresponds to an older syntax for listing files
    file_list = 10
    synchronous_upload_begin = 11
    asynchronous_upload_begin = 12
    synchronous_upload_done = 13
    asynchronous_upload_done = 14
    skipping_socket = 15
    upload_progress = 16
def Info(s, code=InfoCode.generic, extra=None):
    """Shortcut used for info messages (verbosity 5)."""
    Log(s, INFO, code=code, extra=extra)
def Progress(s, current, total=None):
    """Shortcut used for progress messages (verbosity 5)."""
    # The control line carries "current [total]" for frontends.
    control = ('%d %d' % (current, total)) if total else ('%d' % current)
    Log(s, INFO, InfoCode.progress, control)
def _ElapsedSecs2Str(secs):
tdelta = datetime.timedelta(seconds=secs)
hours,rem = divmod(tdelta.seconds, 3600)
minutes,seconds = divmod(rem, 60)
fmt = ""
if tdelta.days > 0:
fmt = "%dd," % (tdelta.days)
fmt = "%s%02d:%02d:%02d" % (fmt, hours, minutes, seconds)
return fmt
def _RemainingSecs2Str(secs):
tdelta = datetime.timedelta(seconds=secs)
hours,rem = divmod(tdelta.seconds, 3600)
minutes,seconds = divmod(rem, 60)
fmt = ""
if tdelta.days > 0:
fmt = "%dd" % (tdelta.days)
if hours > 0:
fmt = "%s %dh" % (fmt, hours)
if minutes > 0:
fmt = "%s %dmin" % (fmt, minutes)
elif hours > 0:
fmt = "%dh" % hours
if minutes > 0:
fmt = "%s %dmin" % (fmt, minutes)
elif minutes > 5:
fmt = "%dmin" % minutes
elif minutes > 0:
fmt = "%dmin" % minutes
if seconds >= 30:
fmt = "%s 30sec" % fmt
elif seconds > 45:
fmt = "< 1min"
elif seconds > 30:
fmt = "< 45sec"
elif seconds > 15:
fmt = "< 30sec"
else:
fmt = "%dsec" % seconds
return fmt
def TransferProgress(progress, eta, changed_bytes, elapsed, speed, stalled):
    """Shortcut used for upload progress messages (verbosity 5)."""
    # NOTE(review): the docstring says verbosity 5 (INFO) but the Log call
    # below uses NOTICE (3) — confirm which is intended.
    dots = int(0.4 * progress) # int(40.0 * progress / 100.0) -- for 40 chars
    # Scale the byte count to KB/MB/GB for display.
    data_amount = float(changed_bytes) / 1024.0
    data_scale = "KB"
    if data_amount > 1000.0:
        data_amount /= 1024.0
        data_scale = "MB"
    if data_amount > 1000.0:
        data_amount /= 1024.0
        data_scale = "GB"
    if stalled:
        eta_str = "Stalled!"
        speed_amount = 0
        speed_scale = "B"
    else:
        eta_str = _RemainingSecs2Str(eta)
        # Scale the transfer speed the same way as the byte count.
        speed_amount = float(speed) / 1024.0
        speed_scale = "KB"
        if speed_amount > 1000.0:
            speed_amount /= 1024.0
            speed_scale = "MB"
        if speed_amount > 1000.0:
            speed_amount /= 1024.0
            speed_scale = "GB"
    s = "%.1f%s %s [%.1f%s/s] [%s>%s] %d%% ETA %s" % (data_amount, data_scale,
                                                      _ElapsedSecs2Str(elapsed),
                                                      speed_amount, speed_scale,
                                                      '='*dots, ' '*(40-dots),
                                                      progress,
                                                      eta_str
                                                      )
    # Machine-readable mirror of the same numbers for frontends.
    controlLine = "%d %d %d %d %d %d" % (changed_bytes, elapsed, progress, eta, speed, stalled)
    Log(s, NOTICE, InfoCode.upload_progress, controlLine)
def PrintCollectionStatus(col_stats, force_print=False):
    """Prints a collection status to the log"""
    # Logged at verbosity 8 (just below debug); the control line carries the
    # machine-readable status lines for frontends.
    Log(unicode(col_stats), 8, InfoCode.collection_status,
        '\n' + '\n'.join(col_stats.to_log_info()), force_print)
def Notice(s):
    """Shortcut used for notice messages (verbosity 3, the default)."""
    Log(s, verb_level=NOTICE)
class WarningCode:
    """Enumeration class to hold warning code values.
    These values should never change, as frontends rely upon them.
    Don't use 0 or negative numbers."""
    # Codes are emitted on the log control line consumed by frontends.
    generic = 1
    orphaned_sig = 2
    unnecessary_sig = 3
    unmatched_sig = 4
    incomplete_backup = 5
    orphaned_backup = 6
    ftp_ncftp_v320 = 7 # moved from error
    cannot_iterate = 8
    cannot_stat = 9
    cannot_read = 10
    no_sig_for_time = 11
    cannot_process = 12
    process_skipped = 13
def Warn(s, code=WarningCode.generic, extra=None):
    """Shortcut used for warning messages (verbosity 2)"""
    Log(s, WARNING, code=code, extra=extra)
class ErrorCode:
    """Enumeration class to hold error code values.
    These values should never change, as frontends rely upon them.
    Don't use 0 or negative numbers. This code is returned by duplicity
    to indicate which error occurred via both exit code and log."""
    generic = 1 # Don't use if possible, please create a new code and use it
    command_line = 2
    hostname_mismatch = 3
    no_manifests = 4
    mismatched_manifests = 5
    unreadable_manifests = 6
    cant_open_filelist = 7
    bad_url = 8
    bad_archive_dir = 9
    bad_sign_key = 10
    restore_dir_exists = 11
    verify_dir_doesnt_exist = 12
    backup_dir_doesnt_exist = 13
    file_prefix_error = 14
    globbing_error = 15
    redundant_inclusion = 16
    inc_without_sigs = 17
    no_sigs = 18
    restore_dir_not_found = 19
    no_restore_files = 20
    mismatched_hash = 21
    unsigned_volume = 22
    user_error = 23
    boto_old_style = 24
    boto_lib_too_old = 25
    boto_calling_format = 26
    ftp_ncftp_missing = 27
    ftp_ncftp_too_old = 28
    #ftp_ncftp_v320 = 29 # moved to warning
    exception = 30
    gpg_failed = 31
    s3_bucket_not_style = 32
    not_implemented = 33
    get_freespace_failed = 34
    not_enough_freespace = 35
    get_ulimit_failed = 36
    maxopen_too_low = 37
    connection_failed = 38
    restart_file_not_found = 39
    gio_not_available = 40
    source_dir_mismatch = 42 # 41 is reserved for par2
    ftps_lftp_missing = 43
    volume_wrong_size = 44
    enryption_mismatch = 45 # historical misspelling, kept for compatibility
    # Correctly spelled alias for the same code; new callers should use this.
    encryption_mismatch = 45
    pythonoptimize_set = 46
    dpbx_nologin = 47
    # 50->69 reserved for backend errors
    backend_error = 50
    backend_permission_denied = 51
    backend_not_found = 52
    backend_no_space = 53
    backend_command_error = 54
    backend_code_error = 55
    # Reserve 126 because it is used as an error code for pkexec
    # Reserve 127 because it is used as an error code for pkexec
    # Reserve 255 because it is used as an error code for gksu
def Error(s, code=ErrorCode.generic, extra=None):
    """Write error message.

    Unlike FatalError, this does not terminate the process.
    """
    Log(s, ERROR, code, extra)
def FatalError(s, code=ErrorCode.generic, extra=None):
    """Write fatal error message and exit.

    The process exits with `code` so frontends can map the exit status
    back to an ErrorCode value.
    """
    Log(s, ERROR, code, extra)
    shutdown()
    sys.exit(code)
class DupLogRecord(logging.LogRecord):
    """Custom log record that holds a message code.

    In addition to the standard LogRecord fields it carries:
      controlLine -- the frontend-parsable status string (may be None)
      levelName   -- duplicity's own level name, kept separate from
                     logging's 'levelname' which other libraries may alter
    """
    def __init__(self, controlLine, *args, **kwargs):
        # Removed an unused 'global _logger' declaration; this method never
        # read or assigned the module-level logger.
        logging.LogRecord.__init__(self, *args, **kwargs)
        self.controlLine = controlLine
        self.levelName = LevelName(self.levelno)
class DupLogger(logging.Logger):
    """Custom logger that creates special code-bearing records"""
    # controlLine is a terrible hack until duplicity depends on Python 2.5
    # and its logging 'extra' keyword that allows a custom record dictionary.
    controlLine = None

    def makeRecord(self, name, lvl, fn, lno, msg, args, exc_info, *argv, **kwargs):
        # Inject the class-level controlLine into every record.  Extra
        # positional/keyword arguments that newer logging versions pass
        # are deliberately dropped: DupLogRecord does not accept them.
        return DupLogRecord(self.controlLine, name, lvl, fn, lno, msg, args, exc_info)
class OutFilter(logging.Filter):
    """Filter that only allows warning or less important messages"""
    def filter(self, record):
        # Non-empty messages at or below WARNING severity pass; these
        # belong on stdout (see setup()).
        return record.msg and record.levelno <= DupToLoggerLevel(WARNING)
class ErrFilter(logging.Filter):
    """Filter that only allows messages more important than warnings"""
    def filter(self, record):
        # Non-empty messages above WARNING severity pass; these belong
        # on stderr (see setup()).
        return record.msg and record.levelno > DupToLoggerLevel(WARNING)
def setup():
    """Initialize logging"""
    global _logger
    if _logger:
        # Already initialized; keep the existing handler configuration.
        return

    logging.setLoggerClass(DupLogger)
    _logger = logging.getLogger("duplicity")

    # Default verbosity allows notices and above
    setverbosity(NOTICE)

    # stdout and stderr are for different logging levels
    outHandler = logging.StreamHandler(sys.stdout)
    outHandler.addFilter(OutFilter())
    _logger.addHandler(outHandler)

    errHandler = logging.StreamHandler(sys.stderr)
    errHandler.addFilter(ErrFilter())
    _logger.addHandler(errHandler)
class MachineFormatter(logging.Formatter):
    """Formatter that renders records in the machine-readable syntax
    consumed by frontend processes."""
    def __init__(self):
        # 'message' is appended by format().  We deliberately use our own
        # custom-created 'levelName' rather than the standard 'levelname':
        # any library anywhere in the stack can adjust 'levelname' without
        # us knowing, whereas 'levelName' is fully under our control.
        logging.Formatter.__init__(self, "%(levelName)s %(controlLine)s")

    def format(self, record):
        out = logging.Formatter.format(self, record)
        if record.message:
            # Re-attach the user-text hint of 'message', each line prefixed
            # by a dot so consumers know it is not part of 'controlLine'.
            out += ('\n' + record.message).replace('\n', '\n. ')
        # Trailing newline tells consumers the message is over.
        return out + '\n'
class MachineFilter(logging.Filter):
    """Filter that only allows levels that are consumable by other processes."""
    def filter(self, record):
        # Only records created through DupLogger carry our custom
        # 'levelName' attribute; anything else is dropped.
        try:
            record.levelName
        except AttributeError:
            return False
        return True
def add_fd(fd):
    """Add stream to which to write machine-readable logging.

    fd is a raw file descriptor; it is wrapped in a text-mode file
    object whose lifetime is tied to the handler.
    """
    global _logger
    handler = logging.StreamHandler(os.fdopen(fd, 'w'))
    handler.setFormatter(MachineFormatter())
    handler.addFilter(MachineFilter())
    _logger.addHandler(handler)
def add_file(filename):
    """Add file to which to write machine-readable logging."""
    global _logger
    handler = logging.FileHandler(filename)
    handler.setFormatter(MachineFormatter())
    handler.addFilter(MachineFilter())
    _logger.addHandler(handler)
def setverbosity(verb):
    """Set the verbosity level"""
    global _logger
    # verb is a duplicity level; translate it to the logging scale.
    _logger.setLevel(DupToLoggerLevel(verb))
def getverbosity():
    """Get the verbosity level"""
    global _logger
    # Translate the effective logging level back to duplicity's scale.
    return LoggerToDupLevel(_logger.getEffectiveLevel())
def shutdown():
    """Cleanup and flush loggers"""
    # Delegates to the logging module, which flushes and closes handlers.
    logging.shutdown()
| |
"""The tests for the Input number component."""
# pylint: disable=protected-access
import asyncio
from homeassistant.core import CoreState, State, Context
from homeassistant.components.input_number import (
ATTR_VALUE,
DOMAIN,
SERVICE_DECREMENT,
SERVICE_INCREMENT,
SERVICE_SET_VALUE,
)
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.loader import bind_hass
from homeassistant.setup import async_setup_component
from tests.common import mock_restore_cache
@bind_hass
def set_value(hass, entity_id, value):
    """Set input_number to value.

    This is a legacy helper method. Do not use it for new tests.
    """
    # Fire-and-forget: the service call is only scheduled on the event
    # loop; callers must await hass.async_block_till_done() to observe it.
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN, SERVICE_SET_VALUE, {ATTR_ENTITY_ID: entity_id, ATTR_VALUE: value}
        )
    )
@bind_hass
def increment(hass, entity_id):
    """Increment value of entity.

    This is a legacy helper method. Do not use it for new tests.
    """
    # Scheduled, not awaited; see set_value().
    hass.async_create_task(
        hass.services.async_call(DOMAIN, SERVICE_INCREMENT, {ATTR_ENTITY_ID: entity_id})
    )
@bind_hass
def decrement(hass, entity_id):
    """Decrement value of entity.

    This is a legacy helper method. Do not use it for new tests.
    """
    # Scheduled, not awaited; see set_value().
    hass.async_create_task(
        hass.services.async_call(DOMAIN, SERVICE_DECREMENT, {ATTR_ENTITY_ID: entity_id})
    )
async def test_config(hass):
    """Test that invalid configurations are rejected."""
    invalid_configs = [
        None,
        {},
        {"name with space": None},
        {"test_1": {"min": 50, "max": 50}},  # min must be below max
    ]
    for config in invalid_configs:
        assert not await async_setup_component(hass, DOMAIN, {DOMAIN: config})
async def test_set_value(hass):
    """Test set_value method; out-of-range values leave the state unchanged."""
    assert await async_setup_component(
        hass, DOMAIN, {DOMAIN: {"test_1": {"initial": 50, "min": 0, "max": 100}}}
    )
    entity_id = "input_number.test_1"

    assert float(hass.states.get(entity_id).state) == 50

    # 110 exceeds max=100, so the value stays at the previous 70.
    for requested, expected in (("30.4", 30.4), ("70", 70), ("110", 70)):
        set_value(hass, entity_id, requested)
        await hass.async_block_till_done()
        assert float(hass.states.get(entity_id).state) == expected
async def test_increment(hass):
    """Test increment method; the value never exceeds the maximum."""
    assert await async_setup_component(
        hass, DOMAIN, {DOMAIN: {"test_2": {"initial": 50, "min": 0, "max": 51}}}
    )
    entity_id = "input_number.test_2"

    assert float(hass.states.get(entity_id).state) == 50

    # Second increment is capped at max=51.
    for expected in (51, 51):
        increment(hass, entity_id)
        await hass.async_block_till_done()
        assert float(hass.states.get(entity_id).state) == expected
async def test_decrement(hass):
    """Test decrement method; the value never drops below the minimum."""
    assert await async_setup_component(
        hass, DOMAIN, {DOMAIN: {"test_3": {"initial": 50, "min": 49, "max": 100}}}
    )
    entity_id = "input_number.test_3"

    assert float(hass.states.get(entity_id).state) == 50

    # Second decrement is capped at min=49.
    for expected in (49, 49):
        decrement(hass, entity_id)
        await hass.async_block_till_done()
        assert float(hass.states.get(entity_id).state) == expected
async def test_mode(hass):
    """Test mode settings; slider is the default mode."""
    assert await async_setup_component(
        hass,
        DOMAIN,
        {
            DOMAIN: {
                "test_default_slider": {"min": 0, "max": 100},
                "test_explicit_box": {"min": 0, "max": 100, "mode": "box"},
                "test_explicit_slider": {"min": 0, "max": 100, "mode": "slider"},
            }
        },
    )

    for object_id, expected_mode in (
        ("test_default_slider", "slider"),
        ("test_explicit_box", "box"),
        ("test_explicit_slider", "slider"),
    ):
        state = hass.states.get("input_number.%s" % object_id)
        assert state
        assert state.attributes["mode"] == expected_mode
async def test_restore_state(hass):
    """Ensure states are restored on startup.

    Converted from the removed ``@asyncio.coroutine``/``yield from``
    style (gone in Python 3.11) to async/await, matching the rest of
    this file.
    """
    mock_restore_cache(
        hass, (State("input_number.b1", "70"), State("input_number.b2", "200"))
    )

    hass.state = CoreState.starting

    await async_setup_component(
        hass,
        DOMAIN,
        {DOMAIN: {"b1": {"min": 0, "max": 100}, "b2": {"min": 10, "max": 100}}},
    )

    # b1's cached value is in range and restored verbatim.
    state = hass.states.get("input_number.b1")
    assert state
    assert float(state.state) == 70

    # b2's cached value (200) is out of range; it ends up at the minimum.
    state = hass.states.get("input_number.b2")
    assert state
    assert float(state.state) == 10
async def test_initial_state_overrules_restore_state(hass):
    """Ensure configured initial values win over restored states.

    Converted from the removed ``@asyncio.coroutine``/``yield from``
    style (gone in Python 3.11) to async/await, matching the rest of
    this file.
    """
    mock_restore_cache(
        hass, (State("input_number.b1", "70"), State("input_number.b2", "200"))
    )

    hass.state = CoreState.starting

    await async_setup_component(
        hass,
        DOMAIN,
        {
            DOMAIN: {
                "b1": {"initial": 50, "min": 0, "max": 100},
                "b2": {"initial": 60, "min": 0, "max": 100},
            }
        },
    )

    state = hass.states.get("input_number.b1")
    assert state
    assert float(state.state) == 50

    state = hass.states.get("input_number.b2")
    assert state
    assert float(state.state) == 60
async def test_no_initial_state_and_no_restore_state(hass):
    """Ensure the entity is created without initial and restore feature.

    Converted from the removed ``@asyncio.coroutine``/``yield from``
    style (gone in Python 3.11) to async/await, matching the rest of
    this file.
    """
    hass.state = CoreState.starting

    await async_setup_component(
        hass, DOMAIN, {DOMAIN: {"b1": {"min": 0, "max": 100}}}
    )

    # With neither an initial value nor a restored state, the entity
    # starts at its minimum (0 here).
    state = hass.states.get("input_number.b1")
    assert state
    assert float(state.state) == 0
async def test_input_number_context(hass, hass_admin_user):
    """Test that input_number context works."""
    assert await async_setup_component(
        hass, "input_number", {"input_number": {"b1": {"min": 0, "max": 100}}}
    )

    state = hass.states.get("input_number.b1")
    assert state is not None

    # Positional arguments after the service data: blocking=True, then the
    # Context the resulting state change should be attributed to.
    await hass.services.async_call(
        "input_number",
        "increment",
        {"entity_id": state.entity_id},
        True,
        Context(user_id=hass_admin_user.id),
    )

    state2 = hass.states.get("input_number.b1")
    assert state2 is not None
    assert state.state != state2.state
    assert state2.context.user_id == hass_admin_user.id
| |
from datetime import time
from pytz import utc
from behave import given, when, then, step
from launchkey.entities.service import TimeFence, GeoFence
from launchkey.entities.service.policy import FactorsPolicy
# Retrieve Organization Service Policy
from launchkey.entities.service.policy import ConditionalGeoFencePolicy, \
TerritoryFence, GeoCircleFence
@step("I retrieve the Policy for the Current Organization Service")
def retrieve_policy_for_current_organization_service(context):
    """Retrieve the (legacy) policy of the current Organization Service."""
    current_service = context.entity_manager.get_current_organization_service()
    context.organization_service_policy_manager.retrieve_service_policy(
        current_service.id
    )
@step("I retrieve the Advanced Policy for the Current Organization Service")
def retrieve_advanced_policy_for_current_organization_service(context):
    """Retrieve the advanced policy of the current Organization Service.

    Renamed from retrieve_policy_for_current_organization_service, which
    silently shadowed the step function of the same name defined above;
    behave registers steps via the decorator text, so the rename is safe.
    """
    current_service = context.entity_manager.get_current_organization_service()
    context.organization_service_policy_manager \
        .retrieve_advanced_service_policy(current_service.id)
@then("the Organization Service Policy has no requirement for inherence")
def verify_current_organization_service_policy_has_no_inherence_requirement(
        context):
    """Fail if the current policy lists inherence as a requirement."""
    current_policy = context.entity_manager.\
        get_current_organization_service_policy()
    requirements = current_policy.minimum_requirements
    if "inherence" in requirements:
        raise Exception(
            "Found inherence in current policy requirements when it "
            "should not have been: %s" % requirements
        )
@then("the Organization Service Policy has no requirement for knowledge")
def verify_current_organization_service_policy_has_no_knowledge_requirement(
        context):
    """Fail if the current policy lists knowledge as a requirement."""
    current_policy = context.entity_manager.\
        get_current_organization_service_policy()
    requirements = current_policy.minimum_requirements
    if "knowledge" in requirements:
        raise Exception(
            "Found knowledge in current policy requirements when it "
            "should not have been: %s" % requirements
        )
@then("the Organization Service Policy has no requirement for possession")
def verify_current_organization_service_policy_has_no_possession_requirement(
        context):
    """Fail if the current policy lists possession as a requirement."""
    current_policy = context.entity_manager.\
        get_current_organization_service_policy()
    requirements = current_policy.minimum_requirements
    if "possession" in requirements:
        raise Exception(
            "Found possession in current policy requirements when it "
            "should not have been: %s" % requirements
        )
@then("the Organization Service Policy has no requirement for number of "
      "factors")
def verify_current_organization_service_policy_has_no_factor_count_requirement(
        context):
    """Fail unless the current policy requires zero factors.

    Renamed (was ..._has_factor_count_requirement): the step text and
    the check assert the *absence* of a factor-count requirement; behave
    dispatches on the decorator text, so the rename is safe.
    """
    current_policy = context.entity_manager.\
        get_current_organization_service_policy()
    if current_policy.minimum_amount != 0:
        raise Exception(
            "Expected minimum requirement amount to be 0 but it was %s" %
            current_policy.minimum_amount
        )
@then("the Organization Service Policy requires {count:d} factors")
def verify_organization_service_policy_requires_count(context, count):
    """Fail unless the current policy's minimum amount equals *count*."""
    current_policy = context.entity_manager. \
        get_current_organization_service_policy()
    # Compare by value: the previous 'is not' checked object identity,
    # which is wrong for integers (CPython only interns small ints).
    if current_policy.minimum_amount != count:
        raise Exception(
            "Expected minimum requirement amount to be %s but it was %s" %
            (count, current_policy.minimum_amount)
        )
@when("I attempt to retrieve the Policy for the Organization Service with "
      "the ID \"{service_id}\"")
def attempt_to_retrieve_policy_for_given_organization_service_id(context,
                                                                 service_id):
    """Try to retrieve a policy by service ID, capturing any failure.

    The broad except is intentional: the exception is stored for later
    'expect an error' assertions rather than failing the step.
    """
    try:
        context.organization_service_policy_manager.retrieve_service_policy(
            service_id
        )
    except Exception as e:
        context.current_exception = e
# Set Organization Service Policy
@given("the Organization Service Policy is set to require {count:d} factor")
@step("the Organization Service Policy is set to require {count:d} factors")
def set_current_organization_policy_require_count(context, count):
    """Fetch the current policy, set its factor count, and cache it.

    The change is only applied to the cached policy; a later
    'I set the Policy ...' step pushes it to the service.
    """
    current_service = context.entity_manager.get_current_organization_service()
    context.organization_service_policy_manager.retrieve_service_policy(
        current_service.id
    )
    policy = context.entity_manager.get_current_organization_service_policy()
    policy.set_minimum_requirements(minimum_amount=count)
    context.entity_manager.set_current_organization_service_policy(policy)
def set_current_organization_policy_require_type(context, policy_type):
    """Add *policy_type* to the cached policy's minimum requirements,
    preserving the requirements that were already set."""
    policy = context.entity_manager.get_current_organization_service_policy()
    # Carry over previously set requirements, then add the new one.
    kwargs = {existing: True for existing in policy.minimum_requirements}
    kwargs[policy_type] = True
    try:
        policy.set_minimum_requirements(**kwargs)
    except TypeError:
        raise Exception("Invalid policy input %s" % policy_type)
    context.entity_manager.set_current_organization_service_policy(policy)
@when("the Organization Service Policy is set to require knowledge")
@given("the Organization Service Policy is set to require knowledge")
def set_current_organization_policy_require_type_knowledge(context):
    """Require the 'knowledge' factor on the cached policy."""
    set_current_organization_policy_require_type(context, "knowledge")
@when("the Organization Service Policy is set to require inherence")
@given("the Organization Service Policy is set to require inherence")
def set_current_organization_policy_require_type_inherence(context):
    """Require the 'inherence' factor on the cached policy."""
    set_current_organization_policy_require_type(context, "inherence")
@when("the Organization Service Policy is set to require possession")
@given("the Organization Service Policy is set to require possession")
def set_current_organization_policy_require_type_possession(context):
    """Require the 'possession' factor on the cached policy."""
    set_current_organization_policy_require_type(context, "possession")
@step("the Organization Service Policy is set to require jail "
      "break protection")
def set_organization_policy_required_jailbreak_protection(context):
    """Enable jailbreak protection on the cached policy.

    The mutated policy is stored back on the entity manager so later
    steps observe the change — matching every other policy-mutating
    step in this module, which previously this one failed to do.
    """
    policy = context.entity_manager.get_current_organization_service_policy()
    policy.require_jailbreak_protection(True)
    context.entity_manager.set_current_organization_service_policy(policy)
def verify_organization_service_policy_requires_type(context, policy_type):
    """Fail unless *policy_type* (case-insensitive) is among the cached
    policy's minimum requirements."""
    policy = context.entity_manager.get_current_organization_service_policy()
    requirements = policy.minimum_requirements
    if policy_type.lower() in requirements:
        return
    raise Exception("%s not in the requested Service policies, %s" %
                    (policy_type, requirements))
@then("the Organization Service Policy does require knowledge")
def verify_organization_service_policy_requires_type_knowledge(context):
    """Assert 'knowledge' is among the policy's minimum requirements."""
    verify_organization_service_policy_requires_type(context, "knowledge")
@then("the Organization Service Policy does require inherence")
def verify_organization_service_policy_requires_type_inherence(context):
    """Assert 'inherence' is among the policy's minimum requirements."""
    verify_organization_service_policy_requires_type(context, "inherence")
@then("the Organization Service Policy does require possession")
def verify_organization_service_policy_requires_type_possession(context):
    """Assert 'possession' is among the policy's minimum requirements."""
    verify_organization_service_policy_requires_type(context, "possession")
@then("the Organization Service Policy does require jail break protection")
def verify_organization_service_policy_requires_jailbreak_protection(context):
    """Fail unless jailbreak protection is enabled on the cached policy."""
    policy = context.entity_manager.get_current_organization_service_policy()
    if policy.jailbreak_protection is False:
        # Fixed message grammar ("did not required" -> "did not require").
        raise Exception("Policy did not require jailbreak protection when "
                        "it should")
@then("the Organization Service Policy has no requirement for jail break "
      "protection")
def verify_organization_service_policy_does_not_require_jailbreak_protection(
        context):
    """Fail if jailbreak protection is enabled on the cached policy."""
    policy = context.entity_manager.get_current_organization_service_policy()
    if policy.jailbreak_protection is True:
        # Repaired a garbled failure message ("Policy ailbreak protection
        # when it should not have").
        raise Exception("Policy required jailbreak protection when it "
                        "should not have")
@given(u"I set the Policy for the Organization Service")
@step(u"I set the Policy for the Current Organization Service")
def set_organization_service_policy_require_to_current_policy(context):
    """Push the cached (possibly mutated) policy back to the service."""
    current_service = context.entity_manager.get_current_organization_service()
    policy = context.entity_manager.get_current_organization_service_policy()
    context.organization_service_policy_manager.set_service_policy(
        current_service.id,
        policy
    )
@when("the Organization Service Policy is set to have the following "
      "Time Fences")
@given("the Organization Service Policy is set to have the following "
       "Time Fences")
def set_organization_service_policy_time_fences_from_table(context):
    """Add a time fence to the cached policy for every table row."""
    policy = context.entity_manager.get_current_organization_service_policy()
    for row in context.table:
        # Day names become boolean keyword arguments, e.g. monday=True.
        active_days = {day.lower(): True for day in row['Days'].split(",")}
        policy.add_timefence(
            row['Name'],
            time(hour=int(row['Start Hour']), minute=int(row['Start Minute'])),
            time(hour=int(row['End Hour']), minute=int(row['End Minute'])),
            **active_days
        )
    context.entity_manager.set_current_organization_service_policy(policy)
@then("the Organization Service Policy has the following Time Fences")
def verify_organization_service_policy_time_fences_from_table(context):
    """Check that every table row matches a time fence on the policy."""
    policy = context.entity_manager.get_current_organization_service_policy()
    for row in context.table:
        active_days = {day.lower(): True for day in row['Days'].split(",")}
        expected = TimeFence(
            row['Name'],
            time(hour=int(row['Start Hour']), minute=int(row['Start Minute']),
                 tzinfo=utc),
            time(hour=int(row['End Hour']), minute=int(row['End Minute']),
                 tzinfo=utc),
            **active_days
        )
        if expected not in policy.timefences:
            raise Exception("%s not in policy timefences: %s" %
                            (expected, policy.timefences))
@when("the Organization Service Policy is set to have the following Geofence "
      "locations")
@given("the Organization Service Policy is set to have the following "
       "Geofence locations")
def set_organization_service_policy_geo_fences_from_table(context):
    """Add a geofence to the cached policy for every table row."""
    policy = context.entity_manager.get_current_organization_service_policy()
    for row in context.table:
        policy.add_geofence(
            row['Latitude'], row['Longitude'], row['Radius'],
            name=row['Name'])
    context.entity_manager.set_current_organization_service_policy(policy)
@then("the Organization Service Policy has the following Geofence locations")
def verify_organization_service_geofence_locations_from_table(context):
    """Check that every table row matches a geofence on the policy."""
    policy = context.entity_manager.get_current_organization_service_policy()
    for row in context.table:
        expected = GeoFence(
            row['Latitude'], row['Longitude'], row['Radius'], row['Name'])
        if expected not in policy.geofences:
            raise Exception("%s not in policy geofences: %s" %
                            (expected, policy.geofences))
@then("the Organization Service Policy has {count:d} locations")
def verify_organization_service_policy_has_count_locations(context, count):
    """Fail unless the policy has exactly *count* geofence locations."""
    policy = context.entity_manager.get_current_organization_service_policy()
    actual = len(policy.geofences)
    if actual != count:
        raise Exception("Found %s locations when it should have been %s: %s" %
                        (actual, count, policy.geofences))
@then("the Organization Service Policy has {count:d} time fences")
def verify_organization_service_policy_has_count_timefences(context, count):
    """Fail unless the policy has exactly *count* time fences."""
    policy = context.entity_manager.get_current_organization_service_policy()
    actual = len(policy.timefences)
    if actual != count:
        raise Exception("Found %s timefences when it should have been %s: %s" %
                        (actual, count, policy.timefences))
@when("I attempt to set the Policy for the Organization Service with the ID "
      "\"{service_id}\"")
def attempt_to_set_policy_for_organization_service_from_id(context,
                                                           service_id):
    """Try to set the cached policy on a service by ID, capturing failures.

    The broad except deliberately stores the exception for later
    'expect an error' assertions instead of failing the step.
    """
    policy = context.entity_manager.get_current_organization_service_policy()
    try:
        context.organization_service_policy_manager.set_service_policy(
            service_id,
            policy
        )
    except Exception as e:
        context.current_exception = e
# Remove Organization Service Policy
@when("I remove the Policy for the Organization Service")
def remove_policy_for_current_organization_service(context):
    """Remove the policy from the current Organization Service."""
    current_service = context.entity_manager.get_current_organization_service()
    context.organization_service_policy_manager.remove_service_policy(
        current_service.id
    )
@when("I attempt to remove the Policy for the Organization Service with "
      "the ID \"{service_id}\"")
def attempt_to_remove_policy_from_given_organization_service_id(context,
                                                                service_id):
    """Try to remove a policy by service ID, capturing any failure.

    The broad except deliberately stores the exception for later
    'expect an error' assertions instead of failing the step.
    """
    try:
        context.organization_service_policy_manager.remove_service_policy(
            service_id
        )
    except Exception as e:
        context.current_exception = e
@step("I set the Policy for the Current Organization Service "
      "to the new policy")
def step_impl(context):
    """Apply the cached auth policy to the current Organization Service."""
    # Duplicate 'step_impl' names are fine: behave binds on decorator text.
    current_service = context.entity_manager.get_current_organization_service()
    policy = context.entity_manager.get_current_auth_policy()
    context.organization_service_policy_manager.set_service_policy(
        current_service.id,
        policy
    )
@step("I set the Advanced Policy for the Current Organization Service "
      "to the new policy")
def step_impl(context):
    """Apply the cached auth policy via the advanced-policy endpoint."""
    current_service = context.entity_manager.get_current_organization_service()
    policy = context.entity_manager.get_current_auth_policy()
    context.organization_service_policy_manager.set_advanced_service_policy(
        current_service.id,
        policy
    )
@given("the Organization Service is set to any Conditional Geofence Policy")
def step_impl(context):
    """Cache a minimal ConditionalGeoFencePolicy as both the current
    organization service policy and the current auth policy."""
    # The same FactorsPolicy instance is reused for inside and outside.
    default_nested_policy = FactorsPolicy(
        knowledge_required=True,
        deny_emulator_simulator=None,
        deny_rooted_jailbroken=None,
        fences=None
    )
    default_cond_geo_policy = ConditionalGeoFencePolicy(
        inside=default_nested_policy,
        outside=default_nested_policy,
        fences=[TerritoryFence("US", name="test1")]
    )
    context.entity_manager.set_current_organization_service_policy(
        default_cond_geo_policy)
    context.entity_manager.set_current_auth_policy(default_cond_geo_policy)
@then(
    'the Organization Service Policy contains the GeoCircleFence "{name}"')
def step_impl(context, name):
    """Succeed iff the policy has a GeoCircleFence named *name*."""
    policy = context.entity_manager.get_current_organization_service_policy()
    for fence in policy.fences:
        if fence.name == name and isinstance(fence, GeoCircleFence):
            return True
    raise ValueError("Fence {0} was not found".format(name))
@step('the Organization Service Policy contains the TerritoryFence "{name}"')
def step_impl(context, name):
    """Succeed iff the policy has a TerritoryFence named *name*."""
    policy = context.entity_manager.get_current_organization_service_policy()
    for fence in policy.fences:
        if fence.name == name and isinstance(fence, TerritoryFence):
            return True
    raise ValueError("Fence {0} was not found".format(name))
@then(u'the Organization Service Policy has "{amount}" fences')
def organization_service_amount_fences(context, amount):
    """Fail unless the cached policy has exactly *amount* fences."""
    policy = context.entity_manager.get_current_organization_service_policy()
    fence_count = len(policy.fences)
    if fence_count != int(amount):
        raise ValueError(
            "{0} does not equal current policy amount of {1}".format(
                amount,
                fence_count
            )
        )
@then(u'the Organization Service Policy has "{amount}" fence')
def single_fence(context, amount):
    """Singular-phrasing alias for organization_service_amount_fences."""
    # Handles the english phrasing for a single fence without
    # changing the behave matcher
    organization_service_amount_fences(context, amount)
| |
#!/usr/bin/python
#
# Autofocusing routines.
#
# You will need: scipy matplotlib sextractor
# This should work on Debian/ubuntu:
# sudo apt-get install python-matplotlib python-scipy python-pyfits sextractor
#
# If you would like to see sextractor results, get DS9 and pyds9:
#
# http://hea-www.harvard.edu/saord/ds9/
#
# Please be aware that current sextractor Ubuntu packages does not work
# properly. The best workaround is to install package, and the overwrite
# sextractor binary with one compiled from sources (so you will have access
# to sextractor configuration files, which program assumes).
#
# (C) 2002-2008 Stanislav Vitek
# (C) 2002-2010 Martin Jelinek
# (C) 2009-2010 Markus Wildi
# (C) 2010-2014 Petr Kubanek, Institute of Physics <kubanek@fzu.cz>
# (C) 2010 Francisco Forster Buron, Universidad de Chile
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from rts2 import scriptcomm
from rts2 import sextractor
from scottSock import scottSock
sepPresent = False
try:
import sep
sepPresent = True
except Exception as ex:
pass
from pylab import *
from scipy import *
from scipy import optimize
import numpy
import pickle
# Fit-type identifiers accepted by Focusing.doFit()/tryFit().
LINEAR = 0
"""Linear fit"""
P2 = 1
"""Fit using 2 power polynomial"""
P4 = 2
"""Fit using 4 power polynomial"""
H3 = 3
"""Fit using general Hyperbola (three free parameters)"""
H2 = 4
"""Fit using Hyperbola with fixed slope at infinity (two free parameters)"""
class Focusing (scriptcomm.Rts2Comm):
"""Take and process focussing data."""
def __init__(self,exptime = 10,step=20,attempts=10,filterGalaxies=False):
    """Collect focusing parameters.

    exptime        -- exposure time in seconds for each test image
    step           -- focuser offset step between exposures
    attempts       -- number of exposures/offsets to try
    filterGalaxies -- forwarded to the SExtractor FWHM measurement
    """
    scriptcomm.Rts2Comm.__init__(self)
    self.log('I', 'This is a test')
    self.exptime = exptime
    self.step = step
    self.focuser = "F0"  # overwritten in run() from the 'focuser' value
    self.attempts = attempts
    # if |offset| is above this value, try linear fit
    self.linear_fit = self.step * self.attempts / 2.0
    # target FWHM for linear fit
    self.linear_fit_fwhm = 3.5
    self.filterGalaxies = filterGalaxies
def doFit(self, fit):
    """Fit self.fwhm as a function of self.focpos with the requested model
    and return the focuser offset at the fit's optimum.

    fit is one of LINEAR, P2, P4, H3 or H2.  The fitted coefficients are
    stored in self.fwhm_poly.  Raises Exception on an unknown fit type.
    """
    b = None
    errfunc = None
    fitfunc_r = None
    p0 = None

    # try to fit..
    # this function is for flux..
    #fitfunc = lambda p, x: p[0] * p[4] / (p[4] + p[3] * (abs(x - p[1])) ** (p[2]))

    # prepare fit based on its type..
    if fit == LINEAR:
        fitfunc = lambda p, x: p[0] + p[1] * x
        errfunc = lambda p, x, y: fitfunc(p, x) - y  # LINEAR - distance to the target function
        p0 = [1, 1]
        fitfunc_r = lambda x, p0, p1: p0 + p1 * x
    elif fit == P2:
        fitfunc = lambda p, x: p[0] + p[1] * x + p[2] * (x ** 2)
        errfunc = lambda p, x, y: fitfunc(p, x) - y  # P2 - distance to the target function
        p0 = [1, 1, 1]
        fitfunc_r = lambda x, p0, p1, p2: p0 + p1 * x + p2 * (x ** 2)
    elif fit == P4:
        fitfunc = lambda p, x: p[0] + p[1] * x + p[2] * (x ** 2) + p[3] * (x ** 3) + p[4] * (x ** 4)
        errfunc = lambda p, x, y: fitfunc(p, x) - y  # P4 - distance to the target function
        p0 = [1, 1, 1, 1, 1]
        # BUG FIX: the restated fit function must accept all five
        # coefficients; p2..p4 were previously undefined names, so the
        # fmin minimization below raised NameError for P4 fits.
        fitfunc_r = lambda x, p0, p1, p2, p3, p4: p0 + p1 * x + p2 * (x ** 2) + p3 * (x ** 3) + p4 * (x ** 4)
    elif fit == H3:
        fitfunc = lambda p, x: sqrt(p[0] ** 2 + p[1] ** 2 * (x - p[2])**2)
        errfunc = lambda p, x, y: fitfunc(p, x) - y  # H3 - distance to the target function
        p0 = [400., 3.46407715307, self.fwhm_MinimumX]  # initial guess based on real data
        fitfunc_r = lambda x, p0, p1, p2: sqrt(p0 ** 2 + p1 ** 2 * (x - p2) ** 2)
    elif fit == H2:
        fitfunc = lambda p, x: sqrt(p[0] ** 2 + 3.46407715307 ** 2 * (x - p[1])**2)  # 3.46 based on H3 fits
        errfunc = lambda p, x, y: fitfunc(p, x) - y  # H2 - distance to the target function
        p0 = [400., self.fwhm_MinimumX]  # initial guess based on real data
        fitfunc_r = lambda x, p0, p1: sqrt(p0 ** 2 + 3.46407715307 ** 2 * (x - p1) ** 2)
    else:
        raise Exception('Unknown fit type {0}'.format(fit))

    self.fwhm_poly, success = optimize.leastsq(errfunc, p0[:], args=(self.focpos, self.fwhm))

    b = None
    if fit == LINEAR:
        # Offset where the linear fit reaches the target FWHM.
        b = (self.linear_fit_fwhm - self.fwhm_poly[0]) / self.fwhm_poly[1]
    elif fit == H3:
        b = self.fwhm_poly[2]
        self.log('I', 'found minimum FWHM: {0}'.format(abs(self.fwhm_poly[0])))
        self.log('I', 'found slope at infinity: {0}'.format(abs(self.fwhm_poly[1])))
    elif fit == H2:
        b = self.fwhm_poly[1]
        self.log('I', 'found minimum FWHM: {0}'.format(abs(self.fwhm_poly[0])))
    else:
        # Minimize the restated fit function numerically; args must be a
        # tuple so fmin unpacks the coefficients as separate arguments.
        b = optimize.fmin(fitfunc_r, self.fwhm_MinimumX, args=tuple(self.fwhm_poly), disp=0)[0]
    self.log('I', 'found FWHM minimum at offset {0}'.format(b))
    return b
def tryFit(self,defaultFit):
    """Try fit, change to linear fit if outside allowed range.

    Returns (best_offset, fit_type_used).  Falls back to an H2 fit and
    then a LINEAR fit whenever the fitted optimum lies further than
    self.linear_fit from the average focuser position.
    """
    b = self.doFit(defaultFit)
    if (abs(b - numpy.average(self.focpos)) >= self.linear_fit):
        self.log('W','cannot do find best FWHM inside limits, trying H2 fit - best fit is {0}, average focuser position is {1}'.format(b, numpy.average(self.focpos)))
        b = self.doFit(H2)
        if (abs(b - numpy.average(self.focpos)) >= self.linear_fit):
            self.log('W','cannot do find best FWHM inside limits, trying linear fit - best fit is {0}, average focuser position is {1}'.format(b, numpy.average(self.focpos)))
            b = self.doFit(LINEAR)
            return b,LINEAR
        return b,H2
    return b,defaultFit
def doFitOnArrays(self, fwhm, focpos, defaultFit):
    """Fit the supplied FWHM/focuser-position sequences.

    Returns tryFit()'s (best_offset, fit_type) tuple.
    """
    self.fwhm = array(fwhm)
    self.focpos = array(focpos)
    # Index of the first smallest FWHM; used as the fit's initial guess.
    self.fwhm_MinimumX = 0
    smallest = fwhm[0]
    for idx, value in enumerate(fwhm):
        if value < smallest:
            self.fwhm_MinimumX = idx
            smallest = value
    return self.tryFit(defaultFit)
def findBestFWHM(self, tries, defaultFit=P2, min_stars=95, ds9display=False, threshold=2.7, deblendmin=0.03):
    """Measure per-offset FWHM with SExtractor.

    tries maps focuser offset -> image path.  Returns
    (focpos, fwhms, fwhm_min, fwhm_MinimumX).  Failed images are logged
    and skipped.

    NOTE(review): this definition is shadowed by the later findBestFWHM
    below; given that the later one calls self.__sexFindFWHM (undefined
    here), this body was likely meant to be that helper — confirm.

    Bug fixes: the loop previously appended to an undefined local
    'focpos', called .append on the float 'fwhm' (shadowing the result
    list), and returned undefined locals.
    """
    focpos = []
    fwhms = []
    fwhm_min = None
    fwhm_MinimumX = None
    keys = list(tries.keys())
    keys.sort()
    sextr = sextractor.Sextractor(threshold=threshold, deblendmin=deblendmin)
    for k in keys:
        try:
            sextr.runSExtractor(tries[k])
            fwhm, fwhms_all, nstars = sextr.calculate_FWHM(min_stars, self.filterGalaxies)
        except Exception as ex:
            self.log('W', 'offset {0}: {1}'.format(k, ex))
            continue
        self.log('I', 'offset {0} fwhm {1} with {2} stars'.format(k, fwhm, nstars))
        focpos.append(k)
        fwhms.append(fwhm)
        if fwhm_min is None or fwhm < fwhm_min:
            fwhm_MinimumX = k
            fwhm_min = fwhm
    return focpos, fwhms, fwhm_min, fwhm_MinimumX
def __sepFindFWHM(self, tries):
    """Measure the median FWHM of every image in *tries* using SEP.

    tries maps focuser offset -> FITS path.  Returns
    (focpos, fwhm, fwhm_min, fwhm_MinimumX) with one median FWHM per
    offset.  Failing images are logged and skipped.
    """
    from astropy.io import fits
    import math
    import traceback
    focpos = []
    fwhm = []
    fwhm_min = None
    fwhm_MinimumX = None
    keys = list(tries.keys())
    keys.sort()
    ln2 = math.log(2)
    # Keep the last extraction result bound so the debugging dump in the
    # except branch never references an unbound name.
    sources = None
    for k in keys:
        try:
            fwhms = []
            ff = fits.open(tries[k])
            # loop on images..
            for i in range(1, len(ff) - 1):
                data = ff[i].data
                # numpy.float (an alias of builtin float) was removed in
                # NumPy 1.24; use float directly.
                bkg = sep.Background(numpy.array(data, float))
                sources = sep.extract(data - bkg, 5.0 * bkg.globalrms)
                self.log('I', 'bkg gobalrms {}'.format(bkg.globalrms))
                for s in sources:
                    # s[15], s[16]: presumably the ellipse semi-axes a, b —
                    # confirm against sep's extraction field order.
                    fwhms.append(2 * math.sqrt(ln2 * (s[15] ** 2 + s[16] ** 2)))
            im_fwhm = numpy.median(fwhms)
            # find median from fwhms measurements..
            self.log('I', 'median fwhm {}'.format(numpy.median(fwhms)))
            self.log('I', 'offset {0} fwhm {1} with {2} stars'.format(k, im_fwhm, len(fwhms)))
            focpos.append(k)
            fwhm.append(im_fwhm)
            if (fwhm_min is None or im_fwhm < fwhm_min):
                fwhm_MinimumX = k
                fwhm_min = im_fwhm
        except Exception as ex:
            self.log('W', 'offset {0}: {1} {2}'.format(k, ex, traceback.format_exc()))
            self.log('I', 'pickling')
            # BUG FIX: pickle requires a binary-mode file; 'w' breaks
            # under Python 3.  The with-block also guarantees the file
            # is closed on error.
            with open("rts2.pkl", 'wb') as fd:
                pickle.dump(sources, fd)
    return focpos, fwhm, fwhm_min, fwhm_MinimumX
def findBestFWHM(self,tries,defaultFit=H3,min_stars=15,ds9display=False,threshold=2.7,deblendmin=0.03):
    """Measure FWHM for every offset image and fit a focus curve.

    Uses SEP when available, otherwise falls back to SExtractor, then
    delegates the curve fitting to tryFit().
    """
    # X is FWHM, Y is offset value
    self.focpos = []
    self.fwhm = []
    self.fwhm_min = None
    self.fwhm_MinimumX = None
    if sepPresent:
        measured = self.__sepFindFWHM(tries)
    else:
        measured = self.__sexFindFWHM(tries,threshold,deblendmin)
    self.focpos, self.fwhm, self.fwhm_min, self.fwhm_MinimumX = measured
    self.focpos = array(self.focpos)
    self.fwhm = array(self.fwhm)
    return self.tryFit(defaultFit)
def beforeReadout(self):
    """Record the current focuser position and set the offset for the next shot.

    On the last exposure of the series the temporary offset is reset to 0;
    otherwise it advances by one step.
    """
    self.current_focus = self.getValueFloat('FOC_POS',self.focuser)
    is_last = (self.num == self.attempts)
    if is_last:
        next_offset = 0
    else:
        self.off += self.step
        next_offset = self.off
    self.setValue('FOC_TOFF',next_offset,self.focuser)
def takeImages(self):
    """Take the focusing exposure series and return the best-FWHM fit.

    Sweeps the focuser offset symmetrically around 0 over self.attempts
    exposures (beforeReadout advances the offset between shots) and maps
    the focuser position of each shot to its image path.

    Returns:
        The result of findBestFWHM() on the collected images.
    """
    self.setValue('exposure',self.exptime)
    self.setValue('SHUTTER','LIGHT')
    # start at the negative half of the sweep.  NOTE(review): this is true
    # division, so an odd self.attempts yields a fractional half-sweep
    # offset -- confirm an integer step count was not intended
    self.off = -1 * self.step * (self.attempts / 2)
    self.setValue('FOC_TOFF',self.off,self.focuser)
    tries = {}
    # must be overwritten in beforeReadout
    self.current_focus = None
    for self.num in range(1,self.attempts+1):
        self.log('I','starting {0}s exposure on offset {1}'.format(self.exptime,self.off))
        # beforeReadout runs during the exposure readout and records
        # self.current_focus used as the key below
        img = self.exposure(self.beforeReadout,'%b/foc_%N_{0}.fits'.format(self.num))
        tries[self.current_focus] = img
    self.log('I','all focusing exposures finished, processing data')
    return self.findBestFWHM(tries)
def run(self):
    """Run the autofocus sequence; refine with a second sweep after a linear fit."""
    self.focuser = self.getValue('focuser')
    # send to some other coordinates if you wish so, or disable this for target for fixed coordinates
    #self.altaz (89,90)
    best, fit_kind = self.takeImages()
    if fit_kind == LINEAR:
        # a linear fit cannot bracket the minimum -- move there and sweep again
        self.setValue('FOC_DEF',best,self.focuser)
        best, fit_kind = self.takeImages()
    self.setValue('FOC_DEF',best,self.focuser)
def plotFit(self,b,ftype):
    """Plot the measured FWHM points together with the fitted curve.

    Args:
        b: fit result (unused here -- the curve is drawn from
           self.fwhm_poly; kept for interface compatibility).
        ftype: one of the LINEAR/P2/P4/H3/H2 fit-type constants.

    Raises:
        Exception: if ftype is not a known fit type.
    """
    # dispatch table instead of an if/elif chain; one lambda per fit type
    fitfuncs = {
        LINEAR: lambda p, x: p[0] + p[1] * x,
        P2: lambda p, x: p[0] + p[1] * x + p[2] * (x ** 2),
        P4: lambda p, x: p[0] + p[1] * x + p[2] * (x ** 2) + p[3] * (x ** 3) + p[4] * (x ** 4),
        H3: lambda p, x: sqrt(p[0] ** 2 + p[1] ** 2 * (x - p[2]) ** 2),
        # 3.46 based on HYPERBOLA fits
        H2: lambda p, x: sqrt(p[0] ** 2 + 3.46407715307 ** 2 * (x - p[1]) ** 2),
    }
    try:
        fitfunc = fitfuncs[ftype]
    except KeyError:
        # bug fix: "Unknow" -> "Unknown" in the error message
        raise Exception('Unknown fit type {0}'.format(ftype))
    x = linspace(self.focpos.min() - 1, self.focpos.max() + 1)
    plot (self.focpos, self.fwhm, "r+", x, fitfunc(self.fwhm_poly, x), "r-")
    show()
def to_dataserver( fname, outfile='test.fits', clobber=True ):
    """Send a FITS file to the data server over a raw socket.

    A fixed 256-byte ASCII header (size, clobber flag, destination path,
    width, height) is sent, followed by the raw file contents, in 1024-byte
    chunks.

    Args:
        fname: path of the local FITS file to send.
        outfile: file name appended to the server-side data directory.
        clobber: when True, prefix the destination with '!' so the server
            overwrites an existing file.
    """
    fitsfd = fits.open( fname )
    # accumulate image dimensions over all HDUs that carry data;
    # NOTE(review): summing shapes across extensions looks odd for a
    # single image -- confirm the server expects totals
    width = 0
    height = 0
    for ext in fitsfd:
        if getattr(ext, 'data', None) is not None:
            width += ext.data.shape[0]
            height += ext.data.shape[1]
    fitsfd.close()
    fsize = os.stat(fname).st_size
    clobber_char = '!' if clobber else ''
    meta = " {} {}{} 1 {} {} 0".format( fsize, clobber_char, '/home/bigobs/data/rts2'+outfile, width, height )
    # pad the header to a fixed 256-byte block
    meta = meta + (256-len(meta))*' '
    # bug fix: the file is opened in binary mode, so the text header must
    # be encoded before concatenation (str + bytes raises TypeError on
    # Python 3); `with` guarantees the handle is closed
    with open(fname, 'rb') as fd:
        data = meta.encode('ascii') + fd.read()
    soc = scottSock( '10.30.1.1', 6543 )
    sent_total = 0
    while sent_total < len(data):
        # send() may transmit fewer bytes than requested; advance by the
        # amount actually sent
        sent_total += soc.send( data[sent_total:sent_total+1024] )
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
This module is used to perform any operations on nested structures, which can be
specified as sequences that contain non-sequence elements or other sequences.
The utilities here assume (and do not check) that the nested structures form a
'tree', i.e. no references in the structure of the input of these functions
should be recursive.
@@assert_same_structure
@@is_sequence
@@flatten
@@flatten_dict_items
@@pack_sequence_as
@@map_structure
@@assert_shallow_structure
@@flatten_up_to
@@map_structure_up_to
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
def _sequence_like(instance, args):
  """Converts the sequence `args` to the same type as `instance`.

  Args:
    instance: an instance of `tuple`, `list`, or a `namedtuple` class.
    args: elements to be converted to a sequence.

  Returns:
    `args` with the type of `instance`.
  """
  # collections.Sequence was a deprecated alias removed in Python 3.10;
  # collections.abc.Sequence (available since 3.3) is the replacement.
  # six.string_types is just `str` on Python 3, dropping the six dependency.
  if (isinstance(instance, tuple) and
      hasattr(instance, "_fields") and
      isinstance(instance._fields, collections.abc.Sequence) and
      all(isinstance(f, str) for f in instance._fields)):
    # This is a namedtuple: its constructor takes positional field values.
    return type(instance)(*args)
  else:
    # Not a namedtuple: the constructor takes a single iterable.
    return type(instance)(args)
def _yield_flat_nest(nest):
  """Yields the leaves of `nest` in depth-first, left-to-right order."""
  for item in nest:
    if not is_sequence(item):
      yield item
    else:
      for leaf in _yield_flat_nest(item):
        yield leaf
def is_sequence(seq):
  """Returns true if its input is a collections.abc.Sequence (except strings).

  Args:
    seq: an input sequence.

  Returns:
    True if the sequence is not a string and is a collections.abc.Sequence.
  """
  # collections.Sequence was a deprecated alias removed in Python 3.10;
  # collections.abc.Sequence is the supported spelling.  six.string_types
  # is just `str` on Python 3, dropping the six dependency.
  return (isinstance(seq, collections.abc.Sequence)
          and not isinstance(seq, str))
def flatten(nest):
  """Returns a flat list of the leaves of a nested structure.

  If `nest` is not a sequence, this returns a single-element list:
  `[nest]`.  Note: numpy arrays are treated as scalars.

  Args:
    nest: an arbitrarily nested structure or a scalar object.

  Returns:
    A Python list, the flattened version of the input.
  """
  if not is_sequence(nest):
    return [nest]
  return list(_yield_flat_nest(nest))
def _recursive_assert_same_structure(nest1, nest2):
  """Recursion helper for `assert_same_structure`."""
  seq1 = is_sequence(nest1)
  seq2 = is_sequence(nest2)
  if seq1 != seq2:
    raise ValueError(
        "The two structures don't have the same nested structure. "
        "First structure: %s, second structure: %s." % (nest1, nest2))
  if not seq1:
    return
  if type(nest1) != type(nest2):
    raise TypeError(
        "The two structures don't have the same sequence type. First "
        "structure has type %s, while second structure has type %s."
        % (type(nest1), type(nest2)))
  for child1, child2 in zip(nest1, nest2):
    _recursive_assert_same_structure(child1, child2)
def assert_same_structure(nest1, nest2):
  """Asserts that two structures are nested in the same way.

  Args:
    nest1: an arbitrarily nested structure.
    nest2: an arbitrarily nested structure.

  Raises:
    ValueError: If the two structures do not have the same number of
      elements or if the two structures are not nested in the same way.
    TypeError: If the two structures differ in the type of sequence in any
      of their substructures.
  """
  count1 = len(flatten(nest1)) if is_sequence(nest1) else 1
  count2 = len(flatten(nest2)) if is_sequence(nest2) else 1
  if count1 != count2:
    raise ValueError("The two structures don't have the same number of "
                     "elements. First structure: %s, second structure: %s."
                     % (nest1, nest2))
  _recursive_assert_same_structure(nest1, nest2)
def flatten_dict_items(dictionary):
  """Returns a dictionary with flattened keys and values.

  Flattens the keys and values of `dictionary` (which may be arbitrarily
  nested structures), e.g.

  ```python
  example_dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", "d"))}
  result = {4: "a", 5: "b", 6: "c", 8: "d"}
  flatten_dict_items(example_dictionary) == result
  ```

  The input dictionary must satisfy two properties:

  1. Its keys and values should have the same exact nested structure.
  2. The set of all flattened keys of the dictionary must not contain
     repeated keys.

  Args:
    dictionary: the dictionary to zip

  Returns:
    The zipped dictionary.

  Raises:
    TypeError: If the input is not a dictionary.
    ValueError: If any key and value have not the same structure, or if
      keys are not unique.
  """
  if not isinstance(dictionary, dict):
    raise TypeError("input must be a dictionary")
  flat_dictionary = {}
  # .items() replaces six.iteritems: identical semantics on Python 3 and
  # drops the third-party `six` dependency.
  for i, v in dictionary.items():
    if not is_sequence(i):
      if i in flat_dictionary:
        raise ValueError(
            "Could not flatten dictionary: key %s is not unique." % i)
      flat_dictionary[i] = v
    else:
      flat_i = flatten(i)
      flat_v = flatten(v)
      if len(flat_i) != len(flat_v):
        raise ValueError(
            "Could not flatten dictionary. Key had %d elements, but value had "
            "%d elements. Key: %s, value: %s."
            % (len(flat_i), len(flat_v), flat_i, flat_v))
      for new_i, new_v in zip(flat_i, flat_v):
        if new_i in flat_dictionary:
          raise ValueError(
              "Could not flatten dictionary: key %s is not unique."
              % (new_i))
        flat_dictionary[new_i] = new_v
  return flat_dictionary
def _packed_nest_with_indices(structure, flat, index):
  """Packs `flat[index:]` into the nested shape of `structure`.

  Args:
    structure: substructure (tuple of elements and/or tuples) to mimic.
    flat: flattened values to output substructure for.
    index: index at which to start reading from `flat`.

  Returns:
    A `(new_index, packed)` pair: the updated read position into `flat`,
    and the consumed values arranged like `structure`.

  Raises:
    ValueError: if `structure` contains more elements than `flat`
      (assuming indexing starts from `index`).
  """
  packed = []
  for child in structure:
    if not is_sequence(child):
      packed.append(flat[index])
      index += 1
    else:
      index, grandchildren = _packed_nest_with_indices(child, flat, index)
      packed.append(_sequence_like(child, grandchildren))
  return index, packed
def pack_sequence_as(structure, flat_sequence):
  """Packs a flat sequence into the nested shape of `structure`.

  If `structure` is a scalar, `flat_sequence` must be a single-element
  list; in this case the return value is `flat_sequence[0]`.

  Args:
    structure: tuple or list constructed of scalars and/or other
      tuples/lists, or a scalar.  Note: numpy arrays are considered
      scalars.
    flat_sequence: flat sequence to pack.

  Returns:
    `flat_sequence` converted to have the same recursive structure as
    `structure`.

  Raises:
    ValueError: If nest and structure have different element counts.
  """
  if not is_sequence(flat_sequence):
    raise TypeError("flat_sequence must be a sequence")
  if not is_sequence(structure):
    if len(flat_sequence) != 1:
      raise ValueError("Structure is a scalar but len(flat_sequence) == %d > 1"
                       % len(flat_sequence))
    return flat_sequence[0]
  n_structure = len(flatten(structure))
  n_flat = len(flat_sequence)
  if n_structure != n_flat:
    raise ValueError(
        "Could not pack sequence. Structure had %d elements, but flat_sequence "
        "had %d elements. Structure: %s, flat_sequence: %s."
        % (n_structure, n_flat, structure, flat_sequence))
  _, packed = _packed_nest_with_indices(structure, flat_sequence, 0)
  return _sequence_like(structure, packed)
def map_structure(func, *structure):
  """Applies `func` leaf-wise across identically-nested structures.

  Applies `func(x[0], x[1], ...)` where `x[i]` is an entry in
  `structure[i]`.  All structures must have the same arity; the return
  value keeps the shared nesting.

  Args:
    func: A callable that accepts as many arguments as there are
      structures.
    *structure: scalar, or tuple or list of constructed scalars and/or
      other tuples/lists, or scalars.  Note: numpy arrays are considered
      scalars.

  Returns:
    A new structure with the same arity as `structure`, whose values
    correspond to `func(x[0], x[1], ...)` applied location-wise.

  Raises:
    TypeError: If `func` is not callable or if the structures do not match
      each other by depth tree.
    ValueError: If no structure is provided or if the structures do not
      match each other by type.
  """
  if not callable(func):
    raise TypeError("func must be callable, got: %s" % func)
  if not structure:
    raise ValueError("Must provide at least one structure")
  first = structure[0]
  for other in structure[1:]:
    assert_same_structure(first, other)
  flattened = [flatten(s) for s in structure]
  mapped = [func(*leaves) for leaves in zip(*flattened)]
  return pack_sequence_as(first, mapped)
def _yield_flat_up_to(shallow_tree, input_tree):
  """Yields elements of `input_tree`, flattened only down to `shallow_tree`."""
  if not is_sequence(shallow_tree):
    yield input_tree
  else:
    for shallow_branch, input_branch in zip(shallow_tree, input_tree):
      for partial_leaf in _yield_flat_up_to(shallow_branch, input_branch):
        yield partial_leaf
def assert_shallow_structure(shallow_tree, input_tree):
  """Asserts that `shallow_tree` is a structural prefix of `input_tree`.

  `input_tree` must be obtainable from `shallow_tree` by replacing the
  leaves of `shallow_tree` with deeper tree structures; for example
  `["a", "b"]` is a valid shallow structure of `["c", ["d", "e"]]` but not
  of `["c", ["d", "e"], "f"]`.

  Args:
    shallow_tree: an arbitrarily nested structure.
    input_tree: an arbitrarily nested structure.

  Raises:
    TypeError: If `shallow_tree` is a sequence but `input_tree` is not, or
      if their sequence types differ.
    ValueError: If corresponding sequence lengths differ.
  """
  # Guard clause: a scalar shallow tree matches anything.
  if not is_sequence(shallow_tree):
    return
  if not is_sequence(input_tree):
    raise TypeError(
        "If shallow structure is a sequence, input must also be a sequence. "
        "Input has type: %s." % type(input_tree))
  if not isinstance(input_tree, type(shallow_tree)):
    raise TypeError(
        "The two structures don't have the same sequence type. Input "
        "structure has type %s, while shallow structure has type %s."
        % (type(input_tree), type(shallow_tree)))
  if len(input_tree) != len(shallow_tree):
    raise ValueError(
        "The two structures don't have the same sequence length. Input "
        "structure has length %s, while shallow structure has length %s."
        % (len(input_tree), len(shallow_tree)))
  for shallow_branch, input_branch in zip(shallow_tree, input_tree):
    assert_shallow_structure(shallow_branch, input_branch)
def flatten_up_to(shallow_tree, input_tree):
  """Flattens `input_tree` only as deep as `shallow_tree`.

  The leaves of `shallow_tree` mark where flattening stops; anything
  deeper in `input_tree` is kept intact as an element of the output list.
  If both arguments are non-sequences the result is `[input_tree]`.

  Examples:

  ```python
  input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
  shallow_tree = [[True, True], [False, True]]
  flatten_up_to(shallow_tree, input_tree)   # [[2, 2], [3, 3], [4, 9], [5, 5]]
  flatten_up_to(shallow_tree, shallow_tree) # [True, True, False, True]
  ```

  Non-Sequence edge cases:

  ```python
  flatten_up_to(0, 0)                  # [0]
  flatten_up_to(0, [0, 1, 2])          # [[0, 1, 2]]
  flatten_up_to([0, 1, 2], 0)          # TypeError
  flatten_up_to([0, 1, 2], [0, 1, 2])  # [0, 1, 2]
  ```

  Args:
    shallow_tree: a possibly pruned structure of input_tree.
    input_tree: an arbitrarily nested structure or a scalar object.
      Note, numpy arrays are considered scalars.

  Returns:
    A Python list, the partially flattened version of `input_tree`
    according to the structure of `shallow_tree`.

  Raises:
    TypeError: If `shallow_tree` is a sequence but `input_tree` is not, or
      if their sequence types differ.
    ValueError: If corresponding sequence lengths differ.
  """
  assert_shallow_structure(shallow_tree, input_tree)
  partially_flat = _yield_flat_up_to(shallow_tree, input_tree)
  return list(partially_flat)
def map_structure_up_to(shallow_tree, func, *inputs):
  """Applies `func` to the `inputs` partially flattened down to `shallow_tree`.

  Each input can be thought of as having the same structure as
  `shallow_tree`, but with leaf nodes that are themselves tree structures;
  `func` receives the corresponding partially-flattened elements of every
  input, and the results are repacked into the shape of `shallow_tree`.

  Examples:

  ```python
  ab_tuple = collections.namedtuple("ab_tuple", "a, b")
  op_tuple = collections.namedtuple("op_tuple", "add, mul")
  inp_val = ab_tuple(a=2, b=3)
  inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
  map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul,
                      inp_val, inp_ops)
  # -> ab_tuple(a=6, b=15)
  ```

  Args:
    shallow_tree: a shallow tree, common to all the inputs.
    func: callable which will be applied to each input individually.
    *inputs: arbitrarily nested combination of objects that are compatible
      with shallow_tree.  `func` must support arity `len(inputs)`.

  Returns:
    Result of repeatedly applying `func`, with same structure as
    `shallow_tree`.

  Raises:
    TypeError: If `shallow_tree` is a sequence but an input is not, or if
      their sequence types differ.
    ValueError: If no input is provided, or corresponding sequence lengths
      differ.
  """
  if not inputs:
    raise ValueError("Cannot map over no sequences")
  for input_tree in inputs:
    assert_shallow_structure(shallow_tree, input_tree)
  # Flatten each input separately down to the shallow structure, apply the
  # function element-wise, then repack into the shallow shape.
  flattened = [flatten_up_to(shallow_tree, input_tree)
               for input_tree in inputs]
  results = [func(*args) for args in zip(*flattened)]
  return pack_sequence_as(structure=shallow_tree, flat_sequence=results)
| |
import os
import tempfile
from pathlib import Path
from unittest import mock
import numpy as np
import pytest
import yaml
import bigbang
from bigbang.ingress.listserv import (
ListservMessageParser,
ListservMailList,
ListservMailListDomain,
)
from config.config import CONFIG
# Scratch directory for files written by the tests below.
dir_temp = tempfile.gettempdir()
# Default path for a temporary LISTSERV mbox dump.
file_temp_mbox = dir_temp + "/listserv.mbox"
# Location of the (optional) credentials file shipped with the project config.
file_auth = CONFIG.config_path + "authentication.yaml"
# Stand-in credentials used when mocking authentication.
auth_key_mock = {"username": "bla", "password": "bla"}
@pytest.fixture(name="mlist", scope="module")
def get_maillist():
    """Mailing list loaded once per module from the 3GPP test data."""
    return ListservMailList.from_listserv_directories(
        name="3GPP_TSG_SA_ITUT_AHG",
        directorypaths=[CONFIG.test_data_path + "3GPP/3GPP_TSG_SA_ITUT_AHG/"],
    )
@pytest.fixture(name="msg_parser", scope="module")
def get_message_parser():
    """Shared ListservMessageParser instance for the module."""
    return ListservMessageParser()
@pytest.fixture(name="msg", scope="module")
def get_message(msg_parser):
    """A fully parsed message from the 3GPP test log file."""
    file_path = (
        CONFIG.test_data_path
        + "3GPP/3GPP_TSG_SA_ITUT_AHG/3GPP_TSG_SA_ITUT_AHG.LOG1705B"
    )
    return msg_parser.from_listserv_file(
        list_name="3GPP_TSG_SA_ITUT_AHG",
        file_path=file_path,
        header_start_line_nr=1,
        fields="total",
    )
class TestListservMessageParser:
    """Tests for parsing a single LISTSERV message."""

    def test__first_message_header(self, msg):
        # Header fields of the first message in the test log file.
        assert msg["From"] == "Stephen Hayes <stephen.hayes@ERICSSON.COM>"
        assert msg["Reply-To"] == "Stephen Hayes <stephen.hayes@ERICSSON.COM>"
        assert (
            msg["In-Reply-To"]
            == "<3d326663df91466eaa406d2ac87bd662@PREWE13M05.ad.sprint.com>"
        )
        assert msg["Date"] == "Mon, 08 May 2017 10:47:41 +0000"

    def test__first_message_body(self, msg):
        assert msg.get_payload().split("\n")[3] == "Hi,"
        assert len(msg.get_payload()) == 24809

    def test__to_pandas_dataframe(self, msg_parser, msg):
        df = msg_parser.to_pandas_dataframe(msg)
        assert len(df.columns.values) == 12
        assert len(df.index.values) == 1

    def test__to_mbox(self, msg_parser, msg):
        mbox_path = f"{dir_temp}/bigbang_test_listserv.mbox"
        msg_parser.to_mbox(msg, filepath=mbox_path)
        # bug fix: use a context manager so the handle is closed even when
        # an assertion fails (the original leaked the open file on failure)
        with open(mbox_path, "r") as f:
            lines = f.readlines()
        assert len(lines) == 638
        assert "See my comments below.\n" in lines
        Path(mbox_path).unlink()
class TestListservMailList:
    """Tests for loading and exporting a single LISTSERV mailing list."""

    def test__from_mbox(self):
        mlist_name = "3GPP_TSG_SA_WG4_EVS"
        mlist = ListservMailList.from_mbox(
            name=mlist_name,
            filepath=CONFIG.test_data_path + f"3GPP_mbox/{mlist_name}.mbox",
        )
        assert len(mlist) == 50
        assert (
            mlist.messages[0]["From"]
            == "Tomas =?utf-8?q?Toftg=C3=A5rd?= <tomas.toftgard@ERICSSON.COM>"
        )

    def test__from_listserv_files(self):
        filepath = (
            CONFIG.test_data_path
            + "3GPP/3GPP_TSG_SA_ITUT_AHG/3GPP_TSG_SA_ITUT_AHG.LOG1703B"
        )
        mlist = ListservMailList.from_listserv_files(
            name="3GPP_TSG_SA_ITUT_AHG",
            filepaths=[filepath],
        )
        assert len(mlist) == 1
        assert (
            mlist.messages[0]["From"] == "Kevin Holley <kevin.holley@BT.COM>"
        )

    def test__number_of_messages(self, mlist):
        assert len(mlist) == 25

    def test__to_dict(self, mlist):
        dic = mlist.to_dict()
        keys = list(dic.keys())
        lengths = [len(value) for value in dic.values()]
        assert len(keys) == 13
        # every column must have the same number of rows
        assert all([diff == 0 for diff in np.diff(lengths)])
        assert lengths[0] == 25

    def test__to_mbox(self, mlist):
        mlist.to_mbox(dir_temp, filename=mlist.name)
        mbox_path = f"{dir_temp}/{mlist.name}.mbox"
        # bug fix: context manager closes the handle even when an
        # assertion fails (the original leaked the open file on failure)
        with open(mbox_path, "r") as f:
            lines = f.readlines()
        assert len(lines) >= 48940
        assert "What do you think of the approach?\n" in lines
        Path(mbox_path).unlink()

    def test__missing_date_in_message(self, mlist):
        """A message without a Date header keeps ``None`` and still round-trips to mbox."""
        msg = [
            msg
            for msg in mlist.messages
            if msg["Subject"] == "R: How to proceed with ITUT-AH"
        ][0]
        assert msg["Date"] is None
        mbox_path = f"{dir_temp}/msg_test.mbox"
        ListservMessageParser().to_mbox(msg, filepath=mbox_path)
        with open(mbox_path, "r") as f:
            lines = f.readlines()
        assert len(lines) == 547
        assert "Inviato: mercoled=3DEC 15 marzo 2017 16:06\n" in lines
        Path(mbox_path).unlink()
class TestListservMailListDomain:
    """Tests for loading a whole archive (domain) of LISTSERV lists."""

    def test__from_mbox(self):
        march = ListservMailListDomain.from_mbox(
            name="3GPP_mbox_test",
            directorypath=CONFIG.test_data_path + "3GPP_mbox/",
        )
        mlist_names = [mlist.name for mlist in march.lists]
        mlist_index = mlist_names.index("3GPP_TSG_SA_WG4_EVS")
        assert len(march.lists) == 2
        assert len(march.lists[mlist_index].messages) == 50
        assert (
            march.lists[mlist_index].messages[0]["From"]
            == "Tomas =?utf-8?q?Toftg=C3=A5rd?= <tomas.toftgard@ERICSSON.COM>"
        )

    @pytest.fixture(name="mlistdom", scope="session")
    def get_mailarchive(self):
        return ListservMailListDomain.from_listserv_directory(
            name="3GPP",
            directorypath=CONFIG.test_data_path + "3GPP/",
        )

    def test__mailinglist_in_archive(self, mlistdom):
        assert mlistdom.name == "3GPP"
        mlist_names = [mlist.name for mlist in mlistdom.lists]
        assert "3GPP_TSG_SA_ITUT_AHG" in mlist_names
        assert "3GPP_TSG_SA_WG2_MTCE" in mlist_names
        ahg_index = mlist_names.index("3GPP_TSG_SA_ITUT_AHG")
        mtce_index = mlist_names.index("3GPP_TSG_SA_WG2_MTCE")
        # NOTE: globals share the sizes with test__to_dict below, which
        # relies on test execution order -- kept for compatibility.
        global mlist_ahg_length, mlist_mtce_length
        mlist_ahg_length = len(mlistdom.lists[ahg_index])
        mlist_mtce_length = len(mlistdom.lists[mtce_index])
        assert mlist_ahg_length == 25
        assert mlist_mtce_length == 57

    def test__message_in_mailinglist_in_archive(self, mlistdom):
        mlist_names = [mlist.name for mlist in mlistdom.lists]
        mtce_index = mlist_names.index("3GPP_TSG_SA_WG2_MTCE")
        msg = [
            msg
            for msg in mlistdom.lists[mtce_index].messages
            if msg["Subject"] == "test email - please ignore"
        ][0]
        assert msg["From"] == '"Jain, Puneet" <puneet.jain@INTEL.COM>'
        assert msg["Reply-To"] == '"Jain, Puneet" <puneet.jain@INTEL.COM>'
        assert msg["Date"] == "Thu, 28 Feb 2013 18:58:18 +0000"

    def test__to_dict(self, mlistdom):
        dic = mlistdom.to_dict()
        keys = list(dic.keys())
        lengths = [len(value) for value in dic.values()]
        assert len(keys) == 14
        assert all([diff == 0 for diff in np.diff(lengths)])
        assert lengths[0] == (mlist_ahg_length + mlist_mtce_length)

    def test__to_mbox(self, mlistdom):
        mlistdom.to_mbox(dir_temp)
        file_dic = {
            f"{dir_temp}/{mlistdom.name}/3GPP_TSG_SA_ITUT_AHG.mbox": 40000,
            f"{dir_temp}/{mlistdom.name}/3GPP_TSG_SA_WG2_MTCE.mbox": 60000,
        }
        for filepath, line_nr in file_dic.items():
            assert Path(filepath).is_file()
            # bug fix: context manager closes the handle even when an
            # assertion fails (the original leaked the open file)
            with open(filepath, "r") as f:
                lines = f.readlines()
            assert line_nr < len(lines)
            Path(filepath).unlink()
        Path(f"{dir_temp}/{mlistdom.name}/").rmdir()
| |
from django.core.exceptions import NON_FIELD_ERRORS, FieldDoesNotExist, ValidationError
# bug fix: Python consults ``__all__`` (lowercase) for ``import *``; the
# original ``__ALL__`` spelling has no effect on the interpreter.
__all__ = (
    "_get_pk_val",
    "serializable_value",
    "full_clean",
    "_get_unique_checks",
    "_perform_unique_checks",
    "_perform_date_checks",
    "validate_unique",
    "clean",
    "clean_fields",
)
# Backward-compatible alias in case anything referenced the old misspelling.
__ALL__ = __all__
def _get_pk_val(self, meta=None):
    """Return the primary-key value of this instance.

    Args:
        meta: optional model options object; defaults to ``self._meta``.
    """
    opts = meta or self._meta
    return getattr(self, opts.pk.attname)
def serializable_value(self, field_name):
    """Return the value of the field named ``field_name``, ready for serialization.

    For real model fields this reads the raw attribute (``attname``, i.e.
    the FK id rather than the related object); names that are not fields
    fall back to plain attribute access.
    """
    try:
        field = self._meta.get_field(field_name)
    except FieldDoesNotExist:
        # Not a model field -- return the plain attribute.
        return getattr(self, field_name)
    return getattr(self, field.attname)
def full_clean(self, exclude=None, validate_unique=True):
    """Run clean_fields(), clean() and (optionally) validate_unique().

    Args:
        exclude: iterable of field names to skip during validation.
        validate_unique: when True, also run uniqueness checks on the
            fields that passed the earlier steps.

    Raises:
        ValidationError: carrying all collected errors, keyed by field
            name (or NON_FIELD_ERRORS).
    """
    # Taken from django.db.models.base
    errors = {}
    if exclude is None:
        exclude = []
    else:
        # copy so the caller's iterable is not mutated below
        exclude = list(exclude)
    try:
        self.clean_fields(exclude=exclude)
    except ValidationError as e:
        errors = e.update_error_dict(errors)
    # Form.clean() is run even if other validation fails, so do the
    # same with Model.clean() for consistency.
    try:
        self.clean()
    except ValidationError as e:
        errors = e.update_error_dict(errors)
    # Run unique checks, but only for fields that passed validation.
    if validate_unique:
        for name in errors.keys():
            if name != NON_FIELD_ERRORS and name not in exclude:
                exclude.append(name)
        try:
            self.validate_unique(exclude=exclude)
        except ValidationError as e:
            errors = e.update_error_dict(errors)
    if errors:
        raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
    """Collect unique and unique_for_<date> constraints for this instance.

    Args:
        exclude: iterable of field names to omit from the checks.

    Returns:
        ``(unique_checks, date_checks)`` where ``unique_checks`` is a list
        of ``(model_class, field_name_tuple)`` and ``date_checks`` is a
        list of ``(model_class, lookup_type, field_name, unique_for_name)``.
    """
    # Taken from django.db.models.base
    if exclude is None:
        exclude = []
    unique_checks = []
    # unique_together constraints, including those declared on parents.
    unique_togethers = [(self.__class__, self._meta.unique_together)]
    for parent_class in self._meta.get_parent_list():
        if parent_class._meta.unique_together:
            unique_togethers.append((parent_class, parent_class._meta.unique_together))
    for model_class, unique_together in unique_togethers:
        for check in unique_together:
            for name in check:
                # If this is an excluded field, don't add this check.
                if name in exclude:
                    break
            else:
                # for/else: only runs when no field of the check was excluded
                unique_checks.append((model_class, tuple(check)))
    # These are checks for the unique_for_<date/year/month>.
    date_checks = []
    # Gather a list of checks for fields declared as unique and add them to
    # the list of checks.
    fields_with_class = [(self.__class__, self._meta.local_fields)]
    for parent_class in self._meta.get_parent_list():
        fields_with_class.append((parent_class, parent_class._meta.local_fields))
    for model_class, fields in fields_with_class:
        for f in fields:
            name = f.name
            if name in exclude:
                continue
            if f.unique:
                unique_checks.append((model_class, (name,)))
            if f.unique_for_date and f.unique_for_date not in exclude:
                date_checks.append((model_class, "date", name, f.unique_for_date))
            if f.unique_for_year and f.unique_for_year not in exclude:
                date_checks.append((model_class, "year", name, f.unique_for_year))
            if f.unique_for_month and f.unique_for_month not in exclude:
                date_checks.append((model_class, "month", name, f.unique_for_month))
    return unique_checks, date_checks
def _perform_unique_checks(self, unique_checks):
    """Query the database for rows violating the given unique constraints.

    Args:
        unique_checks: list of ``(model_class, field_name_tuple)`` pairs as
            produced by ``_get_unique_checks``.

    Returns:
        dict mapping field name (or NON_FIELD_ERRORS for multi-field
        checks) to a list of error messages.
    """
    # Taken from django.db.models.base
    errors = {}
    for model_class, unique_check in unique_checks:
        # Try to look up an existing object with the same values as this
        # object's values for all the unique field.
        lookup_kwargs = {}
        for field_name in unique_check:
            f = self._meta.get_field(field_name)
            lookup_value = getattr(self, f.attname)
            if lookup_value is None:
                # no value, skip the lookup
                continue
            if f.primary_key and not self._state.adding:
                # no need to check for unique primary key when editing
                continue
            lookup_kwargs[str(field_name)] = lookup_value
        # some fields were skipped, no reason to do the check
        if len(unique_check) != len(lookup_kwargs):
            continue
        qs = model_class._default_manager.filter(**lookup_kwargs)
        # Exclude the current object from the query if we are editing an
        # instance (as opposed to creating a new one)
        # Note that we need to use the pk as defined by model_class, not
        # self.pk. These can be different fields because model inheritance
        # allows single model to have effectively multiple primary keys.
        # Refs #17615.
        model_class_pk = self._get_pk_val(model_class._meta)
        if not self._state.adding and model_class_pk is not None:
            qs = qs.exclude(pk=model_class_pk)
        if qs.exists():
            if len(unique_check) == 1:
                key = unique_check[0]
            else:
                key = NON_FIELD_ERRORS
            errors.setdefault(key, []).append(
                self.unique_error_message(model_class, unique_check)
            )
    return errors
def _perform_date_checks(self, date_checks):
    """Query the database for rows violating unique_for_<date> constraints.

    Args:
        date_checks: list of ``(model_class, lookup_type, field_name,
            unique_for_name)`` tuples as produced by ``_get_unique_checks``.

    Returns:
        dict mapping field name to a list of error messages.
    """
    # Taken from django.db.models.base
    errors = {}
    for model_class, lookup_type, field, unique_for in date_checks:
        lookup_kwargs = {}
        # there's a ticket to add a date lookup, we can remove this special
        # case if that makes it's way in
        date = getattr(self, unique_for)
        if date is None:
            # no date to compare against -- skip the check entirely
            continue
        if lookup_type == "date":
            lookup_kwargs["%s__day" % unique_for] = date.day
            lookup_kwargs["%s__month" % unique_for] = date.month
            lookup_kwargs["%s__year" % unique_for] = date.year
        else:
            # lookup_type is "year" or "month": match just that component
            lookup_kwargs["%s__%s" % (unique_for, lookup_type)] = getattr(
                date, lookup_type
            )
        lookup_kwargs[field] = getattr(self, field)
        qs = model_class._default_manager.filter(**lookup_kwargs)
        # Exclude the current object from the query if we are editing an
        # instance (as opposed to creating a new one)
        if not self._state.adding and self.pk is not None:
            qs = qs.exclude(pk=self.pk)
        if qs.exists():
            errors.setdefault(field, []).append(
                self.date_error_message(lookup_type, field, unique_for)
            )
    return errors
def validate_unique(self, exclude=None):
    """Check all unique constraints on this instance.

    Args:
        exclude: iterable of field names to skip.

    Raises:
        ValidationError: if any unique or unique_for_<date> check fails.
    """
    # Taken from django.db.models.base
    unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
    errors = self._perform_unique_checks(unique_checks)
    for field, messages in self._perform_date_checks(date_checks).items():
        errors.setdefault(field, []).extend(messages)
    if errors:
        raise ValidationError(errors)
def clean(self):
    """Hook for custom model-wide validation; the default does nothing."""
    # Taken from django.db.models.base
def clean_fields(self, exclude=None):
    """Clean every non-excluded field and collect per-field errors.

    Each field's cleaned value is written back onto the instance; fields
    that are blank-able and empty are skipped.

    Args:
        exclude: iterable of field names to skip.

    Raises:
        ValidationError: mapping field name to its list of errors.
    """
    # Taken from django.db.models.base
    excluded = exclude if exclude is not None else []
    errors = {}
    for field in self._meta.fields:
        if field.name in excluded:
            continue
        raw_value = getattr(self, field.attname)
        # Skip validation for empty fields with blank=True. The developer
        # is responsible for making sure they have a valid value.
        if field.blank and raw_value in field.empty_values:
            continue
        try:
            setattr(self, field.attname, field.clean(raw_value, self))
        except ValidationError as e:
            errors[field.name] = e.error_list
    if errors:
        raise ValidationError(errors)
| |
#
# Test suite for the textwrap module.
#
# Original tests written by Greg Ward <gward@python.net>.
# Converted to PyUnit by Peter Hansen <peter@engcorp.com>.
# Currently maintained by Greg Ward.
#
# $Id$
#
import unittest
from textwrap import TextWrapper, wrap, fill, dedent, indent, shorten
class BaseTestCase(unittest.TestCase):
    '''Parent class with utility methods for textwrap tests.'''

    def show(self, textin):
        """Return *textin* rendered for a readable failure message.

        Lists come out as one indexed repr per line (or " no lines" for
        an empty list); strings as a single repr.
        """
        if isinstance(textin, list):
            result = []
            for i in range(len(textin)):
                result.append(" %d: %r" % (i, textin[i]))
            result = "\n".join(result) if result else " no lines"
        elif isinstance(textin, str):
            result = " %s\n" % repr(textin)
        else:
            # Bug fix: any other type previously fell through both
            # branches and the return raised UnboundLocalError. Render
            # its repr instead so diagnostics never crash.
            result = " %r\n" % (textin,)
        return result

    def check(self, result, expect):
        """Assert result == expect, showing both on failure."""
        self.assertEqual(result, expect,
            'expected:\n%s\nbut got:\n%s' % (
                self.show(expect), self.show(result)))

    def check_wrap(self, text, width, expect, **kwargs):
        """Wrap *text* at *width* (kwargs forwarded to wrap()) and compare."""
        result = wrap(text, width, **kwargs)
        self.check(result, expect)

    def check_split(self, text, expect):
        """Assert that self.wrapper._split() chunks *text* as expected."""
        result = self.wrapper._split(text)
        self.assertEqual(result, expect,
                         "\nexpected %r\n"
                         "but got %r" % (expect, result))
class WrapTestCase(BaseTestCase):
    """Tests for wrap()/fill() word-wrapping behaviour.

    Uses the check_wrap/check_split helpers from BaseTestCase;
    check_split relies on the TextWrapper built in setUp().
    """

    def setUp(self):
        # Width 45 is only used by check_split()-based tests.
        self.wrapper = TextWrapper(width=45)

    def test_simple(self):
        # Simple case: just words, spaces, and a bit of punctuation
        text = "Hello there, how are you this fine day? I'm glad to hear it!"
        self.check_wrap(text, 12,
                        ["Hello there,",
                         "how are you",
                         "this fine",
                         "day? I'm",
                         "glad to hear",
                         "it!"])
        self.check_wrap(text, 42,
                        ["Hello there, how are you this fine day?",
                         "I'm glad to hear it!"])
        self.check_wrap(text, 80, [text])

    def test_empty_string(self):
        # Check that wrapping the empty string returns an empty list.
        self.check_wrap("", 6, [])
        self.check_wrap("", 6, [], drop_whitespace=False)

    def test_empty_string_with_initial_indent(self):
        # Check that the empty string is not indented.
        self.check_wrap("", 6, [], initial_indent="++")
        self.check_wrap("", 6, [], initial_indent="++", drop_whitespace=False)

    def test_whitespace(self):
        # Whitespace munging and end-of-sentence detection
        # NOTE(review): runs of spaces inside these literals look collapsed
        # relative to upstream CPython; kept verbatim from this copy.
        text = """\
This is a paragraph that already has
line breaks. But some of its lines are much longer than the others,
so it needs to be wrapped.
Some lines are \ttabbed too.
What a mess!
"""
        expect = ["This is a paragraph that already has line",
                  "breaks. But some of its lines are much",
                  "longer than the others, so it needs to be",
                  "wrapped. Some lines are tabbed too. What a",
                  "mess!"]
        wrapper = TextWrapper(45, fix_sentence_endings=True)
        result = wrapper.wrap(text)
        self.check(result, expect)
        result = wrapper.fill(text)
        self.check(result, '\n'.join(expect))
        # Tabs are expanded before wrapping (default tabsize ...).
        text = "\tTest\tdefault\t\ttabsize."
        expect = [" Test default tabsize."]
        self.check_wrap(text, 80, expect)
        text = "\tTest\tcustom\t\ttabsize."
        expect = [" Test custom tabsize."]
        self.check_wrap(text, 80, expect, tabsize=4)

    def test_fix_sentence_endings(self):
        wrapper = TextWrapper(60, fix_sentence_endings=True)
        # SF #847346: ensure that fix_sentence_endings=True does the
        # right thing even on input short enough that it doesn't need to
        # be wrapped.
        text = "A short line. Note the single space."
        expect = ["A short line. Note the single space."]
        self.check(wrapper.wrap(text), expect)
        # Test some of the hairy end cases that _fix_sentence_endings()
        # is supposed to handle (the easy stuff is tested in
        # test_whitespace() above).
        text = "Well, Doctor? What do you think?"
        expect = ["Well, Doctor? What do you think?"]
        self.check(wrapper.wrap(text), expect)
        text = "Well, Doctor?\nWhat do you think?"
        self.check(wrapper.wrap(text), expect)
        text = 'I say, chaps! Anyone for "tennis?"\nHmmph!'
        expect = ['I say, chaps! Anyone for "tennis?" Hmmph!']
        self.check(wrapper.wrap(text), expect)
        wrapper.width = 20
        expect = ['I say, chaps!', 'Anyone for "tennis?"', 'Hmmph!']
        self.check(wrapper.wrap(text), expect)
        text = 'And she said, "Go to hell!"\nCan you believe that?'
        expect = ['And she said, "Go to',
                  'hell!" Can you',
                  'believe that?']
        self.check(wrapper.wrap(text), expect)
        wrapper.width = 60
        expect = ['And she said, "Go to hell!" Can you believe that?']
        self.check(wrapper.wrap(text), expect)
        text = 'File stdio.h is nice.'
        expect = ['File stdio.h is nice.']
        self.check(wrapper.wrap(text), expect)

    def test_wrap_short(self):
        # Wrapping to make short lines longer
        text = "This is a\nshort paragraph."
        self.check_wrap(text, 20, ["This is a short",
                                   "paragraph."])
        self.check_wrap(text, 40, ["This is a short paragraph."])

    def test_wrap_short_1line(self):
        # Test endcases
        text = "This is a short line."
        self.check_wrap(text, 30, ["This is a short line."])
        self.check_wrap(text, 30, ["(1) This is a short line."],
                        initial_indent="(1) ")

    def test_hyphenated(self):
        # Test breaking hyphenated words
        text = ("this-is-a-useful-feature-for-"
                "reformatting-posts-from-tim-peters'ly")
        self.check_wrap(text, 40,
                        ["this-is-a-useful-feature-for-",
                         "reformatting-posts-from-tim-peters'ly"])
        self.check_wrap(text, 41,
                        ["this-is-a-useful-feature-for-",
                         "reformatting-posts-from-tim-peters'ly"])
        self.check_wrap(text, 42,
                        ["this-is-a-useful-feature-for-reformatting-",
                         "posts-from-tim-peters'ly"])
        # The test tests current behavior but is not testing parts of the API.
        expect = ("this-|is-|a-|useful-|feature-|for-|"
                  "reformatting-|posts-|from-|tim-|peters'ly").split('|')
        self.check_wrap(text, 1, expect, break_long_words=False)
        self.check_split(text, expect)
        self.check_split('e-mail', ['e-mail'])
        self.check_split('Jelly-O', ['Jelly-O'])
        # The test tests current behavior but is not testing parts of the API.
        self.check_split('half-a-crown', 'half-|a-|crown'.split('|'))

    def test_hyphenated_numbers(self):
        # Test that hyphenated numbers (eg. dates) are not broken like words.
        text = ("Python 1.0.0 was released on 1994-01-26. Python 1.0.1 was\n"
                "released on 1994-02-15.")
        self.check_wrap(text, 30, ['Python 1.0.0 was released on',
                                   '1994-01-26. Python 1.0.1 was',
                                   'released on 1994-02-15.'])
        self.check_wrap(text, 40, ['Python 1.0.0 was released on 1994-01-26.',
                                   'Python 1.0.1 was released on 1994-02-15.'])
        self.check_wrap(text, 1, text.split(), break_long_words=False)
        text = "I do all my shopping at 7-11."
        self.check_wrap(text, 25, ["I do all my shopping at",
                                   "7-11."])
        self.check_wrap(text, 27, ["I do all my shopping at",
                                   "7-11."])
        self.check_wrap(text, 29, ["I do all my shopping at 7-11."])
        self.check_wrap(text, 1, text.split(), break_long_words=False)

    def test_em_dash(self):
        # Test text with em-dashes
        text = "Em-dashes should be written -- thus."
        self.check_wrap(text, 25,
                        ["Em-dashes should be",
                         "written -- thus."])
        # Probe the boundaries of the properly written em-dash,
        # ie. " -- ".
        self.check_wrap(text, 29,
                        ["Em-dashes should be written",
                         "-- thus."])
        expect = ["Em-dashes should be written --",
                  "thus."]
        self.check_wrap(text, 30, expect)
        self.check_wrap(text, 35, expect)
        self.check_wrap(text, 36,
                        ["Em-dashes should be written -- thus."])
        # The improperly written em-dash is handled too, because
        # it's adjacent to non-whitespace on both sides.
        text = "You can also do--this or even---this."
        expect = ["You can also do",
                  "--this or even",
                  "---this."]
        self.check_wrap(text, 15, expect)
        self.check_wrap(text, 16, expect)
        expect = ["You can also do--",
                  "this or even---",
                  "this."]
        self.check_wrap(text, 17, expect)
        self.check_wrap(text, 19, expect)
        expect = ["You can also do--this or even",
                  "---this."]
        self.check_wrap(text, 29, expect)
        self.check_wrap(text, 31, expect)
        expect = ["You can also do--this or even---",
                  "this."]
        self.check_wrap(text, 32, expect)
        self.check_wrap(text, 35, expect)
        # All of the above behaviour could be deduced by probing the
        # _split() method.
        text = "Here's an -- em-dash and--here's another---and another!"
        expect = ["Here's", " ", "an", " ", "--", " ", "em-", "dash", " ",
                  "and", "--", "here's", " ", "another", "---",
                  "and", " ", "another!"]
        self.check_split(text, expect)
        text = "and then--bam!--he was gone"
        expect = ["and", " ", "then", "--", "bam!", "--",
                  "he", " ", "was", " ", "gone"]
        self.check_split(text, expect)

    def test_unix_options (self):
        # Test that Unix-style command-line options are wrapped correctly.
        # Both Optik (OptionParser) and Docutils rely on this behaviour!
        text = "You should use the -n option, or --dry-run in its long form."
        self.check_wrap(text, 20,
                        ["You should use the",
                         "-n option, or --dry-",
                         "run in its long",
                         "form."])
        self.check_wrap(text, 21,
                        ["You should use the -n",
                         "option, or --dry-run",
                         "in its long form."])
        expect = ["You should use the -n option, or",
                  "--dry-run in its long form."]
        self.check_wrap(text, 32, expect)
        self.check_wrap(text, 34, expect)
        self.check_wrap(text, 35, expect)
        self.check_wrap(text, 38, expect)
        expect = ["You should use the -n option, or --dry-",
                  "run in its long form."]
        self.check_wrap(text, 39, expect)
        self.check_wrap(text, 41, expect)
        expect = ["You should use the -n option, or --dry-run",
                  "in its long form."]
        self.check_wrap(text, 42, expect)
        # Again, all of the above can be deduced from _split().
        text = "the -n option, or --dry-run or --dryrun"
        expect = ["the", " ", "-n", " ", "option,", " ", "or", " ",
                  "--dry-", "run", " ", "or", " ", "--dryrun"]
        self.check_split(text, expect)

    def test_funky_hyphens (self):
        # Screwy edge cases cooked up by David Goodger. All reported
        # in SF bug #596434.
        self.check_split("what the--hey!", ["what", " ", "the", "--", "hey!"])
        self.check_split("what the--", ["what", " ", "the--"])
        self.check_split("what the--.", ["what", " ", "the--."])
        self.check_split("--text--.", ["--text--."])
        # When I first read bug #596434, this is what I thought David
        # was talking about. I was wrong; these have always worked
        # fine. The real problem is tested in test_funky_parens()
        # below...
        self.check_split("--option", ["--option"])
        self.check_split("--option-opt", ["--option-", "opt"])
        self.check_split("foo --option-opt bar",
                         ["foo", " ", "--option-", "opt", " ", "bar"])

    def test_punct_hyphens(self):
        # Oh bother, SF #965425 found another problem with hyphens --
        # hyphenated words in single quotes weren't handled correctly.
        # In fact, the bug is that *any* punctuation around a hyphenated
        # word was handled incorrectly, except for a leading "--", which
        # was special-cased for Optik and Docutils. So test a variety
        # of styles of punctuation around a hyphenated word.
        # (Actually this is based on an Optik bug report, #813077).
        self.check_split("the 'wibble-wobble' widget",
                         ['the', ' ', "'wibble-", "wobble'", ' ', 'widget'])
        self.check_split('the "wibble-wobble" widget',
                         ['the', ' ', '"wibble-', 'wobble"', ' ', 'widget'])
        self.check_split("the (wibble-wobble) widget",
                         ['the', ' ', "(wibble-", "wobble)", ' ', 'widget'])
        self.check_split("the ['wibble-wobble'] widget",
                         ['the', ' ', "['wibble-", "wobble']", ' ', 'widget'])
        # The test tests current behavior but is not testing parts of the API.
        self.check_split("what-d'you-call-it.",
                         "what-d'you-|call-|it.".split('|'))

    def test_funky_parens (self):
        # Second part of SF bug #596434: long option strings inside
        # parentheses.
        self.check_split("foo (--option) bar",
                         ["foo", " ", "(--option)", " ", "bar"])
        # Related stuff -- make sure parens work in simpler contexts.
        self.check_split("foo (bar) baz",
                         ["foo", " ", "(bar)", " ", "baz"])
        self.check_split("blah (ding dong), wubba",
                         ["blah", " ", "(ding", " ", "dong),",
                          " ", "wubba"])

    def test_drop_whitespace_false(self):
        # Check that drop_whitespace=False preserves whitespace.
        # SF patch #1581073
        text = " This is a sentence with much whitespace."
        self.check_wrap(text, 10,
                        [" This is a", " ", "sentence ",
                         "with ", "much white", "space."],
                        drop_whitespace=False)

    def test_drop_whitespace_false_whitespace_only(self):
        # Check that drop_whitespace=False preserves a whitespace-only string.
        self.check_wrap(" ", 6, [" "], drop_whitespace=False)

    def test_drop_whitespace_false_whitespace_only_with_indent(self):
        # Check that a whitespace-only string gets indented (when
        # drop_whitespace is False).
        self.check_wrap(" ", 6, [" "], drop_whitespace=False,
                        initial_indent=" ")

    def test_drop_whitespace_whitespace_only(self):
        # Check drop_whitespace on a whitespace-only string.
        self.check_wrap(" ", 6, [])

    def test_drop_whitespace_leading_whitespace(self):
        # Check that drop_whitespace does not drop leading whitespace (if
        # followed by non-whitespace).
        # SF bug #622849 reported inconsistent handling of leading
        # whitespace; let's test that a bit, shall we?
        text = " This is a sentence with leading whitespace."
        self.check_wrap(text, 50,
                        [" This is a sentence with leading whitespace."])
        self.check_wrap(text, 30,
                        [" This is a sentence with", "leading whitespace."])

    def test_drop_whitespace_whitespace_line(self):
        # Check that drop_whitespace skips the whole line if a non-leading
        # line consists only of whitespace.
        text = "abcd efgh"
        # Include the result for drop_whitespace=False for comparison.
        self.check_wrap(text, 6, ["abcd", " ", "efgh"],
                        drop_whitespace=False)
        self.check_wrap(text, 6, ["abcd", "efgh"])

    def test_drop_whitespace_whitespace_only_with_indent(self):
        # Check that initial_indent is not applied to a whitespace-only
        # string. This checks a special case of the fact that dropping
        # whitespace occurs before indenting.
        self.check_wrap(" ", 6, [], initial_indent="++")

    def test_drop_whitespace_whitespace_indent(self):
        # Check that drop_whitespace does not drop whitespace indents.
        # This checks a special case of the fact that dropping whitespace
        # occurs before indenting.
        self.check_wrap("abcd efgh", 6, [" abcd", " efgh"],
                        initial_indent=" ", subsequent_indent=" ")

    def test_split(self):
        # Ensure that the standard _split() method works as advertised
        # in the comments
        text = "Hello there -- you goof-ball, use the -b option!"
        result = self.wrapper._split(text)
        self.check(result,
                   ["Hello", " ", "there", " ", "--", " ", "you", " ", "goof-",
                    "ball,", " ", "use", " ", "the", " ", "-b", " ", "option!"])

    def test_break_on_hyphens(self):
        # Ensure that the break_on_hyphens attributes work
        text = "yaba daba-doo"
        self.check_wrap(text, 10, ["yaba daba-", "doo"],
                        break_on_hyphens=True)
        self.check_wrap(text, 10, ["yaba", "daba-doo"],
                        break_on_hyphens=False)

    def test_bad_width(self):
        # Ensure that width <= 0 is caught.
        text = "Whatever, it doesn't matter."
        self.assertRaises(ValueError, wrap, text, 0)
        self.assertRaises(ValueError, wrap, text, -1)

    def test_no_split_at_umlaut(self):
        # A non-ASCII letter must not be treated as a break opportunity.
        text = "Die Empf\xe4nger-Auswahl"
        self.check_wrap(text, 13, ["Die", "Empf\xe4nger-", "Auswahl"])

    def test_umlaut_followed_by_dash(self):
        text = "aa \xe4\xe4-\xe4\xe4"
        self.check_wrap(text, 7, ["aa \xe4\xe4-", "\xe4\xe4"])

    def test_non_breaking_space(self):
        # NO-BREAK SPACE must never be used as a break point.
        text = 'This is a sentence with non-breaking\N{NO-BREAK SPACE}space.'
        self.check_wrap(text, 20,
                        ['This is a sentence',
                         'with non-',
                         'breaking\N{NO-BREAK SPACE}space.'],
                        break_on_hyphens=True)
        self.check_wrap(text, 20,
                        ['This is a sentence',
                         'with',
                         'non-breaking\N{NO-BREAK SPACE}space.'],
                        break_on_hyphens=False)

    def test_narrow_non_breaking_space(self):
        # Same as above for NARROW NO-BREAK SPACE.
        text = ('This is a sentence with non-breaking'
                '\N{NARROW NO-BREAK SPACE}space.')
        self.check_wrap(text, 20,
                        ['This is a sentence',
                         'with non-',
                         'breaking\N{NARROW NO-BREAK SPACE}space.'],
                        break_on_hyphens=True)
        self.check_wrap(text, 20,
                        ['This is a sentence',
                         'with',
                         'non-breaking\N{NARROW NO-BREAK SPACE}space.'],
                        break_on_hyphens=False)
class MaxLinesTestCase(BaseTestCase):
    """Tests for the max_lines/placeholder truncation options of wrap()."""

    # Shared input for every test in this class.
    text = "Hello there, how are you this fine day? I'm glad to hear it!"

    def test_simple(self):
        # Truncation at various max_lines values; the placeholder
        # defaults to " [...]".
        self.check_wrap(self.text, 12,
                        ["Hello [...]"],
                        max_lines=0)
        self.check_wrap(self.text, 12,
                        ["Hello [...]"],
                        max_lines=1)
        self.check_wrap(self.text, 12,
                        ["Hello there,",
                         "how [...]"],
                        max_lines=2)
        self.check_wrap(self.text, 13,
                        ["Hello there,",
                         "how are [...]"],
                        max_lines=2)
        self.check_wrap(self.text, 80, [self.text], max_lines=1)
        self.check_wrap(self.text, 12,
                        ["Hello there,",
                         "how are you",
                         "this fine",
                         "day? I'm",
                         "glad to hear",
                         "it!"],
                        max_lines=6)

    def test_spaces(self):
        # strip spaces before placeholder
        self.check_wrap(self.text, 12,
                        ["Hello there,",
                         "how are you",
                         "this fine",
                         "day? [...]"],
                        max_lines=4)
        # placeholder at the start of line
        self.check_wrap(self.text, 6,
                        ["Hello",
                         "[...]"],
                        max_lines=2)
        # final spaces
        self.check_wrap(self.text + ' ' * 10, 12,
                        ["Hello there,",
                         "how are you",
                         "this fine",
                         "day? I'm",
                         "glad to hear",
                         "it!"],
                        max_lines=6)

    def test_placeholder(self):
        # Custom placeholder strings.
        self.check_wrap(self.text, 12,
                        ["Hello..."],
                        max_lines=1,
                        placeholder='...')
        self.check_wrap(self.text, 12,
                        ["Hello there,",
                         "how are..."],
                        max_lines=2,
                        placeholder='...')
        # long placeholder and indentation
        with self.assertRaises(ValueError):
            wrap(self.text, 16, initial_indent=' ',
                 max_lines=1, placeholder=' [truncated]...')
        with self.assertRaises(ValueError):
            wrap(self.text, 16, subsequent_indent=' ',
                 max_lines=2, placeholder=' [truncated]...')
        self.check_wrap(self.text, 16,
                        [" Hello there,",
                         " [truncated]..."],
                        max_lines=2,
                        initial_indent=' ',
                        subsequent_indent=' ',
                        placeholder=' [truncated]...')
        self.check_wrap(self.text, 16,
                        [" [truncated]..."],
                        max_lines=1,
                        initial_indent=' ',
                        subsequent_indent=' ',
                        placeholder=' [truncated]...')
        self.check_wrap(self.text, 80, [self.text], placeholder='.' * 1000)
class LongWordTestCase (BaseTestCase):
    """Tests for handling of words longer than the wrap width."""

    def setUp(self):
        self.wrapper = TextWrapper()
        self.text = '''\
Did you say "supercalifragilisticexpialidocious?"
How *do* you spell that odd word, anyways?
'''

    def test_break_long(self):
        # Wrap text with long words and lots of punctuation
        self.check_wrap(self.text, 30,
                        ['Did you say "supercalifragilis',
                         'ticexpialidocious?" How *do*',
                         'you spell that odd word,',
                         'anyways?'])
        self.check_wrap(self.text, 50,
                        ['Did you say "supercalifragilisticexpialidocious?"',
                         'How *do* you spell that odd word, anyways?'])
        # SF bug 797650. Prevent an infinite loop by making sure that at
        # least one character gets split off on every pass.
        self.check_wrap('-'*10+'hello', 10,
                        ['----------',
                         ' h',
                         ' e',
                         ' l',
                         ' l',
                         ' o'],
                        subsequent_indent = ' '*15)
        # bug 1146. Prevent a long word to be wrongly wrapped when the
        # preceding word is exactly one character shorter than the width
        self.check_wrap(self.text, 12,
                        ['Did you say ',
                         '"supercalifr',
                         'agilisticexp',
                         'ialidocious?',
                         '" How *do*',
                         'you spell',
                         'that odd',
                         'word,',
                         'anyways?'])

    def test_nobreak_long(self):
        # Test with break_long_words disabled
        self.wrapper.break_long_words = 0
        self.wrapper.width = 30
        expect = ['Did you say',
                  '"supercalifragilisticexpialidocious?"',
                  'How *do* you spell that odd',
                  'word, anyways?'
                  ]
        result = self.wrapper.wrap(self.text)
        self.check(result, expect)
        # Same thing with kwargs passed to standalone wrap() function.
        result = wrap(self.text, width=30, break_long_words=0)
        self.check(result, expect)

    def test_max_lines_long(self):
        # max_lines truncation while a long word is being broken.
        self.check_wrap(self.text, 12,
                        ['Did you say ',
                         '"supercalifr',
                         'agilisticexp',
                         '[...]'],
                        max_lines=4)
class IndentTestCases(BaseTestCase):
    """Tests for the initial_indent/subsequent_indent options of fill()."""

    # called before each test method
    def setUp(self):
        self.text = '''\
This paragraph will be filled, first without any indentation,
and then with some (including a hanging indent).'''

    def test_fill(self):
        # Test the fill() method
        expect = '''\
This paragraph will be filled, first
without any indentation, and then with
some (including a hanging indent).'''
        result = fill(self.text, 40)
        self.check(result, expect)

    def test_initial_indent(self):
        # Test initial_indent parameter
        expect = [" This paragraph will be filled,",
                  "first without any indentation, and then",
                  "with some (including a hanging indent)."]
        result = wrap(self.text, 40, initial_indent=" ")
        self.check(result, expect)
        expect = "\n".join(expect)
        result = fill(self.text, 40, initial_indent=" ")
        self.check(result, expect)

    def test_subsequent_indent(self):
        # Test subsequent_indent parameter
        # NOTE(review): the expected literal appears to have lost its
        # leading whitespace in this copy of the file; kept verbatim.
        expect = '''\
* This paragraph will be filled, first
without any indentation, and then
with some (including a hanging
indent).'''
        result = fill(self.text, 40,
                      initial_indent=" * ", subsequent_indent=" ")
        self.check(result, expect)
# Despite the similar names, DedentTestCase is *not* the inverse
# of IndentTestCase!
class DedentTestCase(unittest.TestCase):
    """Tests for textwrap.dedent()."""

    def assertUnchanged(self, text):
        """assert that dedent() has no effect on 'text'"""
        self.assertEqual(text, dedent(text))

    def test_dedent_nomargin(self):
        # No lines indented.
        text = "Hello there.\nHow are you?\nOh good, I'm glad."
        self.assertUnchanged(text)
        # Similar, with a blank line.
        text = "Hello there.\n\nBoo!"
        self.assertUnchanged(text)
        # Some lines indented, but overall margin is still zero.
        text = "Hello there.\n This is indented."
        self.assertUnchanged(text)
        # Again, add a blank line.
        text = "Hello there.\n\n Boo!\n"
        self.assertUnchanged(text)

    def test_dedent_even(self):
        # All lines indented by two spaces.
        text = " Hello there.\n How are ya?\n Oh good."
        expect = "Hello there.\nHow are ya?\nOh good."
        self.assertEqual(expect, dedent(text))
        # Same, with blank lines.
        text = " Hello there.\n\n How are ya?\n Oh good.\n"
        expect = "Hello there.\n\nHow are ya?\nOh good.\n"
        self.assertEqual(expect, dedent(text))
        # Now indent one of the blank lines.
        text = " Hello there.\n \n How are ya?\n Oh good.\n"
        expect = "Hello there.\n\nHow are ya?\nOh good.\n"
        self.assertEqual(expect, dedent(text))

    def test_dedent_uneven(self):
        # Lines indented unevenly.
        # NOTE(review): the triple-quoted literals below appear to have
        # lost their internal indentation in this copy; kept verbatim.
        text = '''\
def foo():
while 1:
return foo
'''
        expect = '''\
def foo():
while 1:
return foo
'''
        self.assertEqual(expect, dedent(text))
        # Uneven indentation with a blank line.
        text = " Foo\n Bar\n\n Baz\n"
        expect = "Foo\n Bar\n\n Baz\n"
        self.assertEqual(expect, dedent(text))
        # Uneven indentation with a whitespace-only line.
        text = " Foo\n Bar\n \n Baz\n"
        expect = "Foo\n Bar\n\n Baz\n"
        self.assertEqual(expect, dedent(text))

    # dedent() should not mangle internal tabs
    def test_dedent_preserve_internal_tabs(self):
        text = " hello\tthere\n how are\tyou?"
        expect = "hello\tthere\nhow are\tyou?"
        self.assertEqual(expect, dedent(text))
        # make sure that it preserves tabs when it's not making any
        # changes at all
        self.assertEqual(expect, dedent(expect))

    # dedent() should not mangle tabs in the margin (i.e.
    # tabs and spaces both count as margin, but are *not*
    # considered equivalent)
    def test_dedent_preserve_margin_tabs(self):
        text = " hello there\n\thow are you?"
        self.assertUnchanged(text)
        # same effect even if we have 8 spaces
        text = " hello there\n\thow are you?"
        self.assertUnchanged(text)
        # dedent() only removes whitespace that can be uniformly removed!
        text = "\thello there\n\thow are you?"
        expect = "hello there\nhow are you?"
        self.assertEqual(expect, dedent(text))
        text = " \thello there\n \thow are you?"
        self.assertEqual(expect, dedent(text))
        text = " \t hello there\n \t how are you?"
        self.assertEqual(expect, dedent(text))
        text = " \thello there\n \t how are you?"
        expect = "hello there\n how are you?"
        self.assertEqual(expect, dedent(text))
        # test margin is smaller than smallest indent
        text = " \thello there\n \thow are you?\n \tI'm fine, thanks"
        expect = " \thello there\n \thow are you?\n\tI'm fine, thanks"
        self.assertEqual(expect, dedent(text))
# Test textwrap.indent
class IndentTestCase(unittest.TestCase):
    """Tests for textwrap.indent()."""

    # The examples used for tests. If any of these change, the expected
    # results in the various test cases must also be updated.
    # The roundtrip cases are separate, because textwrap.dedent doesn't
    # handle Windows line endings
    ROUNDTRIP_CASES = (
        # Basic test case
        "Hi.\nThis is a test.\nTesting.",
        # Include a blank line
        "Hi.\nThis is a test.\n\nTesting.",
        # Include leading and trailing blank lines
        "\nHi.\nThis is a test.\nTesting.\n",
    )
    CASES = ROUNDTRIP_CASES + (
        # Use Windows line endings
        "Hi.\r\nThis is a test.\r\nTesting.\r\n",
        # Pathological case
        "\nHi.\r\nThis is a test.\n\r\nTesting.\r\n\n",
    )

    def test_indent_nomargin_default(self):
        # indent should do nothing if 'prefix' is empty.
        for text in self.CASES:
            self.assertEqual(indent(text, ''), text)

    def test_indent_nomargin_explicit_default(self):
        # The same as test_indent_nomargin, but explicitly requesting
        # the default behaviour by passing None as the predicate
        for text in self.CASES:
            self.assertEqual(indent(text, '', None), text)

    def test_indent_nomargin_all_lines(self):
        # The same as test_indent_nomargin, but using the optional
        # predicate argument
        predicate = lambda line: True
        for text in self.CASES:
            self.assertEqual(indent(text, '', predicate), text)

    def test_indent_no_lines(self):
        # Explicitly skip indenting any lines
        predicate = lambda line: False
        for text in self.CASES:
            self.assertEqual(indent(text, ' ', predicate), text)

    def test_roundtrip_spaces(self):
        # A whitespace prefix should roundtrip with dedent
        for text in self.ROUNDTRIP_CASES:
            self.assertEqual(dedent(indent(text, ' ')), text)

    def test_roundtrip_tabs(self):
        # A whitespace prefix should roundtrip with dedent
        for text in self.ROUNDTRIP_CASES:
            self.assertEqual(dedent(indent(text, '\t\t')), text)

    def test_roundtrip_mixed(self):
        # A whitespace prefix should roundtrip with dedent
        for text in self.ROUNDTRIP_CASES:
            self.assertEqual(dedent(indent(text, ' \t \t ')), text)

    def test_indent_default(self):
        # Test default indenting of lines that are not whitespace only
        prefix = ' '
        expected = (
            # Basic test case
            " Hi.\n This is a test.\n Testing.",
            # Include a blank line
            " Hi.\n This is a test.\n\n Testing.",
            # Include leading and trailing blank lines
            "\n Hi.\n This is a test.\n Testing.\n",
            # Use Windows line endings
            " Hi.\r\n This is a test.\r\n Testing.\r\n",
            # Pathological case
            "\n Hi.\r\n This is a test.\n\r\n Testing.\r\n\n",
        )
        for text, expect in zip(self.CASES, expected):
            self.assertEqual(indent(text, prefix), expect)

    def test_indent_explicit_default(self):
        # Test default indenting of lines that are not whitespace only
        prefix = ' '
        expected = (
            # Basic test case
            " Hi.\n This is a test.\n Testing.",
            # Include a blank line
            " Hi.\n This is a test.\n\n Testing.",
            # Include leading and trailing blank lines
            "\n Hi.\n This is a test.\n Testing.\n",
            # Use Windows line endings
            " Hi.\r\n This is a test.\r\n Testing.\r\n",
            # Pathological case
            "\n Hi.\r\n This is a test.\n\r\n Testing.\r\n\n",
        )
        for text, expect in zip(self.CASES, expected):
            self.assertEqual(indent(text, prefix, None), expect)

    def test_indent_all_lines(self):
        # Add 'prefix' to all lines, including whitespace-only ones.
        prefix = ' '
        expected = (
            # Basic test case
            " Hi.\n This is a test.\n Testing.",
            # Include a blank line
            " Hi.\n This is a test.\n \n Testing.",
            # Include leading and trailing blank lines
            " \n Hi.\n This is a test.\n Testing.\n",
            # Use Windows line endings
            " Hi.\r\n This is a test.\r\n Testing.\r\n",
            # Pathological case
            " \n Hi.\r\n This is a test.\n \r\n Testing.\r\n \n",
        )
        predicate = lambda line: True
        for text, expect in zip(self.CASES, expected):
            self.assertEqual(indent(text, prefix, predicate), expect)

    def test_indent_empty_lines(self):
        # Add 'prefix' solely to whitespace-only lines.
        prefix = ' '
        expected = (
            # Basic test case
            "Hi.\nThis is a test.\nTesting.",
            # Include a blank line
            "Hi.\nThis is a test.\n \nTesting.",
            # Include leading and trailing blank lines
            " \nHi.\nThis is a test.\nTesting.\n",
            # Use Windows line endings
            "Hi.\r\nThis is a test.\r\nTesting.\r\n",
            # Pathological case
            " \nHi.\r\nThis is a test.\n \r\nTesting.\r\n \n",
        )
        predicate = lambda line: not line.strip()
        for text, expect in zip(self.CASES, expected):
            self.assertEqual(indent(text, prefix, predicate), expect)
class ShortenTestCase(BaseTestCase):
    """Tests for textwrap.shorten()."""

    def check_shorten(self, text, width, expect, **kwargs):
        """Shorten *text* to *width* (kwargs forwarded) and compare."""
        result = shorten(text, width, **kwargs)
        self.check(result, expect)

    def test_simple(self):
        # Simple case: just words, spaces, and a bit of punctuation
        text = "Hello there, how are you this fine day? I'm glad to hear it!"
        self.check_shorten(text, 18, "Hello there, [...]")
        self.check_shorten(text, len(text), text)
        self.check_shorten(text, len(text) - 1,
            "Hello there, how are you this fine day? "
            "I'm glad to [...]")

    def test_placeholder(self):
        # Custom placeholder strings, with and without leading space.
        text = "Hello there, how are you this fine day? I'm glad to hear it!"
        self.check_shorten(text, 17, "Hello there,$$", placeholder='$$')
        self.check_shorten(text, 18, "Hello there, how$$", placeholder='$$')
        self.check_shorten(text, 18, "Hello there, $$", placeholder=' $$')
        self.check_shorten(text, len(text), text, placeholder='$$')
        self.check_shorten(text, len(text) - 1,
            "Hello there, how are you this fine day? "
            "I'm glad to hear$$", placeholder='$$')

    def test_empty_string(self):
        self.check_shorten("", 6, "")

    def test_whitespace(self):
        # Whitespace collapsing
        text = """
This is a paragraph that already has
line breaks and \t tabs too."""
        self.check_shorten(text, 62,
                           "This is a paragraph that already has line "
                           "breaks and tabs too.")
        self.check_shorten(text, 61,
                           "This is a paragraph that already has line "
                           "breaks and [...]")
        self.check_shorten("hello world! ", 12, "hello world!")
        self.check_shorten("hello world! ", 11, "hello [...]")
        # The leading space is trimmed from the placeholder
        # (it would be ugly otherwise).
        self.check_shorten("hello world! ", 10, "[...]")

    def test_width_too_small_for_placeholder(self):
        # A placeholder exactly as wide as 'width' is accepted; one
        # character wider raises ValueError.
        shorten("x" * 20, width=8, placeholder="(......)")
        with self.assertRaises(ValueError):
            shorten("x" * 20, width=8, placeholder="(.......)")

    def test_first_word_too_long_but_placeholder_fits(self):
        self.check_shorten("Helloo", 5, "[...]")
if __name__ == '__main__':
    # Run the whole suite when this file is executed directly.
    unittest.main()
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the system configuration methods work properly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools

from absl.testing import parameterized

from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
def reset_eager(fn):
  """Decorator that tears down and rebuilds the eager context after `fn`.

  The tests in this file mutate process-global eager configuration (device
  policy, execution mode, thread counts); resetting inside ``finally``
  keeps one test's configuration from leaking into the next, even when
  `fn` raises.

  Args:
    fn: the test method to wrap.

  Returns:
    A wrapper with the same call signature as `fn`.
  """
  # functools.wraps preserves fn's __name__/__doc__ so test reporting and
  # other decorators see the real test method, not 'wrapper'.
  @functools.wraps(fn)
  def wrapper(*args, **kwargs):
    try:
      return fn(*args, **kwargs)
    finally:
      # Reset the context.
      context._context = None
      ops.enable_eager_execution_internal()
      assert context._context is not None
  return wrapper
class ConfigTest(test.TestCase, parameterized.TestCase):
@test_util.run_gpu_only
@reset_eager
def testDevicePolicy(self):
  """Exercises each device placement policy against a CPU/GPU tensor mix."""
  # Before any configuration the policy is SILENT.
  self.assertEqual(context.DEVICE_PLACEMENT_SILENT,
                   context.context().device_policy)
  # If no op has been executed we should be able to set the device policy as
  # well as any init-time configs.
  config.set_intra_op_parallelism_threads(1)
  config.set_device_policy('silent')
  config.set_intra_op_parallelism_threads(2)
  context.ensure_initialized()

  def copy_tensor(dtype=dtypes.int32):
    # Adds a CPU tensor to a GPU copy of it; whether this is allowed
    # depends on the device policy currently in force.
    cpu_tensor = constant_op.constant(1, dtype=dtype)
    gpu_tensor = cpu_tensor.gpu()
    self.assertAllEqual(cpu_tensor + gpu_tensor, 2.0)

  # 'silent': the cross-device op succeeds for any dtype.
  config.set_device_policy('silent')
  self.assertEqual(config.get_device_policy(), 'silent')
  self.assertEqual(context.DEVICE_PLACEMENT_SILENT,
                   context.context().device_policy)
  copy_tensor()

  # 'silent_for_int32': int32 succeeds, float32 raises.
  config.set_device_policy('silent_for_int32')
  self.assertEqual(config.get_device_policy(), 'silent_for_int32')
  self.assertEqual(context.DEVICE_PLACEMENT_SILENT_FOR_INT32,
                   context.context().device_policy)
  with self.assertRaisesRegex(errors.InvalidArgumentError,
                              'Tensors on conflicting devices'):
    copy_tensor(dtypes.float32)
  copy_tensor()

  # 'warn': the op still succeeds.
  config.set_device_policy('warn')
  self.assertEqual(config.get_device_policy(), 'warn')
  self.assertEqual(context.DEVICE_PLACEMENT_WARN,
                   context.context().device_policy)
  copy_tensor()

  # 'explicit': any cross-device use raises.
  config.set_device_policy('explicit')
  self.assertEqual(config.get_device_policy(), 'explicit')
  self.assertEqual(context.DEVICE_PLACEMENT_EXPLICIT,
                   context.context().device_policy)
  with self.assertRaisesRegex(errors.InvalidArgumentError,
                              'Tensors on conflicting devices'):
    copy_tensor()

  # Passing None restores the default policy ('silent').
  config.set_device_policy(None)
  self.assertEqual(config.get_device_policy(), 'silent')
@reset_eager
def testExecutionMode(self):
self.assertTrue(config.get_synchronous_execution())
self.assertEqual(context.SYNC, context.context().execution_mode)
# If no op has been executed we should be able to set the execution mode as
# well as any init-time configs.
config.set_intra_op_parallelism_threads(1)
config.set_synchronous_execution(False)
config.set_intra_op_parallelism_threads(2)
config.set_synchronous_execution(True)
self.assertTrue(config.get_synchronous_execution())
self.assertEqual(context.SYNC, context.context().execution_mode)
config.set_synchronous_execution(False)
self.assertFalse(config.get_synchronous_execution())
self.assertEqual(context.ASYNC, context.context().execution_mode)
@reset_eager
def testIntraOpParallelismThreads(self):
config.set_intra_op_parallelism_threads(10)
self.assertEqual(
config.get_intra_op_parallelism_threads(),
context.context().intra_op_parallelism_threads)
context.ensure_initialized()
with self.assertRaises(RuntimeError):
config.set_intra_op_parallelism_threads(1)
config.set_intra_op_parallelism_threads(10)
@reset_eager
def testInterOpParallelismThreads(self):
config.set_inter_op_parallelism_threads(10)
self.assertEqual(
config.get_inter_op_parallelism_threads(),
context.context().inter_op_parallelism_threads)
context.ensure_initialized()
with self.assertRaises(RuntimeError):
config.set_inter_op_parallelism_threads(1)
config.set_inter_op_parallelism_threads(10)
@test_util.run_gpu_only
@reset_eager
def testSoftPlacement(self):
if context.executing_eagerly():
self.assertTrue(config.get_soft_device_placement())
else:
self.assertFalse(config.get_soft_device_placement())
def mod():
with ops.device('/device:GPU:0'):
a = constant_op.constant(1.0)
b = constant_op.constant(1.0)
return math_ops.mod(a, b)
config.set_soft_device_placement(True)
self.assertEqual(config.get_soft_device_placement(), True)
self.assertEqual(
config.get_soft_device_placement(),
context.context().soft_device_placement)
# Since soft placement is enabled, the mod operation should fallback to CPU
# with pure eager execution as well as functions
mod()
def_function.function(mod)()
config.set_soft_device_placement(False)
self.assertEqual(config.get_soft_device_placement(), False)
self.assertEqual(
config.get_soft_device_placement(),
context.context().soft_device_placement)
# Since soft placement is disabled, the mod operation should fail on GPU
# with pure eager execution as well as functions
with self.assertRaises(errors.InvalidArgumentError):
mod()
with self.assertRaises(errors.InvalidArgumentError):
def_function.function(mod)()
@reset_eager
def testLogDevicePlacement(self):
self.assertFalse(context.get_log_device_placement())
context.set_log_device_placement(True)
self.assertEqual(context.get_log_device_placement(), True)
self.assertEqual(
context.get_log_device_placement(),
context.context().log_device_placement)
context.set_log_device_placement(False)
self.assertEqual(context.get_log_device_placement(), False)
self.assertEqual(
context.get_log_device_placement(),
context.context().log_device_placement)
context.ensure_initialized()
# Changing the device placement should not throw an exception
context.set_log_device_placement(True)
@reset_eager
def testEnableMlirBridge(self):
# Default value of enable_mlir_bridge is false.
self.assertFalse(context.context().config.experimental.enable_mlir_bridge)
# Tests enabling mlir bridge.
config.enable_mlir_bridge()
self.assertTrue(context.context().config.experimental.enable_mlir_bridge)
# Tests disabling mlir bridge.
config.disable_mlir_bridge()
self.assertFalse(context.context().config.experimental.enable_mlir_bridge)
@reset_eager
def testEnableMlirGraphOptimization(self):
# Default value of enable_mlir_graph_optimization is false.
self.assertFalse(
context.context().config.experimental.enable_mlir_graph_optimization)
# Tests enabling mlir graph optimization.
config.enable_mlir_graph_optimization()
self.assertTrue(
context.context().config.experimental.enable_mlir_graph_optimization)
# Tests disabling mlir graph optimization.
config.disable_mlir_graph_optimization()
self.assertFalse(
context.context().config.experimental.enable_mlir_graph_optimization)
@test_util.run_gpu_only
@reset_eager
def testJit(self):
self.assertEqual(config.get_optimizer_jit(), False)
# the following function should cause Op fusion to occur. However, there is
# unfortunately no straightforward way to ensure this. We will just have to
# settle for creating a test that can trigger JIT.
@def_function.function
def fun(a, b):
c = a * b
d = c + a
return d
a = constant_op.constant([2., 2.])
b = constant_op.constant([2., 2.])
self.evaluate(fun(a, b))
config.set_optimizer_jit(True)
self.assertEqual(config.get_optimizer_jit(), True)
self.assertEqual(config.get_optimizer_jit(),
context.context().optimizer_jit)
self.evaluate(fun(a, b))
config.set_optimizer_jit(False)
self.assertEqual(config.get_optimizer_jit(), False)
self.assertEqual(config.get_optimizer_jit(),
context.context().optimizer_jit)
self.evaluate(fun(a, b))
@parameterized.named_parameters(
('LayoutOptimizer', 'layout_optimizer'),
('ConstantFolding', 'constant_folding'),
('ShapeOptimization', 'shape_optimization'),
('Remapping', 'remapping'),
('ArithmeticOptimization', 'arithmetic_optimization'),
('DependencyOptimization', 'dependency_optimization'),
('LoopOptimization', 'loop_optimization'),
('FunctionOptimization', 'function_optimization'),
('DebugStripper', 'debug_stripper'),
('ScopedAllocatorOptimization', 'scoped_allocator_optimization'),
('ImplementationSelector', 'implementation_selector'),
('AutoMixedPrecision', 'auto_mixed_precision'))
@reset_eager
def testOptimizerToggleOption(self, field):
# TODO(b/128531235): Improve testing of option
options = config.get_optimizer_experimental_options()
self.assertIsNone(options.get(field))
config.set_optimizer_experimental_options({field: True})
options[field] = True
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
config.set_optimizer_experimental_options({field: False})
options[field] = False
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
@parameterized.named_parameters(
('DisableModelPruning', 'disable_model_pruning'),
('DisableMetaOptimizer', 'disable_meta_optimizer'))
@reset_eager
def testOptimizerBoolOption(self, field):
# TODO(b/128531235): Improve testing of option
options = config.get_optimizer_experimental_options()
self.assertFalse(options.get(field))
config.set_optimizer_experimental_options({field: True})
options[field] = True
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
config.set_optimizer_experimental_options({field: False})
options[field] = False
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
@test_util.run_gpu_only
@reset_eager
def testOptimizerToggleOptionPinToHost(self):
options = config.get_optimizer_experimental_options()
self.assertIsNone(options.get('pin_to_host_optimization'))
@def_function.function
def fun():
op = test_ops.device_placement_op()
return op
# Force optimizer to run for all graphs
config.set_optimizer_experimental_options({'min_graph_nodes': -1})
options['min_graph_nodes'] = -1
# Since pin to host is disabled, the operation should go on GPU
gpu = self.evaluate(fun())
self.assertIn(compat.as_bytes('GPU'), gpu)
config.set_optimizer_experimental_options(
{'pin_to_host_optimization': True})
options['pin_to_host_optimization'] = True
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
# Since pin to host is enabled, the operation should go on CPU
cpu = self.evaluate(fun())
self.assertIn(compat.as_bytes('CPU'), cpu)
config.set_optimizer_experimental_options(
{'pin_to_host_optimization': False})
options['pin_to_host_optimization'] = False
self.assertDictEqual(config.get_optimizer_experimental_options(), options)
self.assertDictEqual(
context.context().get_optimizer_experimental_options(), options)
# Since pin to host is disabled again, the operation should go on GPU
gpu2 = self.evaluate(fun())
self.assertIn(compat.as_bytes('GPU'), gpu2)
class DeviceTest(test.TestCase):
  """Tests for physical/logical device configuration via tf.config.

  Fix: two assertions previously used `self.assertTrue(a, b)`, which treats
  `b` as the failure *message* and therefore asserted nothing beyond
  truthiness of `a`; they are now `assertEqual(a, b)` as intended.
  """

  @reset_eager
  def testPhysicalDevices(self):
    cpus = config.list_physical_devices('CPU')
    self.assertGreater(len(cpus), 0)
    if test_util.is_gpu_available():
      gpus = config.list_physical_devices('GPU')
      self.assertGreater(len(gpus), 0)

  @reset_eager
  def testCpuMultiple(self):
    cpus = config.list_physical_devices('CPU')
    self.assertEqual(len(cpus), 1)

    config.set_logical_device_configuration(cpus[0], [
        context.LogicalDeviceConfiguration(),
        context.LogicalDeviceConfiguration()
    ])

    context.ensure_initialized()

    vcpus = config.list_logical_devices('CPU')
    self.assertEqual(len(vcpus), 2)

    with ops.device('/device:CPU:0'):
      a = constant_op.constant(1.0)
      self.evaluate(a)

    with ops.device('/device:CPU:1'):
      b = constant_op.constant(1.0)
      self.evaluate(b)

    # CPU:2 does not exist; placement silently falls back to CPU:0.
    with ops.device('/device:CPU:2'):
      c = constant_op.constant(1.0)
      self.evaluate(c)
    self.assertIn('CPU:0', c.device)

    # Ensure we can place ops on each of the device names
    for vcpu in vcpus:
      with ops.device(vcpu.name):
        d = constant_op.constant(1.0)
        self.evaluate(d)

    # Modifying the CPU configuration is not supported
    with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
      config.set_logical_device_configuration(cpus[0], [
          context.LogicalDeviceConfiguration(),
          context.LogicalDeviceConfiguration(),
          context.LogicalDeviceConfiguration()
      ])

    # Setting the same CPU configuration is fine
    config.set_logical_device_configuration(cpus[0], [
        context.LogicalDeviceConfiguration(),
        context.LogicalDeviceConfiguration()
    ])

  @test_util.run_gpu_only
  @reset_eager
  def testGpuNone(self):
    config.set_soft_device_placement(False)
    gpus = config.list_physical_devices('GPU')
    self.assertGreater(len(gpus), 0)

    cpus = config.list_physical_devices('CPU')
    self.assertEqual(len(cpus), 1)

    self.assertEqual(len(config.get_visible_devices('CPU')), 1)
    self.assertGreater(len(config.get_visible_devices('GPU')), 0)
    self.assertEqual(len(config.get_visible_devices('XLA_GPU')), 0)

    # Hide all GPUs; explicit GPU placement must now fail.
    config.set_visible_devices(cpus[0])

    self.assertEqual(len(config.get_visible_devices('CPU')), 1)
    self.assertEqual(len(config.get_visible_devices('GPU')), 0)
    self.assertEqual(len(config.list_logical_devices('XLA_GPU')), 0)

    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                'Could not satisfy'):
      with ops.device('/device:GPU:0'):
        a = array_ops.identity(1.0)
        self.evaluate(a)

    # Modifying the visible devices is not supported
    with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
      config.set_visible_devices(gpus)

    # Setting the same visible devices is fine
    config.set_visible_devices(cpus[0])

  @reset_eager
  def testGpuMultiple(self):
    gpus = config.list_physical_devices('GPU')
    if len(gpus) < 2:
      self.skipTest('Need at least 2 GPUs')

    context.ensure_initialized()

    for i in range(0, len(gpus)):
      with ops.device('/device:GPU:' + str(i)):
        a = constant_op.constant(1.0)
        self.evaluate(a)

    # One past the last GPU index must be rejected.
    with self.assertRaisesRegex(RuntimeError, 'unknown device'):
      with ops.device('/device:GPU:' + str(len(gpus))):
        a = constant_op.constant(1.0)
        self.evaluate(a)

  @reset_eager
  def testDeviceDetails(self):
    (cpu,) = config.list_physical_devices('CPU')
    details = config.get_device_details(cpu)
    self.assertEqual(details, {})

    if not test_util.is_gpu_available():
      return

    gpus = config.list_physical_devices('GPU')
    details = config.get_device_details(gpus[0])
    self.assertIsInstance(details['device_name'], str)
    self.assertNotEmpty(details['device_name'])
    if test.is_built_with_rocm():
      # AMD GPUs do not have a compute capability
      self.assertNotIn('compute_capability', details)
    else:
      cc = details['compute_capability']
      self.assertIsInstance(cc, tuple)
      major, minor = cc
      self.assertGreater(major, 0)
      self.assertGreaterEqual(minor, 0)

    # Test GPU returned from get_visible_devices
    # NOTE(review): gpus[1] only needs 2 GPUs, yet the guard requires 3;
    # looks overly strict — confirm the intended minimum before changing.
    if len(gpus) > 2:
      config.set_visible_devices(gpus[1], 'GPU')
      (visible_gpu,) = config.get_visible_devices('GPU')
      details = config.get_device_details(visible_gpu)
      self.assertIsInstance(details['device_name'], str)

  @reset_eager
  def testDeviceDetailsErrors(self):
    logical_devices = config.list_logical_devices()
    with self.assertRaisesRegex(ValueError,
                                'must be a tf.config.PhysicalDevice'):
      config.get_device_details(logical_devices[0])

    phys_dev = context.PhysicalDevice('/physical_device:CPU:100', 'CPU')
    with self.assertRaisesRegex(
        ValueError, 'The PhysicalDevice must be one obtained from '
        'calling `tf.config.list_physical_devices`'):
      config.get_device_details(phys_dev)

  @test_util.run_gpu_only
  @reset_eager
  def testVirtualGpu(self):
    config.set_soft_device_placement(False)
    gpus = config.list_physical_devices('GPU')
    self.assertNotEqual(len(gpus), 0)

    self.assertIsNone(config.get_logical_device_configuration(gpus[-1]))
    config.set_logical_device_configuration(gpus[-1], [
        context.LogicalDeviceConfiguration(memory_limit=10),
        context.LogicalDeviceConfiguration(memory_limit=10)
    ])
    self.assertEqual(len(config.get_logical_device_configuration(gpus[-1])), 2)

    logical_gpus = config.list_logical_devices('GPU')
    # Fixed: was assertTrue(a, b), which only checked truthiness of `a`.
    # Splitting the last physical GPU in two adds one logical device.
    self.assertEqual(len(logical_gpus), len(gpus) + 1)
    for i in range(0, len(logical_gpus)):
      with ops.device('/device:GPU:' + str(i)):
        a = array_ops.identity(1.0)
        self.evaluate(a)

    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                'Could not satisfy'):
      with ops.device('/device:GPU:' + str(len(logical_gpus))):
        a = array_ops.identity(1.0)
        self.evaluate(a)

    # Modifying the GPU configuration is not supported
    with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
      config.set_logical_device_configuration(gpus[-1], [
          context.LogicalDeviceConfiguration(memory_limit=20),
          context.LogicalDeviceConfiguration(memory_limit=20)
      ])

    with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
      config.set_logical_device_configuration(gpus[-1], [
          context.LogicalDeviceConfiguration(memory_limit=10),
          context.LogicalDeviceConfiguration(memory_limit=10),
          context.LogicalDeviceConfiguration(memory_limit=10)
      ])

    # Setting the same GPU configuration is fine
    config.set_logical_device_configuration(gpus[-1], [
        context.LogicalDeviceConfiguration(memory_limit=10),
        context.LogicalDeviceConfiguration(memory_limit=10)
    ])

  @test_util.run_gpu_only
  @reset_eager
  def testGpuGrowth(self):
    gpus = config.list_physical_devices('GPU')
    self.assertNotEqual(len(gpus), 0)

    self.assertIsNone(config.get_memory_growth(gpus[-1]))
    for gpu in gpus:
      config.set_memory_growth(gpu, True)

    c = context.context().config
    self.assertTrue(c.gpu_options.allow_growth)

    logical_gpus = config.list_logical_devices('GPU')
    # Fixed: was assertTrue(a, b), which only checked truthiness of `a`.
    # Memory growth does not change the number of logical devices.
    self.assertEqual(len(logical_gpus), len(gpus))

    # Modifying the GPU configuration is not supported
    with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
      for gpu in gpus:
        config.set_memory_growth(gpu, False)

    # Setting the same GPU configuration is fine
    for gpu in gpus:
      config.set_memory_growth(gpu, True)

  @test_util.run_gpu_only
  @reset_eager
  def testGpuInvalidConfig(self):
    gpus = config.list_physical_devices('GPU')
    self.assertNotEqual(len(gpus), 0)

    if len(gpus) > 1:
      # Assert if other GPUs were not configured
      config.set_memory_growth(gpus[0], True)
      with self.assertRaisesRegex(ValueError, 'cannot differ'):
        c = context.context().config

      # If we limit visibility to GPU 0, growth is fine
      config.set_visible_devices(gpus[0], 'GPU')
      c = context.context().config
      self.assertTrue(c.gpu_options.allow_growth)

      # Default setting for second GPU is False and works if we set visibility
      config.set_visible_devices(gpus[1], 'GPU')
      c = context.context().config
      self.assertFalse(c.gpu_options.allow_growth)

      # Growth now fails because all the GPUs are visible and not the same
      config.set_visible_devices(gpus, 'GPU')
      with self.assertRaisesRegex(ValueError, 'cannot differ'):
        c = context.context().config

    for gpu in gpus:
      config.set_memory_growth(gpu, True)

    c = context.context().config
    self.assertTrue(c.gpu_options.allow_growth)

    # Virtual devices require an explicit memory limit.
    with self.assertRaisesRegex(ValueError, 'memory limit'):
      config.set_logical_device_configuration(gpus[-1], [
          context.LogicalDeviceConfiguration(),
          context.LogicalDeviceConfiguration()
      ])

    self.assertIsNone(config.get_logical_device_configuration(gpus[-1]))
    config.set_logical_device_configuration(gpus[-1], [
        context.LogicalDeviceConfiguration(memory_limit=10),
        context.LogicalDeviceConfiguration(memory_limit=10)
    ])

    c = context.context().config
    self.assertFalse(c.gpu_options.allow_growth)

    # Growth and virtual devices are mutually exclusive on a device.
    with self.assertRaisesRegex(ValueError, 'virtual devices'):
      config.set_memory_growth(gpus[-1], False)

  @test_util.run_gpu_only
  @reset_eager
  def testRemote(self):
    gpus = config.list_logical_devices('GPU')
    self.assertNotEqual(len(gpus), 0)

    context.ensure_initialized()

    gpus = config.list_logical_devices('GPU')
    self.assertNotEqual(len(gpus), 0)
    for gpu in gpus:
      self.assertIsNotNone(gpu.name)

    context.ensure_initialized()

    job_name = 'test'
    cluster_def = cluster_pb2.ClusterDef()
    job_def = cluster_def.job.add()
    job_def.name = job_name
    job_def.tasks[0] = 'localhost:0'

    server_def = tensorflow_server_pb2.ServerDef(
        cluster=cluster_def, job_name=job_name, task_index=0, protocol='grpc')

    context.set_server_def(server_def)

    gpus = config.list_logical_devices('GPU')
    for gpu in gpus:
      self.assertIsNotNone(gpu.name)

  @reset_eager
  def testV1CompatibilityDummyInvisibleDeviceList(self):
    gpus = config.list_physical_devices('GPU')
    if gpus:
      self.skipTest('Test requires no GPUs')

    # Ensure GPU options left untouched on CPU only environments
    context.context()._physical_devices = None
    context.context()._config = config_pb2.ConfigProto(
        gpu_options=config_pb2.GPUOptions(visible_device_list='0'))
    new_config = context.context().config
    self.assertEqual(new_config.gpu_options.visible_device_list, '0')

  @test_util.run_gpu_only
  @reset_eager
  def testV1Compatibility(self):
    # Ensure we set 1 CPU by default
    context.context()._config = config_pb2.ConfigProto()
    new_config = context.context().config
    self.assertEqual(new_config.device_count['CPU'], 1)
    context.context()._physical_devices = None

    # Ensure CPU is split
    context.context()._config = config_pb2.ConfigProto(device_count={'CPU': 2})
    new_config = context.context().config
    self.assertEqual(new_config.device_count['CPU'], 2)
    context.context()._physical_devices = None

    # Handle empty visible device list
    context.context()._config = config_pb2.ConfigProto(
        gpu_options=config_pb2.GPUOptions(visible_device_list=''))
    gpus = config.list_physical_devices('GPU')
    gpu_count = len(gpus)
    new_config = context.context().config
    self.assertEqual(new_config.gpu_options.visible_device_list,
                     ','.join(str(i) for i in range(len(gpus))))
    context.context()._physical_devices = None

    # Handle invalid visible device list
    context.context()._config = config_pb2.ConfigProto(
        gpu_options=config_pb2.GPUOptions(visible_device_list=str(gpu_count)))
    with self.assertRaisesRegex(ValueError, 'Invalid visible device index'):
      gpus = config.list_physical_devices('GPU')
      new_config = context.context().config
    context.context()._physical_devices = None

    # Handle single visible device list
    context.context()._config = config_pb2.ConfigProto(
        gpu_options=config_pb2.GPUOptions(visible_device_list=str(gpu_count-1)))
    gpus = config.list_physical_devices('GPU')
    new_config = context.context().config
    self.assertEqual(new_config.gpu_options.visible_device_list,
                     str(gpu_count-1))
    context.context()._physical_devices = None

  def testConfigureCollectiveOps(self):
    context.context().configure_collective_ops(
        collective_leader='/job:worker/replica:0/task:0',
        scoped_allocator_enabled_ops=('CollectiveReduce',),
        use_nccl_communication=False,
        device_filters=['/job:worker/task:1'])
    new_config = context.context().config

    # Verify group leader
    self.assertEqual('/job:worker/replica:0/task:0',
                     new_config.experimental.collective_group_leader)

    # Verify device filters.
    self.assertEqual(['/job:worker/task:1'], new_config.device_filters)

    # Verify rewrite options.
    new_rewrite_options = new_config.graph_options.rewrite_options
    self.assertEqual(rewriter_config_pb2.RewriterConfig.ON,
                     new_rewrite_options.scoped_allocator_optimization)
    self.assertEqual(['CollectiveReduce'],
                     new_rewrite_options.scoped_allocator_opts.enable_op)
class TensorFloat32Test(test.TestCase):
  """Checks that toggling TensorFloat-32 changes matmul rounding behavior."""

  def setUp(self):
    super(TensorFloat32Test, self).setUp()
    if not test_util.is_gpu_available(
        cuda_only=True, min_cuda_compute_capability=(8, 0)):
      self.skipTest('TensorFloat-32 requires an NVIDIA GPU with compute '
                    'capability of at least 8.0')

  def tearDown(self):
    super(TensorFloat32Test, self).tearDown()
    # Restore the default so later tests see tf32 enabled again.
    config.enable_tensor_float_32_execution(True)

  def test_tf32_enabled(self):
    self.assertTrue(config.tensor_float_32_execution_enabled())

    one_plus_eps = 1 + 2**-20
    lhs = array_ops.fill((8, 8), one_plus_eps)
    rhs = array_ops.ones((8, 8))
    product = math_ops.matmul(lhs, rhs)
    # In tf32, each element of lhs is rounded to 1, so every entry is 8.
    self.assertAllEqual(product, array_ops.fill((8, 8), 8))

  def test_tf32_disabled(self):
    self.assertTrue(config.tensor_float_32_execution_enabled())
    config.enable_tensor_float_32_execution(False)
    self.assertFalse(config.tensor_float_32_execution_enabled())

    one_plus_eps = 1 + 2**-20
    lhs = array_ops.fill((8, 8), one_plus_eps)
    rhs = array_ops.ones((8, 8))
    product = math_ops.matmul(lhs, rhs)
    # Full fp32: the tiny epsilon survives into the row sums.
    self.assertAllEqual(product, array_ops.fill((8, 8), 8 * one_plus_eps))
if __name__ == '__main__':
  # These tests exercise eager-context configuration, so eager execution
  # must be enabled before the test runner starts.
  ops.enable_eager_execution()
  test.main()
| |
'''
This file contains classes and functions that implement the PyPXE TFTP service
'''
import socket
import struct
import os
import select
import time
import logging
import math
class ParentSocket(socket.socket):
    '''A socket.socket that carries a back-reference to its owning Client.

    select() hands back raw socket objects; the ``parent`` attribute lets
    the server map a ready socket back to the Client that owns it.
    '''
    # Assigned by Client.newRequest() right after the socket is created.
    parent = None
class Client:
    '''A single in-flight TFTP read transfer.

    One Client is created per request arriving on the main socket; it opens
    its own ephemeral UDP socket (a fresh transfer ID, per RFC 1350) and
    serves the file in ``blksize``-sized DATA blocks, retransmitting until
    each block is ACKed or retries run out.

    NOTE(review): payloads are split with chr(0) and compared to str
    literals, so this assumes Python 2 (recvfrom returns str) — confirm
    before running under Python 3.
    '''
    def __init__(self, mainsock, parent):
        # Inherit tunables from the owning TFTPD server instance.
        self.default_retries = parent.default_retries
        self.timeout = parent.timeout
        self.ip = parent.ip
        self.message, self.address = mainsock.recvfrom(1024)
        self.logger = parent.logger.getChild('Client.{0}'.format(self.address))
        self.logger.debug('Recieving request...')
        self.retries = self.default_retries
        # Next block number to send; TFTP blocks are 1-based.
        self.block = 1
        # RFC 1350 default; may be raised via the RFC 2348 blksize option.
        self.blksize = 512
        # Infinity means "nothing sent yet", so no_ack() stays False.
        self.sent_time = float('inf')
        # True once the transfer has finished or been aborted.
        self.dead = False
        self.fh = None
        self.filename = ''
        # Number of times the 16-bit block counter has wrapped past 65535.
        self.wrap = -1
        # message from the main socket
        self.handle()

    def ready(self):
        '''Called when there is something to be read on our socket.'''
        self.message = self.sock.recv(1024)
        self.handle()

    def send_block(self):
        '''
        Sends the next block of data, setting the timeout and retry
        variables accordingly.
        '''
        data = self.fh.read(self.blksize)
        # opcode 3 == DATA, wraparound block number
        response = struct.pack('!HH', 3, self.block % 65536)
        response += data
        self.sock.sendto(response, self.address)
        self.logger.debug('Sending block {0}'.format(self.block))
        # Each (re)send consumes a retry; ACK receipt resets the budget.
        self.retries -= 1
        self.sent_time = time.time()

    def no_ack(self):
        '''Determines if we timed out waiting for an ACK from the client.'''
        if self.sent_time + self.timeout < time.time():
            return True
        return False

    def no_retries(self):
        '''Determines if the client ran out of retry attempts.'''
        if not self.retries:
            return True
        return False

    def valid_mode(self):
        '''Returns True if the transfer mode is 'octet'; else sends error 5.'''
        mode = self.message.split(chr(0))[1]
        if mode == 'octet': return True
        self.sendError(5, 'Mode {0} not supported'.format(mode))
        return False

    def check_file(self):
        '''
        Determines if the file exists and is a regular file; if not,
        send an error.
        '''
        filename = self.message.split(chr(0))[0]
        if os.path.lexists(filename) and os.path.isfile(filename):
            self.filename = filename
            return True
        self.sendError(1, 'File Not Found', filename = filename)
        return False

    def parse_options(self):
        '''
        Extracts the options sent from a client; if any, calculates the last
        block based on the filesize and blocksize.

        Returns True if the request carried any options (so an OACK must be
        sent before data), False for a plain RFC 1350 request.
        '''
        # Options are NUL-separated name/value pairs after filename and mode.
        options = self.message.split(chr(0))[2: -1]
        options = dict(zip(options[0::2], map(int, options[1::2])))
        self.blksize = options.get('blksize', self.blksize)
        self.lastblock = math.ceil(self.filesize / float(self.blksize))
        self.tsize = True if 'tsize' in options else False
        if self.filesize > (2 ** 16) * self.blksize:
            # More than 65536 blocks; relies on block-number wraparound.
            self.logger.warning('Request too big, attempting transfer anyway.')
            self.logger.debug('Details: Filesize {0} is too big for blksize {1}.'.format(self.filesize, self.blksize))
        if len(options):
            # we need to know later if we actually had any options
            self.block = 0
            return True
        else:
            return False

    def reply_options(self):
        '''Acknowledges any options received (RFC 2347 OACK).'''
        # only called if options, so send them all
        # opcode 6 == OACK
        response = struct.pack("!H", 6)
        response += 'blksize' + chr(0)
        response += str(self.blksize) + chr(0)
        response += 'tsize' + chr(0)
        response += str(self.filesize) + chr(0)
        self.sock.sendto(response, self.address)

    def newRequest(self):
        '''
        When receiving a read request from the parent socket, open our
        own socket and check the read request; if we don't have any options,
        send the first block.
        '''
        # Bind to an ephemeral port: this becomes our transfer ID (TID).
        self.sock = ParentSocket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((self.ip, 0))
        # used by select() to find ready clients
        self.sock.parent = self
        if not self.valid_mode() or not self.check_file():
            # some clients just ACK the error (wrong code?)
            # so forcefully shutdown
            self.complete()
            return
        self.fh = open(self.filename, 'rb')
        self.filesize = os.path.getsize(self.filename)
        if not self.parse_options():
            # no options received so start transfer
            if self.block == 1:
                self.send_block()
            return
        # we got some options so ACK those first
        self.reply_options()

    def sendError(self, code = 1, message = 'File Not Found', filename = ''):
        '''
        Sends an error code and string to a client. See RFC1350, page 10 for
        details.

        Value   Meaning
        =====   =======
        0       Not defined, see error message (if any).
        1       File not found.
        2       Access violation.
        3       Disk full or allocation exceeded.
        4       Illegal TFTP operation.
        5       Unknown transfer ID.
        6       File already exists.
        7       No such user.
        '''
        response = struct.pack('!H', 5) # error opcode
        response += struct.pack('!H', code) # error code
        response += message
        response += chr(0)
        self.sock.sendto(response, self.address)
        self.logger.debug('Sending {0}: {1} {2}'.format(code, message, filename))

    def complete(self):
        '''
        Closes a file and socket after sending it
        and marks ourselves as dead to be cleaned up.
        '''
        try:
            self.fh.close()
        except AttributeError:
            # we have not opened yet or file-not-found
            pass
        self.sock.close()
        self.dead = True

    def handle(self):
        '''Takes the message from the parent socket and acts accordingly.'''
        # if addr not in ongoing, call this, else ready()
        [opcode] = struct.unpack('!H', self.message[:2])
        if opcode == 1:
            # RRQ: strip the opcode; the rest is filename/mode/options.
            self.message = self.message[2:]
            self.newRequest()
        elif opcode == 4:
            # ACK for a data block.
            [block] = struct.unpack('!H', self.message[2:4])
            if block == 0:
                # Either the OACK response (first ACK) or a 16-bit wraparound.
                self.wrap += 1
            if block < self.block % 65536:
                self.logger.warning('Ignoring duplicated ACK received for block {0}'.format(self.block))
            elif block > self.block % 65536:
                self.logger.warning('Ignoring out of sequence ACK received for block {0}'.format(self.block))
            elif block + self.wrap * 65536 == self.lastblock:
                if self.filesize % self.blksize == 0:
                    # File is a multiple of blksize: a final empty DATA
                    # packet signals end-of-transfer.
                    self.block = block + 1
                    self.send_block()
                self.logger.debug('Completed sending {0}'.format(self.filename))
                self.complete()
            else:
                # Expected ACK: advance, refresh the retry budget, continue.
                self.block = block + 1
                self.retries = self.default_retries
                self.send_block()
class TFTPD:
    '''
    This class implements a read-only TFTP server
    implemented from RFC1350 and RFC2348.

    Fixes over the previous revision:
    - select() was called with a timeout of 0, so listen() busy-spun a CPU
      core whenever no traffic was pending; a short positive timeout keeps
      retransmit checks running without spinning.
    - dead clients were purged with map(self.ongoing.remove, ...), a
      side-effect map that silently does nothing on Python 3 (map is lazy
      there); replaced with an explicit rebuild of the list.
    - retry/kill passes now skip clients already completed earlier in the
      same iteration, avoiding sends on a closed socket.
    '''
    def __init__(self, **server_settings):
        '''Create the server socket and logger from **server_settings.

        Recognized keys (all optional): ip, port, netbook_directory,
        mode_debug, logger, default_retries, timeout.
        '''
        self.ip = server_settings.get('ip', '0.0.0.0')
        self.port = server_settings.get('port', 69)
        self.netbook_directory = server_settings.get('netbook_directory', '.')
        self.mode_debug = server_settings.get('mode_debug', False) # debug mode
        self.logger = server_settings.get('logger', None)
        self.default_retries = server_settings.get('default_retries', 3)
        self.timeout = server_settings.get('timeout', 5)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((self.ip, self.port))

        # setup logger
        if self.logger is None:
            self.logger = logging.getLogger('TFTP')
            handler = logging.StreamHandler()
            formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(name)s %(message)s')
            handler.setFormatter(formatter)
            self.logger.addHandler(handler)

        if self.mode_debug:
            self.logger.setLevel(logging.DEBUG)

        self.logger.debug('NOTICE: TFTP server started in debug mode. TFTP server is using the following:')
        self.logger.debug('Server IP: {0}'.format(self.ip))
        self.logger.debug('Server Port: {0}'.format(self.port))
        self.logger.debug('Network Boot Directory: {0}'.format(self.netbook_directory))

        # Active Client transfers, keyed implicitly by their sockets.
        self.ongoing = []

        # start in network boot file directory and then chroot,
        # this simplifies target later as well as offers a slight security increase
        os.chdir (self.netbook_directory)
        os.chroot ('.')

    def listen(self):
        '''This method listens for incoming requests.'''
        while True:
            # Drop completed clients so select() is never handed a closed
            # socket. (Explicit rebuild: works on both Python 2 and 3.)
            self.ongoing = [client for client in self.ongoing if not client.dead]

            # A short positive timeout (instead of 0) lets the loop sleep
            # when idle while still waking often enough to retransmit.
            rlist, _, _ = select.select(
                [self.sock] + [client.sock for client in self.ongoing], [], [], 1)
            for sock in rlist:
                if sock == self.sock:
                    # main socket, so new client
                    self.ongoing.append(Client(sock, self))
                else:
                    # client socket, so tell the client object it's ready
                    sock.parent.ready()

            # if we haven't received an ACK in timeout time, retry
            for client in self.ongoing:
                if not client.dead and client.no_ack():
                    client.send_block()

            # if we have run out of retries, kill the client
            for client in self.ongoing:
                if not client.dead and client.no_retries():
                    client.complete()
| |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles database requests from other nova services."""
from nova.api.ec2 import ec2utils
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute_api
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova.db import base
from nova import exception
from nova.image import glance
from nova import manager
from nova import network
from nova.network.security_group import openstack_driver
from nova import notifications
from nova.objects import base as nova_object
from nova.objects import instance as instance_obj
from nova.objects import migration as migration_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova.scheduler import utils as scheduler_utils
LOG = logging.getLogger(__name__)

# Instead of having a huge list of arguments to instance_update(), we just
# accept a dict of fields to update and use this whitelist to validate it.
# (Fix: 'root_device_name' was listed twice; the duplicate is removed.)
allowed_updates = ['task_state', 'vm_state', 'expected_task_state',
                   'power_state', 'access_ip_v4', 'access_ip_v6',
                   'launched_at', 'terminated_at', 'host', 'node',
                   'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb',
                   'instance_type_id', 'root_device_name', 'launched_on',
                   'progress', 'vm_mode', 'default_ephemeral_device',
                   'default_swap_device',
                   'system_metadata', 'updated_at'
                   ]

# Fields that we want to convert back into a datetime object.
datetime_fields = ['launched_at', 'terminated_at', 'updated_at']
class ConductorManager(manager.Manager):
    """Mission: Conduct things.
    The methods in the base API for nova-conductor are various proxy operations
    performed on behalf of the nova-compute service running on compute nodes.
    Compute nodes are not allowed to directly access the database, so this set
    of methods allows them to get specific work done without locally accessing
    the database.
    The nova-conductor service also exposes an API in the 'compute_task'
    namespace. See the ComputeTaskManager class for details.
    NOTE: most methods below are thin proxies that perform a DB API call on
    the caller's behalf and serialize the result with
    jsonutils.to_primitive() so it can be returned over RPC.
    """
    # Version of the RPC interface this manager implements.
    RPC_API_VERSION = '1.58'
    def __init__(self, *args, **kwargs):
        """Set up the conductor service and its helper API handles."""
        super(ConductorManager, self).__init__(service_name='conductor',
                                               *args, **kwargs)
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())
        # Lazily created on first use; see the network_api/compute_api
        # properties below for why.
        self._network_api = None
        self._compute_api = None
        self.compute_task_mgr = ComputeTaskManager()
        self.quotas = quota.QUOTAS
        self.cells_rpcapi = cells_rpcapi.CellsAPI()
    def create_rpc_dispatcher(self, *args, **kwargs):
        """Expose the 'compute_task' namespace alongside the base API."""
        kwargs['additional_apis'] = [self.compute_task_mgr]
        return super(ConductorManager, self).create_rpc_dispatcher(*args,
                                                                   **kwargs)
    @property
    def network_api(self):
        """Lazily instantiated network API handle."""
        # NOTE(danms): We need to instantiate our network_api on first use
        # to avoid the circular dependency that exists between our init
        # and network_api's
        if self._network_api is None:
            self._network_api = network.API()
        return self._network_api
    @property
    def compute_api(self):
        """Lazily instantiated compute API handle."""
        if self._compute_api is None:
            self._compute_api = compute_api.API()
        return self._compute_api
    def ping(self, context, arg):
        """Echo ``arg`` back to the caller to prove liveness."""
        # NOTE(russellb) This method can be removed in 2.0 of this API. It is
        # now a part of the base rpc API.
        return jsonutils.to_primitive({'service': 'conductor', 'arg': arg})
    @rpc_common.client_exceptions(KeyError, ValueError,
                                  exception.InvalidUUID,
                                  exception.InstanceNotFound,
                                  exception.UnexpectedTaskStateError)
    def instance_update(self, context, instance_uuid,
                        updates, service=None):
        """Apply a whitelisted dict of updates to an instance.

        Raises KeyError for any field not in ``allowed_updates``; converts
        string timestamps in ``datetime_fields`` back into datetimes before
        hitting the DB, then emits an update notification.
        """
        # NOTE(review): iteritems/basestring mark this as Python 2 code.
        for key, value in updates.iteritems():
            if key not in allowed_updates:
                LOG.error(_("Instance update attempted for "
                            "'%(key)s' on %(instance_uuid)s"),
                          {'key': key, 'instance_uuid': instance_uuid})
                raise KeyError("unexpected update keyword '%s'" % key)
            if key in datetime_fields and isinstance(value, basestring):
                updates[key] = timeutils.parse_strtime(value)
        old_ref, instance_ref = self.db.instance_update_and_get_original(
            context, instance_uuid, updates)
        notifications.send_update(context, old_ref, instance_ref, service)
        return jsonutils.to_primitive(instance_ref)
    @rpc_common.client_exceptions(exception.InstanceNotFound)
    def instance_get(self, context, instance_id):
        """Proxy: fetch a single instance by id."""
        return jsonutils.to_primitive(
            self.db.instance_get(context, instance_id))
    @rpc_common.client_exceptions(exception.InstanceNotFound)
    def instance_get_by_uuid(self, context, instance_uuid,
                             columns_to_join=None):
        """Proxy: fetch a single instance by UUID."""
        return jsonutils.to_primitive(
            self.db.instance_get_by_uuid(context, instance_uuid,
                                         columns_to_join))
    # NOTE(hanlind): This method can be removed in v2.0 of the RPC API.
    def instance_get_all(self, context):
        """Proxy: fetch all instances (deprecated)."""
        return jsonutils.to_primitive(self.db.instance_get_all(context))
    def instance_get_all_by_host(self, context, host, node=None,
                                 columns_to_join=None):
        """Proxy: fetch instances on a host, optionally narrowed to a node.

        Note: columns_to_join is ignored on the host-and-node path.
        """
        if node is not None:
            result = self.db.instance_get_all_by_host_and_node(
                context.elevated(), host, node)
        else:
            result = self.db.instance_get_all_by_host(context.elevated(), host,
                                                      columns_to_join)
        return jsonutils.to_primitive(result)
    # NOTE(comstud): This method is now deprecated and can be removed in
    # version v2.0 of the RPC API
    @rpc_common.client_exceptions(exception.MigrationNotFound)
    def migration_get(self, context, migration_id):
        """Proxy: fetch a migration record by id (deprecated)."""
        migration_ref = self.db.migration_get(context.elevated(),
                                              migration_id)
        return jsonutils.to_primitive(migration_ref)
    # NOTE(comstud): This method is now deprecated and can be removed in
    # version v2.0 of the RPC API
    def migration_get_unconfirmed_by_dest_compute(self, context,
                                                  confirm_window,
                                                  dest_compute):
        """Proxy: list unconfirmed migrations for a destination host."""
        migrations = self.db.migration_get_unconfirmed_by_dest_compute(
            context, confirm_window, dest_compute)
        return jsonutils.to_primitive(migrations)
    def migration_get_in_progress_by_host_and_node(self, context,
                                                   host, node):
        """Proxy: list in-progress migrations for a host/node pair."""
        migrations = self.db.migration_get_in_progress_by_host_and_node(
            context, host, node)
        return jsonutils.to_primitive(migrations)
    # NOTE(comstud): This method can be removed in v2.0 of the RPC API.
    def migration_create(self, context, instance, values):
        """Create a migration record seeded from the instance's location."""
        values.update({'instance_uuid': instance['uuid'],
                       'source_compute': instance['host'],
                       'source_node': instance['node']})
        migration_ref = self.db.migration_create(context.elevated(), values)
        return jsonutils.to_primitive(migration_ref)
    @rpc_common.client_exceptions(exception.MigrationNotFound)
    def migration_update(self, context, migration, status):
        """Proxy: update a migration's status field."""
        migration_ref = self.db.migration_update(context.elevated(),
                                                 migration['id'],
                                                 {'status': status})
        return jsonutils.to_primitive(migration_ref)
    @rpc_common.client_exceptions(exception.AggregateHostExists)
    def aggregate_host_add(self, context, aggregate, host):
        """Proxy: add a host to an aggregate."""
        host_ref = self.db.aggregate_host_add(context.elevated(),
                                              aggregate['id'], host)
        return jsonutils.to_primitive(host_ref)
    @rpc_common.client_exceptions(exception.AggregateHostNotFound)
    def aggregate_host_delete(self, context, aggregate, host):
        """Proxy: remove a host from an aggregate."""
        self.db.aggregate_host_delete(context.elevated(),
                                      aggregate['id'], host)
    @rpc_common.client_exceptions(exception.AggregateNotFound)
    def aggregate_get(self, context, aggregate_id):
        """Proxy: fetch an aggregate by id."""
        aggregate = self.db.aggregate_get(context.elevated(), aggregate_id)
        return jsonutils.to_primitive(aggregate)
    def aggregate_get_by_host(self, context, host, key=None):
        """Proxy: list aggregates containing a host."""
        aggregates = self.db.aggregate_get_by_host(context.elevated(),
                                                   host, key)
        return jsonutils.to_primitive(aggregates)
    def aggregate_metadata_add(self, context, aggregate, metadata,
                               set_delete=False):
        """Proxy: add (or replace, if set_delete) aggregate metadata."""
        new_metadata = self.db.aggregate_metadata_add(context.elevated(),
                                                      aggregate['id'],
                                                      metadata, set_delete)
        return jsonutils.to_primitive(new_metadata)
    @rpc_common.client_exceptions(exception.AggregateMetadataNotFound)
    def aggregate_metadata_delete(self, context, aggregate, key):
        """Proxy: delete one aggregate metadata key."""
        self.db.aggregate_metadata_delete(context.elevated(),
                                          aggregate['id'], key)
    def aggregate_metadata_get_by_host(self, context, host,
                                       key='availability_zone'):
        """Proxy: fetch aggregate metadata values for a host/key."""
        result = self.db.aggregate_metadata_get_by_host(context, host, key)
        return jsonutils.to_primitive(result)
    def bw_usage_update(self, context, uuid, mac, start_period,
                        bw_in=None, bw_out=None,
                        last_ctr_in=None, last_ctr_out=None,
                        last_refreshed=None,
                        update_cells=True):
        """Update bandwidth usage counters and return the current record.

        The write is skipped entirely when all four counter arguments are
        None, so this doubles as a read-only fetch.
        """
        if [bw_in, bw_out, last_ctr_in, last_ctr_out].count(None) != 4:
            self.db.bw_usage_update(context, uuid, mac, start_period,
                                    bw_in, bw_out, last_ctr_in, last_ctr_out,
                                    last_refreshed,
                                    update_cells=update_cells)
        usage = self.db.bw_usage_get(context, uuid, start_period, mac)
        return jsonutils.to_primitive(usage)
    # NOTE(russellb) This method can be removed in 2.0 of this API. It is
    # deprecated in favor of the method in the base API.
    def get_backdoor_port(self, context):
        """Return the eventlet backdoor port (deprecated)."""
        return self.backdoor_port
    def security_group_get_by_instance(self, context, instance):
        """Proxy: fetch the security groups of an instance."""
        group = self.db.security_group_get_by_instance(context,
                                                       instance['uuid'])
        return jsonutils.to_primitive(group)
    def security_group_rule_get_by_security_group(self, context, secgroup):
        """Proxy: fetch a security group's rules.

        max_depth=4 bounds serialization of the nested rule/group graph.
        """
        rules = self.db.security_group_rule_get_by_security_group(
            context, secgroup['id'])
        return jsonutils.to_primitive(rules, max_depth=4)
    def provider_fw_rule_get_all(self, context):
        """Proxy: fetch all provider-level firewall rules."""
        rules = self.db.provider_fw_rule_get_all(context)
        return jsonutils.to_primitive(rules)
    def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
        """Proxy: look up a guest agent build by (hypervisor, os, arch)."""
        info = self.db.agent_build_get_by_triple(context, hypervisor, os,
                                                 architecture)
        return jsonutils.to_primitive(info)
    def block_device_mapping_update_or_create(self, context, values,
                                              create=None):
        """Create and/or update a BDM row, then sync it up to cells.

        create=None means "update or create"; True forces create; False
        forces an update of values['id'].
        """
        if create is None:
            bdm = self.db.block_device_mapping_update_or_create(context,
                                                                values)
        elif create is True:
            bdm = self.db.block_device_mapping_create(context, values)
        else:
            bdm = self.db.block_device_mapping_update(context,
                                                      values['id'],
                                                      values)
        # NOTE(comstud): 'bdm' is always in the new format, so we
        # account for this in cells/messaging.py
        self.cells_rpcapi.bdm_update_or_create_at_top(context, bdm,
                                                      create=create)
    def block_device_mapping_get_all_by_instance(self, context, instance,
                                                 legacy=True):
        """Proxy: fetch an instance's BDMs, optionally in legacy format."""
        bdms = self.db.block_device_mapping_get_all_by_instance(
            context, instance['uuid'])
        if legacy:
            bdms = block_device.legacy_mapping(bdms)
        return jsonutils.to_primitive(bdms)
    def block_device_mapping_destroy(self, context, bdms=None,
                                     instance=None, volume_id=None,
                                     device_name=None):
        """Destroy BDMs either by explicit list or by instance+volume/device.

        Exactly one calling convention must be used; anything else raises
        exception.Invalid. Each destroy is mirrored to the API cell.
        """
        if bdms is not None:
            for bdm in bdms:
                self.db.block_device_mapping_destroy(context, bdm['id'])
                # NOTE(comstud): bdm['id'] will be different in API cell,
                # so we must try to destroy by device_name or volume_id.
                # We need an instance_uuid in order to do this properly,
                # too.
                # I hope to clean a lot of this up in the object
                # implementation.
                instance_uuid = (bdm['instance_uuid'] or
                                 (instance and instance['uuid']))
                if not instance_uuid:
                    continue
                # Better to be safe than sorry. device_name is not
                # NULLable, however it could be an empty string.
                if bdm['device_name']:
                    self.cells_rpcapi.bdm_destroy_at_top(
                        context, instance_uuid,
                        device_name=bdm['device_name'])
                elif bdm['volume_id']:
                    self.cells_rpcapi.bdm_destroy_at_top(
                        context, instance_uuid,
                        volume_id=bdm['volume_id'])
        elif instance is not None and volume_id is not None:
            self.db.block_device_mapping_destroy_by_instance_and_volume(
                context, instance['uuid'], volume_id)
            self.cells_rpcapi.bdm_destroy_at_top(
                context, instance['uuid'], volume_id=volume_id)
        elif instance is not None and device_name is not None:
            self.db.block_device_mapping_destroy_by_instance_and_device(
                context, instance['uuid'], device_name)
            self.cells_rpcapi.bdm_destroy_at_top(
                context, instance['uuid'], device_name=device_name)
        else:
            # NOTE(danms): This shouldn't happen
            raise exception.Invalid(_("Invalid block_device_mapping_destroy"
                                      " invocation"))
    def instance_get_all_by_filters(self, context, filters, sort_key,
                                    sort_dir, columns_to_join=None):
        """Proxy: filtered, sorted instance listing."""
        result = self.db.instance_get_all_by_filters(
            context, filters, sort_key, sort_dir,
            columns_to_join=columns_to_join)
        return jsonutils.to_primitive(result)
    # NOTE(hanlind): This method can be removed in v2.0 of the RPC API.
    def instance_get_all_hung_in_rebooting(self, context, timeout):
        """Proxy: list instances stuck in rebooting (deprecated)."""
        result = self.db.instance_get_all_hung_in_rebooting(context, timeout)
        return jsonutils.to_primitive(result)
    def instance_get_active_by_window(self, context, begin, end=None,
                                      project_id=None, host=None):
        """Proxy: instances active within a time window (unused)."""
        # Unused, but cannot remove until major RPC version bump
        result = self.db.instance_get_active_by_window(context, begin, end,
                                                       project_id, host)
        return jsonutils.to_primitive(result)
    def instance_get_active_by_window_joined(self, context, begin, end=None,
                                             project_id=None, host=None):
        """Proxy: active instances within a window, with joined columns."""
        result = self.db.instance_get_active_by_window_joined(
            context, begin, end, project_id, host)
        return jsonutils.to_primitive(result)
    def instance_destroy(self, context, instance):
        """Proxy: delete an instance's DB record."""
        self.db.instance_destroy(context, instance['uuid'])
    def instance_info_cache_delete(self, context, instance):
        """Proxy: delete an instance's network info cache."""
        self.db.instance_info_cache_delete(context, instance['uuid'])
    def instance_info_cache_update(self, context, instance, values):
        """Proxy: update an instance's network info cache."""
        self.db.instance_info_cache_update(context, instance['uuid'],
                                           values)
    def instance_type_get(self, context, instance_type_id):
        """Proxy: fetch a flavor (historically 'instance type') by id."""
        result = self.db.flavor_get(context, instance_type_id)
        return jsonutils.to_primitive(result)
    def instance_fault_create(self, context, values):
        """Proxy: record an instance fault."""
        result = self.db.instance_fault_create(context, values)
        return jsonutils.to_primitive(result)
    # NOTE(kerrin): This method can be removed in v2.0 of the RPC API.
    def vol_get_usage_by_time(self, context, start_time):
        """Proxy: volume usage records since start_time (deprecated)."""
        result = self.db.vol_get_usage_by_time(context, start_time)
        return jsonutils.to_primitive(result)
    # NOTE(kerrin): The last_refreshed argument is unused by this method
    # and can be removed in v2.0 of the RPC API.
    def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
                         wr_bytes, instance, last_refreshed=None,
                         update_totals=False):
        """Update volume usage counters and emit a volume.usage notification."""
        vol_usage = self.db.vol_usage_update(context, vol_id,
                                             rd_req, rd_bytes,
                                             wr_req, wr_bytes,
                                             instance['uuid'],
                                             instance['project_id'],
                                             instance['user_id'],
                                             instance['availability_zone'],
                                             update_totals)
        # We have just updated the database, so send the notification now
        self.notifier.info(context, 'volume.usage',
                           compute_utils.usage_volume_info(vol_usage))
    @rpc_common.client_exceptions(exception.ComputeHostNotFound,
                                  exception.HostBinaryNotFound)
    def service_get_all_by(self, context, topic=None, host=None, binary=None):
        """Look up services by various combinations of topic/host/binary."""
        # NOTE(review): if only ``binary`` is supplied (no host), no branch
        # assigns ``result`` and the return below raises NameError — looks
        # like callers never do that; confirm before relying on it.
        if not any((topic, host, binary)):
            result = self.db.service_get_all(context)
        elif all((topic, host)):
            if topic == 'compute':
                result = self.db.service_get_by_compute_host(context, host)
                # FIXME(comstud) Potentially remove this on bump to v2.0
                result = [result]
            else:
                result = self.db.service_get_by_host_and_topic(context,
                                                               host, topic)
        elif all((host, binary)):
            result = self.db.service_get_by_args(context, host, binary)
        elif topic:
            result = self.db.service_get_all_by_topic(context, topic)
        elif host:
            result = self.db.service_get_all_by_host(context, host)
        return jsonutils.to_primitive(result)
    def action_event_start(self, context, values):
        """Proxy: record the start of an instance action event."""
        evt = self.db.action_event_start(context, values)
        return jsonutils.to_primitive(evt)
    def action_event_finish(self, context, values):
        """Proxy: record the completion of an instance action event."""
        evt = self.db.action_event_finish(context, values)
        return jsonutils.to_primitive(evt)
    def service_create(self, context, values):
        """Proxy: create a service record."""
        svc = self.db.service_create(context, values)
        return jsonutils.to_primitive(svc)
    @rpc_common.client_exceptions(exception.ServiceNotFound)
    def service_destroy(self, context, service_id):
        """Proxy: delete a service record."""
        self.db.service_destroy(context, service_id)
    def compute_node_create(self, context, values):
        """Proxy: create a compute node record."""
        result = self.db.compute_node_create(context, values)
        return jsonutils.to_primitive(result)
    def compute_node_update(self, context, node, values, prune_stats=False):
        """Proxy: update a compute node record."""
        result = self.db.compute_node_update(context, node['id'], values,
                                             prune_stats)
        return jsonutils.to_primitive(result)
    def compute_node_delete(self, context, node):
        """Proxy: delete a compute node record."""
        result = self.db.compute_node_delete(context, node['id'])
        return jsonutils.to_primitive(result)
    @rpc_common.client_exceptions(exception.ServiceNotFound)
    def service_update(self, context, service, values):
        """Proxy: update a service record."""
        svc = self.db.service_update(context, service['id'], values)
        return jsonutils.to_primitive(svc)
    def task_log_get(self, context, task_name, begin, end, host, state=None):
        """Proxy: fetch a periodic-task log entry."""
        result = self.db.task_log_get(context, task_name, begin, end, host,
                                      state)
        return jsonutils.to_primitive(result)
    def task_log_begin_task(self, context, task_name, begin, end, host,
                            task_items=None, message=None):
        """Proxy: mark a periodic task as started."""
        result = self.db.task_log_begin_task(context.elevated(), task_name,
                                             begin, end, host, task_items,
                                             message)
        return jsonutils.to_primitive(result)
    def task_log_end_task(self, context, task_name, begin, end, host,
                          errors, message=None):
        """Proxy: mark a periodic task as finished."""
        result = self.db.task_log_end_task(context.elevated(), task_name,
                                           begin, end, host, errors, message)
        return jsonutils.to_primitive(result)
    def notify_usage_exists(self, context, instance, current_period=False,
                            ignore_missing_network_data=True,
                            system_metadata=None, extra_usage_info=None):
        """Emit an instance usage-exists notification."""
        compute_utils.notify_usage_exists(self.notifier, context, instance,
                                          current_period,
                                          ignore_missing_network_data,
                                          system_metadata, extra_usage_info)
    def security_groups_trigger_handler(self, context, event, args):
        """Dispatch a security-group event to the configured driver."""
        self.security_group_api.trigger_handler(event, context, *args)
    def security_groups_trigger_members_refresh(self, context, group_ids):
        """Ask the security-group driver to refresh group memberships."""
        self.security_group_api.trigger_members_refresh(context, group_ids)
    def network_migrate_instance_start(self, context, instance, migration):
        """Notify the network API that an instance migration is starting."""
        self.network_api.migrate_instance_start(context, instance, migration)
    def network_migrate_instance_finish(self, context, instance, migration):
        """Notify the network API that an instance migration finished."""
        self.network_api.migrate_instance_finish(context, instance, migration)
    def quota_commit(self, context, reservations, project_id=None,
                     user_id=None):
        """Commit quota reservations on behalf of a compute node."""
        quota.QUOTAS.commit(context, reservations, project_id=project_id,
                            user_id=user_id)
    def quota_rollback(self, context, reservations, project_id=None,
                       user_id=None):
        """Roll back quota reservations on behalf of a compute node."""
        quota.QUOTAS.rollback(context, reservations, project_id=project_id,
                              user_id=user_id)
    def get_ec2_ids(self, context, instance):
        """Build the dict of EC2-style ids for an instance.

        Always includes 'instance-id' and 'ami-id'; adds 'kernel-id' and
        'ramdisk-id' only when the instance record carries those images.
        """
        ec2_ids = {}
        ec2_ids['instance-id'] = ec2utils.id_to_ec2_inst_id(instance['uuid'])
        ec2_ids['ami-id'] = ec2utils.glance_id_to_ec2_id(context,
                                                         instance['image_ref'])
        for image_type in ['kernel', 'ramdisk']:
            if '%s_id' % image_type in instance:
                image_id = instance['%s_id' % image_type]
                ec2_image_type = ec2utils.image_type(image_type)
                ec2_id = ec2utils.glance_id_to_ec2_id(context, image_id,
                                                      ec2_image_type)
                ec2_ids['%s-id' % image_type] = ec2_id
        return ec2_ids
    # NOTE(danms): This method is now deprecated and can be removed in
    # version v2.0 of the RPC API
    def compute_stop(self, context, instance, do_cast=True):
        """Stop an instance via the compute API (deprecated)."""
        # NOTE(mriedem): Clients using an interface before 1.43 will be sending
        # dicts so we need to handle that here since compute/api::stop()
        # requires an object.
        if isinstance(instance, dict):
            instance = instance_obj.Instance._from_db_object(
                context, instance_obj.Instance(), instance)
        self.compute_api.stop(context, instance, do_cast)
    # NOTE(comstud): This method is now deprecated and can be removed in
    # version v2.0 of the RPC API
    def compute_confirm_resize(self, context, instance, migration_ref):
        """Confirm a resize via the compute API (deprecated).

        Tolerates dict-form instance/migration from old clients by
        converting them into objects first.
        """
        if isinstance(instance, dict):
            attrs = ['metadata', 'system_metadata', 'info_cache',
                     'security_groups']
            instance = instance_obj.Instance._from_db_object(
                context, instance_obj.Instance(), instance,
                expected_attrs=attrs)
        if isinstance(migration_ref, dict):
            migration_ref = migration_obj.Migration._from_db_object(
                context.elevated(), migration_ref)
        self.compute_api.confirm_resize(context, instance,
                                        migration=migration_ref)
    def compute_unrescue(self, context, instance):
        """Unrescue an instance via the compute API."""
        self.compute_api.unrescue(context, instance)
    def _object_dispatch(self, target, method, context, args, kwargs):
        """Dispatch a call to an object method.
        This ensures that object methods get called and any exception
        that is raised gets wrapped in a ClientException for forwarding
        back to the caller (without spamming the conductor logs).
        """
        try:
            # NOTE(danms): Keep the getattr inside the try block since
            # a missing method is really a client problem
            return getattr(target, method)(context, *args, **kwargs)
        except Exception:
            raise rpc_common.ClientException()
    def object_class_action(self, context, objname, objmethod,
                            objver, args, kwargs):
        """Perform a classmethod action on an object."""
        objclass = nova_object.NovaObject.obj_class_from_name(objname,
                                                              objver)
        return self._object_dispatch(objclass, objmethod, context,
                                     args, kwargs)
    def object_action(self, context, objinst, objmethod, args, kwargs):
        """Perform an action on an object.

        Returns (updates, result) where ``updates`` is the set of fields
        the method changed so the remote copy can be synchronized.
        """
        oldobj = objinst.obj_clone()
        result = self._object_dispatch(objinst, objmethod, context,
                                       args, kwargs)
        updates = dict()
        # NOTE(danms): Diff the object with the one passed to us and
        # generate a list of changes to forward back
        for field in objinst.fields:
            if not objinst.obj_attr_is_set(field):
                # Avoid demand-loading anything
                continue
            if (not oldobj.obj_attr_is_set(field) or
                    oldobj[field] != objinst[field]):
                updates[field] = objinst._attr_to_primitive(field)
        # This is safe since a field named this would conflict with the
        # method anyway
        updates['obj_what_changed'] = objinst.obj_what_changed()
        return updates, result
    # NOTE(danms): This method is now deprecated and can be removed in
    # v2.0 of the RPC API
    def compute_reboot(self, context, instance, reboot_type):
        """Reboot an instance via the compute API (deprecated)."""
        self.compute_api.reboot(context, instance, reboot_type)
    def object_backport(self, context, objinst, target_version):
        """Downgrade an object to a primitive of an older version."""
        return objinst.obj_to_primitive(target_version=target_version)
    def get_tenant_qos_settings_by_tenant_id(self, context, project_id):
        """Proxy: fetch a tenant's QoS settings blob."""
        qos_group_from_db = self.db.tenant_qos_get_by_tenant(context,
                                                             project_id)
        return qos_group_from_db['settings']
class ComputeTaskManager(base.Base):
    """Namespace for compute methods.
    This class presents an rpc API for nova-conductor under the 'compute_task'
    namespace. The methods here are compute operations that are invoked
    by the API service. These methods see the operation to completion, which
    may involve coordinating activities on multiple compute nodes.
    """
    RPC_API_NAMESPACE = 'compute_task'
    RPC_API_VERSION = '1.6'
    def __init__(self):
        """Wire up the RPC/image/quota helpers used by the tasks below."""
        super(ComputeTaskManager, self).__init__()
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.image_service = glance.get_default_image_service()
        self.quotas = quota.QUOTAS
    @rpc_common.client_exceptions(exception.NoValidHost,
                                  exception.ComputeServiceUnavailable,
                                  exception.InvalidHypervisorType,
                                  exception.UnableToMigrateToSelf,
                                  exception.DestinationHypervisorTooOld,
                                  exception.InvalidLocalStorage,
                                  exception.InvalidSharedStorage,
                                  exception.MigrationPreCheckError)
    def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
            flavor, block_migration, disk_over_commit, reservations=None):
        """Entry point for migrations.

        Dispatches to live migration (live and no flavor/rebuild) or cold
        migration (flavor given, not live); any other combination raises
        NotImplementedError.
        """
        if instance and not isinstance(instance, instance_obj.Instance):
            # NOTE(danms): Until v2 of the RPC API, we need to tolerate
            # old-world instance objects here
            attrs = ['metadata', 'system_metadata', 'info_cache',
                     'security_groups']
            instance = instance_obj.Instance._from_db_object(
                context, instance_obj.Instance(), instance,
                expected_attrs=attrs)
        if live and not rebuild and not flavor:
            self._live_migrate(context, instance, scheduler_hint,
                               block_migration, disk_over_commit)
        elif not live and not rebuild and flavor:
            instance_uuid = instance['uuid']
            with compute_utils.EventReporter(context, ConductorManager(),
                                             'cold_migrate', instance_uuid):
                self._cold_migrate(context, instance, flavor,
                                   scheduler_hint['filter_properties'],
                                   reservations)
        else:
            raise NotImplementedError()
    def _cold_migrate(self, context, instance, flavor, filter_properties,
                      reservations):
        """Schedule and kick off a cold migration / resize.

        On NoValidHost the instance state is restored, quota reservations
        rolled back, and the method returns quietly; any other failure sets
        the instance to ERROR and re-raises.
        """
        image_ref = instance.image_ref
        image = compute_utils.get_image_metadata(
            context, self.image_service, image_ref, instance)
        request_spec = scheduler_utils.build_request_spec(
            context, image, [instance], instance_type=flavor)
        try:
            hosts = self.scheduler_rpcapi.select_destinations(
                context, request_spec, filter_properties)
            host_state = hosts[0]
        except exception.NoValidHost as ex:
            vm_state = instance['vm_state']
            if not vm_state:
                vm_state = vm_states.ACTIVE
            updates = {'vm_state': vm_state, 'task_state': None}
            self._set_vm_state_and_notify(context, 'migrate_server',
                                          updates, ex, request_spec)
            if reservations:
                self.quotas.rollback(context, reservations)
            LOG.warning(_("No valid host found for cold migrate"))
            return
        try:
            scheduler_utils.populate_filter_properties(filter_properties,
                                                       host_state)
            # context is not serializable
            filter_properties.pop('context', None)
            # TODO(timello): originally, instance_type in request_spec
            # on compute.api.resize does not have 'extra_specs', so we
            # remove it for now to keep tests backward compatibility.
            request_spec['instance_type'].pop('extra_specs')
            (host, node) = (host_state['host'], host_state['nodename'])
            self.compute_rpcapi.prep_resize(
                context, image, instance,
                flavor, host,
                reservations, request_spec=request_spec,
                filter_properties=filter_properties, node=node)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                updates = {'vm_state': vm_states.ERROR,
                           'task_state': None}
                self._set_vm_state_and_notify(context, 'migrate_server',
                                              updates, ex, request_spec)
                if reservations:
                    self.quotas.rollback(context, reservations)
    def _set_vm_state_and_notify(self, context, method, updates, ex,
                                 request_spec):
        """Apply state updates and send the scheduler failure notification."""
        scheduler_utils.set_vm_state_and_notify(
            context, 'compute_task', method, updates,
            ex, request_spec, self.db)
    def _live_migrate(self, context, instance, scheduler_hint,
                      block_migration, disk_over_commit):
        """Run a live migration, mapping failures onto instance state.

        Known scheduling/precheck errors restore the prior vm_state;
        unexpected errors put the instance into ERROR. Both re-raise.
        """
        destination = scheduler_hint.get("host")
        try:
            live_migrate.execute(context, instance, destination,
                                 block_migration, disk_over_commit)
        except (exception.NoValidHost,
                exception.ComputeServiceUnavailable,
                exception.InvalidHypervisorType,
                exception.UnableToMigrateToSelf,
                exception.DestinationHypervisorTooOld,
                exception.InvalidLocalStorage,
                exception.InvalidSharedStorage,
                exception.MigrationPreCheckError) as ex:
            with excutils.save_and_reraise_exception():
                # TODO(johngarbutt) - eventually need instance actions here
                request_spec = {'instance_properties': {
                    'uuid': instance['uuid'], },
                }
                scheduler_utils.set_vm_state_and_notify(context,
                        'compute_task', 'migrate_server',
                        dict(vm_state=instance['vm_state'],
                             task_state=None,
                             expected_task_state=task_states.MIGRATING,),
                        ex, request_spec, self.db)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                request_spec = {'instance_properties': {
                    'uuid': instance['uuid'], },
                }
                scheduler_utils.set_vm_state_and_notify(context,
                        'compute_task', 'migrate_server',
                        {'vm_state': vm_states.ERROR},
                        ex, request_spec, self.db)
    def build_instances(self, context, instances, image, filter_properties,
            admin_password, injected_files, requested_networks,
            security_groups, block_device_mapping, legacy_bdm=True):
        """Hand a batch of new instances to the scheduler for placement."""
        request_spec = scheduler_utils.build_request_spec(context, image,
                                                          instances)
        # NOTE(alaski): For compatibility until a new scheduler method is used.
        request_spec.update({'block_device_mapping': block_device_mapping,
                             'security_group': security_groups})
        self.scheduler_rpcapi.run_instance(context, request_spec=request_spec,
                admin_password=admin_password, injected_files=injected_files,
                requested_networks=requested_networks, is_first_time=True,
                filter_properties=filter_properties,
                legacy_bdm_in_spec=legacy_bdm)
    def _get_image(self, context, image_id):
        """Return image metadata, or None when no image id is given."""
        if not image_id:
            return None
        return self.image_service.show(context, image_id)
    def _delete_image(self, context, image_id):
        """Delete an image via its (possibly remote) glance service."""
        (image_service, image_id) = glance.get_remote_image_service(context,
                                                                    image_id)
        return image_service.delete(context, image_id)
    def _schedule_instances(self, context, image, filter_properties,
                            *instances):
        """Ask the scheduler for destination hosts for the instances."""
        request_spec = scheduler_utils.build_request_spec(context, image,
                                                          instances)
        # dict(host='', nodename='', limits='')
        hosts = self.scheduler_rpcapi.select_destinations(context,
                request_spec, filter_properties)
        return hosts
    def unshelve_instance(self, context, instance):
        """Bring a shelved instance back into service.

        SHELVED instances are simply powered back on; SHELVED_OFFLOADED
        ones are re-scheduled from their snapshot image. Any other
        vm_state is an error. Shelve bookkeeping keys are purged from
        system_metadata on the way out.
        """
        sys_meta = instance.system_metadata
        if instance.vm_state == vm_states.SHELVED:
            instance.task_state = task_states.POWERING_ON
            instance.save(expected_task_state=task_states.UNSHELVING)
            self.compute_rpcapi.start_instance(context, instance)
            snapshot_id = sys_meta.get('shelved_image_id')
            if snapshot_id:
                self._delete_image(context, snapshot_id)
        elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
            try:
                with compute_utils.EventReporter(context, self.db,
                        'get_image_info', instance.uuid):
                    image = self._get_image(context,
                            sys_meta['shelved_image_id'])
            except exception.ImageNotFound:
                with excutils.save_and_reraise_exception():
                    # NOTE(review): this log message looks copy-pasted from
                    # the else branch below — the actual failure here is a
                    # missing shelved image, not a bad vm_state. Confirm
                    # before changing user-visible text.
                    LOG.error(_('Unshelve attempted but vm_state not SHELVED '
                                'or SHELVED_OFFLOADED'), instance=instance)
                    instance.vm_state = vm_states.ERROR
                    instance.save()
            filter_properties = {}
            hosts = self._schedule_instances(context, image,
                                             filter_properties, instance)
            host = hosts.pop(0)['host']
            self.compute_rpcapi.unshelve_instance(context, instance, host,
                    image)
        else:
            LOG.error(_('Unshelve attempted but vm_state not SHELVED or '
                        'SHELVED_OFFLOADED'), instance=instance)
            instance.vm_state = vm_states.ERROR
            instance.save()
            return
        for key in ['shelved_at', 'shelved_image_id', 'shelved_host']:
            if key in sys_meta:
                del(sys_meta[key])
        instance.system_metadata = sys_meta
        instance.save()
| |
# Copyright (C) 2013, 2014, 2017 by Kevin L. Mitchell <klmitch@mit.edu>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import inspect
import sys
import pkg_resources
import six
# Public decorator/helper API of this module, as re-exported by
# ``from cli_tools import *``.
__all__ = ['console', 'prog', 'usage', 'description', 'epilog',
           'formatter_class', 'argument', 'argument_group',
           'mutually_exclusive_group', 'subparsers', 'load_subcommands']
def _clean_text(text):
"""
Clean up a multiple-line, potentially multiple-paragraph text
string. This is used to extract the first paragraph of a string
and eliminate line breaks and indentation. Lines will be joined
together by a single space.
:param text: The text string to clean up. It is safe to pass
``None``.
:returns: The first paragraph, cleaned up as described above.
"""
desc = []
for line in (text or '').strip().split('\n'):
# Clean up the line...
line = line.strip()
# We only want the first paragraph
if not line:
break
desc.append(line)
return ' '.join(desc)
def expose(func):
    """
    A decorator for ``ScriptAdaptor`` methods.  Marks the method so it
    is copied onto functions decorated by ``cli_tools``; classes
    decorated by ``cli_tools`` are unaffected.
    :param func: The function to expose.
    :returns: The same function, now flagged for exposure.
    """
    # Flag the function; ScriptAdaptorMeta collects these names.
    setattr(func, '_cli_expose', True)
    return func
class ScriptAdaptorMeta(type):
    """
    A metaclass for ``ScriptAdaptor``. This builds a list of the
    names of methods that have been decorated with ``@expose``. This
    is used to copy the exposed methods onto a decorated function.
    """
    def __new__(mcs, name, bases, namespace):
        """
        Create the ``ScriptAdaptor`` class. This ensures that an
        ``exposed`` class attribute is set to the set of method names
        that should be exposed on a decorated function.
        :param name: The class name.
        :param bases: A tuple of the base classes.
        :param namespace: A dictionary containing the class namespace.
        :returns: The constructed class.
        """
        # Build up the set of exposed method names.
        # NOTE: the loop variable must not be called ``name`` — doing so
        # shadowed the class-name parameter, so the class was constructed
        # with the last namespace key iterated as its __name__.
        exposed = set()
        for attr_name, value in namespace.items():
            if callable(value) and getattr(value, '_cli_expose', False):
                exposed.add(attr_name)
        namespace['exposed'] = exposed
        # Construct and return the class
        return super(ScriptAdaptorMeta, mcs).__new__(mcs, name, bases,
                                                     namespace)
@six.add_metaclass(ScriptAdaptorMeta)
class ScriptAdaptor(object):
"""
An adaptor for the function. Keeps track of the declared command
line arguments and includes methods for declaring processors and
calling the function from the console.
"""
@classmethod
def _get_adaptor(cls, func):
"""
Gets the ``ScriptAdaptor`` for a function.
:param func: The function to obtain the ``ScriptAdaptor`` of.
:returns: The ``ScriptAdaptor``.
"""
# Get the adaptor, creating one if necessary
adaptor = getattr(func, 'cli_tools', None)
if adaptor is None:
is_class = inspect.isclass(func)
adaptor = cls(func, is_class)
func.cli_tools = adaptor
# Set up the added functions
if not is_class:
for meth in cls.exposed:
setattr(func, meth, getattr(adaptor, meth))
return adaptor
    def __init__(self, func, is_class=None):
        """
        Initialize a ``ScriptAdaptor``.
        :param func: The underlying function.
        :param is_class: A boolean specifying whether the ``func`` is
                         actually a class.  If ``None``, it is derived
                         via ``inspect.isclass()``.
        """
        self._func = func
        self._is_class = (is_class if is_class is not None
                          else inspect.isclass(func))
        # For classes, the instance method named here is invoked to run
        # the command; plain functions are called directly.
        self._run = 'run' if self._is_class else None
        # Hooks defaulting to no-ops until registered.
        self._args_hook = lambda x: None
        self._processor = lambda x: None
        # Pending argument/group specifications, filled by decorators.
        self._arguments = []
        self._groups = {}
        # Subcommand name -> ScriptAdaptor mapping.
        self._subcommands = {}
        self._entrypoints = set()
        self.do_subs = False
        self.subkwargs = {}
        # ArgumentParser construction knobs; description defaults to the
        # first paragraph of the wrapped callable's docstring.
        self.prog = None
        self.usage = None
        self.description = _clean_text(func.__doc__)
        self.epilog = None
        self.formatter_class = argparse.HelpFormatter
        # This will be an attribute name for the adaptor implementing
        # the subcommand; this allows for the potential of arbitrary
        # depth on subcommands
        self._subcmd_attr = '_script_adaptor_%x' % id(self)
def _add_argument(self, args, kwargs, group):
"""
Add an argument specification to the list of argument
specifications. The argument specification is inserted at the
beginning of the list of argument specifications, so that the
decorators may be added in natural order.
:param args: The positional arguments of the argument
specification.
:param kwargs: The keyword arguments of the argument
specification.
:param group: An argument group name. If provided, the
argument specification will be added to the
named group, rather than to the general list of
arguments.
"""
if group:
self._groups.setdefault(group, dict(arguments=[]))
self._groups[group]['arguments'].insert(0, (args, kwargs))
else:
self._arguments.insert(0, ('argument', args, kwargs))
def _add_group(self, group, type, kwargs):
"""
Add an argument group specification to the list of argument
specifications. The group specification is inserted at the
beginning of the list of argument specifications, so that the
decorators may be added in natural order.
:param group: The name of the argument group. If the group is
already defined, an ``argparse.ArgumentError``
will be raised.
:param type: Either "group" or "exclusive", depending on the
desired group type.
:param kwargs: The keyword arguments of the group
specification.
"""
# Make sure the group exists
self._groups.setdefault(group, dict(arguments=[]))
# Look out for the pre-existence of the group
if 'type' in self._groups[group]:
raise argparse.ArgumentError(None, "group %s: conflicting groups" %
group)
# Save the data
self._groups[group]['type'] = type
# Add the group to the argument specification list
self._arguments.insert(0, ('group', group, kwargs))
def _add_subcommand(self, name, adaptor):
"""
Add a subcommand to the parser.
:param name: The name of the command to be added.
:param adaptor: The corresponding ScriptAdaptor instance.
"""
self._subcommands[name] = adaptor
self.do_subs = True
def _add_extensions(self, group):
"""
Adds extensions to the parser. This will cause a walk of a
``pkg_resources`` entrypoint group, adding each discovered
function that has an attached ScriptAdaptor instance as a
subcommand. This walk is performed immediately prior to
building the subcommand processor. Note that no attempt is
made to avoid duplication of subcommands.
:param group: The entrypoint group name.
"""
self._entrypoints.add(group)
# We are now in subparsers mode
self.do_subs = True
def _process_entrypoints(self):
"""
Perform a walk of all entrypoint groups declared using
``_add_extensions()``. This is called immediately prior to
building the subcommand processor.
"""
# Walk the set of all declared entrypoints
for group in self._entrypoints:
for ep in pkg_resources.iter_entry_points(group):
try:
func = ep.load()
self._add_subcommand(ep.name, func.cli_tools)
except (ImportError, AttributeError,
pkg_resources.UnknownExtra):
# Ignore any expected errors
pass
# We've processed these entrypoints; avoid double-processing
self._entrypoints = set()
@expose
def args_hook(self, func):
"""
Sets a hook for constructing the arguments. This hook could
be used to allow, for instance, a set of authentication
plugins to add their configuration options to the argument
parser. This method may be used as a decorator, e.g.:
@console
def func():
pass
@func.args_hook
def _hook(parser):
pass
If the hook is a regular function, it will be called after
processing all of the regular argument specifications.
If the hook is a generator, the segment before the first
``yield`` statement will be executed before adding any regular
argument specifications, and the remainder will be executed
afterward.
:param func: The function to be installed as an argument hook.
:returns: The function, allowing this method to be used as a
decorator.
"""
self._args_hook = func
return func
@expose
def processor(self, func):
"""
Sets a processor for the underlying function. A processor
function runs before and potentially after the underlying
function, but only when it is being called as a console
script. This method may be used as a decorator, e.g.:
@console
def func():
pass
@func.processor
def _proc(args):
pass
If the processor is a regular function, it will be called just
before the underlying function is called, and it will be
passed the parsed arguments.
If the processor is a generator, the segment before the first
``yield`` statement will be executed just before the
underlying function is called. The return result of the
``yield`` statement will be the return result of the
underlying function, and if another value is ``yield``ed, that
value will replace the return result for the purposes of the
console script.
:param func: The function to be installed as a processor.
:returns: The function, allowing this method to be used as a
decorator.
"""
self._processor = func
return func
@expose
def subcommand(self, name=None):
"""
Decorator used to mark another function as a subcommand of
this function. If ``function()`` is the parent function, this
decorator can be used in any of the following ways:
@function.subcommand('spam')
def foo():
pass
@function.subcommand()
def bar():
pass
@function.subcommand
def baz():
pass
In the first case, the command name is set explicitly. In the
latter two cases, the command name is the function name.
:param name: If a string, gives the name of the subcommand.
If a callable, specifies the function being added
as a subcommand. If not specified, a decorator
will be returned which will derive the name from
the function.
:returns: If ``name`` was a callable, it will be returned.
Otherwise, returns a callable which takes a callable
as an argument and returns that callable, to conform
with the decorator syntax.
"""
def decorator(func):
cmdname = name or func.__name__
adaptor = self._get_adaptor(func)
self._add_subcommand(cmdname, adaptor)
return func
# If we were passed a callable, we were used without
# parentheses, and will derive the command name from the
# function...
if callable(name):
func = name
name = None
return decorator(func)
return decorator
@expose
def setup_args(self, parser):
"""
Set up an ``argparse.ArgumentParser`` object by adding all the
arguments taken by the function. This is available to allow
other users access to the argument specifications.
:param parser: An ``argparse.ArgumentParser`` object, or any
related object having an ``add_argument()``
method.
"""
# Run the args hook, if it's a generator
post = self._args_hook
if inspect.isgeneratorfunction(self._args_hook):
post = self._args_hook(parser)
try:
six.next(post)
except StopIteration:
# Won't be doing any post-processing anyway
post = None
for arg_type, args, kwargs in self._arguments:
if arg_type == 'argument':
parser.add_argument(*args, **kwargs)
elif arg_type == 'group':
# Get the group information
arguments = self._groups[args]['arguments']
type = self._groups[args]['type']
# Create the group in the parser
if type == 'group':
group = parser.add_argument_group(**kwargs)
elif type == 'exclusive':
group = parser.add_mutually_exclusive_group(**kwargs)
else:
# Huh, don't know that group...
continue # pragma: no cover
# Set up all the arguments
for a_args, a_kwargs in arguments:
group.add_argument(*a_args, **a_kwargs)
# If we have subcommands, set up the parser appropriately
if self.do_subs:
self._process_entrypoints()
subparsers = parser.add_subparsers(**self.subkwargs)
for cmd, adaptor in self._subcommands.items():
cmd_parser = subparsers.add_parser(
cmd,
prog=adaptor.prog,
usage=adaptor.usage,
description=adaptor.description,
epilog=adaptor.epilog,
formatter_class=adaptor.formatter_class,
)
adaptor.setup_args(cmd_parser)
# Remember which adaptor implements the subcommand
defaults = {self._subcmd_attr: adaptor}
cmd_parser.set_defaults(**defaults)
# If the hook has a post phase, run it
if post:
if inspect.isgenerator(post):
try:
six.next(post)
except StopIteration:
pass
post.close()
else:
post(parser)
@expose
def get_kwargs(self, func, args=None):
"""
Given an ``argparse.Namespace``, as produced by
``argparse.ArgumentParser.parse_args()``, determines the
keyword arguments to pass to the specified function. Note
that an ``AttributeError`` exception will be raised if any
argument required by the function is not set in ``args``.
:param func: A callable to introspect.
:param args: A ``argparse.Namespace`` object containing the
argument values.
:returns: A dictionary containing the keyword arguments to be
passed to the underlying function.
"""
# For backwards compatibility, handle the case when we were
# called with only one argument
if args is None:
args = func
func = self._func
# Get the argument spec for the correct underlying function
if inspect.isclass(func):
try:
# Try __new__() first; this will raise a TypeError if
# __new__() hasn't been overridden
argspec = inspect.getargspec(func.__new__)
ismethod = True
except TypeError:
try:
# OK, no __new__(); try __init__()
argspec = inspect.getargspec(func.__init__)
ismethod = True
except TypeError:
# OK, no __init__(); that means that the class
# initializer takes no arguments
argspec = inspect.ArgSpec([], None, None, None)
ismethod = False
else:
argspec = inspect.getargspec(func)
ismethod = inspect.ismethod(func)
# We need to figure out which arguments the final function
# actually needs
kwargs = {}
req_args = (argspec.args[:-len(argspec.defaults)]
if argspec.defaults else argspec.args)
required = set(req_args[1:] if ismethod else req_args)
for arg_name in argspec.args:
try:
kwargs[arg_name] = getattr(args, arg_name)
except AttributeError:
if arg_name in required:
# If this happens, that's a programming error
raise
# If the function accepts any keyword argument, add whatever
# remains
if argspec.keywords:
for key, value in args.__dict__.items():
if key in kwargs:
# Already handled
continue
kwargs[key] = value
return kwargs
@expose
def safe_call(self, args):
"""
Call the processor and the underlying function. If the
``debug`` attribute of ``args`` exists and is ``True``, any
exceptions raised by the underlying function will be
re-raised.
:param args: This should be an ``argparse.Namespace`` object;
the keyword arguments for the function will be
derived from it.
:returns: A tuple of the function return value and exception
information. Only one of these values will be
non-``None``.
"""
# Run the processor
post = None
if inspect.isgeneratorfunction(self._processor):
post = self._processor(args)
try:
six.next(post)
except StopIteration:
# Won't be doing any post-processing anyway
post = None
else:
self._processor(args)
# Initialize the results
result = None
exc_info = None
try:
# Call the function
result = self._func(**self.get_kwargs(self._func, args))
except Exception:
if args and getattr(args, 'debug', False):
# Re-raise if desired
raise
exc_info = sys.exc_info()
if self._is_class:
# All we've done so far is initialize the class; now we
# need to actually run it
try:
meth = getattr(result, self._run)
result = meth(**self.get_kwargs(meth, args))
except Exception:
if args and getattr(args, 'debug', False):
# Re-raise if desired
raise
result = None # must clear result
exc_info = sys.exc_info()
# If the processor has a post phase, run it
if post:
try:
if exc_info:
# Overwrite the result and exception information
result = post.throw(*exc_info)
exc_info = None
else:
result = post.send(result)
except StopIteration:
# No result replacement...
pass
except Exception:
# Overwrite the result and exception information
exc_info = sys.exc_info()
result = None
post.close()
return result, exc_info
@expose
def console(self, args=None, argv=None):
"""
Call the function as a console script. Command line arguments
are parsed (unless ``args`` is passed), the processor (if any)
is called, then the underlying function is called. If a
``debug`` attribute is set by the command line arguments, and
if it is ``True``, any exception raised by the underlying
function will be re-raised; otherwise, the return value will
be either the return value of the function or the string value
of the exception (unless overwritten by the processor).
:param args: If provided, should be an ``argparse.Namespace``
containing the required argument values for the
function. This can be used to parse the
parameters separately.
:param argv: If provided, should be a list of argument strings
to be parsed by the argument parser, in
preference to ``sys.argv[1:]``.
:returns: The function return value, the string value of any
exception raised by the function, or a value yielded
by the processor to replace the function value.
"""
# First, let's parse the arguments
if not args:
parser = argparse.ArgumentParser(
prog=self.prog,
usage=self.usage,
description=self.description,
epilog=self.epilog,
formatter_class=self.formatter_class,
)
self.setup_args(parser)
args = parser.parse_args(args=argv)
# Get the adaptor
if self.do_subs:
# If the subcommand attribute isn't set, we'll call our
# underlying function
adaptor = getattr(args, self._subcmd_attr, self)
else:
adaptor = self
# Call the function
result, exc_info = adaptor.safe_call(args)
if exc_info:
return str(exc_info[1])
return result
@expose
def get_subcommands(self):
"""
Retrieve a dictionary of the recognized subcommands.
:returns: A dictionary mapping subcommand names to the
implementing functions.
"""
# We only have a return value if we're in subparsers mode
if not self.do_subs:
return {}
# Process any declared entrypoints
self._process_entrypoints()
# Return the subcommands dictionary
return dict((k, v._func) for k, v in self._subcommands.items())
def console(func):
    """
    Mark a script entry point as a console script.

    Applying this decorator is optional; it is only required when the
    function declares no arguments beyond the defaults that
    ``argparse`` provides (such as "--help").
    """
    # The only work needed is attaching the adaptor; the function is
    # handed back untouched.
    ScriptAdaptor._get_adaptor(func)
    return func
def prog(text):
    """
    Decorator that overrides the program name shown in the console
    script's help message.

    :param text: The program name to display.
    """
    def wrapper(func):
        ScriptAdaptor._get_adaptor(func).prog = text
        return func
    return wrapper
def usage(text):
    """
    Decorator that sets the usage string displayed in the console
    script's help message.

    :param text: The usage text to display.
    """
    def wrapper(func):
        ScriptAdaptor._get_adaptor(func).usage = text
        return func
    return wrapper
def description(text):
    """
    Decorator that sets a short description of the console script,
    overriding the default derived from the function's docstring.

    :param text: The description text to display.
    """
    def wrapper(func):
        ScriptAdaptor._get_adaptor(func).description = text
        return func
    return wrapper
def epilog(text):
    """
    Decorator that sets the epilog text appended to the console
    script's help message.

    :param text: The epilog text to display.
    """
    def wrapper(func):
        ScriptAdaptor._get_adaptor(func).epilog = text
        return func
    return wrapper
def formatter_class(klass):
    """
    Decorator that selects the ``argparse`` help formatter class used
    by the console script.

    :param klass: The formatter class to install.
    """
    def wrapper(func):
        ScriptAdaptor._get_adaptor(func).formatter_class = klass
        return func
    return wrapper
def argument(*args, **kwargs):
    """
    Decorator that declares a command line argument for the console
    script.  Positional and keyword arguments mirror those accepted by
    ``argparse.ArgumentParser.add_argument()``; the extra ``group``
    keyword routes the argument into a named argument group.
    """
    def wrapper(func):
        # Pull out our own "group" keyword before the rest is handed
        # to argparse
        target_group = kwargs.pop('group', None)
        ScriptAdaptor._get_adaptor(func)._add_argument(
            args, kwargs, group=target_group)
        return func
    return wrapper
def argument_group(group, **kwargs):
    """
    Decorator that declares a named argument group.  Keyword arguments
    mirror those accepted by
    ``argparse.ArgumentParser.add_argument_group()``.

    Individual arguments join the group by passing the ``group``
    keyword argument to ``@argument()``.

    :param group: The name of the argument group.
    """
    def wrapper(func):
        ScriptAdaptor._get_adaptor(func)._add_group(group, 'group', kwargs)
        return func
    return wrapper
def mutually_exclusive_group(group, **kwargs):
    """
    Decorator that declares a mutually exclusive argument group.
    Keyword arguments mirror those accepted by
    ``argparse.ArgumentParser.add_mutually_exclusive_group()``.

    Individual arguments join the group by passing the ``group``
    keyword argument to ``@argument()``.

    :param group: The name of the argument group.
    """
    def wrapper(func):
        ScriptAdaptor._get_adaptor(func)._add_group(group, 'exclusive', kwargs)
        return func
    return wrapper
def subparsers(**kwargs):
    """
    Decorator that supplies alternate keyword arguments for the
    ``argparse.ArgumentParser.add_subparsers()`` call and switches the
    script into subcommand mode.
    """
    def wrapper(func):
        adaptor = ScriptAdaptor._get_adaptor(func)
        adaptor.subkwargs = kwargs
        adaptor.do_subs = True
        return func
    return wrapper
def load_subcommands(group):
    """
    Decorator that pulls subcommands in from a ``pkg_resources``
    entrypoint group.  Each discovered function must carry the
    ``cli_tools`` decorations in order to be recognized as an
    extension.

    :param group: The name of the ``pkg_resources`` entrypoint group.
    """
    def wrapper(func):
        ScriptAdaptor._get_adaptor(func)._add_extensions(group)
        return func
    return wrapper
| |
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
import decimal
from os.path import expanduser
from qrl import __version__ as version
import os
import yaml
from math import ceil, log
class UserConfig(object):
    """Operator-tunable node configuration.

    Defaults are set in ``__init__`` and may be overridden by a
    ``config.yml`` file in the QRL directory (see ``load_yaml``).
    Behaves as a singleton: a second instantiation raises unless
    ``ignore_check`` is passed.
    """

    # Singleton instance holder (name-mangled to _UserConfig__instance)
    __instance = None

    def __init__(self, ignore_check=False):
        # TODO: Move to metaclass in Python 3
        if not ignore_check and UserConfig.__instance is not None:
            raise Exception("UserConfig can only be instantiated once")
        UserConfig.__instance = self

        # Genesis block parameters
        self.genesis_prev_headerhash = b'The sleeper must awaken'
        self.genesis_timestamp = 1530004179
        self.genesis_difficulty = 10000000

        # Default configuration
        self.mining_enabled = False
        self.mining_address = ''
        self.mining_thread_count = 0  # 0 to auto detect thread count based on CPU/GPU number of processors
        self.mining_pause = 0  # this will force a sleep (ms) while mining to reduce cpu usage. Only for mocknet

        # Ephemeral Configuration
        self.accept_ephemeral = True

        # PEER Configuration
        self.max_redundant_connections = 5  # Number of connections allowed from nodes having same IP
        self.enable_peer_discovery = True  # Allows to discover new peers from the connected peers

        # Bootstrap peers contacted on startup
        self.peer_list = ['35.178.79.137',
                          '35.177.182.85',
                          '18.130.119.29',
                          '18.130.25.64']
        self.p2p_local_port = 19000  # Locally binded port at which node will listen for connection
        self.p2p_public_port = 19000  # Public port forwarding connections to server

        self.peer_rate_limit = 500  # Max Number of messages per minute per peer
        self.p2p_q_size = 10000
        self.outgoing_message_expiry = 90  # Outgoing message expires after 90 seconds

        # NTP time synchronization
        self.ntp_servers = ['pool.ntp.org', 'ntp.ubuntu.com']
        self.ntp_refresh = 12 * 60 * 60  # 12 hours
        self.ntp_request_timeout = 10  # 10 seconds ntp timeout
        self.ban_minutes = 20  # Allows to ban a peer's IP who is breaking protocol

        self.monitor_connections_interval = 30  # Monitor connection every 30 seconds
        self.max_peers_limit = 100  # Number of allowed peers

        self.chain_state_timeout = 180
        self.chain_state_broadcast_period = 30
        # must be less than ping_timeout

        # NOTE: instantiates a throwaway DevConfig purely to read
        # shor_per_quanta; ignore_check is forwarded so the singleton
        # guard is respected consistently
        self.transaction_minimum_fee = int(0 * DevConfig(ignore_check).shor_per_quanta)

        self.transaction_pool_size = 25000
        self.pending_transaction_pool_size = 75000
        # 1% of the pending_transaction_pool will be reserved for moving stale txn
        self.pending_transaction_pool_reserve = int(self.pending_transaction_pool_size * 0.01)
        self.stale_transaction_threshold = 15  # 15 Blocks

        # Base directory for all node state (overridable via qrl_dir)
        self._qrl_dir = expanduser(os.path.join("~/.qrl"))

        # ======================================
        # ADMIN API CONFIGURATION
        # ======================================
        self.admin_api_enabled = False
        self.admin_api_host = "127.0.0.1"
        self.admin_api_port = 19008
        self.admin_api_threads = 1
        self.admin_api_max_concurrent_rpc = 100

        # ======================================
        # PUBLIC API CONFIGURATION
        # ======================================
        self.public_api_enabled = True
        self.public_api_host = "127.0.0.1"
        self.public_api_port = 19009
        self.public_api_threads = 1
        self.public_api_max_concurrent_rpc = 100

        # ======================================
        # MINING API CONFIGURATION
        # ======================================
        self.mining_api_enabled = False
        self.mining_api_host = "127.0.0.1"
        self.mining_api_port = 19007
        self.mining_api_threads = 1
        self.mining_api_max_concurrent_rpc = 100

        # ======================================
        # DEBUG API CONFIGURATION
        # ======================================
        self.debug_api_enabled = False
        self.debug_api_host = "127.0.0.1"
        self.debug_api_port = 52134
        self.debug_api_threads = 1
        self.debug_api_max_concurrent_rpc = 100

        # ======================================
        # GRPC PROXY CONFIGURATION
        # ======================================
        self.grpc_proxy_host = "127.0.0.1"
        self.grpc_proxy_port = 18090

        # ======================================
        # WALLET DAEMON CONFIGURATION
        # ======================================
        self.public_api_server = "127.0.0.1:19009"
        self.wallet_daemon_host = "127.0.0.1"
        self.wallet_daemon_port = 18091
        self.number_of_slaves = 3

        # ======================================
        # WALLET API CONFIGURATION
        # ======================================
        self.wallet_api_host = "127.0.0.1"
        self.wallet_api_port = 19010
        self.wallet_api_threads = 1
        self.wallet_api_max_concurrent_rpc = 100

        # WARNING! loading should be the last line.. any new setting after this will not be updated by the config file
        self.load_yaml(self.config_path)

    @property
    def qrl_dir(self):
        # Root directory holding the node's data, wallet and config
        return self._qrl_dir

    @qrl_dir.setter
    def qrl_dir(self, new_qrl_dir):
        # Changing the directory re-reads the config file found there
        self._qrl_dir = new_qrl_dir
        self.load_yaml(self.config_path)

    @property
    def wallet_dir(self):
        # The wallet lives directly in the QRL directory
        return expanduser(self.qrl_dir)

    @property
    def data_dir(self):
        return expanduser(os.path.join(self.qrl_dir, "data"))

    @property
    def config_path(self):
        return expanduser(os.path.join(self.qrl_dir, "config.yml"))

    @property
    def log_path(self):
        return expanduser(os.path.join(self.qrl_dir, "qrl.log"))

    @property
    def walletd_log_path(self):
        return expanduser(os.path.join(self.qrl_dir, "walletd.log"))

    @property
    def mining_pool_payment_wallet_path(self):
        return expanduser(os.path.join(self.qrl_dir, 'payment_slaves.json'))

    @staticmethod
    def getInstance():
        """Return the singleton, creating it on first access."""
        if UserConfig.__instance is None:
            return UserConfig()
        return UserConfig.__instance

    def load_yaml(self, file_path):
        """
        Overrides default configuration using a yaml file

        :param file_path: The path to the configuration file
        """
        if os.path.isfile(file_path):
            with open(file_path) as f:
                dataMap = yaml.safe_load(f)
                if dataMap is not None:
                    # The header hash is stored as bytes internally,
                    # but YAML yields str; re-encode it
                    if 'genesis_prev_headerhash' in dataMap:
                        dataMap['genesis_prev_headerhash'] = dataMap['genesis_prev_headerhash'].encode()
                    self.__dict__.update(**dataMap)
def create_path(path):
    """Create *path*, including any intermediate directories.

    :param path: The directory path to create.
    """
    # exist_ok makes this a no-op when the directory already exists,
    # matching the previous isdir() guard without the check-then-act
    # race; a pre-existing non-directory at *path* still raises, as
    # before.
    os.makedirs(path, exist_ok=True)
class DevConfig(object):
    """Protocol-level constants for QRL developers.

    Unlike ``UserConfig``, these values are not meant to be changed by
    node operators.  Behaves as a singleton: a second instantiation
    raises unless ``ignore_check`` is passed.
    """

    # Singleton instance holder (name-mangled to _DevConfig__instance)
    __instance = None

    def __init__(self, ignore_check=False):
        super(DevConfig, self).__init__()
        # TODO: Move to metaclass in Python 3
        if not ignore_check and DevConfig.__instance is not None:
            # Fixed: previously this message wrongly said "UserConfig"
            raise Exception("DevConfig can only be instantiated once")
        DevConfig.__instance = self

        self.version = version + ' python'

        ################################################################
        #                    Warning: Don't change following configuration.                       #
        #                              For QRL Developers only                                    #
        ################################################################
        self.block_lead_timestamp = 30
        self.block_max_drift = 15
        self.max_future_blocks_length = 256
        self.max_margin_block_number = 32
        self.min_margin_block_number = 7

        self.public_ip = None
        self.reorg_limit = 22000
        self.cache_frequency = 1000

        self.message_q_size = 300
        self.message_receipt_timeout = 10  # request timeout for full message
        self.message_buffer_size = 64 * 1024 * 1024  # 64 MB

        self.max_coin_supply = decimal.Decimal(105000000)
        # NOTE: attribute name keeps the historical "remaning" typo for
        # backward compatibility with existing callers
        self.coin_remaning_at_genesis = decimal.Decimal(40000000)
        self.timestamp_error = 5  # Error in second

        self.blocks_per_epoch = 100
        self.xmss_tree_height = 12
        # Slave tree must cover ~3 epochs of signatures; height is
        # rounded up to the next even number
        self.slave_xmss_height = int(ceil(log(self.blocks_per_epoch * 3, 2)))
        self.slave_xmss_height += self.slave_xmss_height % 2

        # Maximum number of ots index upto which OTS index should be tracked. Any OTS index above the specified value
        # will be managed by OTS Counter
        self.max_ots_tracking_index = 8192
        self.mining_nonce_offset = 39
        self.extra_nonce_offset = 43
        self.mining_blob_size = 76

        # One bit per tracked OTS index
        self.ots_bitfield_size = ceil(self.max_ots_tracking_index / 8)

        self.default_nonce = 0
        self.default_account_balance = 0 * (10 ** 9)
        self.hash_buffer_size = 4
        self.minimum_minting_delay = 45  # Minimum delay in second before a block is being created

        self.mining_setpoint_blocktime = 60

        self.tx_extra_overhead = 15  # 15 bytes
        self.coinbase_address = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
                                b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'

        # Directories and files
        self.db_name = 'state'
        self.peers_filename = 'known_peers.json'
        self.chain_file_directory = 'data'
        self.wallet_dat_filename = 'wallet.json'
        self.slave_dat_filename = 'slave.qrl'
        self.banned_peers_filename = 'banned_peers.qrl'

        self.trust_min_msgcount = 10
        self.trust_min_conntime = 10

        self.supplied_coins = 65000000 * (10 ** 9)

        # ======================================
        #       TRANSACTION CONTROLLER
        # ======================================
        # Max number of output addresses and corresponding data can be added into a list of a transaction
        self.transaction_multi_output_limit = 100

        # ======================================
        #          TOKEN TRANSACTION
        # ======================================
        self.max_token_symbol_length = 10
        self.max_token_name_length = 30

        # ======================================
        #       DIFFICULTY CONTROLLER
        # ======================================
        self.N_measurement = 30
        self.kp = 5

        # ======================================
        #       BLOCK SIZE CONTROLLER
        # ======================================
        self.number_of_blocks_analyze = 10
        self.size_multiplier = 1.1
        self.block_min_size_limit = 1024 * 1024  # 1 MB - Initial Block Size Limit

        # ======================================
        #            P2P SETTINGS
        # ======================================
        self.max_receivable_bytes = 10 * 1024 * 1024  # 10 MB [Temporary Restriction]
        self.reserved_quota = 1024  # 1 KB
        self.max_bytes_out = self.max_receivable_bytes - self.reserved_quota
        self.sync_delay_mining = 60  # Delay mining by 60 seconds while syncing blocks to mainchain

        # ======================================
        #            API SETTINGS
        # ======================================
        self.block_timeseries_size = 1440

        # ======================================
        # SHOR PER QUANTA / MAX ALLOWED DECIMALS
        # ======================================
        self.shor_per_quanta = decimal.Decimal(10 ** 9)

    @staticmethod
    def getInstance():
        """Return the singleton, creating it on first access."""
        if DevConfig.__instance is None:
            return DevConfig()
        return DevConfig.__instance
# Module-level singletons: importing this module yields the shared
# user-editable and developer configuration objects.
user = UserConfig.getInstance()
dev = DevConfig.getInstance()
| |
import os, inspect

# Make the grandparent directory importable so this module can be run
# directly from inside the pybullet examples tree.  The directory of
# this file is resolved via the current stack frame.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)  # os.sys is an alias for the sys module

import pybullet as p
import numpy as np
import copy
import math
import pybullet_data
class Kuka:
    """Wrapper around a simulated KUKA iiwa arm with a two-finger gripper.

    Loads the robot (and a tray) into the currently connected pybullet
    simulation and exposes a small action/observation interface.
    Assumes a pybullet client is already connected — TODO confirm at
    the call sites.
    """

    def __init__(self, urdfRootPath=pybullet_data.getDataPath(), timeStep=0.01):
        # urdfRootPath: directory holding the pybullet asset files
        # (SDF/URDF); timeStep is stored but stepping is driven by the
        # caller, not by this class.
        self.urdfRootPath = urdfRootPath
        self.timeStep = timeStep
        self.maxVelocity = .35
        self.maxForce = 200.
        self.fingerAForce = 2
        self.fingerBForce = 2.5
        self.fingerTipForce = 2
        self.useInverseKinematics = 1
        self.useSimulation = 1
        # NOTE(review): 21 is truthy but the applyAction check is
        # `useNullSpace == 1`, so the null-space IK branch is disabled;
        # looks deliberate (quick toggle), confirm before relying on it
        self.useNullSpace = 21
        self.useOrientation = 1
        self.kukaEndEffectorIndex = 6
        self.kukaGripperIndex = 7
        #lower limits for null space
        self.ll = [-.967, -2, -2.96, 0.19, -2.96, -2.09, -3.05]
        #upper limits for null space
        self.ul = [.967, 2, 2.96, 2.29, 2.96, 2.09, 3.05]
        #joint ranges for null space
        self.jr = [5.8, 4, 5.8, 4, 5.8, 4, 6]
        #restposes for null space
        self.rp = [0, 0, 0, 0.5 * math.pi, 0, -math.pi * 0.5 * 0.66, 0]
        #joint damping coefficents
        self.jd = [
            0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001,
            0.00001, 0.00001, 0.00001, 0.00001
        ]
        self.reset()

    def reset(self):
        """Load (or reload) the robot and tray and restore the home pose."""
        objects = p.loadSDF(os.path.join(self.urdfRootPath, "kuka_iiwa/kuka_with_gripper2.sdf"))
        self.kukaUid = objects[0]
        #for i in range (p.getNumJoints(self.kukaUid)):
        #  print(p.getJointInfo(self.kukaUid,i))
        p.resetBasePositionAndOrientation(self.kukaUid, [-0.100000, 0.000000, 0.070000],
                                          [0.000000, 0.000000, 0.000000, 1.000000])
        # Home pose, one entry per joint; assumes the SDF model exposes
        # exactly len(jointPositions) joints — TODO confirm
        self.jointPositions = [
            0.006418, 0.413184, -0.011401, -1.589317, 0.005379, 1.137684, -0.006539, 0.000048,
            -0.299912, 0.000000, -0.000043, 0.299960, 0.000000, -0.000200
        ]
        self.numJoints = p.getNumJoints(self.kukaUid)
        for jointIndex in range(self.numJoints):
            # Snap the joint to its home position, then keep a motor
            # holding it there
            p.resetJointState(self.kukaUid, jointIndex, self.jointPositions[jointIndex])
            p.setJointMotorControl2(self.kukaUid,
                                    jointIndex,
                                    p.POSITION_CONTROL,
                                    targetPosition=self.jointPositions[jointIndex],
                                    force=self.maxForce)

        self.trayUid = p.loadURDF(os.path.join(self.urdfRootPath, "tray/tray.urdf"), 0.640000,
                                  0.075000, -0.190000, 0.000000, 0.000000, 1.000000, 0.000000)
        # Commanded end-effector pose, integrated from action deltas
        self.endEffectorPos = [0.537, 0.0, 0.5]
        self.endEffectorAngle = 0

        # Collect the motorized joints (qIndex > -1 means the joint is
        # not fixed)
        self.motorNames = []
        self.motorIndices = []
        for i in range(self.numJoints):
            jointInfo = p.getJointInfo(self.kukaUid, i)
            qIndex = jointInfo[3]
            if qIndex > -1:
                #print("motorname")
                #print(jointInfo[1])
                self.motorNames.append(str(jointInfo[1]))
                self.motorIndices.append(i)

    def getActionDimension(self):
        """Return the size of the action vector expected by applyAction."""
        if (self.useInverseKinematics):
            return len(self.motorIndices)
        return 6  #position x,y,z and roll/pitch/yaw euler angles of end effector

    def getObservationDimension(self):
        """Return the length of the observation vector."""
        return len(self.getObservation())

    def getObservation(self):
        """Return gripper pose as [x, y, z, roll, pitch, yaw]."""
        observation = []
        state = p.getLinkState(self.kukaUid, self.kukaGripperIndex)
        pos = state[0]
        orn = state[1]
        euler = p.getEulerFromQuaternion(orn)

        observation.extend(list(pos))
        observation.extend(list(euler))

        return observation

    def applyAction(self, motorCommands):
        """Apply one action.

        In IK mode, motorCommands is [dx, dy, dz, da, fingerAngle]: a
        Cartesian end-effector delta, a wrist-rotation delta and a
        target finger opening.  Otherwise it is a list of absolute
        joint position targets, one per motorized joint.
        """
        #print ("self.numJoints")
        #print (self.numJoints)
        if (self.useInverseKinematics):

            dx = motorCommands[0]
            dy = motorCommands[1]
            dz = motorCommands[2]
            da = motorCommands[3]
            fingerAngle = motorCommands[4]

            state = p.getLinkState(self.kukaUid, self.kukaEndEffectorIndex)
            actualEndEffectorPos = state[0]
            #print("pos[2] (getLinkState(kukaEndEffectorIndex)")
            #print(actualEndEffectorPos[2])

            # Integrate the commanded position, clamping x/y to the
            # workspace above the tray
            self.endEffectorPos[0] = self.endEffectorPos[0] + dx
            if (self.endEffectorPos[0] > 0.65):
                self.endEffectorPos[0] = 0.65
            if (self.endEffectorPos[0] < 0.50):
                self.endEffectorPos[0] = 0.50
            self.endEffectorPos[1] = self.endEffectorPos[1] + dy
            if (self.endEffectorPos[1] < -0.17):
                self.endEffectorPos[1] = -0.17
            if (self.endEffectorPos[1] > 0.22):
                self.endEffectorPos[1] = 0.22

            #print ("self.endEffectorPos[2]")
            #print (self.endEffectorPos[2])
            #print("actualEndEffectorPos[2]")
            #print(actualEndEffectorPos[2])
            #if (dz<0 or actualEndEffectorPos[2]<0.5):
            self.endEffectorPos[2] = self.endEffectorPos[2] + dz

            self.endEffectorAngle = self.endEffectorAngle + da
            pos = self.endEffectorPos
            # Gripper pointing straight down
            orn = p.getQuaternionFromEuler([0, -math.pi, 0])  # -math.pi,yaw])
            if (self.useNullSpace == 1):
                if (self.useOrientation == 1):
                    jointPoses = p.calculateInverseKinematics(self.kukaUid, self.kukaEndEffectorIndex, pos,
                                                              orn, self.ll, self.ul, self.jr, self.rp)
                else:
                    jointPoses = p.calculateInverseKinematics(self.kukaUid,
                                                              self.kukaEndEffectorIndex,
                                                              pos,
                                                              lowerLimits=self.ll,
                                                              upperLimits=self.ul,
                                                              jointRanges=self.jr,
                                                              restPoses=self.rp)
            else:
                if (self.useOrientation == 1):
                    jointPoses = p.calculateInverseKinematics(self.kukaUid,
                                                              self.kukaEndEffectorIndex,
                                                              pos,
                                                              orn,
                                                              jointDamping=self.jd)
                else:
                    jointPoses = p.calculateInverseKinematics(self.kukaUid, self.kukaEndEffectorIndex, pos)

            #print("jointPoses")
            #print(jointPoses)
            #print("self.kukaEndEffectorIndex")
            #print(self.kukaEndEffectorIndex)
            if (self.useSimulation):
                # Drive the arm joints toward the IK solution
                for i in range(self.kukaEndEffectorIndex + 1):
                    #print(i)
                    p.setJointMotorControl2(bodyUniqueId=self.kukaUid,
                                            jointIndex=i,
                                            controlMode=p.POSITION_CONTROL,
                                            targetPosition=jointPoses[i],
                                            targetVelocity=0,
                                            force=self.maxForce,
                                            maxVelocity=self.maxVelocity,
                                            positionGain=0.3,
                                            velocityGain=1)
            else:
                #reset the joint state (ignoring all dynamics, not recommended to use during simulation)
                for i in range(self.numJoints):
                    p.resetJointState(self.kukaUid, i, jointPoses[i])
            #fingers
            # Joint 7 rotates the wrist; 8/11 are the fingers (driven
            # symmetrically); 10/13 are the finger tips, held at zero
            p.setJointMotorControl2(self.kukaUid,
                                    7,
                                    p.POSITION_CONTROL,
                                    targetPosition=self.endEffectorAngle,
                                    force=self.maxForce)
            p.setJointMotorControl2(self.kukaUid,
                                    8,
                                    p.POSITION_CONTROL,
                                    targetPosition=-fingerAngle,
                                    force=self.fingerAForce)
            p.setJointMotorControl2(self.kukaUid,
                                    11,
                                    p.POSITION_CONTROL,
                                    targetPosition=fingerAngle,
                                    force=self.fingerBForce)

            p.setJointMotorControl2(self.kukaUid,
                                    10,
                                    p.POSITION_CONTROL,
                                    targetPosition=0,
                                    force=self.fingerTipForce)
            p.setJointMotorControl2(self.kukaUid,
                                    13,
                                    p.POSITION_CONTROL,
                                    targetPosition=0,
                                    force=self.fingerTipForce)
        else:
            # Direct joint control: one absolute target per motorized
            # joint
            for action in range(len(motorCommands)):
                motor = self.motorIndices[action]
                p.setJointMotorControl2(self.kukaUid,
                                        motor,
                                        p.POSITION_CONTROL,
                                        targetPosition=motorCommands[action],
                                        force=self.maxForce)
| |
from astropy.io import fits
from astropy.table import Table
from astropy.time import Time
import astropy.units as u
import os
import numpy as np
from srttools.io import (
mkdir_p,
locations,
read_data_fitszilla,
get_chan_columns,
classify_chan_columns,
)
from srttools.utils import scantype, force_move_file, minmax, median_diff
from srttools.fit import detrend_spectroscopic_data
import warnings
from astropy import log
def default_scan_info_table():
    """Return an empty table laid out for per-subscan information.

    Columns: the subscan id, start/stop times, a (min, max, median-step)
    triplet for each coordinate (ra, dec, az, el, glon, glat), a skydip
    flag, the scan kind and the scan direction.
    """
    coordinates = ("ra", "dec", "az", "el", "glon", "glat")
    names = ["scan_id", "start", "stop"]
    for coord in coordinates:
        names.extend([coord + "_min", coord + "_max", coord + "_d"])
    names.extend(["is_skydip", "kind", "direction"])
    # One int id, 20 floats (start/stop + 6 coordinate triplets), then the
    # flag and two fixed-width strings.
    dtype = [int, float, float] + [float] * (3 * len(coordinates))
    dtype += [bool, "S10", "S5"]
    return Table(names=names, dtype=dtype)
def get_subscan_info(subscan):
    """Summarize a single subscan into a one-row info table.

    Parameters
    ----------
    subscan : Table-like
        Must expose ``time``, ``ra``, ``dec``, ``az``, ``el`` columns and
        ``SubScanID`` / ``is_skydip`` metadata.

    Returns
    -------
    info : Table
        One-row table in the :func:`default_scan_info_table` layout.  The
        glon/glat slots are filled with zeros (not computed here).
    """
    info = default_scan_info_table()
    scan_id = subscan.meta["SubScanID"]
    start, stop = minmax(subscan["time"])
    ramin, ramax = minmax(subscan["ra"])
    decmin, decmax = minmax(subscan["dec"])
    azmin, azmax = minmax(subscan["az"])
    elmin, elmax = minmax(subscan["el"])
    is_skydip = subscan.meta["is_skydip"]
    # Median sample-to-sample step along each coordinate.
    d_ra = median_diff(subscan["ra"])
    d_dec = median_diff(subscan["dec"])
    d_az = median_diff(subscan["az"])
    d_el = median_diff(subscan["el"])
    # Angular spans; longitude spans are scaled by cos(latitude) to get
    # on-sky extents.  NOTE(review): np.cos expects radians -- assumes the
    # coordinate columns are stored in radians; confirm upstream units.
    ravar = (ramax - ramin) * np.cos(np.mean((decmin, decmax)))
    decvar = decmax - decmin
    azvar = (azmax - azmin) * np.cos(np.mean((elmin, elmax)))
    elvar = elmax - elmin
    tot_eq = np.sqrt(ravar ** 2 + decvar ** 2)
    tot_hor = np.sqrt(elvar ** 2 + azvar ** 2)
    # NOTE(review): normalizations look inconsistent -- ravar is divided by
    # the equatorial total but decvar by the horizontal one, and azvar/elvar
    # are not normalized at all, yet all four are compared with np.argmax
    # below.  Possibly a bug; confirm intended behavior.
    ravar /= tot_eq
    decvar /= tot_hor
    directions = np.array(["ra", "dec", "az", "el"])
    allvars = np.array([ravar, decvar, azvar, elvar])
    # NOTE(review): classified as "point" when the equatorial span dominates
    # the horizontal one -- verify this is the intended criterion.
    if tot_eq > 2 * tot_hor:
        kind = "point"
        direction = ""
    else:
        kind = "line"
        direction = directions[np.argmax(allvars)]
    # glon/glat columns are left as zeros.
    info.add_row(
        [
            scan_id,
            start,
            stop,
            ramin,
            ramax,
            d_ra,
            decmin,
            decmax,
            d_dec,
            azmin,
            azmax,
            d_az,
            elmin,
            elmax,
            d_el,
            0,
            0,
            0,
            0,
            0,
            0,
            is_skydip,
            kind,
            direction,
        ]
    )
    return info
def format_direction(direction):
    """Translate a scan-direction name into its MBFITS label.

    Horizontal coordinates are renamed ('el' -> 'alat', 'az' -> 'alon',
    case-insensitively); any other value is returned unchanged.

    Examples
    --------
    >>> format_direction('ra')
    'ra'
    >>> format_direction('el')
    'alat'
    >>> format_direction('az')
    'alon'
    """
    horizontal_labels = {"el": "alat", "az": "alon"}
    return horizontal_labels.get(direction.lower(), direction)
def get_observing_strategy_from_subscan_info(info):
    """Get observing strategy from subscan information.

    Parameters
    ----------
    info : Table
        Table in the :func:`default_scan_info_table` layout, one row per
        subscan.

    Returns
    -------
    results : object
        Ad-hoc namespace carrying mode, geom, sep, zigzag, length,
        type/stype, ctype, scanvel, direction, nobs and scantime.
    """
    kinds = info["kind"]
    skydips = info["is_skydip"]
    lines = info[kinds == "line"]
    points = info[kinds == "point"]
    ctype = "RA/DEC"
    # Subscan durations in seconds (start/stop are in days).
    durations = (info["stop"] - info["start"]) * 86400
    xspc, yspc = (0, 0)
    zigzag = False
    stype = "MAP"
    direction = "Unkn"
    length = 0
    if np.all(skydips):
        stype = "SKYDIP"
        mode = "OTF"
        geom = "LINE"
        direction = "ALAT"
    elif len(lines) > len(points):
        mode = "OTF"
        ra_lines = lines[lines["direction"] == "ra"]
        dec_lines = lines[lines["direction"] == "dec"]
        az_lines = lines[lines["direction"] == "az"]
        el_lines = lines[lines["direction"] == "el"]
        # Dominant scanning direction: the one with the most subscans.
        directions = np.array(["ra", "dec", "az", "el"])
        nsub = np.array(
            [len(lines[lines["direction"] == d]) for d in directions]
        )
        direction = directions[np.argmax(nsub)]
        if direction in ["ra", "dec"]:
            lon_lines, dlon, lat_lines, dlat = ra_lines, "ra", dec_lines, "dec"
        elif direction in ["az", "el"]:
            lon_lines, dlon, lat_lines, dlat = az_lines, "az", el_lines, "el"
        else:
            raise ValueError("Unknown scan direction")
        ctype = format_direction(dlon) + "/" + format_direction(dlat)
        sample_dist_lon = lon_lines[dlon + "_d"]
        # NOTE(review): this reads from lon_lines, not lat_lines -- looks
        # like a copy-paste slip (expected lat_lines[dlat + "_d"]); confirm.
        sample_dist_lat = lon_lines[dlat + "_d"]
        if len(lon_lines) == len(lat_lines):
            geom = "CROSS"
            zigzag = True
            length = np.median(
                lon_lines[dlon + "_max"] - lon_lines[dlon + "_min"]
            )
        elif len(lon_lines) > len(lat_lines):
            geom = "LINE"
            # if we see an inversion of direction, set zigzag to True
            zigzag = np.any(sample_dist_lon[:-1] * sample_dist_lon[1:] < 0)
            length = np.median(
                lon_lines[dlon + "_max"] - lon_lines[dlon + "_min"]
            )
            direction = format_direction(dlon)
            xspc = 0
            yspc = median_diff(info[dlat + "_min"], sorting=True)
        else:
            geom = "LINE"
            zigzag = np.any(sample_dist_lat[:-1] * sample_dist_lat[1:] < 0)
            length = np.median(
                lat_lines[dlat + "_max"] - lat_lines[dlat + "_min"]
            )
            direction = format_direction(dlat)
            yspc = 0
            xspc = median_diff(info[dlon + "_min"], sorting=True)
    else:
        mode = "RASTER"
        geom = "SINGLE"
    # Ad-hoc anonymous namespace object.
    results = type("results", (), {})()
    results.mode = mode
    results.geom = geom
    results.sep = (xspc, yspc)
    results.zigzag = zigzag
    results.length = length
    results.type = stype
    results.ctype = ctype
    results.stype = stype
    results.scanvel = length / np.median(durations)
    results.direction = direction
    results.nobs = len(info["scan_id"])
    results.scantime = np.median(durations)
    return results
def _copy_hdu_and_adapt_length(hdu, length):
    """Return a copy of a binary-table HDU stretched to *length* rows.

    Each column of the new HDU repeats the first value of the original
    column *length* times; the original header is reused as-is.
    """
    template_data = hdu.data
    columns = [
        fits.Column(
            name=col.name,
            array=[template_data[col.name][0]] * length,
            format=col.format,
        )
        for col in template_data.columns
    ]
    stretched_hdu = fits.BinTableHDU.from_columns(columns)
    stretched_hdu.header = hdu.header
    return stretched_hdu
# MBFITS header keywords that must be cleared when recycling a template
# header; consumed by reset_all_keywords() below.
keywords_to_reset = """
    11CD2F 11CD2I 11CD2J 11CD2R 11CD2S
    1CRPX2F 1CRPX2I 1CRPX2J 1CRPX2R 1CRPX2S
    1CRVL2F 1CRVL2I 1CRVL2J 1CRVL2R 1CRVL2S
    1CTYP2F 1CTYP2I 1CTYP2J 1CTYP2R 1CTYP2S
    1CUNI2F 1CUNI2I 1CUNI2J 1CUNI2R 1CUNI2S
    1SOBS2F 1SOBS2I 1SOBS2J 1SOBS2R 1SOBS2S
    1SPEC2F 1SPEC2I 1SPEC2J 1SPEC2R 1SPEC2S
    1VSOU2R
    AN ANRX AW AWRX BANDWID BLATOBJ BLONGOBJ CA CARX
    DEWCABIN DEWRTMOD DEWUSER DEWZERO DISTANCE ECCENTR
    FDELTACA FDELTAIA FDELTAIE FDELTAX FDELTAXT FDELTAY FDELTAYT
    FDELTAZ FDELTAZT
    FDTYPCOD FEBEBAND FEBEFEED FEGAIN FREQRES FRTHRWHI FRTHRWLO
    GRPID1 GRPLC1
    HACA HACA2 HACA2RX HACA3 HACA3RX HACARX
    HASA HASA2 HASA2RX HASARX
    HECA2 HECA2RX HECA3 HECA3RX
    HECE HECE2 HECE2RX HECE6 HECE6RX HECERX
    HESA HESA2 HESA2RX HESA3 HESA3RX HESA4 HESA4RX HESA5 HESA5RX HESARX
    HESE HESERX
    HSCA HSCA2 HSCA2RX HSCA5 HSCA5RX HSCARX
    HSSA3 HSSA3RX
    IA IARX IE IERX INCLINAT LATOBJ LONGASC LONGOBJ LONGSTRN
    NFEBE NOPTREFL NPAE NPAERX NPHASES NRX NRXRX NRY NRYRX NUSEBAND
    OMEGA OPTPATH ORBEPOCH ORBEQNOX PATLAT PATLONG
    PDELTACA PDELTAIA PDELTAIE PERIDATE PERIDIST
    REFOFFX REFOFFY REF_ONLN REF_POL RESTFREQ
    SBSEP SCANLEN SCANLINE SCANNUM SCANPAR1 SCANPAR2 SCANROT SCANRPTS
    SCANSKEW SCANTIME SCANXSPC SCANXVEL SCANYSPC
    SIDEBAND SIG_ONLN SIG_POL SKYFREQ SWTCHMOD
    TBLANK TRANSITI TSYNC
    WCSNM2F WCSNM2I WCSNM2J WCSNM2R WCSNM2S
    WOBTHROW WOBUSED
""".split()
def pack_data(scan, polar_dict, detrend=False):
    """Pack data into MBFITS-ready format.

    Channels are stacked along axis 1 in the order LCP, RCP and, when
    present, Q and U.

    Examples
    --------
    >>> scan = {'Feed0_LCP': np.arange(4), 'Feed0_RCP': np.arange(4, 8)}
    >>> polar = {'LCP': 'Feed0_LCP', 'RCP': 'Feed0_RCP'}
    >>> res = pack_data(scan, polar)
    >>> np.allclose(res, [[0, 4], [1, 5], [2, 6], [3, 7]])
    True
    >>> scan = {'Feed0_LCP': np.arange(2), 'Feed0_RCP': np.arange(2, 4),
    ...         'Feed0_Q': np.arange(4, 6), 'Feed0_U': np.arange(6, 8)}
    >>> polar = {'LCP': 'Feed0_LCP', 'RCP': 'Feed0_RCP', 'Q': 'Feed0_Q',
    ...          'U': 'Feed0_U'}
    >>> res = pack_data(scan, polar)
    >>> np.allclose(res, [[0, 2, 4, 6], [1, 3, 5, 7]])
    True
    >>> scan = {'Feed0_LCP': np.ones((2, 4)), 'Feed0_RCP': np.zeros((2, 4))}
    >>> polar = {'LCP': 'Feed0_LCP', 'RCP': 'Feed0_RCP'}
    >>> res = pack_data(scan, polar)
    >>> np.allclose(res, [[[ 1.,  1.,  1.,  1.], [ 0.,  0.,  0.,  0.]],
    ...                   [[ 1.,  1.,  1.,  1.], [ 0.,  0.,  0.,  0.]]])
    True
    """
    if "LCP" not in polar_dict:  # pragma: no cover
        raise ValueError("Polarization kind not implemented yet")
    channels = [scan[polar_dict["LCP"]], scan[polar_dict["RCP"]]]
    # Add Stokes channels when available; like the original try/except,
    # stop at the first missing one.
    for stokes in ("Q", "U"):
        if stokes not in polar_dict:
            break
        channels.append(scan[polar_dict[stokes]])
    if detrend:
        channels = [
            detrend_spectroscopic_data(0, ch, "als")[0] for ch in channels
        ]
    return np.stack(channels, axis=1)
def reset_all_keywords(header):
    """Zero out the template keywords listed in ``keywords_to_reset``.

    String-valued keywords become the empty string; any other type is
    replaced by its zero value.  The header is modified in place and also
    returned for convenience.

    Examples
    --------
    >>> from astropy.io.fits import Header
    >>> h = Header({'SCANNUM': 5, 'OPTPATH': 'dafafa', 'a': 'blabla'})
    >>> h2 = reset_all_keywords(h)
    >>> h2['SCANNUM']
    0
    >>> h2['OPTPATH']
    ''
    >>> # This is not in the list of keywords to eliminate
    >>> h2['a']
    'blabla'
    """
    for keyword in keywords_to_reset:
        if keyword not in header:
            continue
        current = header[keyword]
        if isinstance(current, str):
            header[keyword] = ""
        else:
            header[keyword] = type(current)(0)
    return header
class MBFITS_creator:
    """Incrementally build an MBFITS file set from fitszilla subscans.

    On construction, MBFITS template files are copied into ``dirname`` and
    then filled in as data arrive: a GROUPING file, a SCAN file, and
    per-subscan DATAPAR/ARRAYDATA tables plus one FEBEPAR file per
    frontend-backend (FEBE) combination.

    Typical usage: ``fill_in_summary()`` first, then ``add_subscan()`` for
    each subscan file, then ``update_scan_info()`` and ``wrap_up_file()``.

    Parameters
    ----------
    dirname : str
        Output directory; created if missing.
    test : bool
        If True, only the first feed of each subscan is processed
        (shortcut for the test suite).
    """

    def __init__(self, dirname, test=False):
        self.dirname = dirname
        self.test = test
        mkdir_p(dirname)
        curdir = os.path.dirname(__file__)
        datadir = os.path.join(curdir, "..", "data")
        self.template_dir = os.path.join(datadir, "mbfits_template")
        # Maps FEBE label -> FEBEPAR file name, filled by add_febe().
        self.FEBE = {}
        self.GROUPING = "GROUPING.fits"
        with fits.open(
            os.path.join(self.template_dir, "GROUPING.fits"), memmap=False
        ) as grouping_template:
            # Keep a single placeholder row; real members are added later.
            grouping_template[1].data = grouping_template[1].data[:1]
            grouping_template.writeto(
                os.path.join(self.dirname, self.GROUPING), overwrite=True
            )
        self.SCAN = "SCAN.fits"
        with fits.open(
            os.path.join(self.template_dir, "SCAN.fits"), memmap=False
        ) as scan_template:
            # Placeholder FEBE entry, replaced on the first add_febe call.
            scan_template[1].data["FEBE"][0] = "EMPTY"
            scan_template.writeto(
                os.path.join(self.dirname, self.SCAN), overwrite=True
            )
        self.date_obs = Time.now()
        self.scan_info = default_scan_info_table()
        self.nfeeds = None
        self.ra = 0
        self.dec = 0
        self.site = None
        # Local sidereal time of the earliest sample; 1e32 means "unset".
        self.lst = 1e32

    def fill_in_summary(self, summaryfile):
        """Absorb global observation info from a summary FITS file.

        Reads coordinates, rest frequency, observation date and id from the
        summary header and propagates them to the on-disk GROUPING and SCAN
        files.
        """
        log.info("Loading {}".format(summaryfile))
        with fits.open(summaryfile, memmap=False) as hdul:
            header = hdul[0].header
            hdudict = dict(header.items())
            self.ra = np.degrees(hdudict["RightAscension"])
            self.dec = np.degrees(hdudict["Declination"])
            self.restfreq = None
            if "RESTFREQ1" in hdudict:
                # Bug fix: this used to assign to ``self.resfreq`` (typo),
                # so the summary rest frequency was silently ignored by
                # add_subscan(), which checks ``self.restfreq``.
                self.restfreq = hdudict["RESTFREQ1"]
            try:
                self.date_obs = Time(hdudict["DATE-OBS"])
            except KeyError:
                self.date_obs = Time(hdudict["DATE"])
            try:
                self.obsid = int(hdudict["OBSID"])
            except (KeyError, ValueError):
                self.obsid = 9999
        with fits.open(
            os.path.join(self.dirname, self.GROUPING), memmap=False
        ) as grouphdul:
            groupheader = grouphdul[0].header
            groupdict = dict(groupheader.items())
            # Copy over every summary keyword the template also defines.
            for key in hdudict.keys():
                if key in groupdict:
                    groupheader[key] = hdudict[key]
            groupheader["RA"] = self.ra
            groupheader["DEC"] = self.dec
            groupheader["DATE-OBS"] = self.date_obs.value
            groupheader["MJD-OBS"] = self.date_obs.mjd
            groupheader["SCANNUM"] = self.obsid
            grouphdul.writeto("tmp.fits", overwrite=True)
        force_move_file("tmp.fits", os.path.join(self.dirname, self.GROUPING))
        with fits.open(
            os.path.join(self.dirname, self.SCAN), memmap=False
        ) as scanhdul:
            scanheader = reset_all_keywords(scanhdul[1].header)
            scandict = dict(scanheader.items())
            for key in hdudict.keys():
                # Structural FITS keywords must not be copied.
                if key[:5] in ["NAXIS", "PGCOU", "GCOUN"]:
                    continue
                if key in scandict:
                    scanheader[key] = hdudict[key]
            # Todo: update with correct keywords
            scanheader["DATE-OBS"] = self.date_obs.value
            scanheader["MJD"] = self.date_obs.mjd
            scanheader["SCANNUM"] = self.obsid
            scanhdul.writeto("tmp.fits", overwrite=True)
        force_move_file("tmp.fits", os.path.join(self.dirname, self.SCAN))

    def add_subscan(self, scanfile, detrend=False):
        """Convert one fitszilla subscan and add it to the file set.

        Creates per-feed DATAPAR and ARRAYDATA files, registers them in the
        GROUPING file and, for a FEBE seen for the first time, creates its
        FEBEPAR file via :meth:`add_febe`.

        NOTE: requires :meth:`fill_in_summary` to have been called first
        (``self.obsid`` / ``self.restfreq`` are set there).
        """
        log.info("Loading {}".format(scanfile))
        subscan = read_data_fitszilla(scanfile)
        subscan_info = get_subscan_info(subscan)
        self.scan_info.add_row(subscan_info[0])

        time = Time(subscan["time"] * u.day, scale="utc", format="mjd")
        # Track the earliest observation start time.
        if self.date_obs.mjd > time[0].mjd:
            self.date_obs = time[0]
        if self.site is None:
            self.site = subscan.meta["site"]
        chans = get_chan_columns(subscan)
        combinations = classify_chan_columns(chans)
        if self.nfeeds is None:
            self.nfeeds = len(combinations.keys())
        for feed in combinations:
            felabel = subscan.meta["receiver"] + "{}".format(feed)
            febe = felabel + "-" + subscan.meta["backend"]

            datapar = os.path.join(
                self.template_dir, "1", "FLASH460L-XFFTS-DATAPAR.fits"
            )
            with fits.open(datapar, memmap=False) as subs_par_template:
                n = len(subscan)
                # ------------- Update DATAPAR --------------
                subs_par_template[1] = _copy_hdu_and_adapt_length(
                    subs_par_template[1], n
                )
                newtable = Table(subs_par_template[1].data)
                newtable["MJD"] = subscan["time"]
                newtable["LST"][:] = time.sidereal_time(
                    "apparent", locations[subscan.meta["site"]].lon
                ).value
                if newtable["LST"][0] < self.lst:
                    self.lst = newtable["LST"][0]
                newtable["INTEGTIM"][:] = subscan["Feed0_LCP"].meta[
                    "sample_rate"
                ]
                newtable["RA"] = subscan["ra"].to(u.deg)
                newtable["DEC"] = subscan["dec"].to(u.deg)
                newtable["AZIMUTH"] = subscan["az"].to(u.deg)
                newtable["ELEVATIO"] = subscan["el"].to(u.deg)
                _, direction = scantype(
                    subscan["ra"],
                    subscan["dec"],
                    el=subscan["el"],
                    az=subscan["az"],
                )
                direction_cut = (
                    direction.replace("<", "").replace(">", "").lower()
                )
                if direction_cut in ["ra", "dec"]:
                    baslon = subscan["ra"].to(u.deg)
                    baslat = subscan["dec"].to(u.deg)
                    yoff = baslat.value - self.dec
                    # GLS projection
                    xoff = baslon.value - self.ra
                    newtable["LONGOFF"] = xoff * np.cos(np.radians(self.dec))
                    newtable["LATOFF"] = yoff
                elif direction_cut in ["el", "az"]:
                    warnings.warn("AltAz projection not implemented properly")
                    baslon, baslat = (
                        subscan["az"].to(u.deg),
                        subscan["el"].to(u.deg),
                    )
                    newtable["LONGOFF"] = 0 * u.deg
                    newtable["LATOFF"] = 0 * u.deg
                else:
                    raise ValueError("Unknown coordinates")
                newtable["CBASLONG"] = baslon
                newtable["CBASLAT"] = baslat
                newtable["BASLONG"] = baslon
                newtable["BASLAT"] = baslat
                newhdu = fits.table_to_hdu(newtable)
                subs_par_template[1].data = newhdu.data
                subs_par_template[1].header["DATE-OBS"] = time[0].fits.replace(
                    "(UTC)", ""
                )
                subs_par_template[1].header["LST"] = newtable["LST"][0]
                subs_par_template[1].header["FEBE"] = febe
                subs_par_template[1].header["SCANDIR"] = format_direction(
                    direction_cut
                ).upper()
                subs_par_template[1].header["SCANNUM"] = self.obsid

                outdir = str(subscan.meta["SubScanID"])
                mkdir_p(os.path.join(self.dirname, outdir))
                new_datapar = os.path.join(outdir, febe + "-DATAPAR.fits")
                subs_par_template.writeto("tmp.fits", overwrite=True)
                force_move_file(
                    "tmp.fits", os.path.join(self.dirname, new_datapar)
                )

            arraydata = os.path.join(
                self.template_dir, "1", "FLASH460L-XFFTS-ARRAYDATA-1.fits"
            )
            new_arraydata_rows = []
            bands = list(combinations[feed].keys())
            for baseband in combinations[feed]:
                nbands = np.max(bands)
                ch = list(combinations[feed][baseband].values())[0]

                packed_data = pack_data(
                    subscan, combinations[feed][baseband], detrend=detrend
                )
                # ------------- Update ARRAYDATA -------------
                with fits.open(arraydata, memmap=False) as subs_template:
                    subs_template[1] = _copy_hdu_and_adapt_length(
                        subs_template[1], n
                    )
                    new_header = reset_all_keywords(subs_template[1].header)
                    new_header["SCANNUM"] = self.obsid
                    new_header["SUBSNUM"] = subscan.meta["SubScanID"]
                    new_header["DATE-OBS"] = self.date_obs.fits
                    new_header["FEBE"] = febe
                    new_header["BASEBAND"] = baseband
                    new_header["NUSEBAND"] = nbands
                    new_header["CHANNELS"] = subscan.meta["channels"]
                    new_header["SKYFREQ"] = (
                        subscan[ch].meta["frequency"].to("Hz").value
                    )
                    if self.restfreq is not None:
                        new_header["RESTFREQ"] = self.restfreq
                    else:
                        new_header["RESTFREQ"] = new_header["SKYFREQ"]
                    bandwidth = subscan[ch].meta["bandwidth"].to("Hz").value
                    new_header["BANDWID"] = bandwidth
                    new_header["FREQRES"] = bandwidth / new_header["CHANNELS"]
                    # Todo: check sideband
                    new_header["SIDEBAND"] = "USB"
                    # Todo: check all these strange keywords. These are
                    # probably NOT the rest frequencies!
                    new_header["1CRVL2F"] = new_header["RESTFREQ"]
                    new_header["1CRVL2S"] = new_header["RESTFREQ"]
                    for i in ["1CRPX2S", "1CRPX2R", "1CRPX2F", "1CRPX2J"]:
                        new_header[i] = (new_header["CHANNELS"] + 1) // 2

                    subs_template[1].header = new_header
                    newtable = Table(subs_template[1].data)
                    newtable["MJD"] = subscan["time"]
                    newtable["DATA"] = packed_data
                    newhdu = fits.table_to_hdu(newtable)
                    subs_template[1].data = newhdu.data
                    subname = febe + "-ARRAYDATA-{}.fits".format(baseband)
                    new_sub = os.path.join(outdir, subname)
                    subs_template.writeto("tmp.fits", overwrite=True)
                    new_arraydata_rows.append(
                        [
                            2,
                            new_sub,
                            "URL",
                            "ARRAYDATA-MBFITS",
                            subscan.meta["SubScanID"],
                            febe,
                            baseband,
                        ]
                    )
                    force_move_file(
                        "tmp.fits", os.path.join(self.dirname, new_sub)
                    )

            # Finally, update GROUPING file
            with fits.open(
                os.path.join(self.dirname, self.GROUPING), memmap=False
            ) as grouping:
                newtable = Table(grouping[1].data)
                if febe not in self.FEBE:
                    nfebe = len(list(self.FEBE.keys()))
                    new_febe = self.add_febe(
                        febe, combinations, feed, subscan[ch].meta, bands=bands
                    )
                    grouping[0].header["FEBE{}".format(nfebe)] = febe
                    grouping[0].header["FREQ{}".format(nfebe)] = (
                        subscan[ch].meta["frequency"].to("Hz").value
                    )
                    grouping[0].header["BWID{}".format(nfebe)] = (
                        subscan[ch].meta["bandwidth"].to("Hz").value
                    )
                    grouping[0].header["LINE{}".format(nfebe)] = ""
                    newtable.add_row(
                        [
                            2,
                            new_febe,
                            "URL",
                            "FEBEPAR-MBFITS",
                            -999,
                            febe,
                            -999,
                        ]
                    )
                    self.FEBE[febe] = new_febe
                newtable.add_row(
                    [2, new_datapar, "URL", "DATAPAR-MBFITS", -999, febe, -999]
                )
                for row in new_arraydata_rows:
                    newtable.add_row(row)
                new_hdu = fits.table_to_hdu(newtable)
                grouping[1].data = new_hdu.data
                grouping[0].header["INSTRUME"] = subscan[ch].meta["backend"]
                grouping[0].header["TELESCOP"] = self.site
                grouping.writeto("tmp.fits", overwrite=True)
            force_move_file(
                "tmp.fits", os.path.join(self.dirname, self.GROUPING)
            )
            if self.test:
                break

    def add_febe(self, febe, feed_info, feed, meta, bands=None):
        """Create the FEBEPAR file for a new FEBE and register it in SCAN.

        Returns the FEBEPAR file name (relative to ``self.dirname``).
        """
        if bands is None:
            bands = [1]
        polar = "N"
        polar_code = polar[0]
        febe_name = febe + "-FEBEPAR.fits"
        with fits.open(
            os.path.join(self.template_dir, "FLASH460L-XFFTS-FEBEPAR.fits"),
            memmap=False,
        ) as febe_template:
            febe_template[1].header = reset_all_keywords(
                febe_template[1].header
            )
            febedata = Table(febe_template[1].data)
            # FEBEFEED stores the total number of feeds for the receiver in
            # use. A receiver outputting two polarisations counts as two
            # feeds. For an array, count the total no. of pixels, even if
            # not all in use.
            febedata["USEBAND"] = np.array([bands])
            febedata["NUSEFEED"] = np.array([[2]])
            febedata["USEFEED"] = np.array(
                [[feed * 2 + 1, feed * 2 + 2, feed * 2 + 1, feed * 2 + 2]]
            )
            febedata["BESECTS"] = np.array([[0]])
            febedata["FEEDTYPE"] = np.array([[1, 2, 3, 4]])
            febedata["POLTY"][:] = np.array([polar_code])
            febedata["POLA"][:] = np.array([[0.0, 0.0]])
            new_hdu = fits.table_to_hdu(febedata)
            febe_template[1].data = new_hdu.data
            # TODO: fill in the information given in the subscan[ch]
            new_febe = os.path.join(self.dirname, febe_name)
            febe_template[1].header["DATE-OBS"] = self.date_obs.fits
            febe_template[1].header["FEBE"] = febe
            febe_template[1].header["FEBEFEED"] = self.nfeeds * 2
            febe_template[1].header["NUSEBAND"] = max(bands)
            febe_template[1].header["NPHASES"] = 1
            febe_template[1].header["SWTCHMOD"] = "NONE"
            febe_template[1].header["SCANNUM"] = self.obsid
            if "Q" in feed_info[feed][bands[0]].keys():
                febe_template[1].header["FDTYPCOD"] = "1:L, 2:R, 3:Q, 4:U"
            else:
                febe_template[1].header["FDTYPCOD"] = "1:L, 2:R"
            febe_template.writeto("tmp.fits", overwrite=True)
        force_move_file("tmp.fits", new_febe)

        with fits.open(
            os.path.join(self.dirname, self.SCAN), memmap=False
        ) as scan:
            newtable = Table(scan[1].data)
            # Replace the placeholder row on first use, append afterwards.
            if newtable["FEBE"][0].strip() == "EMPTY":
                newtable["FEBE"][0] = febe
            else:
                newtable.add_row([febe])
            new_hdu = fits.table_to_hdu(newtable)
            scan[1].data = new_hdu.data
            scanheader = scan[1].header
            scanheader["SITELONG"] = np.degrees(meta["SiteLongitude"])
            scanheader["SITELAT"] = np.degrees(meta["SiteLatitude"])
            scanheader["SITEELEV"] = meta["SiteHeight"]
            diameter = 64.0 if meta["site"].lower().strip() == "srt" else 32.0
            scanheader["DIAMETER"] = diameter
            scanheader["PROJID"] = meta["Project_Name"]
            scan.writeto("tmp.fits", overwrite=True)
        force_move_file("tmp.fits", os.path.join(self.dirname, self.SCAN))
        return febe_name

    def update_scan_info(self):
        """Fill the SCAN header from the accumulated subscan info table."""
        info = get_observing_strategy_from_subscan_info(self.scan_info)
        with fits.open(
            os.path.join(self.dirname, self.SCAN), memmap=False
        ) as scanhdul:
            scanheader = scanhdul[1].header
            # Todo: update with correct keywords
            scanheader["CTYPE"] = info.ctype
            scanheader["CTYPE1"] = "RA---GLS"
            scanheader["CTYPE2"] = "DEC--GLS"
            scanheader["CRVAL1"] = self.ra
            scanheader["CRVAL2"] = self.dec
            scanheader["BLONGOBJ"] = self.ra
            scanheader["BLATOBJ"] = self.dec
            # Equatorial object coordinates make no sense for horizontal
            # (ALON/ALAT) scans, hence the zeros there.
            scanheader["LONGOBJ"] = self.ra if not info.ctype[0] == "A" else 0
            scanheader["LATOBJ"] = self.dec if not info.ctype[0] == "A" else 0
            scanheader["EQUINOX"] = 2000.0
            scanheader["GRPLC1"] = "GROUPING.fits"
            scanheader["LST"] = self.lst
            scanheader["LATPOLE"] = 90.0
            scanheader["LONPOLE"] = 0.0
            scanheader["PATLONG"] = 0
            scanheader["MOVEFRAM"] = False
            if info.ctype == "ALON/ALAT":
                scanheader["WCSNAME"] = "Absolute horizontal"
            scanheader["SCANTYPE"] = info.stype.upper()
            scanheader["SCANDIR"] = info.direction.upper()
            scanheader["SCANXVEL"] = info.scanvel
            scanheader["SCANTIME"] = info.scantime
            scanheader["SCANMODE"] = info.mode.upper()
            scanheader["SCANGEOM"] = info.geom.upper()
            scanheader["SCANLINE"] = 1
            scanheader["SCANLEN"] = np.degrees(info.length)
            scanheader["SCANYSPC"] = np.degrees(info.sep[1])
            scanheader["SCANXSPC"] = np.degrees(info.sep[0])
            scanheader["SCANPAR1"] = -999
            scanheader["SCANPAR2"] = -999
            scanheader["ZIGZAG"] = info.zigzag
            scanheader["PHASE1"] = "sig"
            scanheader["PHASE2"] = "sig"
            scanheader["NOBS"] = info.nobs
            scanheader["NSUBS"] = info.nobs
            scanheader["WOBCYCLE"] = 0.0
            scanheader["WOBDIR"] = "NONE"
            scanheader["WOBMODE"] = "NONE"
            scanheader["WOBPATT"] = "NONE"
            scanhdul.writeto("tmp.fits", overwrite=True)
        force_move_file("tmp.fits", os.path.join(self.dirname, self.SCAN))

    def wrap_up_file(self):
        """Assemble one merged MBFITS file per FEBE.

        Returns
        -------
        fnames : dict
            Maps each FEBE label to the path of its merged FITS file.
        """
        import copy

        prihdu = fits.PrimaryHDU()
        with fits.open(
            os.path.join(self.dirname, self.GROUPING), memmap=False
        ) as grouhdl:
            prihdu.header = copy.deepcopy(grouhdl[0].header)
            file_list = list(
                zip(
                    grouhdl[1].data["MEMBER_LOCATION"],
                    grouhdl[1].data["EXTNAME"],
                    grouhdl[1].data["FEBE"],
                )
            )
        hdulists = {}
        for febe in self.FEBE.keys():
            hdulists[febe] = fits.HDUList([prihdu])
            with fits.open(
                os.path.join(self.dirname, self.SCAN), memmap=False
            ) as scanhdul:
                scanhdul[1].data["FEBE"] = [febe]
                newhdu = type(scanhdul[1])()
                newhdu.data = scanhdul[1].data
                newhdu.header = scanhdul[1].header
                hdulists[febe].append(newhdu)
        for fname, ext, febe in file_list:
            # Rows without a FEBE (e.g. the placeholder) are skipped.
            if febe == "":
                continue
            with fits.open(
                os.path.join(self.dirname, fname), memmap=False
            ) as hl:
                newhdu = type(hl[ext])()
                newhdu.data = hl[ext].data
                newhdu.header = hl[ext].header
                hdulists[febe].append(newhdu)

        fnames = {}
        for febe, hdulist in hdulists.items():
            fname = self.dirname + "." + febe + ".fits"
            hdulist.writeto(fname, overwrite=True)
            hdulist.close()
            fnames[febe] = fname
        return fnames
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import sys
from collections import OrderedDict, namedtuple

from twitter.common.collections import OrderedSet

from pants.option.scope import GLOBAL_SCOPE, GLOBAL_SCOPE_CONFIG_SECTION, ScopeInfo
from pants.util.meta import AbstractClass
# TODO: Switch all clients to reference pants.option.scope directly.
# Backwards-compatibility re-exports: these names used to be defined here.
GLOBAL_SCOPE = GLOBAL_SCOPE
GLOBAL_SCOPE_CONFIG_SECTION = GLOBAL_SCOPE_CONFIG_SECTION
class ArgSplitterError(Exception):
  """Error type for failures while splitting command-line args."""
  pass
class SplitArgs(namedtuple('SplitArgs',
                           ['goals', 'scope_to_flags', 'targets', 'passthru', 'passthru_owner'])):
  """The result of splitting args.

  goals: A list of explicitly specified goals.
  scope_to_flags: An ordered map from scope name to the list of flags belonging to that scope.
                  The global scope is specified as an empty string.
                  Keys are in the order encountered in the args.
  targets: A list of target specs.
  passthru: Any remaining args specified after a -- separator.
  passthru_owner: The scope specified last on the command line, if any. None otherwise.
  """
  pass
class HelpRequest(AbstractClass):
  """Represents an implicit or explicit request for help by the user.

  Subclasses identify the specific kind of help requested.
  """
  pass
class OptionsHelp(HelpRequest):
  """The user requested help for cmd-line options."""

  def __init__(self, advanced=False, all_scopes=False):
    """
    :param advanced: Did the user ask for advanced help (e.g., using --help-advanced).
    :param all_scopes: Did the user ask for help for all goals and tasks (e.g., using --help-all).
    """
    super(OptionsHelp, self).__init__()
    self.advanced = advanced
    self.all_scopes = all_scopes
class VersionHelp(HelpRequest):
  """The user asked for the version of this instance of pants."""
  pass
class UnknownGoalHelp(HelpRequest):
  """The user specified an unknown goal (or task)."""

  def __init__(self, unknown_goals):
    super(UnknownGoalHelp, self).__init__()
    self.unknown_goals = unknown_goals  # The unrecognized goal names, as given on the cmd line.
class NoGoalHelp(HelpRequest):
  """The user specified no goals at all."""
  pass
class ArgSplitter(object):
  """Splits a command-line into scoped sets of flags, and a set of targets.

  Recognizes, e.g.:

    ./pants goal -x compile --foo compile.java -y target1 target2
    ./pants -x compile --foo compile.java -y -- target1 target2
    ./pants -x compile target1 target2 --compile-java-flag
    ./pants -x --compile-java-flag compile target1 target2

  Handles help and version args specially.
  """
  _HELP_BASIC_ARGS = ('-h', '--help', 'help')
  _HELP_ADVANCED_ARGS = ('--help-advanced', 'help-advanced')
  _HELP_ALL_SCOPES_ARGS = ('--help-all', 'help-all')
  _HELP_VERSION_ARGS = ('-v', '-V', '--version')
  _HELP_ARGS = _HELP_BASIC_ARGS + _HELP_ADVANCED_ARGS + _HELP_ALL_SCOPES_ARGS + _HELP_VERSION_ARGS

  def __init__(self, known_scope_infos):
    """
    :param known_scope_infos: ScopeInfos for all scopes that may be encountered.
    """
    self._known_scope_infos = known_scope_infos
    # TODO: Get rid of our reliance on known scopes here. We don't really need it now
    # that we heuristically identify target specs based on it containing /, : or being
    # a top-level directory.
    self._known_scopes = (set([si.scope for si in known_scope_infos]) |
                          {'help', 'help-advanced', 'help-all'})
    self._unknown_scopes = []
    self._unconsumed_args = []  # In reverse order, for efficient popping off the end.
    self._help_request = None  # Will be set if we encounter any help flags.

    # For convenience, and for historical reasons, we allow --scope-flag-name anywhere on the
    # cmd line, as an alternative to ... scope --flag-name.

    # We check for prefixes in reverse order, so we match the longest prefix first.
    sorted_scope_infos = sorted(filter(lambda si: si.scope, self._known_scope_infos),
                                key=lambda si: si.scope, reverse=True)

    # List of pairs (prefix, ScopeInfo).
    self._known_scoping_prefixes = [('{0}-'.format(si.scope.replace('.', '-')), si)
                                    for si in sorted_scope_infos]

  @property
  def help_request(self):
    return self._help_request

  def _check_for_help_request(self, arg):
    """Update the help request state if arg is a help flag; return True if it was."""
    if arg not in self._HELP_ARGS:  # Fixed: was the non-idiomatic `not arg in ...`.
      return False
    if arg in self._HELP_VERSION_ARGS:
      self._help_request = VersionHelp()
    else:
      # First ensure that we have a basic OptionsHelp.
      if not self._help_request:
        self._help_request = OptionsHelp()
      # Now see if we need to enhance it.
      if isinstance(self._help_request, OptionsHelp):
        advanced = self._help_request.advanced or arg in self._HELP_ADVANCED_ARGS
        all_scopes = self._help_request.all_scopes or arg in self._HELP_ALL_SCOPES_ARGS
        self._help_request = OptionsHelp(advanced=advanced, all_scopes=all_scopes)
    return True

  def split_args(self, args=None):
    """Split the specified arg list (or sys.argv if unspecified).

    args[0] is ignored.

    Returns a SplitArgs tuple.
    """
    goals = OrderedSet()
    # An OrderedDict, so iteration order matches the order scopes were encountered,
    # as promised by the SplitArgs docstring (a plain dict is unordered on Python 2).
    scope_to_flags = OrderedDict()

    def add_scope(s):
      # Force the scope to appear, even if empty.
      if s not in scope_to_flags:
        scope_to_flags[s] = []

    targets = []
    passthru = []
    passthru_owner = None

    self._unconsumed_args = list(reversed(sys.argv if args is None else args))
    # In regular use the first token is the binary name, so skip it. However tests may
    # pass just a list of flags, so don't skip it in that case.
    if not self._at_flag() and self._unconsumed_args:
      self._unconsumed_args.pop()
    if self._unconsumed_args and self._unconsumed_args[-1] == 'goal':
      # TODO: Temporary warning. Eventually specifying 'goal' will be an error.
      print("WARNING: Specifying 'goal' explicitly is no longer necessary, and deprecated.",
            file=sys.stderr)
      self._unconsumed_args.pop()

    def assign_flag_to_scope(flag, default_scope):
      flag_scope, descoped_flag = self._descope_flag(flag, default_scope=default_scope)
      if flag_scope not in scope_to_flags:
        scope_to_flags[flag_scope] = []
      scope_to_flags[flag_scope].append(descoped_flag)

    # Global flags come before the first scope.
    global_flags = self._consume_flags()
    add_scope(GLOBAL_SCOPE)
    for flag in global_flags:
      assign_flag_to_scope(flag, GLOBAL_SCOPE)
    # Then alternating runs of a scope followed by its flags.
    scope, flags = self._consume_scope()
    while scope:
      if not self._check_for_help_request(scope.lower()):
        add_scope(scope)
        goals.add(scope.partition('.')[0])
        passthru_owner = scope
        for flag in flags:
          assign_flag_to_scope(flag, scope)
      scope, flags = self._consume_scope()

    # What remains (before any --) is targets, stray global flags, or unknown scopes.
    while self._unconsumed_args and not self._at_double_dash():
      arg = self._unconsumed_args.pop()
      # Fixed: compare against text, not bytes literals. Under unicode_literals the
      # args are text strings, and startswith(b'-') breaks on Python 3.
      if arg.startswith('-'):
        # We assume any args here are in global scope.
        if not self._check_for_help_request(arg):
          assign_flag_to_scope(arg, GLOBAL_SCOPE)
      elif os.path.sep in arg or ':' in arg or os.path.isdir(arg):
        targets.append(arg)
      elif arg not in self._known_scopes:
        self._unknown_scopes.append(arg)

    if self._at_double_dash():
      self._unconsumed_args.pop()
      passthru = list(reversed(self._unconsumed_args))

    if self._unknown_scopes:
      self._help_request = UnknownGoalHelp(self._unknown_scopes)
    if not goals and not self._help_request:
      self._help_request = NoGoalHelp()
    return SplitArgs(goals, scope_to_flags, targets, passthru, passthru_owner if passthru else None)

  def _consume_scope(self):
    """Returns a pair (scope, list of flags encountered in that scope).

    Note that the flag may be explicitly scoped, and therefore not actually belong to this scope.

    For example, in:

      ./pants --compile-java-partition-size-hint=100 compile <target>

    --compile-java-partition-size-hint should be treated as if it were --partition-size-hint=100
    in the compile.java scope.
    """
    if not self._at_scope():
      return None, []
    scope = self._unconsumed_args.pop()
    flags = self._consume_flags()
    return scope, flags

  def _consume_flags(self):
    """Read flags until we encounter the first token that isn't a flag."""
    flags = []
    while self._at_flag():
      flag = self._unconsumed_args.pop()
      if not self._check_for_help_request(flag):
        flags.append(flag)
    return flags

  def _descope_flag(self, flag, default_scope):
    """If the flag is prefixed by its scope, in the old style, extract the scope.

    Otherwise assume it belongs to default_scope.

    Returns a pair (scope, flag).
    """
    for scope_prefix, scope_info in self._known_scoping_prefixes:
      for flag_prefix in ['--', '--no-']:
        prefix = flag_prefix + scope_prefix
        if flag.startswith(prefix):
          scope = scope_info.scope
          if scope_info.category == ScopeInfo.SUBSYSTEM and default_scope != GLOBAL_SCOPE:
            # We allow goal.task --subsystem-foo to refer to the task-level subsystem instance,
            # i.e., as if qualified by --subsystem-goal-task-foo.
            # Note that this means that we can't set a task option on the cmd-line if its
            # name happens to start with a subsystem scope.
            # TODO: Either fix this or at least detect such options and warn.
            task_subsystem_scope = '{}.{}'.format(scope_info.scope, default_scope)
            if task_subsystem_scope in self._known_scopes:  # Such a task subsystem actually exists.
              scope = task_subsystem_scope
          return scope, flag_prefix + flag[len(prefix):]
    return default_scope, flag

  def _at_flag(self):
    # Fixed: text literal instead of b'-' (see split_args).
    return (self._unconsumed_args and
            self._unconsumed_args[-1].startswith('-') and
            not self._at_double_dash())

  def _at_scope(self):
    return self._unconsumed_args and self._unconsumed_args[-1] in self._known_scopes

  def _at_double_dash(self):
    # Fixed: text literal instead of b'--' (see split_args).
    return self._unconsumed_args and self._unconsumed_args[-1] == '--'
| |
import json
from ichnaea.conftest import GB_LAT, GB_LON, GB_MCC
from ichnaea.models import (
BlueObservation,
BlueReport,
CellObservation,
CellReport,
constants,
Radio,
Report,
ReportSource,
WifiObservation,
WifiReport,
)
from ichnaea.tests.factories import (
BlueObservationFactory,
CellObservationFactory,
WifiObservationFactory,
)
class BaseTest(object):
    """Shared helper for validation tests; subclasses define sample()."""

    def compare(self, name, value, expect):
        # Build a sample with field ``name`` set to ``value`` and check
        # that validation normalized it to ``expect``.
        validated = self.sample(**{name: value})
        assert validated[name] == expect
class TestReport(BaseTest):
    """Validation tests for the shared Report fields."""

    def sample(self, **kwargs):
        report = {"lat": GB_LAT, "lon": GB_LON}
        report.update(kwargs)
        return Report.validate(report)

    def test_latlon(self):
        assert self.sample(lat=GB_LAT, lon=GB_LON) is not None
        assert self.sample(lat=0.0, lon=0.0) is None
        assert self.sample(lat=GB_LAT, lon=None) is None

    def test_accuracy(self):
        for value, expect in (
            (constants.MIN_ACCURACY - 0.1, None),
            (0.0, 0.0),
            (10.2, 10.2),
            (constants.MAX_ACCURACY + 0.1, None),
        ):
            self.compare("accuracy", value, expect)

    def test_altitude(self):
        for value, expect in (
            (constants.MIN_ALTITUDE - 0.1, None),
            (-100.0, -100.0),
            (0.0, 0.0),
            (10.1, 10.1),
            (constants.MAX_ALTITUDE + 0.1, None),
        ):
            self.compare("altitude", value, expect)

    def test_altitude_accuracy(self):
        for value, expect in (
            (constants.MIN_ALTITUDE_ACCURACY - 0.1, None),
            (0.0, 0.0),
            (10.2, 10.2),
            (constants.MAX_ALTITUDE_ACCURACY + 0.1, None),
        ):
            self.compare("altitude_accuracy", value, expect)

    def test_heading(self):
        for value, expect in (
            (constants.MIN_HEADING - 0.1, None),
            (0.0, 0.0),
            (357.2, 357.2),
            (constants.MAX_HEADING + 0.1, None),
        ):
            self.compare("heading", value, expect)

    def test_pressure(self):
        for value, expect in (
            (constants.MIN_PRESSURE - 0.1, None),
            (870.1, 870.1),
            (1080.2, 1080.2),
            (constants.MAX_PRESSURE + 0.1, None),
        ):
            self.compare("pressure", value, expect)

    def test_source(self):
        valid_sources = (
            ReportSource.fixed,
            ReportSource.gnss,
            ReportSource.fused,
            ReportSource.query,
        )
        for source in valid_sources:
            self.compare("source", source, source)
        # A string value is coerced to the matching enum member.
        self.compare("source", "gnss", ReportSource.gnss)

    def test_speed(self):
        for value, expect in (
            (constants.MIN_SPEED - 0.1, None),
            (0.0, 0.0),
            (100.1, 100.1),
            (constants.MAX_SPEED + 0.1, None),
        ):
            self.compare("speed", value, expect)

    def test_timestamp(self):
        for value, expect in (
            (constants.MIN_TIMESTAMP - 1, None),
            (1405602028568, 1405602028568),
            (constants.MAX_TIMESTAMP + 1, None),
        ):
            self.compare("timestamp", value, expect)
class TestBlueObservation(BaseTest):
    """Tests for BlueObservation creation, JSON round-trip and weighting."""

    def test_fields(self):
        mac = "3680873e9b83"
        observation = BlueObservation.create(
            mac=mac,
            lat=GB_LAT,
            lon=GB_LON,
            pressure=1010.2,
            source="fixed",
            timestamp=1405602028568,
            signal=-45,
        )
        assert observation.lat == GB_LAT
        assert observation.lon == GB_LON
        assert observation.mac == mac
        assert observation.pressure == 1010.2
        assert observation.signal == -45
        # The string "fixed" is coerced to the enum member.
        assert observation.source is ReportSource.fixed
        assert observation.timestamp == 1405602028568
        assert observation.shard_id == "8"

    def test_json(self):
        original = BlueObservationFactory.build(accuracy=None, source=ReportSource.gnss)
        roundtrip = BlueObservation.from_json(json.loads(json.dumps(original.to_json())))
        assert type(roundtrip) is BlueObservation
        assert roundtrip.accuracy is None
        assert roundtrip.mac == original.mac
        assert roundtrip.lat == original.lat
        assert roundtrip.lon == original.lon
        assert roundtrip.source is ReportSource.gnss
        assert type(roundtrip.source) is ReportSource

    def test_weight(self):
        build = BlueObservationFactory.build
        # (factory kwargs, expected weight rounded to 2 decimals)
        cases = (
            (dict(accuracy=None), 1.0),
            (dict(accuracy=0.0), 1.0),
            (dict(accuracy=10.0), 1.0),
            (dict(accuracy=40.0), 0.5),
            (dict(accuracy=100.0), 0.32),
            (dict(accuracy=100.1), 0.0),
            (dict(accuracy=None, age=1000), 1.0),
            (dict(accuracy=None, age=8000), 0.5),
            (dict(accuracy=None, age=20001), 0.0),
            (dict(accuracy=None, speed=None), 1.0),
            (dict(accuracy=None, speed=0.0), 1.0),
            (dict(accuracy=None, speed=1.0), 1.0),
            (dict(accuracy=None, speed=20.0), 0.5),
            (dict(accuracy=None, speed=51.0), 0.0),
        )
        for kwargs, expected in cases:
            assert round(build(**kwargs).weight, 2) == expected
class TestBlueReport(BaseTest):
    """Validation tests for Bluetooth report fields."""

    def sample(self, **kwargs):
        report = {"mac": "3680873e9b83"}
        report.update(kwargs)
        return BlueReport.validate(report)

    def test_mac(self):
        assert self.sample(mac="3680873e9b83") is not None
        for invalid in ("", "1234567890123", "aaaaaaZZZZZZ"):
            assert self.sample(mac=invalid) is None

    def test_age(self):
        for value, expect in (
            (constants.MIN_AGE - 1, None),
            (-40000, -40000),
            (60000, 60000),
            (constants.MAX_AGE + 1, None),
        ):
            self.compare("age", value, expect)

    def test_signal(self):
        for value, expect in (
            (constants.MIN_BLUE_SIGNAL - 1, None),
            (-90, -90),
            (-10, -10),
            (constants.MAX_BLUE_SIGNAL + 1, None),
        ):
            self.compare("signal", value, expect)
class TestCellObservation(BaseTest):
    """Tests for CellObservation creation, JSON round-trip and weighting."""

    def test_fields(self):
        """All supplied fields survive creation unchanged."""
        obs = CellObservation.create(
            radio=Radio.gsm,
            mcc=GB_MCC,
            mnc=5,
            lac=12345,
            cid=23456,
            lat=GB_LAT,
            lon=GB_LON,
            pressure=1010.2,
            source="gnss",
            timestamp=1405602028568,
            asu=26,
            signal=-61,
            ta=10,
        )
        assert obs.lat == GB_LAT
        assert obs.lon == GB_LON
        assert obs.pressure == 1010.2
        assert obs.source == ReportSource.gnss
        assert obs.timestamp == 1405602028568
        assert obs.radio == Radio.gsm
        assert obs.mcc == GB_MCC
        assert obs.mnc == 5
        assert obs.lac == 12345
        assert obs.cid == 23456
        assert obs.asu == 26
        assert obs.signal == -61
        assert obs.ta == 10
        assert obs.shard_id == "gsm"

    def test_mcc_latlon(self):
        """The mcc must match the lat/lon region (262 is Germany, not GB)."""
        sample = dict(radio=Radio.gsm, mnc=6, lac=1, cid=2, lat=GB_LAT, lon=GB_LON)
        assert CellObservation.create(mcc=GB_MCC, **sample) is not None
        assert CellObservation.create(mcc=262, **sample) is None

    def test_json(self):
        """Observations survive a JSON round-trip with types intact."""
        obs = CellObservationFactory.build(accuracy=None, source="fixed")
        result = CellObservation.from_json(json.loads(json.dumps(obs.to_json())))
        assert type(result) is CellObservation
        assert result.accuracy is None
        # BUG FIX: this was ``assert type(result.radio), Radio`` -- an assert
        # with an (unused) message argument, which could never fail.
        assert type(result.radio) is Radio
        assert result.radio == obs.radio
        assert result.mcc == obs.mcc
        assert result.mnc == obs.mnc
        assert result.lac == obs.lac
        assert result.cid == obs.cid
        assert result.lat == obs.lat
        assert result.lon == obs.lon
        assert result.source is ReportSource.fixed
        assert type(result.source) is ReportSource

    def test_weight(self):
        """Observation weight scales with accuracy, signal, age and speed."""
        obs_factory = CellObservationFactory.build
        # (radio, accuracy, signal, expected weight rounded to 2 decimals)
        signal_cases = (
            (Radio.gsm, None, -95, 1.0),
            (Radio.gsm, 0.0, -95, 1.0),
            (Radio.gsm, 10.0, -95, 1.0),
            (Radio.gsm, 160, -95, 0.25),
            (Radio.gsm, 200, -95, 0.22),
            (Radio.gsm, 1000, -95, 0.1),
            (Radio.gsm, 1000.1, -95, 0.0),
            (Radio.gsm, 10.0, -51, 10.17),
            (Radio.gsm, 160.0, -51, 2.54),
            (Radio.gsm, 10.0, -113, 0.52),
            (Radio.wcdma, 10.0, -25, 256.0),
            (Radio.wcdma, 160.0, -25, 64.0),
            (Radio.wcdma, 10.0, -121, 0.47),
            (Radio.lte, 10.0, -43, 47.96),
            (Radio.lte, 160.0, -43, 11.99),
            (Radio.lte, 10.0, -140, 0.3),
        )
        for radio, accuracy, signal, expected in signal_cases:
            obs = obs_factory(radio=radio, accuracy=accuracy, signal=signal)
            assert round(obs.weight, 2) == expected
        # Age and speed reduce the weight independently of signal.
        extra_cases = (
            (dict(accuracy=0, age=1000), 1.0),
            (dict(accuracy=0, age=8000), 0.5),
            (dict(accuracy=0, age=20001), 0.0),
            (dict(accuracy=0, speed=None), 1.0),
            (dict(accuracy=0, speed=0.0), 1.0),
            (dict(accuracy=0, speed=1.0), 1.0),
            (dict(accuracy=0, speed=20.0), 0.5),
            (dict(accuracy=0, speed=50.1), 0.0),
        )
        for kwargs, expected in extra_cases:
            assert round(obs_factory(**kwargs).weight, 2) == expected
class TestCellReport(BaseTest):
    """Validation tests for cell report fields."""

    def sample(self, **kwargs):
        """Return a validated report seeded with a valid GB GSM cell."""
        report = {"radio": Radio.gsm, "mcc": GB_MCC, "mnc": 1, "lac": 2, "cid": 3}
        report.update(kwargs)
        return CellReport.validate(report)

    def test_cellid(self):
        """Every part of the cell identifier is required."""
        assert self.sample() is not None
        assert self.sample(radio=None) is None
        assert self.sample(mcc=None) is None
        assert self.sample(mnc=None) is None
        assert self.sample(lac=None) is None
        assert self.sample(cid=None) is None

    def test_radio(self):
        field = "radio"
        self.compare(field, "gsm", Radio.gsm)
        self.compare(field, "wcdma", Radio.wcdma)
        self.compare(field, "lte", Radio.lte)
        # Unsupported radio types invalidate the whole report.
        assert self.sample(radio="cdma") is None
        assert self.sample(radio="hspa") is None
        assert self.sample(radio="wimax") is None

    def test_mcc(self):
        self.compare("mcc", 262, 262)
        assert self.sample(mcc=constants.MIN_MCC - 1) is None
        assert self.sample(mcc=constants.MAX_MCC + 1) is None

    def test_mnc(self):
        self.compare("mnc", 5, 5)
        assert self.sample(mnc=constants.MIN_MNC - 1) is None
        assert self.sample(mnc=constants.MAX_MNC + 1) is None

    def test_lac(self):
        self.compare("lac", 5, 5)
        assert self.sample(lac=constants.MIN_LAC - 1) is None
        assert self.sample(lac=constants.MAX_LAC + 1) is None

    def test_lac_cid(self):
        # A missing lac invalidates the report, with or without a psc.
        assert (
            self.sample(radio=Radio.gsm, lac=None, cid=constants.MAX_CID_GSM, psc=None)
            is None
        )
        assert (
            self.sample(radio=Radio.gsm, lac=None, cid=constants.MAX_CID_GSM, psc=1)
            is None
        )

    def test_cid(self):
        for radio in (Radio.gsm, Radio.wcdma, Radio.lte):
            assert self.sample(radio=radio, cid=constants.MIN_CID - 1) is None
            assert self.sample(radio=radio, cid=12345)["cid"] == 12345
            assert self.sample(radio=radio, cid=constants.MAX_CID + 1) is None
        # correct radio type for large GSM cid
        cid = constants.MAX_CID_GSM + 1
        assert self.sample(radio=Radio.gsm, cid=cid)["radio"] is Radio.wcdma
        # accept large WCDMA/LTE cid
        assert self.sample(radio=Radio.wcdma, cid=cid)["cid"] == cid
        assert self.sample(radio=Radio.lte, cid=cid)["cid"] == cid

    def test_psc(self):
        for radio in (Radio.gsm, Radio.wcdma, Radio.lte):
            assert self.sample(radio=radio, psc=constants.MIN_PSC - 1)["psc"] is None
            assert self.sample(radio=radio, psc=15)["psc"] == 15
            # BUG FIX: this previously passed ``cid=constants.MAX_PSC + 1``,
            # which left psc unset and made the assertion vacuous.
            assert self.sample(radio=radio, psc=constants.MAX_PSC + 1)["psc"] is None
        assert (
            self.sample(radio=Radio.lte, psc=constants.MAX_PSC_LTE + 1)["psc"] is None
        )

    def test_age(self):
        field = "age"
        self.compare(field, constants.MIN_AGE - 1, None)
        self.compare(field, -40000, -40000)
        self.compare(field, 60000, 60000)
        self.compare(field, constants.MAX_AGE + 1, None)

    def test_asu(self):
        """ASU bounds are radio-type specific."""
        for radio in (Radio.gsm, Radio.wcdma, Radio.lte):
            assert (
                self.sample(radio=radio, asu=constants.MIN_CELL_ASU[radio] - 1)["asu"]
                is None
            )
            assert self.sample(radio=radio, asu=15)["asu"] == 15
            assert (
                self.sample(radio=radio, asu=constants.MAX_CELL_ASU[radio] + 1)["asu"]
                is None
            )

    def test_asu_signal(self):
        for radio in (Radio.gsm, Radio.wcdma, Radio.lte):
            # if both are specified, leave them untouched
            assert self.sample(radio=radio, asu=15, signal=-75)["signal"] == -75
        for radio, signal in ((Radio.gsm, -83), (Radio.wcdma, -101), (Radio.lte, -125)):
            # calculate signal from asu
            assert self.sample(radio=radio, asu=15, signal=None)["signal"] == signal
            # switch asu/signal fields when asu looks like a dBm value
            assert self.sample(radio=radio, asu=signal, signal=None)["signal"] == signal
            assert self.sample(radio=radio, asu=signal, signal=10)["signal"] == signal

    def test_signal(self):
        """Signal bounds are radio-type specific."""
        for radio in (Radio.gsm, Radio.wcdma, Radio.lte):
            assert (
                self.sample(radio=radio, signal=constants.MIN_CELL_SIGNAL[radio] - 1)[
                    "signal"
                ]
                is None
            )
            assert self.sample(radio=radio, signal=-75)["signal"] == -75
            assert (
                self.sample(radio=radio, signal=constants.MAX_CELL_SIGNAL[radio] + 1)[
                    "signal"
                ]
                is None
            )

    def test_ta(self):
        """Timing advance is only kept for GSM and LTE."""
        field = "ta"
        self.compare(field, constants.MIN_CELL_TA - 1, None)
        self.compare(field, 0, 0)
        self.compare(field, 31, 31)
        self.compare(field, constants.MAX_CELL_TA + 1, None)
        assert self.sample(radio=Radio.gsm, ta=1)["ta"] == 1
        assert self.sample(radio=Radio.wcdma, ta=1)["ta"] is None
        assert self.sample(radio=Radio.lte, ta=1)["ta"] == 1
class TestWifiObservation(BaseTest):
    """Tests for WifiObservation creation, JSON round-trip and weighting."""

    def test_invalid(self):
        # A 0/0 position is rejected regardless of the mac value.
        for mac in ("3680873e9b83", ""):
            assert WifiObservation.create(mac=mac, lat=0.0, lon=0.0) is None

    def test_fields(self):
        mac = "3680873e9b83"
        observation = WifiObservation.create(
            mac=mac,
            lat=GB_LAT,
            lon=GB_LON,
            pressure=1010.2,
            source=ReportSource.query,
            timestamp=1405602028568,
            channel=5,
            signal=-45,
        )
        assert observation.lat == GB_LAT
        assert observation.lon == GB_LON
        assert observation.mac == mac
        assert observation.pressure == 1010.2
        assert observation.source == ReportSource.query
        assert observation.timestamp == 1405602028568
        assert observation.channel == 5
        assert observation.signal == -45
        assert observation.shard_id == "8"

    def test_json(self):
        original = WifiObservationFactory.build(accuracy=None, source=ReportSource.query)
        roundtrip = WifiObservation.from_json(json.loads(json.dumps(original.to_json())))
        assert type(roundtrip) is WifiObservation
        assert roundtrip.accuracy is None
        assert roundtrip.mac == original.mac
        assert roundtrip.lat == original.lat
        assert roundtrip.lon == original.lon
        assert roundtrip.source == ReportSource.query
        assert type(roundtrip.source) is ReportSource

    def test_weight(self):
        build = WifiObservationFactory.build
        # (factory kwargs, expected weight rounded to 2 decimals)
        cases = (
            (dict(accuracy=None, signal=-80), 1.0),
            (dict(accuracy=0.0, signal=-80), 1.0),
            (dict(accuracy=10.0, signal=-80), 1.0),
            (dict(accuracy=40.0, signal=-80), 0.5),
            (dict(accuracy=100, signal=-80), 0.32),
            (dict(accuracy=200, signal=-80), 0.22),
            (dict(accuracy=200.1, signal=-80), 0.0),
            (dict(accuracy=10, signal=-100), 0.48),
            (dict(accuracy=10, signal=-30), 16.0),
            (dict(accuracy=10, signal=-10), 123.46),
            (dict(accuracy=40, signal=-30), 8.0),
            (dict(accuracy=100, signal=-30), 5.06),
            (dict(accuracy=100, signal=-10), 39.04),
            (dict(accuracy=0, age=0), 1.0),
            (dict(accuracy=0, age=1000), 1.0),
            (dict(accuracy=0, age=-1000), 1.0),
            (dict(accuracy=0, age=5000), 0.63),
            (dict(accuracy=0, age=8000), 0.5),
            (dict(accuracy=0, age=20001), 0.0),
            (dict(accuracy=0, speed=None), 1.0),
            (dict(accuracy=0, speed=0.0), 1.0),
            (dict(accuracy=0, speed=1.0), 1.0),
            (dict(accuracy=0, speed=20.0), 0.5),
            (dict(accuracy=0, speed=50.1), 0.0),
        )
        for kwargs, expected in cases:
            assert round(build(**kwargs).weight, 2) == expected
class TestWifiReport(BaseTest):
    """Validation tests for WiFi report fields."""

    def sample(self, **kwargs):
        report = {"mac": "3680873e9b83"}
        report.update(kwargs)
        return WifiReport.validate(report)

    def test_mac(self):
        valid_macs = (
            "3680873e9b83",
            "3680873E9B83",
            "36:80:87:3e:9b:83",
            "36-80-87-3e-9b-83",
            "36.80.87.3e.9b.83",
            # We considered but do not ban locally administered WiFi
            # mac addresses based on the U/L bit
            # https://en.wikipedia.org/wiki/MAC_address
            "0a0000000000",
        )
        for mac in valid_macs:
            assert self.sample(mac=mac) is not None
        invalid_macs = (
            "",
            "1234567890123",
            "aaaaaaZZZZZZ",
            "000000000000",
            "ffffffffffff",
            constants.WIFI_TEST_MAC,
        )
        for mac in invalid_macs:
            assert self.sample(mac=mac) is None

    def test_age(self):
        for value, expect in (
            (constants.MIN_AGE - 1, None),
            (-40000, -40000),
            (60000, 60000),
            (constants.MAX_AGE + 1, None),
        ):
            self.compare("age", value, expect)

    def test_channel(self):
        for value, expect in (
            (constants.MIN_WIFI_CHANNEL - 1, None),
            (1, 1),
            (36, 36),
            (constants.MAX_WIFI_CHANNEL + 1, None),
        ):
            self.compare("channel", value, expect)

    def test_channel_frequency(self):
        # ((channel, frequency) input, (channel, frequency) expected)
        cases = (
            ((0, 10), (None, None)),
            ((0, 2412), (1, 2412)),
            ((4, 10), (4, 2427)),
            ((1, 2427), (1, 2427)),
        )
        for (channel, frequency), (want_channel, want_frequency) in cases:
            validated = self.sample(channel=channel, frequency=frequency)
            assert validated["channel"] == want_channel
            assert validated["frequency"] == want_frequency

    def test_frequency(self):
        for value, expect in (
            (constants.MIN_WIFI_FREQUENCY - 1, None),
            (2412, 2412),
            (2484, 2484),
            (4915, 4915),
            (5170, 5170),
            (5925, 5925),
            (constants.MAX_WIFI_FREQUENCY + 1, None),
        ):
            self.compare("frequency", value, expect)

    def test_signal(self):
        for value, expect in (
            (constants.MIN_WIFI_SIGNAL - 1, None),
            (-90, -90),
            (-10, -10),
            (constants.MAX_WIFI_SIGNAL + 1, None),
        ):
            self.compare("signal", value, expect)

    def test_snr(self):
        for value, expect in (
            (constants.MIN_WIFI_SNR - 1, None),
            (1, 1),
            (40, 40),
            (constants.MAX_WIFI_SNR + 1, None),
        ):
            self.compare("snr", value, expect)
| |
###################################################################
# Semi-supervised EA-Regularized multilayer perceptron ensembles. #
###################################################################
import numpy as np
import numpy.random as npr
import theano
import theano.tensor as T
import cPickle
#from theano.tensor.shared_randomstreams import RandomStreams as RandStream
from theano.sandbox.cuda.rng_curand import CURAND_RandomStreams as RandStream
from NetLayers import HiddenLayer, JoinLayer, DAELayer, \
relu_actfun, safe_softmax
#######################################################################
# DIVERGENCE MEASURES: Smoothed distribution comparison costs, below. #
#######################################################################
def smooth_kl_divergence(p, q):
    """Measure KL(p || q) after softmax-smoothing both arguments.

    p and q arrive as relative log-likelihood encodings; safe_softmax turns
    them into sum-to-one distributions before the divergence is computed.
    """
    p_dist = safe_softmax(p)
    q_dist = safe_softmax(q)
    # KL(p || q) == cross_entropy(p, q) - entropy(p)
    return T.sum(p_dist * (T.log(p_dist) - T.log(q_dist)), axis=1, keepdims=True)
def smooth_js_divergence(p, q):
    """Measure the Jensen-Shannon divergence between (log-space) p and q.

    Both inputs are softmax-smoothed into distributions first; the JS
    divergence is the mean of each distribution's KL to their midpoint.
    """
    p_dist = safe_softmax(p)
    q_dist = safe_softmax(q)
    midpoint = (p_dist + q_dist) / 2.0

    def _kl_to_mid(dist):
        # KL(dist || midpoint), summed over the class axis.
        return T.sum(dist * (T.log(dist) - T.log(midpoint)), axis=1, keepdims=True)

    return (_kl_to_mid(p_dist) + _kl_to_mid(q_dist)) / 2.0
def smooth_cross_entropy(p, q):
    """Measure the cross-entropy H(p, q) after softmax-smoothing.

    p is the "true" distribution and q the "approximate" one; both arrive
    as relative log-likelihood encodings and are smoothed via safe_softmax.
    """
    p_dist = safe_softmax(p)
    q_dist = safe_softmax(q)
    # cross_entropy(p, q) == entropy(p) + kl_divergence(p, q)
    return -T.sum(p_dist * T.log(q_dist), axis=1, keepdims=True)
##########################
# NETWORK IMPLEMENTATION #
##########################
class PeaNet(object):
"""
A multi-purpose ensemble of noise-perturbed neural networks. This class
constructs and manages the computation graph for a pseudo-ensemble, and
provides costs for imposing pseudo-ensemble agreement regularization on
the pseudo-ensemble. (i.e. droppy fuzzy networks)
Parameters:
rng: a numpy.random RandomState object
Xd: Theano symbolic matrix for "observation" inputs to this PeaNet
params: a dict of parameters describing the desired ensemble:
lam_l2a: L2 regularization weight on neuron activations
vis_drop: drop rate to use on input layers (when desired)
hid_drop: drop rate to use on hidden layers (when desired)
-- note: vis_drop/hid_drop are optional, with defaults 0.2/0.5
activation: non-linearity to apply in hidden layers
init_scale: scaling factor for hidden layer weights
proto_configs: list of lists, where each sublist gives the number
of neurons to put in each hidden layer one of the
proto-networks underlying this ensemble. Sub-lists
need not be the same length, but their first values
should all match, as should their last values. This
is because the proto-nets all take the same input
and output predictions over the same classes.
spawn_configs: list of dicts, where each dict describes the basic
values needed for spawning a noise-perturbed net
from some proto-net. The dict should contain keys:
proto_key: which proto-net to spawn from
input_noise: amount of noise on layer inputs
bias_noise: amount of noise on layer biases
do_dropout: whether to apply dropout
shared_param_dicts: parameters for the MLP controlled by this PeaNet
"""
    def __init__(self,
            rng=None, \
            Xd=None, \
            params=None, \
            shared_param_dicts=None):
        """Build the proto-networks and their noise-perturbed spawn-networks.

        See the class docstring for the meaning of rng, Xd, params and
        shared_param_dicts.  Despite the None defaults, rng and params are
        effectively required (rng.randint and params[...] are called
        unconditionally).
        """
        # First, setup a shared random number generator for this layer
        self.rng = RandStream(rng.randint(100000))
        ################################################
        # Process user-suplied parameters for this net #
        ################################################
        assert(not (params is None))
        assert(len(params['proto_configs']) == 1) # permit only one proto-net
        assert(len(params['spawn_configs']) <= 2) # use one or two spawn nets
        assert(len(params['spawn_configs']) > 0)
        self.Xd = Xd # symbolic input to this computation graph
        self.params = params
        lam_l2a = params['lam_l2a']
        # Optional parameters, with defaults used when absent.
        if 'vis_drop' in params:
            self.vis_drop = params['vis_drop']
        else:
            self.vis_drop = 0.2
        if 'hid_drop' in params:
            self.hid_drop = params['hid_drop']
        else:
            self.hid_drop = 0.5
        if 'activation' in params:
            self.activation = params['activation']
        else:
            self.activation = relu_actfun
        if 'init_scale' in params:
            self.init_scale = params['init_scale']
        else:
            self.init_scale = 1.0
        self.proto_configs = params['proto_configs']
        self.spawn_configs = params['spawn_configs']
        # Compute some "structural" properties of this ensemble
        self.max_proto_depth = max([(len(pc)-1) for pc in self.proto_configs])
        self.spawn_count = len(self.spawn_configs)
        # Check if the params for this net were given a priori. This option
        # will be used for creating "clones" of a generative network, with all
        # of the network parameters shared between clones.
        if shared_param_dicts is None:
            # This is not a clone, and we will need to make a dict for
            # referring to the parameters of each network layer
            self.shared_param_dicts = []
            self.is_clone = False
        else:
            # This is a clone, and its layer parameters can be found by
            # referring to the given param dict (i.e. shared_param_dicts).
            self.shared_param_dicts = shared_param_dicts
            self.is_clone = True
        ########################################
        # Initialize all of the proto-networks #
        ########################################
        self.proto_nets = []
        # Construct the proto-networks from which to generate spawn-sembles
        for (pn_num, proto_config) in enumerate(self.proto_configs):
            layer_defs = [ld for ld in proto_config]
            # Pair each layer's input size with the next layer's output size.
            layer_connect_defs = zip(layer_defs[:-1], layer_defs[1:])
            layer_num = 0
            proto_net = []
            next_input = self.Xd
            for in_def, out_def in layer_connect_defs:
                last_layer = (layer_num == (len(layer_connect_defs) - 1))
                pnl_name = "pn{0:d}l{1:d}".format(pn_num, layer_num)
                # A list/tuple layer def means (dim, pool_size); a bare int
                # means a plain layer with no pooling.
                if (type(in_def) is list) or (type(in_def) is tuple):
                    # Receiving input from a poolish layer...
                    in_dim = in_def[0]
                else:
                    # Receiving input from a normal layer...
                    in_dim = in_def
                if (type(out_def) is list) or (type(out_def) is tuple):
                    # Applying some sort of pooling in this layer...
                    out_dim = out_def[0]
                    pool_size = out_def[1]
                else:
                    # Not applying any pooling in this layer...
                    out_dim = out_def
                    pool_size = 0
                # Scale initial weights by 1/sqrt(fan-in), times init_scale.
                i_scale = (1.0 / np.sqrt(in_dim)) * self.init_scale
                # Add a new layer to the regular model
                if not self.is_clone:
                    ##########################################
                    # Initialize a layer with new parameters #
                    ##########################################
                    new_layer = HiddenLayer(rng=rng, input=next_input, \
                            activation=None, pool_size=pool_size, \
                            drop_rate=0., input_noise=0., bias_noise=0., \
                            in_dim=in_dim, out_dim=out_dim, \
                            name=pnl_name, W_scale=i_scale)
                    proto_net.append(new_layer)
                    # Record this layer's shared params so clones can reuse them.
                    self.shared_param_dicts.append( \
                            {'W': new_layer.W, 'b': new_layer.b, \
                            'b_in': new_layer.b_in, 's_in': new_layer.s_in})
                else:
                    ##################################################
                    # Initialize a layer with some shared parameters #
                    ##################################################
                    init_params = self.shared_param_dicts[layer_num]
                    new_layer = HiddenLayer(rng=rng, input=next_input, \
                            activation=None, pool_size=pool_size, \
                            drop_rate=0., input_noise=0., bias_noise=0., \
                            in_dim=in_dim, out_dim=out_dim, \
                            W=init_params['W'], b=init_params['b'], \
                            b_in=init_params['b_in'], s_in=init_params['s_in'], \
                            name=pnl_name, W_scale=i_scale)
                    proto_net.append(new_layer)
                next_input = proto_net[-1].output
                layer_num = layer_num + 1
            # Add this network to the list of proto-networks, and add its
            # param dict to the list of pro-net param dicts, if not a clone
            self.proto_nets.append(proto_net)
        #################################################################
        # Initialize all of the spawned (i.e. noise-perturbed) networks #
        #################################################################
        self.spawn_nets = []
        self.proto_keys = []
        for spawn_config in self.spawn_configs:
            proto_key = spawn_config['proto_key']
            self.proto_keys.append(proto_key)
            print("spawned from proto-net: {0:d} (of {1:d})".format(proto_key, \
                    len(self.proto_nets)))
            input_noise = spawn_config['input_noise']
            bias_noise = spawn_config['bias_noise']
            do_dropout = spawn_config['do_dropout']
            assert((proto_key >= 0) and (proto_key < len(self.proto_nets)))
            # Get info about the proto-network to spawn from
            layer_num = 0
            spawn_net = []
            next_input = self.Xd
            proto_net = self.proto_nets[proto_key]
            for proto_layer in proto_net:
                last_layer = (layer_num == (len(proto_net) - 1))
                # Input noise and the visible-layer drop rate apply only to
                # the first layer; deeper layers use the hidden drop rate.
                layer_in = input_noise if (layer_num == 0) else 0.0
                d_prob = self.vis_drop if (layer_num == 0) else self.hid_drop
                drop_prob = d_prob if do_dropout else 0.0
                # Get important properties from the relevant proto-layer
                actfun = proto_layer.activation
                pool_size = proto_layer.pool_size
                in_dim = proto_layer.in_dim
                out_dim = proto_layer.out_dim
                # Add a new layer to the regular model; it shares the
                # proto-layer's parameters but adds noise/dropout.
                spawn_net.append(HiddenLayer(rng=rng, \
                        input=next_input, activation=actfun, \
                        pool_size=pool_size, drop_rate=drop_prob, \
                        input_noise=layer_in, bias_noise=bias_noise, \
                        W=proto_layer.W, b=proto_layer.b, \
                        b_in=proto_layer.b_in, s_in=proto_layer.s_in, \
                        in_dim=in_dim, out_dim=out_dim))
                next_input = spawn_net[-1].output
                layer_num = layer_num + 1
            # Add this network to the list of spawn-networks
            self.spawn_nets.append(spawn_net)
        # Mash all the parameters together, into a list. Also make a list
        # comprising only parameters located in final/classification layers
        # of the proto-networks (for use in fine-tuning, probably).
        self.proto_params = []
        self.class_params = []
        for pn in self.proto_nets:
            for (i, pl) in enumerate(pn):
                self.proto_params.extend(pl.params)
                if (i == (len(pn) - 1)):
                    self.class_params.extend(pl.params)
        # Build loss functions for denoising autoencoder training. This sets up
        # a cost function for each possible layer, as determined by the maximum
        # number of layers in any proto-network. The DAE cost for layer i will
        # be the mean DAE cost over all i'th layers in the proto-networks.
        self.dae_lam_l1 = theano.shared( \
                value=np.asarray([0.2]).astype(theano.config.floatX))
        self._construct_dae_layers(rng, lam_l1=self.dae_lam_l1, nz_lvl=0.25)
        # create symbolic "hooks" for observing the output of this network,
        # either without perturbations or subject to perturbations
        self.output_proto = self.proto_nets[0][-1].linear_output
        self.output_spawn = [sn[-1].linear_output for sn in self.spawn_nets]
        # get a cost function for encouraging "pseudo-ensemble agreement"
        self.pea_reg_cost = self._ear_cost()
        # get a cost function for penalizing/rewarding prediction entropy
        self.ent_reg_cost = self._ent_cost()
        self.act_reg_cost = lam_l2a * self._act_reg_cost()
        # construct a function for sampling from a categorical
        self.sample_posterior = self._construct_sample_posterior()
        return
def _act_reg_cost(self):
"""
Apply L2 regularization to the activations in each spawn-net.
"""
act_sq_sums = []
for i in range(self.spawn_count):
sn = self.spawn_nets[i]
for snl in sn:
act_sq_sums.append(snl.act_l2_sum)
full_act_sq_sum = T.sum(act_sq_sums) / self.spawn_count
return full_act_sq_sum
def _ear_cost(self):
"""
Compute the cost of pseudo-ensemble agreement regularization.
"""
if self.spawn_count == 1:
x1 = self.spawn_nets[0][-1].linear_output
ear_loss = 0.0 * smooth_js_divergence(x1, x1)
else:
x1 = self.spawn_nets[0][-1].linear_output
x2 = self.spawn_nets[1][-1].linear_output
#ear_loss = smooth_js_divergence(x1, x2)
ear_loss = (smooth_kl_divergence(x1, x2) + \
smooth_kl_divergence(x2, x1)) / 2.0
return ear_loss
def _ent_cost(self, ent_type=1):
"""
Compute cost for entropy regularization.
"""
if ent_type == 0:
# binary cross-entropy
ent_fun = lambda x: T.sum(T.nnet.binary_crossentropy( \
T.nnet.sigmoid(x), T.nnet.sigmoid(x)), axis=1, keepdims=True)
else:
# multinomial cross-entropy
ent_fun = lambda x: smooth_cross_entropy(x, x)
if self.spawn_count == 1:
x = self.spawn_nets[0][-1].linear_output
ent_loss = ent_fun(x)
else:
x1 = self.spawn_nets[0][-1].linear_output
x2 = self.spawn_nets[1][-1].linear_output
ent_loss = (ent_fun(x1) + ent_fun(x2)) / 2.0
return ent_loss
def _construct_dae_layers(self, rng, lam_l1=None, nz_lvl=0.25):
    """
    Build cost functions for training DAEs defined for all hidden layers
    of the proto-networks making up this generalized ensemble. That is,
    construct a DAE for every proto-layer that isn't a classification
    layer. Inputs to each DAE are taken from the clean and post-fuzzed
    inputs of the spawn-net layer for the 'first' spawn-net spawned from
    any given proto-net.

    Parameters:
        rng: random number generator passed to each DAELayer.
        lam_l1: L1 sparsity weight forwarded to DAELayer.compute_costs().
        nz_lvl: input corruption (noise) level for each DAE.

    Side effects:
        Populates self.dae_params (one param list per depth) and
        self.dae_costs (one [recon_cost, sparse_cost] pair per depth).
    """
    self.dae_params = []
    self.dae_costs = []
    ACT_FUN = lambda x: relu_actfun(x)
    # The number of hidden layers in each proto-network is depth-1, where
    # depth is the total number of layers in the network. This is because
    # we count the output layer as well as the hidden layers.
    for d in range(self.max_proto_depth - 1):
        d_params = []
        d_costs = []
        for pn_key in range(len(self.proto_nets)):
            # Get the "first" spawn-net spawned from this proto-net
            sn_key = self.proto_keys.index(pn_key)
            sn = self.spawn_nets[sn_key]
            # Only build a DAE if depth d is a hidden (non-output) layer
            # of this particular spawn-net.
            if (d < (len(sn) - 1)):
                # Construct a DAE for this proto/spawn-net hidden layer,
                # sharing the layer's weights and hidden bias.
                W_sn = sn[d].W
                b_sn = sn[d].b
                ci_sn = sn[d].clean_input # the input to be reconstructed
                fi_sn = sn[d].fuzzy_input # the input to reconstruct from
                vis_dim = sn[d].in_dim
                hid_dim = sn[d].filt_count
                # Construct the DAE layer object
                dae_layer = DAELayer(rng=rng, \
                        clean_input=ci_sn, \
                        fuzzy_input=fi_sn, \
                        in_dim=vis_dim, out_dim=hid_dim, \
                        activation=ACT_FUN, \
                        input_noise=nz_lvl, \
                        W=W_sn, b_h=b_sn, b_v=None)
                d_params.extend(dae_layer.params)
                d_costs.append(dae_layer.compute_costs(lam_l1))
        # Record the set of all DAE params to-be-optimized at depth d
        self.dae_params.append(d_params)
        # Record the sum of reconstruction costs for DAEs at depth d (in
        # self.dae_costs[d][0]) and the sum of sparse regularization costs
        # for DAEs at depth d (in self.dae_costs[d][1]).
        self.dae_costs.append([T.sum([c[0] for c in d_costs]), \
                T.sum([c[1] for c in d_costs])])
    return
def _construct_sample_posterior(self):
    """
    Construct a function for sampling from the categorical distribution
    resulting from taking a softmax of the output of this PeaNet.

    Returns:
        sampler: a function mapping an input batch x to a one-hot matrix
            with exactly one sampled class per row (dtype floatX).
    """
    func = theano.function([self.Xd], \
            outputs=safe_softmax(self.output_proto))
    # this function is based on "roulette wheel" sampling
    def sampler(x):
        y_probs = func(x)
        # cumulative class probabilities, one row per sample
        y_cumsum = np.cumsum(y_probs, axis=1)
        # one uniform draw per row
        rand_vals = npr.rand(y_probs.shape[0],1)
        y_bin = np.zeros(y_probs.shape)
        for row in range(y_bin.shape[0]):
            for col in range(y_bin.shape[1]):
                # pick the first class whose cumulative mass exceeds the draw
                if y_cumsum[row,col] > rand_vals[row]:
                    y_bin[row,col] = 1.0
                    break
        y_bin = y_bin.astype(theano.config.floatX)
        return y_bin
    return sampler
def init_biases(self, b_init=0.0):
    """Set the biases of every hidden layer in the first proto-net to a constant.

    The final (output) layer is deliberately left untouched.
    """
    hidden_layers = self.proto_nets[0][:-1]
    for hid_layer in hidden_layers:
        # Keep the existing shape/dtype by zeroing the current vector
        # and adding the constant.
        new_biases = (0.0 * hid_layer.b.get_value(borrow=False)) + b_init
        hid_layer.b.set_value(new_biases)
    return
def shared_param_clone(self, rng=None, Xd=None, params=None):
    """Return a clone of this network that shares its parameters.

    The clone uses different symbolic input variables but the same
    shared parameter dictionaries; pass `params` to override the
    configuration dict, otherwise this network's own params are reused.
    """
    clone_params = self.params if params is None else params
    return PeaNet(rng=rng, Xd=Xd, params=clone_params,
                  shared_param_dicts=self.shared_param_dicts)
def save_to_file(self, f_name=None):
    """
    Dump important stuff to a Python pickle, so that we can reload this
    model later. We'll pickle everything required to create a clone of
    this model given the pickle and the rng/Xd params to the cloning
    function: "PeaNet.shared_param_clone()".

    Parameters:
        f_name: path of the pickle file to write (required).
    """
    assert(not (f_name is None))
    # Use open() in a with-block instead of the legacy file() builtin
    # (removed in Python 3), so the handle is closed even on error.
    with open(f_name, 'wb') as f_handle:
        # dump the dict self.params, which just holds "simple" python values
        cPickle.dump(self.params, f_handle, protocol=-1)
        # make a copy of self.shared_param_dicts, with numpy arrays in place
        # of the theano shared variables
        numpy_param_dicts = []
        for shared_dict in self.shared_param_dicts:
            numpy_dict = {}
            for key in shared_dict:
                numpy_dict[key] = shared_dict[key].get_value(borrow=False)
            numpy_param_dicts.append(numpy_dict)
        # dump the numpy version of self.shared_param_dicts
        cPickle.dump(numpy_param_dicts, f_handle, protocol=-1)
    return
def load_peanet_from_file(f_name=None, rng=None, Xd=None):
    """
    Load a clone of some previously trained model.

    Parameters:
        f_name: path of a pickle written by PeaNet.save_to_file().
        rng: random number generator for the rebuilt network.
        Xd: symbolic input variable for the rebuilt network.

    Returns:
        A PeaNet configured from the unpickled parameters.
    """
    assert(not (f_name is None))
    # Open in binary mode: the pickles were written with a binary protocol,
    # and the old text-mode open() also leaked the file handle.
    with open(f_name, 'rb') as pickle_file:
        self_dot_params = cPickle.load(pickle_file)
        self_dot_numpy_param_dicts = cPickle.load(pickle_file)
    # Rebuild theano shared variables from the pickled numpy arrays.
    self_dot_shared_param_dicts = []
    for numpy_dict in self_dot_numpy_param_dicts:
        shared_dict = {}
        for key in numpy_dict:
            val = numpy_dict[key].astype(theano.config.floatX)
            shared_dict[key] = theano.shared(val)
        self_dot_shared_param_dicts.append(shared_dict)
    # now, create a PeaNet with the configuration we just unpickled
    clone_net = PeaNet(rng=rng, Xd=Xd, params=self_dot_params, \
            shared_param_dicts=self_dot_shared_param_dicts)
    return clone_net
if __name__ == "__main__":
    # TEST CODE FOR MODEL SAVING AND LOADING
    from load_data import load_udm, load_udm_ss, load_mnist
    from NetLayers import relu_actfun, softplus_actfun, \
                          safe_softmax, safe_log
    # Simple test code, to check that everything is basically functional.
    print("TESTING...")

    # Initialize a source of randomness
    rng = np.random.RandomState(1234)

    # Load some data to train/validate/test with
    dataset = 'data/mnist.pkl.gz'
    datasets = load_udm(dataset, zero_mean=False)
    # datasets[0] is the training split and datasets[1] the validation
    # split; element [0] of each split holds the input matrix.
    Xtr = datasets[0][0]
    Xtr = Xtr.get_value(borrow=False)
    Xva = datasets[1][0]
    Xva = Xva.get_value(borrow=False)
    print("Xtr.shape: {0:s}, Xva.shape: {1:s}".format(str(Xtr.shape),str(Xva.shape)))

    # get and set some basic dataset information
    tr_samples = Xtr.shape[0]
    data_dim = Xtr.shape[1]
    batch_size = 128
    prior_dim = 50
    prior_sigma = 1.0
    # Replace the per-pixel mean with the global scalar mean, then tile
    # it to a full batch for later use as a "blank canvas" input.
    Xtr_mean = np.mean(Xtr, axis=0, keepdims=True)
    Xtr_mean = (0.0 * Xtr_mean) + np.mean(Xtr)
    Xc_mean = np.repeat(Xtr_mean, batch_size, axis=0).astype(theano.config.floatX)

    # Symbolic inputs
    Xd = T.matrix(name='Xd')

    ###############################
    # Setup discriminator network #
    ###############################
    # Set some reasonable mlp parameters
    dn_params = {}
    # Set up some proto-networks
    pc0 = [data_dim, (250, 4), (250, 4), 10]
    dn_params['proto_configs'] = [pc0]
    # Set up some spawn networks
    sc0 = {'proto_key': 0, 'input_noise': 0.1, 'bias_noise': 0.1, 'do_dropout': True}
    #sc1 = {'proto_key': 0, 'input_noise': 0.1, 'bias_noise': 0.1, 'do_dropout': True}
    dn_params['spawn_configs'] = [sc0]
    dn_params['spawn_weights'] = [1.0]
    # Set remaining params
    dn_params['lam_l2a'] = 1e-2
    dn_params['vis_drop'] = 0.2
    dn_params['hid_drop'] = 0.5
    dn_params['init_scale'] = 2.0

    # Initialize a network object to use as the discriminator
    DN = PeaNet(rng=rng, Xd=Xd, params=dn_params)
    DN.init_biases(0.0)

    # Round-trip the model through a pickle file to exercise save/load.
    pkl_file_name = "TEST_PKL_FILE.pkl"
    print("Saving model:")
    DN.save_to_file(f_name=pkl_file_name)
    print("Loading model:")
    DN_clone = load_peanet_from_file(f_name=pkl_file_name, rng=rng, Xd=Xd)
    print("DONE!")

##############
# EYE BUFFER #
##############
| |
'''
Author: Dr. Mohamed A. Bouhlel <mbouhlel@umich.edu>
Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
'''
#TODO: Extend to multifidelity problems by adding training_points = {'approx': {}}
#TODO: Complete the mixture of expert model: verify from if self.options['name'] == 'MixExp': (predict)
from __future__ import division
import numpy as np
from collections import defaultdict
from smt.utils.printer import Printer
from smt.utils.options_dictionary import OptionsDictionary
from smt.utils.checks import check_support, check_nx, check_2d_array
class SurrogateModel(object):
    """
    Base class for all surrogate models.

    Attributes
    ----------
    options : OptionsDictionary
        Dictionary of options. Options values can be set on this attribute directly
        or they can be passed in as keyword arguments during instantiation.
    supports : dict
        Dictionary containing information about what this surrogate model supports.

    Examples
    --------
    >>> from smt.surrogate_models import RBF
    >>> sm = RBF(print_training=False)
    >>> sm.options['print_prediction'] = False
    """

    def __init__(self, **kwargs):
        """
        Constructor where values of options can be passed in.

        For the list of options, see the documentation for the surrogate model being used.

        Parameters
        ----------
        **kwargs : named arguments
            Set of options that can be optionally set; each option must have been declared.

        Examples
        --------
        >>> from smt.surrogate_models import RBF
        >>> sm = RBF(print_global=False)
        """
        self.options = OptionsDictionary()

        # Capability flags; subclasses enable them in _initialize().
        self.supports = supports = {}
        supports['training_derivatives'] = False
        supports['derivatives'] = False
        supports['output_derivatives'] = False
        supports['adjoint_api'] = False
        supports['variances'] = False

        declare = self.options.declare
        declare('print_global', True, types=bool,
                desc='Global print toggle. If False, all printing is suppressed')
        declare('print_training', True, types=bool,
                desc='Whether to print training information')
        declare('print_prediction', True, types=bool,
                desc='Whether to print prediction information')
        declare('print_problem', True, types=bool,
                desc='Whether to print problem information')
        declare('print_solver', True, types=bool,
                desc='Whether to print solver information')

        # Let the subclass declare its own options/supports before applying
        # the user-supplied keyword overrides.
        self._initialize()
        self.options.update(kwargs)

        # training_points[name][kx] = [xt, data]; kx == 0 holds the output
        # values, kx >= 1 holds derivatives wrt input variable kx - 1.
        self.training_points = defaultdict(dict)

        self.printer = Printer()

    def set_training_values(self, xt, yt, name=None):
        """
        Set training data (values).

        Parameters
        ----------
        xt : np.ndarray[nt, nx] or np.ndarray[nt]
            The input values for the nt training points.
        yt : np.ndarray[nt, ny] or np.ndarray[nt]
            The output values for the nt training points.
        name : str or None
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications).
        """
        xt = check_2d_array(xt, 'xt')
        yt = check_2d_array(yt, 'yt')

        if xt.shape[0] != yt.shape[0]:
            raise ValueError('the first dimension of xt and yt must have the same length')

        self.nt = xt.shape[0]
        self.nx = xt.shape[1]
        self.ny = yt.shape[1]
        # Values always live at key 0 (derivatives use kx + 1).
        kx = 0
        self.training_points[name][kx] = [np.array(xt), np.array(yt)]

    def update_training_values(self, yt, name=None):
        """
        Update the training data (values) at the previously set input values.

        Parameters
        ----------
        yt : np.ndarray[nt, ny] or np.ndarray[nt]
            The output values for the nt training points.
        name : str or None
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications).
        """
        yt = check_2d_array(yt, 'yt')

        kx = 0
        if kx not in self.training_points[name]:
            raise ValueError(
                'The training points must be set first with set_training_values ' +
                'before calling update_training_values.')

        xt = self.training_points[name][kx][0]
        if xt.shape[0] != yt.shape[0]:
            raise ValueError(
                'The number of training points does not agree with the earlier call of ' +
                'set_training_values.')

        self.training_points[name][kx][1] = np.array(yt)

    def set_training_derivatives(self, xt, dyt_dxt, kx, name=None):
        """
        Set training data (derivatives).

        Parameters
        ----------
        xt : np.ndarray[nt, nx] or np.ndarray[nt]
            The input values for the nt training points.
        dyt_dxt : np.ndarray[nt, ny] or np.ndarray[nt]
            The derivatives values for the nt training points.
        kx : int
            0-based index of the derivatives being set.
        name : str or None
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications).
        """
        check_support(self, 'training_derivatives')

        xt = check_2d_array(xt, 'xt')
        dyt_dxt = check_2d_array(dyt_dxt, 'dyt_dxt')

        if xt.shape[0] != dyt_dxt.shape[0]:
            raise ValueError('the first dimension of xt and dyt_dxt must have the same length')

        if not isinstance(kx, int):
            raise ValueError('kx must be an int')

        # Derivatives wrt input kx are stored under key kx + 1
        # (key 0 is reserved for the output values).
        self.training_points[name][kx + 1] = [np.array(xt), np.array(dyt_dxt)]

    def update_training_derivatives(self, dyt_dxt, kx, name=None):
        """
        Update the training data (derivatives) at the previously set input values.

        Parameters
        ----------
        dyt_dxt : np.ndarray[nt, ny] or np.ndarray[nt]
            The derivatives values for the nt training points.
        kx : int
            0-based index of the derivatives being set.
        name : str or None
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications).
        """
        check_support(self, 'training_derivatives')

        dyt_dxt = check_2d_array(dyt_dxt, 'dyt_dxt')

        # BUGFIX: set_training_derivatives() stores derivatives under key
        # kx + 1, so the existence check and the xt lookup must use kx + 1
        # as well (previously they used kx, which is the wrong entry).
        if kx + 1 not in self.training_points[name]:
            raise ValueError(
                'The training derivatives must be set first with set_training_derivatives ' +
                'before calling update_training_derivatives.')

        xt = self.training_points[name][kx + 1][0]
        if xt.shape[0] != dyt_dxt.shape[0]:
            raise ValueError(
                'The number of training points does not agree with the earlier call of ' +
                'set_training_derivatives.')

        self.training_points[name][kx + 1][1] = np.array(dyt_dxt)

    def train(self):
        """
        Train the model
        """
        n_exact = self.training_points[None][0][0].shape[0]

        self.printer.active = self.options['print_global']
        self.printer._line_break()
        # NOTE(review): self.name is expected to be set by the subclass;
        # it is not defined in this base class — confirm.
        self.printer._center(self.name)

        self.printer.active = self.options['print_global'] and self.options['print_problem']
        self.printer._title('Problem size')
        self.printer(' %-25s : %i' % ('# training points.', n_exact))
        self.printer()

        self.printer.active = self.options['print_global'] and self.options['print_training']
        if self.name == 'MixExp':
            # Mixture of experts model
            self.printer._title('Training of the Mixture of experts')
        else:
            self.printer._title('Training')

        #Train the model using the specified model-method
        with self.printer._timed_context('Training', 'training'):
            self._train()

    def predict_values(self, x):
        """
        Predict the output values at a set of points.

        Parameters
        ----------
        x : np.ndarray[n, nx] or np.ndarray[n]
            Input values for the prediction points.

        Returns
        -------
        y : np.ndarray[n, ny]
            Output values at the prediction points.
        """
        x = check_2d_array(x, 'x')
        check_nx(self.nx, x)
        n = x.shape[0]
        self.printer.active = self.options['print_global'] and self.options['print_prediction']

        if self.name == 'MixExp':
            # Mixture of experts model
            self.printer._title('Evaluation of the Mixture of experts')
        else:
            self.printer._title('Evaluation')
        self.printer(' %-12s : %i' % ('# eval points.', n))
        self.printer()

        #Evaluate the unknown points using the specified model-method
        with self.printer._timed_context('Predicting', key='prediction'):
            y = self._predict_values(x)

        time_pt = self.printer._time('prediction')[-1] / n
        self.printer()
        self.printer('Prediction time/pt. (sec) : %10.7f' % time_pt)
        self.printer()
        return y.reshape((n, self.ny))

    def predict_derivatives(self, x, kx):
        """
        Predict the dy_dx derivatives at a set of points.

        Parameters
        ----------
        x : np.ndarray[n, nx] or np.ndarray[n]
            Input values for the prediction points.
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.

        Returns
        -------
        dy_dx : np.ndarray[n, ny]
            Derivatives.
        """
        check_support(self, 'derivatives')
        x = check_2d_array(x, 'x')
        check_nx(self.nx, x)
        n = x.shape[0]
        self.printer.active = self.options['print_global'] and self.options['print_prediction']

        if self.name == 'MixExp':
            # Mixture of experts model
            self.printer._title('Evaluation of the Mixture of experts')
        else:
            self.printer._title('Evaluation')
        self.printer(' %-12s : %i' % ('# eval points.', n))
        self.printer()

        #Evaluate the unknown points using the specified model-method
        with self.printer._timed_context('Predicting', key='prediction'):
            y = self._predict_derivatives(x, kx)

        time_pt = self.printer._time('prediction')[-1] / n
        self.printer()
        self.printer('Prediction time/pt. (sec) : %10.7f' % time_pt)
        self.printer()
        return y.reshape((n, self.ny))

    def predict_output_derivatives(self, x):
        """
        Predict the derivatives dy_dyt at a set of points.

        Parameters
        ----------
        x : np.ndarray[n, nx] or np.ndarray[n]
            Input values for the prediction points.

        Returns
        -------
        dy_dyt : dict of np.ndarray[n, nt]
            Dictionary of output derivatives.
            Key is None for derivatives wrt yt and kx for derivatives wrt dyt_dxt.
        """
        check_support(self, 'output_derivatives')
        check_nx(self.nx, x)

        dy_dyt = self._predict_output_derivatives(x)
        return dy_dyt

    def predict_variances(self, x):
        """
        Predict the variances at a set of points.

        Parameters
        ----------
        x : np.ndarray[n, nx] or np.ndarray[n]
            Input values for the prediction points.

        Returns
        -------
        s2 : np.ndarray[n, ny]
            Variances.
        """
        check_support(self, 'variances')
        check_nx(self.nx, x)
        n = x.shape[0]
        s2 = self._predict_variances(x)
        return s2.reshape((n, self.ny))

    def _initialize(self):
        """
        Implemented by surrogate models to declare options and declare what they support (optional).

        Examples
        --------
        self.options.declare('option_name', default_value, types=(bool, int), desc='description')
        self.supports['derivatives'] = True
        """
        pass

    def _train(self):
        """
        Implemented by surrogate models to perform training (optional, but typically implemented).
        """
        pass

    def _predict_values(self, x):
        """
        Implemented by surrogate models to predict the output values.

        Parameters
        ----------
        x : np.ndarray[n, nx]
            Input values for the prediction points.

        Returns
        -------
        y : np.ndarray[n, ny]
            Output values at the prediction points.
        """
        raise Exception('This surrogate model is incorrectly implemented')

    def _predict_derivatives(self, x, kx):
        """
        Implemented by surrogate models to predict the dy_dx derivatives (optional).

        If this method is implemented, the surrogate model should have
        ::
            self.supports['derivatives'] = True
        in the _initialize() implementation.

        Parameters
        ----------
        x : np.ndarray[n, nx]
            Input values for the prediction points.
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.

        Returns
        -------
        dy_dx : np.ndarray[n, ny]
            Derivatives.
        """
        check_support(self, 'derivatives', fail=True)

    def _predict_output_derivatives(self, x):
        """
        Implemented by surrogate models to predict the dy_dyt derivatives (optional).

        If this method is implemented, the surrogate model should have
        ::
            self.supports['output_derivatives'] = True
        in the _initialize() implementation.

        Parameters
        ----------
        x : np.ndarray[n, nx]
            Input values for the prediction points.

        Returns
        -------
        dy_dyt : dict of np.ndarray[n, nt]
            Dictionary of output derivatives.
            Key is None for derivatives wrt yt and kx for derivatives wrt dyt_dxt.
        """
        check_support(self, 'output_derivatives', fail=True)

    def _predict_variances(self, x):
        """
        Implemented by surrogate models to predict the variances at a set of points (optional).

        If this method is implemented, the surrogate model should have
        ::
            self.supports['variances'] = True
        in the _initialize() implementation.

        Parameters
        ----------
        x : np.ndarray[n, nx]
            Input values for the prediction points.

        Returns
        -------
        s2 : np.ndarray[n, ny]
            Variances.
        """
        check_support(self, 'variances', fail=True)
| |
'''Server module.
Handle and respond to challenge requests from the frontend server.
'''
import sys
import json
import traceback
from collections import deque
from tornado import gen, concurrent
from tornado.ioloop import IOLoop, PollIOLoop
from tornado.web import Application, RequestHandler
from tornado.websocket import WebSocketHandler
import PyExt
import Privilege
import Config
from StdChal import StdChal
class EvIOLoop(PollIOLoop):
    '''Tornado compatible ioloop interface.'''

    def initialize(self, **kwargs):
        '''Initialize the loop with a PyExt ev-based poll implementation.'''
        poll_impl = PyExt.EvPoll()
        super().initialize(impl=poll_impl, **kwargs)
class JudgeDispatcher:
    '''Judge request dispatcher.

    Static attributes:
        chal_running_count (int): Number of current running challenges.
        chal_queue (deque): Pending challenges.
    '''
    chal_running_count = 0
    chal_queue = deque()

    @staticmethod
    @gen.coroutine
    def start_chal(obj, callback):
        '''Start a challenge.

        Check the challenge config, issue judge tasks, then report the result.

        Args:
            obj (dict): Challenge config.
            callback: Challenge callback.

        Returns:
            None
        '''
        # The worst exception, there is no chal_id in the obj.
        chal_id = None
        try:
            chal_id = obj['chal_id']
            code_path = obj['code_path']
            res_path = obj['res_path']
            test_list = obj['test']
            metadata = obj['metadata']
            comp_type = obj['comp_type']
            check_type = obj['check_type']
            test_paramlist = list()
            # Reject configs with unsupported compile/check types up front.
            assert comp_type in ['g++', 'clang++', 'makefile', 'python3']
            assert check_type in ['diff', 'ioredir']
            # Flatten every test's data ids into one judge parameter list;
            # results come back from StdChal in the same flattened order.
            for test in test_list:
                test_idx = test['test_idx']
                memlimit = test['memlimit']
                timelimit = test['timelimit']
                data_ids = test['metadata']['data']
                for data_id in data_ids:
                    test_paramlist.append({
                        'in': res_path + '/testdata/%d.in'%data_id,
                        'ans': res_path + '/testdata/%d.out'%data_id,
                        'timelimit': timelimit,
                        'memlimit': memlimit,
                    })
            chal = StdChal(chal_id, code_path, comp_type, check_type, \
                res_path, test_paramlist, metadata)
            result_list = yield chal.start()
            # Re-aggregate the flat per-data results back into per-test
            # results, accumulating runtime/memory and taking the worst
            # (largest) status as the test's overall state.
            result = []
            idx = 0
            for test in test_list:
                test_idx = test['test_idx']
                data_ids = test['metadata']['data']
                total_runtime = 0
                total_mem = 0
                total_status = 0
                subverdicts = list()
                for data_id in data_ids:
                    runtime, peakmem, status, subverdict = result_list[idx]
                    total_runtime += runtime
                    total_mem += peakmem
                    total_status = max(total_status, status)
                    subverdicts.append(subverdict)
                    idx += 1
                result.append({
                    'test_idx': test_idx,
                    'state': total_status,
                    'runtime': total_runtime,
                    'peakmem': total_mem,
                    'verdict': subverdicts,
                })
            callback({
                'chal_id': chal_id,
                'result': result,
            })
        except Exception:
            # Report a null result so the frontend is not left waiting.
            traceback.print_exception(*sys.exc_info())
            callback({
                'chal_id': chal_id,
                'verdict': None,
                'result': None,
            })
        finally:
            # Always free the concurrency slot and pump the queue,
            # whether the challenge succeeded or failed.
            JudgeDispatcher.chal_running_count -= 1
            JudgeDispatcher.emit_chal()

    @staticmethod
    def emit_chal(obj=None, callback=None):
        '''Emit a challenge to the queue and trigger the start_chal.

        Args:
            obj (dict, optional): Challenge config.
            callback: Challenge callback.

        Returns:
            None
        '''
        if obj is not None:
            JudgeDispatcher.chal_queue.append((obj, callback))
        # Launch pending challenges while the concurrency budget allows.
        while (len(JudgeDispatcher.chal_queue) > 0
                and JudgeDispatcher.chal_running_count < Config.TASK_MAXCONCURRENT):
            chal = JudgeDispatcher.chal_queue.popleft()
            JudgeDispatcher.chal_running_count += 1
            IOLoop.instance().add_callback(JudgeDispatcher.start_chal, *chal)
class WebSocketClient(WebSocketHandler):
    '''Websocket request handler.'''

    def open(self):
        '''Handle open event'''
        print('Frontend connected')

    def on_message(self, msg):
        '''Handle message event.

        Parse the challenge config from the frontend and dispatch it;
        the result is written back over this websocket as JSON.
        '''
        # BUGFIX: json.loads() takes no positional encoding argument in
        # Python 3 (the old second argument raises TypeError on 3.9+);
        # the incoming message is already a decoded str.
        obj = json.loads(msg)
        JudgeDispatcher.emit_chal(obj,
            lambda res: self.write_message(json.dumps(res)))

    def on_close(self):
        '''Handle close event'''
        print('Frontend disconnected')
class RequestClient(RequestHandler):
    '''HTTP request handler.'''

    # NOTE(review): tornado.concurrent.return_future keeps the request
    # open until callback() is invoked; it was removed in newer Tornado
    # releases — confirm the targeted Tornado version.
    @concurrent.return_future
    def post(self, callback):
        '''Handle POST request'''
        def _chal_cb(res):
            # Write the judge result as the HTTP response body, then
            # signal completion so the request is finished.
            self.write(res)
            callback()
        obj = json.loads(self.request.body.decode('utf-8'))
        JudgeDispatcher.emit_chal(obj, _chal_cb)
def init_socket_server():
    '''Initialize socket server.'''
    # Websocket judge requests and plain HTTP judge requests share one app.
    routes = [
        (r'/judge', WebSocketClient),
        (r'/reqjudge', RequestClient),
    ]
    judge_app = Application(routes)
    judge_app.listen(2501)
def main():
    '''Main function.'''
    # Initialization order matters: drop/acquire privileges first, then
    # bring up the PyExt runtime and the challenge sandbox before any
    # ioloop machinery is configured.
    Privilege.init()
    PyExt.init()
    StdChal.init()
    # Use the libev-based poll loop for all IOLoop instances.
    IOLoop.configure(EvIOLoop)
    init_socket_server()
    IOLoop.instance().start()
# Run the judge server when executed as a script.
if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python3
# cardinal_pythonlib/openxml/find_recovered_openxml.py
"""
===============================================================================
Original code copyright (C) 2009-2021 Rudolf Cardinal (rudolf@pobox.com).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**Tool to recognize and rescue Microsoft Office OpenXML files, even if they
have garbage appended to them. See the command-line help for details.**
Version history:
- Written 28 Sep 2017.
Notes:
- use the ``vbindiff`` tool to show *how* two binary files differ.
Output from ``zip -FF bad.zip --out good.zip``
.. code-block:: none
Fix archive (-FF) - salvage what can
zip warning: Missing end (EOCDR) signature - either this archive
is not readable or the end is damaged
Is this a single-disk archive? (y/n):
... and note there are some tabs in that, too.
More ``zip -FF`` output:
.. code-block:: none
Fix archive (-FF) - salvage what can
Found end record (EOCDR) - says expect 50828 splits
Found archive comment
Scanning for entries...
Could not find:
/home/rudolf/tmp/ziptest/00008470.z01
Hit c (change path to where this split file is)
s (skip this split)
q (abort archive - quit)
e (end this archive - no more splits)
z (look for .zip split - the last split)
or ENTER (try reading this split again):
More ``zip -FF`` output:
.. code-block:: none
zip: malloc.c:2394: sysmalloc: ...
... this heralds a crash in ``zip``. We need to kill it; otherwise it just sits
there doing nothing and not asking for any input. Presumably this means the
file is badly corrupted (or not a zip at all).
"""
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import fnmatch
import logging
import multiprocessing
import os
import re
import shutil
import struct
import tempfile
from time import sleep
import traceback
from typing import List
from zipfile import BadZipFile, ZipFile
from cardinal_pythonlib.logs import (
BraceStyleAdapter,
main_only_quicksetup_rootlogger,
)
from cardinal_pythonlib.fileops import exists_locked, gen_filenames
from cardinal_pythonlib.subproc import (
mimic_user_input,
SOURCE_STDERR,
SOURCE_STDOUT,
TERMINATE_SUBPROCESS,
)
log = BraceStyleAdapter(logging.getLogger(__name__))

# Regex fragments matching the characteristic internal member paths of each
# OpenXML container type (e.g. a DOCX zip holds "word/document.xml").
DOCX_CONTENTS_REGEX_STR = "word/.*xml"
PPTX_CONTENTS_REGEX_STR = "ppt/.*xml"
XLSX_CONTENTS_REGEX_STR = "xl/.*xml"

DOCX_CONTENTS_REGEX = re.compile(DOCX_CONTENTS_REGEX_STR)
PPTX_CONTENTS_REGEX = re.compile(PPTX_CONTENTS_REGEX_STR)
XLSX_CONTENTS_REGEX = re.compile(XLSX_CONTENTS_REGEX_STR)

# Canonical file-type labels (also used as file extensions).
DOCX = "docx"
PPTX = "pptx"
XLSX = "xlsx"
FILETYPES = [DOCX, PPTX, XLSX]

# (source, prompt, response) triples fed to the interactive "zip -FF"
# subprocess; the last entry recognizes a known zip crash message and
# terminates the subprocess (see the module docstring).
ZIP_PROMPTS_RESPONSES = [
    (SOURCE_STDOUT, "Is this a single-disk archive? (y/n): ", "y\n"),
    (SOURCE_STDOUT, " or ENTER (try reading this split again): ", "q\n"),
    (SOURCE_STDERR,
     "zip: malloc.c:2394: sysmalloc: Assertion `(old_top == initial_top (av) "
     "&& old_size == 0) || ((unsigned long) (old_size) >= MINSIZE && "
     "prev_inuse (old_top) && ((unsigned long) old_end & (pagesize - 1)) "
     "== 0)' failed.", TERMINATE_SUBPROCESS),
]
# Strings that mark the end of a "line" in zip's stdout (prompts end in
# "): " rather than a newline).
ZIP_STDOUT_TERMINATORS = ["\n", "): "]
class CorruptedZipReader(object):
    """
    Class to open a zip file, even one that is corrupted, and detect the
    files within.

    Attributes set by the constructor:

    - ``src_filename``: the original file
    - ``rescue_filename``: path of a repaired copy ("" if none was made)
    - ``tmp_dir``: temporary directory holding the repaired copy ("" if
      no repair was attempted)
    - ``contents_filenames``: member names found in the archive (empty if
      it was unreadable even after the repair attempt)
    """
    def __init__(self, filename: str, show_zip_output: bool = False) -> None:
        """
        Args:
            filename: filename of the ``.zip`` file (or corrupted ``.zip``
                file) to open
            show_zip_output: show the output of the external ``zip`` tool?
        """
        self.src_filename = filename
        self.rescue_filename = ""
        self.tmp_dir = ""
        self.contents_filenames = []  # type: List[str]

        try:
            # A happy zip file will be readable like this:
            with ZipFile(self.src_filename, 'r') as zip_ref:
                self.contents_filenames = zip_ref.namelist()
        except (BadZipFile, OSError) as e:
            # Here we have an unhappy zip file.
            log.debug("File {!r} raised error: {!r}", filename, e)
            # Try to repair it with the external "zip -FF" tool, then
            # retry on the repaired copy.
            self._fix_zip(show_zip_output=show_zip_output)
            try:
                with ZipFile(self.rescue_filename, 'r') as zip_ref:
                    self.contents_filenames = zip_ref.namelist()
            except (BadZipFile, OSError, struct.error) as e:
                log.debug("... exception raised even after fix attempt: {!r}",
                          e)
            if self.contents_filenames:
                log.debug("... recovered!")
            else:
                log.debug("... attempt at recovery failed")

    def _fix_zip(self, show_zip_output: bool = False) -> None:
        """Attempt to repair the zip via ``zip -FF``, driving its prompts."""
        # We are trying to deal with ZIP (specifically, PPTX) files that
        # have been retrieved by Scalpel so have large extra bits of junk
        # on the end.
        # Make a file in a temporary directory
        self.tmp_dir = tempfile.mkdtemp()
        self.rescue_filename = os.path.join(
            self.tmp_dir, os.path.basename(self.src_filename))
        cmdargs = [
            "zip",  # Linux zip tool
            "-FF",  # or "--fixfix": "fix very broken things"
            self.src_filename,  # input file
            "--temp-path", self.tmp_dir,  # temporary storage path
            "--out", self.rescue_filename  # output file
        ]
        # We would like to be able to say "y" automatically to
        # "Is this a single-disk archive? (y/n):"
        # The source code (api.c, zip.c, zipfile.c), from
        # ftp://ftp.info-zip.org/pub/infozip/src/ , suggests that "-q"
        # should do this (internally "-q" sets "noisy = 0") - but in
        # practice it doesn't work. This is a critical switch.
        # Therefore we will do something very ugly, and send raw text via
        # stdin.
        log.debug("Running {!r}", cmdargs)
        mimic_user_input(cmdargs,
                         source_challenge_response=ZIP_PROMPTS_RESPONSES,
                         line_terminators=ZIP_STDOUT_TERMINATORS,
                         print_stdout=show_zip_output,
                         print_stdin=show_zip_output)
        # ... will raise if the 'zip' tool isn't available

    def move_to(self, destination_filename: str,
                alter_if_clash: bool = True) -> None:
        """
        Move the file to which this class refers to a new location.
        The function will not overwrite existing files (but offers the option
        to rename files slightly to avoid a clash).

        Args:
            destination_filename: filename to move to
            alter_if_clash: if ``True`` (the default), appends numbers to
                the filename if the destination already exists, so that the
                move can proceed.
        """
        if not self.src_filename:
            # Already moved (src_filename is cleared after a move).
            return
        if alter_if_clash:
            counter = 0
            while os.path.exists(destination_filename):
                root, ext = os.path.splitext(destination_filename)
                destination_filename = f"{root}_{counter}{ext}"
                counter += 1
            # ... for example, "/a/b/c.txt" becomes "/a/b/c_0.txt", then
            # "/a/b/c_1.txt", and so on.
        else:
            if os.path.exists(destination_filename):
                src = self.rescue_filename or self.src_filename
                log.warning("Destination exists; won't move {!r} to {!r}",
                            src, destination_filename)
                return
        if self.rescue_filename:
            # A repaired copy exists: keep that one and delete the
            # corrupted original.
            shutil.move(self.rescue_filename, destination_filename)
            os.remove(self.src_filename)
            log.info("Moved recovered file {!r} to {!r} and deleted corrupted "
                     "original {!r}",
                     self.rescue_filename,
                     destination_filename,
                     self.src_filename)
            self.rescue_filename = ""
        else:
            shutil.move(self.src_filename, destination_filename)
            log.info("Moved {!r} to {!r}", self.src_filename,
                     destination_filename)
        self.src_filename = ""

    def __del__(self) -> None:
        # Clean up the temporary directory made by _fix_zip(), if any.
        # NOTE(review): may fail at interpreter shutdown if shutil has
        # already been torn down, or if the directory was removed — confirm
        # this is acceptable for the tool's usage pattern.
        if self.tmp_dir:
            shutil.rmtree(self.tmp_dir)
class CorruptedOpenXmlReader(CorruptedZipReader):
    """
    Class to read a potentially corrupted OpenXML file.

    As it is created, it sets its ``file_type`` member to the detected OpenXML
    file type, if it can.
    """
    def __init__(self, filename: str, show_zip_output: bool = False) -> None:
        super().__init__(filename=filename,
                         show_zip_output=show_zip_output)
        self.file_type = ""
        self._recognize()

    def _recognize(self) -> None:
        """Detect the OpenXML flavour from the archive's member filenames."""
        probes = (
            (DOCX_CONTENTS_REGEX, DOCX,
             "Zip file {!r} has Word DOCX contents {!r}"),
            (PPTX_CONTENTS_REGEX, PPTX,
             "Zip file {!r} has Powerpoint PPTX contents {!r}"),
            (XLSX_CONTENTS_REGEX, XLSX,
             "Zip file {!r} has Excel XLSX contents {!r}"),
        )
        # First matching member (checked against DOCX, then PPTX, then
        # XLSX patterns) decides the type.
        for member_name in self.contents_filenames:
            for regex, detected_type, message in probes:
                if regex.match(member_name):
                    log.debug(message, self.src_filename, member_name)
                    self.file_type = detected_type
                    return

    def suggested_extension(self) -> str:
        """Return the extension (with leading dot) for the detected type."""
        return "." + self.file_type if self.file_type else ""

    @property
    def recognized(self) -> bool:
        """Was an OpenXML file type detected?"""
        return bool(self.file_type)

    @property
    def description(self) -> str:
        """Upper-case human-readable name of the detected type."""
        return self.file_type.upper()
def process_file(filename: str,
                 filetypes: List[str],
                 move_to: str,
                 delete_if_not_specified_file_type: bool,
                 show_zip_output: bool) -> None:
    """
    Deals with an OpenXML file, including if it is potentially corrupted.

    Runs in a multiprocessing worker; see the exception handler below.

    Args:
        filename: filename to process
        filetypes: list of filetypes that we care about, e.g.
            ``['docx', 'pptx', 'xlsx']``.
        move_to: move matching files to this directory
        delete_if_not_specified_file_type: if ``True``, and the file is **not**
            a type specified in ``filetypes``, then delete the file.
        show_zip_output: show the output from the external ``zip`` tool?
    """
    try:
        reader = CorruptedOpenXmlReader(filename,
                                        show_zip_output=show_zip_output)
        if reader.file_type in filetypes:
            log.info("Found {}: {}", reader.description, filename)
            if move_to:
                dest_file = os.path.join(move_to, os.path.basename(filename))
                # Ensure the destination carries the recognized extension
                # (carved files often have a meaningless one).
                _, ext = os.path.splitext(dest_file)
                if ext != reader.suggested_extension():
                    dest_file += reader.suggested_extension()
                reader.move_to(destination_filename=dest_file)
        else:
            # Lazy {}-style formatting, for consistency with the rest of
            # this file (the message is only built if actually emitted).
            log.info("Unrecognized or unwanted contents: {}", filename)
            if delete_if_not_specified_file_type:
                log.info("Deleting: {}", filename)
                os.remove(filename)
    except Exception as e:
        # Must explicitly catch and report errors, since otherwise they vanish
        # into the ether when run via a multiprocessing pool.
        log.critical("Uncaught error in subprocess: {!r}\n{}", e,
                     traceback.format_exc())
        raise
    # See also good advice, not implemented here, at
    # https://stackoverflow.com/questions/19924104/python-multiprocessing-handling-child-errors-in-parent  # noqa
    # https://stackoverflow.com/questions/6126007/python-getting-a-traceback-from-a-multiprocessing-process/26096355#26096355  # noqa
def main() -> None:
    """
    Command-line handler for the ``find_recovered_openxml`` tool.
    Use the ``--help`` option for help.
    """
    # Build the command-line interface. The description is shown verbatim
    # (RawDescriptionHelpFormatter) and embeds the signature regexes.
    parser = ArgumentParser(
        formatter_class=RawDescriptionHelpFormatter,
        description=f"""
Tool to recognize and rescue Microsoft Office OpenXML files, even if they have
garbage appended to them.
- Rationale: when you have accidentally deleted files from an NTFS disk, and
they really matter, you should (a) stop what you're doing; (b) clone the disk
to an image file using "dd" under Linux; (c) perform all subsequent
operations on the cloned image (in read-only mode). Those steps might
include:
- ntfsundelete, to find files that the filesystem is still aware of;
- scalpel, to find files based on their contents.
- Scalpel is great at finding stuff efficiently, but it works best when files
can be defined by both a start (header) signature and an end (footer)
signature. However, the Microsoft Office OpenXML file format has a
recognizable header, but no standard footer. In these circumstances, Scalpel
reads up to a certain limit that you specify in its configuration file. (To
retrieve large Powerpoint files, this limit needs to be substantial, e.g.
50 Mb or more, depending on your ways of working with Powerpoint.)
- That means that files emerging from a Scalpel search for DOCX/PPTX/XLSX files
may be
- false positives, having nothing to do with Office;
- clean Office files (the least likely category!);
- Office files with garbage stuck on the end.
- The OpenXML file format is just a zip file. If you stick too much garbage on
the end of a zip file, zip readers will see it as corrupt.
- THIS TOOL detects (and optionally moves) potentially corrupted zipfiles based
on file contents, by unzipping the file and checking for "inner" files with
names like:
File type Contents filename signature (regular expression)
----------------------------------------------------------------
DOCX {DOCX_CONTENTS_REGEX_STR}
PPTX {PPTX_CONTENTS_REGEX_STR}
XLSX {XLSX_CONTENTS_REGEX_STR}
- WARNING: it's possible for an OpenXML file to contain more than one of these.
If so, they may be mis-classified.
- If a file is not immediately readable as a zip, it uses Linux's "zip -FF" to
repair zip files with corrupted ends, and tries again.
- Having found valid-looking files, you can elect to move them elsewhere.
- As an additional and VERY DANGEROUS operation, you can elect to delete files
that this tool doesn't recognize. (Why? Because a 450Gb disk might produce
well in excess of 1.7Tb of candidate files; many will be false positives and
even the true positives will all be expanded to your file size limit, e.g.
50 Mb. You may have a problem with available disk space, so running this tool
regularly allows you to clear up the junk. Use the --run_every option to help
with this.)
"""
    )
    parser.add_argument(
        "filename", nargs="+",
        # Typo fix: "directores" -> "directories" in the user-visible help.
        help="File(s) to check. You can also specify directories if you use "
             "--recursive"
    )
    parser.add_argument(
        "--recursive", action="store_true",
        help="Allow search to descend recursively into any directories "
             "encountered."
    )
    parser.add_argument(
        "--skip_files", nargs="*", default=[],
        help="File pattern(s) to skip. You can specify wildcards like '*.txt' "
             "(but you will have to enclose that pattern in quotes under "
             "UNIX-like operating systems). The basename of each file will be "
             "tested against these filenames/patterns. Consider including "
             "Scalpel's 'audit.txt'."
    )
    parser.add_argument(
        "--filetypes", nargs="+", default=FILETYPES,
        help=f"File types to check. Options: {FILETYPES}"
    )
    parser.add_argument(
        "--move_to",
        help="If the file is recognized as one of the specified file types, "
             "move it to the directory specified here."
    )
    parser.add_argument(
        "--delete_if_not_specified_file_type", action="store_true",
        help="If a file is NOT recognized as one of the specified file types, "
             "delete it. VERY DANGEROUS."
    )
    parser.add_argument(
        "--run_repeatedly", type=int,
        help="Run the tool repeatedly with a pause of <run_repeatedly> "
             "seconds between runs. (For this to work well with the move/"
             "delete options, you should specify one or more DIRECTORIES in "
             "the 'filename' arguments, not files, and you will need the "
             "--recursive option.)"
    )
    parser.add_argument(
        "--nprocesses", type=int, default=multiprocessing.cpu_count(),
        help="Specify the number of processes to run in parallel."
    )
    parser.add_argument(
        "--verbose", action="store_true",
        help="Verbose output"
    )
    parser.add_argument(
        "--show_zip_output", action="store_true",
        help="Verbose output from the external 'zip' tool"
    )
    args = parser.parse_args()
    main_only_quicksetup_rootlogger(
        level=logging.DEBUG if args.verbose else logging.INFO,
        with_process_id=True
    )

    # Further argument checks
    if args.move_to and not os.path.isdir(args.move_to):
        raise ValueError(
            f"Destination directory {args.move_to!r} is not a directory")
    if not args.filetypes:
        raise ValueError("No file type to scan for")
    filetypes = [ft.lower() for ft in args.filetypes]
    if any(ft not in FILETYPES for ft in filetypes):
        raise ValueError(f"Invalid filetypes; choose from {FILETYPES}")
    assert shutil.which("zip"), "Need 'zip' tool!"

    # Repeated scanning loop
    while True:
        log.info("Starting scan.")
        log.info("- Looking for filetypes {}", filetypes)
        log.info("- Scanning files/directories {!r}{}",
                 args.filename,
                 " recursively" if args.recursive else "")
        log.info("- Skipping files matching {!r}", args.skip_files)
        log.info("- Using {} simultaneous processes", args.nprocesses)
        if args.move_to:
            # Lazy {}-style formatting, consistent with the calls above.
            log.info("- Moving target files to {}", args.move_to)
        if args.delete_if_not_specified_file_type:
            log.info("- Deleting non-target files.")

        # Iterate through files, handing each to a worker process.
        pool = multiprocessing.Pool(processes=args.nprocesses)
        for filename in gen_filenames(starting_filenames=args.filename,
                                      recursive=args.recursive):
            src_basename = os.path.basename(filename)
            if any(fnmatch.fnmatch(src_basename, pattern)
                   for pattern in args.skip_files):
                log.info("Skipping file as ordered: {}", filename)
                continue
            exists, locked = exists_locked(filename)
            if locked or not exists:
                log.info("Skipping currently inaccessible file: {}", filename)
                continue
            kwargs = {
                'filename': filename,
                'filetypes': filetypes,
                'move_to': args.move_to,
                'delete_if_not_specified_file_type':
                    args.delete_if_not_specified_file_type,
                'show_zip_output': args.show_zip_output,
            }
            # NOTE(review): the AsyncResult is discarded, so exceptions in a
            # worker are visible only via process_file's own logging; see
            # https://stackoverflow.com/questions/22094852/how-to-catch-exceptions-in-workers-in-multiprocessing  # noqa
            pool.apply_async(process_file, [], kwargs)
        pool.close()
        pool.join()

        log.info("Finished scan.")
        if args.run_repeatedly is None:
            break
        log.info("Sleeping for {} s...", args.run_repeatedly)
        sleep(args.run_repeatedly)
# Script entry point.
if __name__ == '__main__':
    main()
| |
# TNC Python interface
# @(#) $Jeannot: tnc.py,v 1.11 2005/01/28 18:27:31 js Exp $
# Copyright (c) 2004-2005, Jean-Sebastien Roy (js@jeannot.org)
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
TNC: A python interface to the TNC non-linear optimizer
TNC is a non-linear optimizer. To use it, you must provide a function to
minimize. The function must take one argument: the list of coordinates where to
evaluate the function; and it must return either a tuple, whose first element is the
value of the function, and whose second argument is the gradient of the function
(as a list of values); or None, to abort the minimization.
"""
from __future__ import division, print_function, absolute_import
from scipy.optimize import moduleTNC, approx_fprime
from .optimize import MemoizeJac, OptimizeResult, _check_unknown_options
from numpy import inf, array, zeros, asfarray
__all__ = ['fmin_tnc']
# Bit flags controlling optimizer verbosity (combine by addition).
MSG_NONE = 0  # No messages
MSG_ITER = 1  # One line per iteration
MSG_INFO = 2  # Informational messages
MSG_VERS = 4  # Version info
MSG_EXIT = 8  # Exit reasons
MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT

# Human-readable descriptions of the verbosity flags above.
MSGS = {
    MSG_NONE: "No messages",
    MSG_ITER: "One line per iteration",
    MSG_INFO: "Informational messages",
    MSG_VERS: "Version info",
    MSG_EXIT: "Exit reasons",
    MSG_ALL: "All messages"
}

# Return codes from the underlying C TNC routine (see RCSTRINGS below).
INFEASIBLE = -1  # Infeasible (low > up)
LOCALMINIMUM = 0  # Local minima reach (|pg| ~= 0)
FCONVERGED = 1  # Converged (|f_n-f_(n-1)| ~= 0)
XCONVERGED = 2  # Converged (|x_n-x_(n-1)| ~= 0)
MAXFUN = 3  # Max. number of function evaluations reach
LSFAIL = 4  # Linear search failed
CONSTANT = 5  # All lower bounds are equal to the upper bounds
NOPROGRESS = 6  # Unable to progress
USERABORT = 7  # User requested end of minimization

# Human-readable messages for the return codes; used as the OptimizeResult
# 'message' field, so these strings are part of the public behavior.
RCSTRINGS = {
    INFEASIBLE: "Infeasible (low > up)",
    LOCALMINIMUM: "Local minima reach (|pg| ~= 0)",
    FCONVERGED: "Converged (|f_n-f_(n-1)| ~= 0)",
    XCONVERGED: "Converged (|x_n-x_(n-1)| ~= 0)",
    MAXFUN: "Max. number of function evaluations reach",
    LSFAIL: "Linear search failed",
    CONSTANT: "All lower bounds are equal to the upper bounds",
    NOPROGRESS: "Unable to progress",
    USERABORT: "User requested end of minimization"
}
# Changes to interface made by Travis Oliphant, Apr. 2004 for inclusion in
# SciPy
def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0,
             bounds=None, epsilon=1e-8, scale=None, offset=None,
             messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1,
             stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1,
             rescale=-1, disp=None, callback=None):
    """
    Minimize a function with variables subject to bounds, using
    gradient information in a truncated Newton algorithm. This
    method wraps a C implementation of the algorithm.

    Parameters
    ----------
    func : callable ``func(x, *args)``
        Function to minimize.  Must do one of:

        1. Return f and g, where f is the value of the function and g its
           gradient (a list of floats).

        2. Return the function value but supply gradient function
           separately as `fprime`.

        3. Return the function value and set ``approx_grad=True``.

        If the function returns None, the minimization
        is aborted.
    x0 : array_like
        Initial estimate of minimum.
    fprime : callable ``fprime(x, *args)``, optional
        Gradient of `func`. If None, then either `func` must return the
        function value and the gradient (``f,g = func(x, *args)``)
        or `approx_grad` must be True.
    args : tuple, optional
        Arguments to pass to function.
    approx_grad : bool, optional
        If true, approximate the gradient numerically.
    bounds : list, optional
        (min, max) pairs for each element in x0, defining the
        bounds on that parameter. Use None or +/-inf for one of
        min or max when there is no bound in that direction.
    epsilon : float, optional
        Used if approx_grad is True. The stepsize in a finite
        difference approximation for fprime.
    scale : array_like, optional
        Scaling factors to apply to each variable. If None, the
        factors are up-low for interval bounded variables and
        1+|x| for the others. Defaults to None.
    offset : array_like, optional
        Value to subtract from each variable. If None, the
        offsets are (up+low)/2 for interval bounded variables
        and x for the others.
    messages : int, optional
        Bit mask used to select messages display during
        minimization values defined in the MSGS dict. Defaults to
        MSG_ALL.
    disp : int, optional
        Integer interface to messages. 0 = no message, 5 = all messages
    maxCGit : int, optional
        Maximum number of hessian*vector evaluations per main
        iteration. If maxCGit == 0, the direction chosen is
        -gradient; if maxCGit < 0, maxCGit is set to
        max(1,min(50,n/2)). Defaults to -1.
    maxfun : int, optional
        Maximum number of function evaluations. If None, maxfun is
        set to max(100, 10*len(x0)). Defaults to None.
    eta : float, optional
        Severity of the line search. If < 0 or > 1, set to 0.25.
        Defaults to -1.
    stepmx : float, optional
        Maximum step for the line search. May be increased during
        call. If too small, it will be set to 10.0. Defaults to 0.
    accuracy : float, optional
        Relative precision for finite difference calculations. If
        <= machine_precision, set to sqrt(machine_precision).
        Defaults to 0.
    fmin : float, optional
        Minimum function value estimate. Defaults to 0.
    ftol : float, optional
        Precision goal for the value of f in the stopping criterion.
        If ftol < 0.0, ftol is set to 0.0. Defaults to -1.
    xtol : float, optional
        Precision goal for the value of x in the stopping
        criterion (after applying x scaling factors). If xtol <
        0.0, xtol is set to sqrt(machine_precision). Defaults to
        -1.
    pgtol : float, optional
        Precision goal for the value of the projected gradient in
        the stopping criterion (after applying x scaling factors).
        If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy).
        Setting it to 0.0 is not recommended. Defaults to -1.
    rescale : float, optional
        Scaling factor (in log10) used to trigger f value
        rescaling. If 0, rescale at each iteration. If a large
        value, never rescale. If < 0, rescale is set to 1.3.
    callback : callable, optional
        Called after each iteration, as callback(xk), where xk is the
        current parameter vector.

    Returns
    -------
    x : ndarray
        The solution.
    nfeval : int
        The number of function evaluations.
    rc : int
        Return code as defined in the RCSTRINGS dict.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'TNC' `method` in particular.

    Notes
    -----
    The underlying algorithm is truncated Newton, also called
    Newton Conjugate-Gradient. This method differs from
    scipy.optimize.fmin_ncg in that

    1. It wraps a C implementation of the algorithm
    2. It allows each variable to be given an upper and lower bound.

    The algorithm incorporates the bound constraints by determining
    the descent direction as in an unconstrained truncated Newton,
    but never taking a step-size large enough to leave the space
    of feasible x's. The algorithm keeps track of a set of
    currently active constraints, and ignores them when computing
    the minimum allowable step size. (The x's associated with the
    active constraint are kept fixed.) If the maximum allowable
    step size is zero then a new constraint is added. At the end
    of each iteration one of the constraints may be deemed no
    longer active and removed. A constraint is considered
    no longer active if it is currently active
    but the gradient for that variable points inward from the
    constraint. The specific constraint removed is the one
    associated with the variable of largest index whose
    constraint is no longer active.

    References
    ----------
    Wright S., Nocedal J. (2006), 'Numerical Optimization'

    Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method",
    SIAM Journal of Numerical Analysis 21, pp. 770-778
    """
    # handle fprime/approx_grad: decide how gradients will be obtained.
    if approx_grad:
        fun = func
        jac = None
    elif fprime is None:
        # func returns (f, g); MemoizeJac caches g so each call is reused.
        fun = MemoizeJac(func)
        jac = fun.derivative
    else:
        fun = func
        jac = fprime

    if disp is not None:  # disp takes precedence over messages
        mesg_num = disp
    else:
        mesg_num = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
                    4:MSG_EXIT, 5:MSG_ALL}.get(messages, MSG_ALL)
    # build options for the `minimize`-style implementation below
    opts = {'eps': epsilon,
            'scale': scale,
            'offset': offset,
            'mesg_num': mesg_num,
            'maxCGit': maxCGit,
            'maxiter': maxfun,
            'eta': eta,
            'stepmx': stepmx,
            'accuracy': accuracy,
            'minfev': fmin,
            'ftol': ftol,
            'xtol': xtol,
            'gtol': pgtol,
            'rescale': rescale,
            'disp': False}

    res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **opts)

    # Preserve the legacy (x, nfeval, rc) return shape.
    return res['x'], res['nfev'], res['status']
def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None,
                  eps=1e-8, scale=None, offset=None, mesg_num=None,
                  maxCGit=-1, maxiter=None, eta=-1, stepmx=0, accuracy=0,
                  minfev=0, ftol=-1, xtol=-1, gtol=-1, rescale=-1, disp=False,
                  callback=None, **unknown_options):
    """
    Minimize a scalar function of one or more variables using a truncated
    Newton (TNC) algorithm.

    Options
    -------
    eps : float
        Step size used for numerical approximation of the jacobian.
    scale : list of floats
        Scaling factors to apply to each variable. If None, the
        factors are up-low for interval bounded variables and
        1+|x| for the others. Defaults to None.
    offset : float
        Value to subtract from each variable. If None, the
        offsets are (up+low)/2 for interval bounded variables
        and x for the others.
    disp : bool
        Set to True to print convergence messages.
    maxCGit : int
        Maximum number of hessian*vector evaluations per main
        iteration. If maxCGit == 0, the direction chosen is
        -gradient; if maxCGit < 0, maxCGit is set to
        max(1,min(50,n/2)). Defaults to -1.
    maxiter : int
        Maximum number of function evaluations. If None, `maxiter` is
        set to max(100, 10*len(x0)). Defaults to None.
    eta : float
        Severity of the line search. If < 0 or > 1, set to 0.25.
        Defaults to -1.
    stepmx : float
        Maximum step for the line search. May be increased during
        call. If too small, it will be set to 10.0. Defaults to 0.
    accuracy : float
        Relative precision for finite difference calculations. If
        <= machine_precision, set to sqrt(machine_precision).
        Defaults to 0.
    minfev : float
        Minimum function value estimate. Defaults to 0.
    ftol : float
        Precision goal for the value of f in the stopping criterion.
        If ftol < 0.0, ftol is set to 0.0. Defaults to -1.
    xtol : float
        Precision goal for the value of x in the stopping
        criterion (after applying x scaling factors). If xtol <
        0.0, xtol is set to sqrt(machine_precision). Defaults to
        -1.
    gtol : float
        Precision goal for the value of the projected gradient in
        the stopping criterion (after applying x scaling factors).
        If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy).
        Setting it to 0.0 is not recommended. Defaults to -1.
    rescale : float
        Scaling factor (in log10) used to trigger f value
        rescaling. If 0, rescale at each iteration. If a large
        value, never rescale. If < 0, rescale is set to 1.3.
    """
    _check_unknown_options(unknown_options)
    # Map `minimize`-style option names back onto fmin_tnc's internal names.
    epsilon = eps
    maxfun = maxiter
    fmin = minfev
    pgtol = gtol

    x0 = asfarray(x0).flatten()
    n = len(x0)

    if bounds is None:
        bounds = [(None,None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')

    if mesg_num is not None:
        messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
                    4:MSG_EXIT, 5:MSG_ALL}.get(mesg_num, MSG_ALL)
    elif disp:
        messages = MSG_ALL
    else:
        messages = MSG_NONE

    if jac is None:
        # No gradient supplied: approximate it by finite differences.
        def func_and_grad(x):
            f = fun(x, *args)
            g = approx_fprime(x, fun, epsilon, *args)
            return f, g
    else:
        def func_and_grad(x):
            f = fun(x, *args)
            g = jac(x, *args)
            return f, g

    """
    low, up : the bounds (lists of floats)
    if low is None, the lower bounds are removed.
    if up is None, the upper bounds are removed.
    low and up defaults to None
    """
    # Convert the (min, max) pairs into parallel low/up arrays for the C
    # code, with None translated to +/-inf.
    low = zeros(n)
    up = zeros(n)
    for i in range(n):
        if bounds[i] is None:
            l, u = -inf, inf
        else:
            l,u = bounds[i]
            if l is None:
                low[i] = -inf
            else:
                low[i] = l
            if u is None:
                up[i] = inf
            else:
                up[i] = u

    # Empty arrays tell the C layer to use its default scaling/offsets.
    if scale is None:
        scale = array([])
    if offset is None:
        offset = array([])
    if maxfun is None:
        maxfun = max(100, 10*len(x0))

    rc, nf, nit, x = moduleTNC.minimize(func_and_grad, x0, low, up, scale,
                                        offset, messages, maxCGit, maxfun,
                                        eta, stepmx, accuracy, fmin, ftol,
                                        xtol, pgtol, rescale, callback)

    funv, jacv = func_and_grad(x)

    return OptimizeResult(x=x, fun=funv, jac=jacv, nfev=nf, nit=nit, status=rc,
                          message=RCSTRINGS[rc], success=(-1 < rc < 3))
if __name__ == '__main__':
    # Self-test: minimize a simple bounded function and report the outcome.
    def example():
        print("Example")

        def objective(coords):
            # f(x, y) = x^2 + |y|^3, with its analytic gradient.
            value = pow(coords[0], 2.0) + pow(abs(coords[1]), 3.0)
            grad = [2.0 * coords[0], 3.0 * pow(abs(coords[1]), 2.0)]
            if coords[1] < 0:
                grad[1] = -grad[1]
            return value, grad

        # Optimizer call: y is bounded below by 1, so the optimum is (0, 1).
        x, nf, rc = fmin_tnc(objective, [-7, 3], bounds=([-10, 1], [10, 10]))
        print("After", nf, "function evaluations, TNC returned:", RCSTRINGS[rc])
        print("x =", x)
        print("exact value = [0, 1]")
        print()

    example()
| |
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import coredata
from ..mesonlib import version_compare
from .c import CCompiler, VisualStudioCCompiler
from .compilers import (
GCC_MINGW,
gnu_winlibs,
msvc_winlibs,
ClangCompiler,
GnuCompiler,
IntelCompiler,
)
class CPPCompiler(CCompiler):
    """Generic C++ compiler: extends the C compiler with C++ specifics."""

    def __init__(self, exelist, version, is_cross, exe_wrap):
        # A child ObjCPP class may have set the language already; if so,
        # leave it alone.
        if not hasattr(self, 'language'):
            self.language = 'cpp'
        CCompiler.__init__(self, exelist, version, is_cross, exe_wrap)

    def get_display_language(self):
        return 'C++'

    def get_no_stdinc_args(self):
        return ['-nostdinc++']

    def sanity_check(self, work_dir, environment):
        code = 'class breakCCompiler;int main(int argc, char **argv) { return 0; }\n'
        return self.sanity_check_impl(work_dir, environment, 'sanitycheckcpp.cc', code)

    def get_compiler_check_args(self):
        # -fpermissive allows non-conforming code to compile, which is
        # necessary for many C++ checks. In particular, has_header_symbol is
        # too strict without it and always fails.
        return super().get_compiler_check_args() + ['-fpermissive']

    def has_header_symbol(self, hname, symbol, prefix, env, extra_args=None, dependencies=None):
        # A plain C-like symbol check may already succeed.
        if super().has_header_symbol(hname, symbol, prefix, env, extra_args, dependencies):
            return True
        # Otherwise, see whether the symbol is a class or a template.
        if extra_args is None:
            extra_args = []
        template = '''{prefix}
        #include <{header}>
        using {symbol};
        int main () {{ return 0; }}'''
        source = template.format(prefix=prefix, header=hname, symbol=symbol)
        return self.compiles(source, env, extra_args, dependencies)
class ClangCPPCompiler(ClangCompiler, CPPCompiler):
    """Clang-specific C++ compiler."""

    def __init__(self, exelist, version, cltype, is_cross, exe_wrapper=None):
        CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrapper)
        ClangCompiler.__init__(self, cltype)
        base_warns = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']
        self.warn_args = {
            '1': base_warns,
            '2': base_warns + ['-Wextra'],
            '3': base_warns + ['-Wextra', '-Wpedantic'],
        }

    def get_options(self):
        return {
            'cpp_std': coredata.UserComboOption(
                'cpp_std', 'C++ language standard to use',
                ['none', 'c++03', 'c++11', 'c++14', 'c++1z',
                 'gnu++11', 'gnu++14', 'gnu++1z'],
                'none'),
        }

    def get_option_compile_args(self, options):
        # Only emit -std= when the user picked an explicit standard.
        chosen = options['cpp_std']
        if chosen.value == 'none':
            return []
        return ['-std=' + chosen.value]

    def get_option_link_args(self, options):
        return []
class GnuCPPCompiler(GnuCompiler, CPPCompiler):
    """GCC-specific C++ compiler."""

    def __init__(self, exelist, version, gcc_type, is_cross, exe_wrap, defines):
        CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap)
        GnuCompiler.__init__(self, gcc_type, defines)
        base_warns = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']
        self.warn_args = {
            '1': base_warns,
            '2': base_warns + ['-Wextra'],
            '3': base_warns + ['-Wextra', '-Wpedantic'],
        }

    def get_options(self):
        opts = {
            'cpp_std': coredata.UserComboOption(
                'cpp_std', 'C++ language standard to use',
                ['none', 'c++03', 'c++11', 'c++14', 'c++1z',
                 'gnu++03', 'gnu++11', 'gnu++14', 'gnu++1z'],
                'none'),
            'cpp_debugstl': coredata.UserBooleanOption(
                'cpp_debugstl', 'STL debug mode', False),
        }
        # MinGW additionally needs a configurable set of Windows libraries.
        if self.gcc_type == GCC_MINGW:
            opts['cpp_winlibs'] = coredata.UserStringArrayOption(
                'cpp_winlibs', 'Standard Win libraries to link against',
                gnu_winlibs)
        return opts

    def get_option_compile_args(self, options):
        args = []
        chosen = options['cpp_std']
        if chosen.value != 'none':
            args.append('-std=' + chosen.value)
        if options['cpp_debugstl'].value:
            args.append('-D_GLIBCXX_DEBUG=1')
        return args

    def get_option_link_args(self, options):
        if self.gcc_type == GCC_MINGW:
            return options['cpp_winlibs'].value[:]
        return []
class IntelCPPCompiler(IntelCompiler, CPPCompiler):
    """ICC-specific C++ compiler."""

    def __init__(self, exelist, version, icc_type, is_cross, exe_wrap):
        CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap)
        IntelCompiler.__init__(self, icc_type)
        self.lang_header = 'c++-header'
        base_warns = ['-Wall', '-w3', '-diag-disable:remark',
                      '-Wpch-messages', '-Wnon-virtual-dtor']
        self.warn_args = {
            '1': base_warns,
            '2': base_warns + ['-Wextra'],
            '3': base_warns + ['-Wextra', '-Wpedantic'],
        }

    def get_options(self):
        # The set of supported -std= values depends on the ICC version.
        c_stds = []
        g_stds = ['gnu++98']
        if version_compare(self.version, '>=15.0.0'):
            c_stds += ['c++11', 'c++14']
            g_stds += ['gnu++11']
        if version_compare(self.version, '>=16.0.0'):
            c_stds += ['c++17']
        if version_compare(self.version, '>=17.0.0'):
            g_stds += ['gnu++14']
        return {
            'cpp_std': coredata.UserComboOption(
                'cpp_std', 'C++ language standard to use',
                ['none'] + c_stds + g_stds,
                'none'),
            'cpp_debugstl': coredata.UserBooleanOption(
                'cpp_debugstl', 'STL debug mode', False),
        }

    def get_option_compile_args(self, options):
        args = []
        chosen = options['cpp_std']
        if chosen.value != 'none':
            args.append('-std=' + chosen.value)
        if options['cpp_debugstl'].value:
            args.append('-D_GLIBCXX_DEBUG=1')
        return args

    def get_option_link_args(self, options):
        return []

    def has_multi_arguments(self, args, env):
        # Promote ICC's "unknown option" remark (10006) to an error so that
        # unsupported flags are correctly reported as unsupported.
        return super().has_multi_arguments(args + ['-diag-error', '10006'], env)
class VisualStudioCPPCompiler(VisualStudioCCompiler, CPPCompiler):
    """MSVC-specific C++ compiler."""

    def __init__(self, exelist, version, is_cross, exe_wrap, is_64):
        self.language = 'cpp'
        VisualStudioCCompiler.__init__(self, exelist, version, is_cross, exe_wrap, is_64)
        self.base_options = ['b_pch']  # FIXME add lto, pgo and the like

    def get_options(self):
        return {
            'cpp_eh': coredata.UserComboOption(
                'cpp_eh', 'C++ exception handling type.',
                ['none', 'a', 's', 'sc'],
                'sc'),
            'cpp_winlibs': coredata.UserStringArrayOption(
                'cpp_winlibs', 'Windows libs to link against.',
                msvc_winlibs),
        }

    def get_option_compile_args(self, options):
        # /EHsc etc.; 'none' means no exception-handling flag at all.
        eh = options['cpp_eh']
        if eh.value == 'none':
            return []
        return ['/EH' + eh.value]

    def get_option_link_args(self, options):
        return options['cpp_winlibs'].value[:]

    def get_compiler_check_args(self):
        # Visual Studio C++ doesn't support -fpermissive (added by
        # CPPCompiler), so bypass that override and use the plain C args.
        return super(VisualStudioCCompiler, self).get_compiler_check_args()
| |
#!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
from datetime import datetime
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from models import ConflictException
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import StringMessage
from models import BooleanMessage
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import TeeShirtSize
from models import Session
from models import SessionForm
from models import SessionForms
from models import SessionQueryForm
from models import SessionQueryForms
from settings import WEB_CLIENT_ID
from settings import ANDROID_CLIENT_ID
from settings import IOS_CLIENT_ID
from settings import ANDROID_AUDIENCE
from utils import getUserId
# OAuth scope and API Explorer client ID used when authenticating callers.
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID

# Memcache keys for cached announcement / featured-speaker messages.
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
MEMCACHE_FEATURED_SPEAKER_KEY = "FEATURED_SPEAKER"

# %-template for the "nearly sold out" announcement text.
ANNOUNCEMENT_TPL = ('Last chance to attend! The following conferences '
                    'are nearly sold out: %s')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

# Default field values filled in when creating a Conference (see
# _createConferenceObject below).
DEFAULTS = {
    "city": "Default City",
    "maxAttendees": 0,
    "seatsAvailable": 0,
    "topics": ["Default", "Topic"],
}

# Default field values for a Session; presumably applied by the session
# creation endpoint — TODO confirm (not visible in this chunk).
SESS_DEFAULTS = {
    "speaker": "TBD",
    "duration": 45,
    "maxAttendees": 0,
    "typeOfSession": "Workshop",
}

# Query-form operator names -> Datastore filter operator strings.
OPERATORS = {
    'EQ': '=',
    'GT': '>',
    'GTEQ': '>=',
    'LT': '<',
    'LTEQ': '<=',
    'NE': '!='
}

# Query-form field names -> Conference model property names; presumably used
# when translating ConferenceQueryForm filters — TODO confirm.
FIELDS = {
    'CITY': 'city',
    'TOPIC': 'topics',
    'MONTH': 'month',
    'MAX_ATTENDEES': 'maxAttendees',
}
# GET-style request carrying a conference's websafe key in the URL path.
CONF_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1),
)

# Conference update: form body plus the target conference's websafe key.
CONF_POST_REQUEST = endpoints.ResourceContainer(
    ConferenceForm,
    websafeConferenceKey=messages.StringField(1),
)

# - - - Session Requests - - - - - - - - - - - - - - - - - - - - - - -

# GET-style request carrying a session's websafe key.
SESS_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeKey=messages.StringField(1),
)

# Session creation: form body plus the parent conference's websafe key.
SESS_POST_REQUEST = endpoints.ResourceContainer(
    SessionForm,
    websafeConferenceKey=messages.StringField(1),
)

# List a conference's sessions.
SESS_CONF_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1),
)

# List a conference's sessions filtered by session type.
SESS_TYPE_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1),
    typeOfSession=messages.StringField(2),
)

# List sessions by speaker (across conferences).
SESS_SPEAKER_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    speaker=messages.StringField(1),
)

# Wishlist operations keyed by a session's websafe key.
WISHLIST_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeKey=messages.StringField(1),
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api(
name='conference',
version='v1',
audiences=[ANDROID_AUDIENCE],
allowed_client_ids=[
WEB_CLIENT_ID,
API_EXPLORER_CLIENT_ID,
ANDROID_CLIENT_ID,
IOS_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Conference objects - - - - - - - - - - - - - - - - -
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# convert Date to date string; just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
    def _createConferenceObject(self, request):
        """Create or update Conference object, returning ConferenceForm/request.

        Requires an authenticated user and a non-empty 'name'.  Side
        effects: persists a Conference entity under the caller's Profile
        and enqueues a confirmation-email task.
        """
        # preload necessary data items
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)
        if not request.name:
            raise endpoints.BadRequestException(
                "Conference 'name' field required")
        # copy ConferenceForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name)
                for field in request.all_fields()}
        # output-only fields; never persisted on the entity
        del data['websafeKey']
        del data['organizerDisplayName']
        # add default values for those missing (both data model & outbound
        # Message)
        for df in DEFAULTS:
            if data[df] in (None, []):
                data[df] = DEFAULTS[df]
                setattr(request, df, DEFAULTS[df])
        # convert dates from strings to Date objects; set month based on
        # start_date (only the leading "YYYY-MM-DD" of the string is used)
        if data['startDate']:
            data['startDate'] = datetime.strptime(
                data['startDate'][:10], "%Y-%m-%d").date()
            data['month'] = data['startDate'].month
        else:
            data['month'] = 0
        if data['endDate']:
            data['endDate'] = datetime.strptime(
                data['endDate'][:10], "%Y-%m-%d").date()
        # set seatsAvailable to be same as maxAttendees on creation
        if data["maxAttendees"] > 0:
            data["seatsAvailable"] = data["maxAttendees"]
        # generate Profile Key based on user ID and Conference
        # ID based on Profile key get Conference key from ID
        p_key = ndb.Key(Profile, user_id)
        c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
        c_key = ndb.Key(Conference, c_id, parent=p_key)
        data['key'] = c_key
        data['organizerUserId'] = request.organizerUserId = user_id
        # create Conference, send email to organizer confirming
        # creation of Conference & return (modified) ConferenceForm
        Conference(**data).put()
        taskqueue.add(params={'email': user.email(),
                              'conferenceInfo': repr(request)},
                      url='/tasks/send_confirmation_email'
                      )
        return request
@ndb.transactional()
def _updateConferenceObject(self, request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name)
for field in request.all_fields()}
# update existing conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conference exists
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' %
request.websafeConferenceKey)
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# Not getting all the fields, so don't create a new object; just
# copy relevant fields from ConferenceForm to Conference object
for field in request.all_fields():
data = getattr(request, field.name)
# only copy fields where we get data
if data not in (None, []):
# special handling for dates (convert string to Date)
if field.name in ('startDate', 'endDate'):
data = datetime.strptime(data, "%Y-%m-%d").date()
if field.name == 'startDate':
conf.month = data.month
# write to Conference object
setattr(conf, field.name, data)
conf.put()
prof = ndb.Key(Profile, user_id).get()
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
    # thin endpoint wrappers around the worker methods above
    @endpoints.method(ConferenceForm, ConferenceForm, path='conference',
                      http_method='POST', name='createConference')
    def createConference(self, request):
        """Create new conference."""
        return self._createConferenceObject(request)
    @endpoints.method(CONF_POST_REQUEST, ConferenceForm,
                      path='conference/{websafeConferenceKey}',
                      http_method='PUT', name='updateConference')
    def updateConference(self, request):
        """Update conference w/provided fields & return w/updated info."""
        return self._updateConferenceObject(request)
    @endpoints.method(CONF_GET_REQUEST, ConferenceForm,
                      path='conference/{websafeConferenceKey}',
                      http_method='GET', name='getConference')
    def getConference(self, request):
        """Return requested conference (by websafeConferenceKey).

        Raises:
            endpoints.NotFoundException: key resolves to no Conference.
        """
        # get Conference object from request; bail if not found
        conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' %
                request.websafeConferenceKey)
        # the parent of a Conference key is the organizer's Profile
        prof = conf.key.parent().get()
        # return ConferenceForm
        return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='getConferencesCreated',
http_method='POST', name='getConferencesCreated')
def getConferencesCreated(self, request):
"""Return conferences created by user."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# create ancestor query for all key matches for this user
confs = Conference.query(ancestor=ndb.Key(Profile, user_id))
prof = ndb.Key(Profile, user_id).get()
# return set of ConferenceForm objects per Conference
return ConferenceForms(
items=[
self._copyConferenceToForm(
conf,
getattr(
prof,
'displayName')) for conf in confs])
def _getQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(
filtr["field"], filtr["operator"], filtr["value"])
q = q.filter(formatted_query)
return q
    def _formatFilters(self, filters):
        """Parse, check validity and format user supplied filters.

        Returns:
            (inequality_field, formatted_filters): the property name used
            with an inequality operator (or None), and filter dicts with
            'field'/'operator' translated to Datastore terms.

        Raises:
            endpoints.BadRequestException: unknown field/operator token,
                or inequality operators applied to more than one field.
        """
        formatted_filters = []
        inequality_field = None
        for f in filters:
            filtr = {field.name: getattr(f, field.name)
                     for field in f.all_fields()}
            try:
                # translate API tokens (e.g. 'CITY', 'GTEQ') into
                # Datastore property names and comparison operators
                filtr["field"] = FIELDS[filtr["field"]]
                filtr["operator"] = OPERATORS[filtr["operator"]]
            except KeyError:
                raise endpoints.BadRequestException(
                    "Filter contains invalid field or operator.")
            # Every operation except "=" is an inequality
            if filtr["operator"] != "=":
                # Datastore permits inequality filters on only one
                # property per query, so a second distinct inequality
                # field is rejected.
                if inequality_field and inequality_field != filtr["field"]:
                    raise endpoints.BadRequestException(
                        "Inequality filter is allowed on only one field.")
                else:
                    inequality_field = filtr["field"]
            formatted_filters.append(filtr)
        return (inequality_field, formatted_filters)
@endpoints.method(ConferenceQueryForms, ConferenceForms,
path='queryConferences',
http_method='POST',
name='queryConferences')
def queryConferences(self, request):
"""Query for conferences."""
conferences = self._getQuery(request)
# need to fetch organiser displayName from profiles
# get all keys and use get_multi for speed
organisers = [(ndb.Key(Profile, conf.organizerUserId))
for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return individual ConferenceForm object per Conference
return ConferenceForms(
items=[
self._copyConferenceToForm(
conf, names[
conf.organizerUserId]) for conf in conferences])
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
    def _copyProfileToForm(self, prof):
        """Copy relevant fields from Profile to ProfileForm.

        The stored teeShirtSize string is converted back to its
        TeeShirtSize enum value for the outbound form.
        """
        # copy relevant fields from Profile to ProfileForm
        pf = ProfileForm()
        for field in pf.all_fields():
            if hasattr(prof, field.name):
                # convert t-shirt string to Enum; just copy others
                if field.name == 'teeShirtSize':
                    setattr(
                        pf, field.name, getattr(
                            TeeShirtSize, getattr(
                                prof, field.name)))
                else:
                    setattr(pf, field.name, getattr(prof, field.name))
        pf.check_initialized()
        return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore, creating new one if non-existent."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get Profile from datastore
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
# create new Profile if not there
if not profile:
profile = Profile(
key=p_key,
displayName=user.nickname(),
mainEmail=user.email(),
teeShirtSize=str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile # return Profile
    def _doProfile(self, save_request=None):
        """Get user Profile and return to user, possibly updating it first.

        Args:
            save_request: optional ProfileMiniForm; when provided, any
                non-empty 'displayName'/'teeShirtSize' values are saved.

        Returns:
            ProfileForm reflecting the (possibly updated) profile.
        """
        # get user Profile
        prof = self._getProfileFromUser()
        # if saveProfile(), process user-modifyable fields
        if save_request:
            for field in ('displayName', 'teeShirtSize'):
                if hasattr(save_request, field):
                    val = getattr(save_request, field)
                    if val:
                        # str(val) also converts a TeeShirtSize enum
                        # value to its name for storage
                        setattr(prof, field, str(val))
                        # NOTE(review): put() fires once per updated
                        # field; a single put after the loop would
                        # suffice - confirm before changing.
                        prof.put()
        # return ProfileForm
        return self._copyProfileToForm(prof)
    # profile endpoints: GET reads the profile, POST saves then returns it
    @endpoints.method(message_types.VoidMessage, ProfileForm,
                      path='profile', http_method='GET', name='getProfile')
    def getProfile(self, request):
        """Return user profile."""
        return self._doProfile()
    @endpoints.method(ProfileMiniForm, ProfileForm,
                      path='profile', http_method='POST', name='saveProfile')
    def saveProfile(self, request):
        """Update & return user profile."""
        return self._doProfile(request)
# - - - Announcements - - - - - - - - - - - - - - - - - - - -
    @staticmethod
    def _cacheAnnouncement():
        """Create Announcement & assign to memcache; used by
        memcache cron job & putAnnouncement().

        Returns the announcement string ("" when nothing is cached).
        """
        # projection query: only Conference.name is needed here
        confs = Conference.query(ndb.AND(
            Conference.seatsAvailable <= 5,
            Conference.seatsAvailable > 0)
        ).fetch(projection=[Conference.name])
        if confs:
            # If there are almost sold out conferences,
            # format announcement and set it in memcache
            announcement = ANNOUNCEMENT_TPL % (
                ', '.join(conf.name for conf in confs))
            memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
        else:
            # If there are no sold out conferences,
            # delete the memcache announcements entry
            announcement = ""
            memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
        return announcement
    @endpoints.method(message_types.VoidMessage, StringMessage,
                      path='conference/announcement/get',
                      http_method='GET', name='getAnnouncement')
    def getAnnouncement(self, request):
        """Return Announcement from memcache ("" when none is cached)."""
        return StringMessage(data=memcache.get(
            MEMCACHE_ANNOUNCEMENTS_KEY) or "")
# - - - Registration - - - - - - - - - - - - - - - - - - - -
    @ndb.transactional(xg=True)
    def _conferenceRegistration(self, request, reg=True):
        """Register or unregister user for selected conference.

        Cross-group transaction: the caller's Profile and the Conference
        live in different entity groups but must change together.

        Args:
            request: message carrying websafeConferenceKey.
            reg: True to register, False to unregister.

        Returns:
            BooleanMessage - True on success, False when unregistering
            from a conference the user was not registered for.
        """
        retval = None
        prof = self._getProfileFromUser()  # get user Profile
        # check if conf exists given websafeConfKey
        # get conference; check that it exists
        wsck = request.websafeConferenceKey
        conf = ndb.Key(urlsafe=wsck).get()
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % wsck)
        # register
        if reg:
            # check if user already registered otherwise add
            if wsck in prof.conferenceKeysToAttend:
                raise ConflictException(
                    "You have already registered for this conference")
            # check if seats avail
            if conf.seatsAvailable <= 0:
                raise ConflictException(
                    "There are no seats available.")
            # register user, take away one seat
            prof.conferenceKeysToAttend.append(wsck)
            conf.seatsAvailable -= 1
            retval = True
        # unregister
        else:
            # check if user already registered
            if wsck in prof.conferenceKeysToAttend:
                # unregister user, add back one seat
                prof.conferenceKeysToAttend.remove(wsck)
                conf.seatsAvailable += 1
                retval = True
            else:
                retval = False
        # write things back to the datastore & return
        prof.put()
        conf.put()
        return BooleanMessage(data=retval)
    @endpoints.method(message_types.VoidMessage, ConferenceForms,
                      path='conferences/attending',
                      http_method='GET', name='getConferencesToAttend')
    def getConferencesToAttend(self, request):
        """Get list of conferences that user has registered for."""
        prof = self._getProfileFromUser()  # get user Profile
        conf_keys = [ndb.Key(urlsafe=wsck)
                     for wsck in prof.conferenceKeysToAttend]
        conferences = ndb.get_multi(conf_keys)
        # get organizers
        organisers = [ndb.Key(Profile, conf.organizerUserId)
                      for conf in conferences]
        profiles = ndb.get_multi(organisers)
        # put display names in a dict for easier fetching
        # NOTE(review): get_multi() yields None for missing entities; a
        # deleted organiser Profile would raise AttributeError here -
        # confirm whether that can occur in practice.
        names = {}
        for profile in profiles:
            names[profile.key.id()] = profile.displayName
        # return set of ConferenceForm objects per Conference
        return ConferenceForms(
            items=[
                self._copyConferenceToForm(
                    conf, names[
                        conf.organizerUserId]) for conf in conferences])
    # registration endpoints share one worker: POST registers,
    # DELETE unregisters
    @endpoints.method(CONF_GET_REQUEST, BooleanMessage,
                      path='conference/{websafeConferenceKey}',
                      http_method='POST', name='registerForConference')
    def registerForConference(self, request):
        """Register user for selected conference."""
        return self._conferenceRegistration(request)
    @endpoints.method(CONF_GET_REQUEST, BooleanMessage,
                      path='conference/{websafeConferenceKey}',
                      http_method='DELETE', name='unregisterFromConference')
    def unregisterFromConference(self, request):
        """Unregister user for selected conference."""
        return self._conferenceRegistration(request, reg=False)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='filterPlayground',
http_method='GET', name='filterPlayground')
def filterPlayground(self, request):
"""Filter Playground"""
q = Conference.query()
# field = "city"
# operator = "="
# value = "London"
# f = ndb.query.FilterNode(field, operator, value)
# q = q.filter(f)
q = q.filter(Conference.city == "London")
q = q.filter(Conference.topics == "Medical Innovations")
q = q.filter(Conference.month == 6)
return ConferenceForms(
items=[self._copyConferenceToForm(conf, "") for conf in q]
)
# - - - Sessions - - - - - - - - - - - - - - - - - - - -
# - Based off of Conference Objects: lines 149-390
def _copySessionToForm(self, sess, conf):
"""Copy relevant fields from Session to SessionForm."""
sf = SessionForm()
for field in sf.all_fields():
if hasattr(sess, field.name):
# convert Date to date string; just copy others
if field.name.endswith('date'):
setattr(sf, field.name, str(getattr(sess, field.name)))
elif field.name.endswith('Time'):
setattr(sf, field.name, str(getattr(sess, field.name)))
else:
setattr(sf, field.name, getattr(sess, field.name))
elif field.name == "websafeKey":
setattr(sf, field.name, sess.key.urlsafe())
if conf:
setattr(sf, 'conference', conf)
sf.check_initialized()
return sf
    def _createSessionObject(self, request):
        """Create Session object, returning SessionForm/request.

        Only the conference organizer may add sessions.  Side effects:
        persists a Session under the Conference and enqueues a task that
        may promote the speaker to featured speaker.
        """
        # preload necessary data items
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)
        if not request.name:
            raise endpoints.BadRequestException(
                "Session 'name' field required")
        # fetch conference
        conf_key = ndb.Key(urlsafe=request.websafeConferenceKey)
        conf = conf_key.get()
        # check existence and ownership of conference
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found for key: %s' %
                request.websafeConferenceKey)
        if user_id != conf.organizerUserId:
            raise endpoints.ForbiddenException(
                'Only the organizer can add sessions.')
        # copy SessionForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name)
                for field in request.all_fields()}
        # output-only / URL-only fields; never stored on the entity
        del data['conference']
        del data['websafeKey']
        del data['websafeConferenceKey']
        # add default values for those missing (both data model & outbound
        # Message)
        for df in SESS_DEFAULTS:
            if data[df] in (None, []):
                data[df] = SESS_DEFAULTS[df]
                setattr(request, df, SESS_DEFAULTS[df])
        # convert dates from strings to Date objects
        # (only the leading "YYYY-MM-DD" of the string is used)
        if data['date']:
            date = data['date'][:10]
            data['date'] = datetime.strptime(date, "%Y-%m-%d").date()
        # convert start time from its leading "HH:MM"
        if data['startTime']:
            startTime = data['startTime'][:5]
            data['startTime'] = datetime.strptime(startTime, "%H:%M").time()
        # normalise typeOfSession to lower case; queries against this
        # property must lower-case their operand to match
        if data['typeOfSession']:
            data['typeOfSession'] = data['typeOfSession'].lower()
        # generate Conf Key based on Conference
        # ID based on Conf key get Session key from ID
        sess_id = Session.allocate_ids(size=1, parent=conf_key)[0]
        sess_key = ndb.Key(Session, sess_id, parent=conf_key)
        data['key'] = sess_key
        # creation of Session & return (modified) SessionForm
        Session(**data).put()
        # check if featured speaker
        taskqueue.add(params={'speaker': data['speaker']},
                      url='/tasks/set_featured_speaker'
                      )
        return self._copySessionToForm(sess_key.get(), conf.name)
    @endpoints.method(
        SESS_POST_REQUEST,
        SessionForm,
        path='session/{websafeConferenceKey}',
        http_method='POST',
        name='createSession')
    def createSession(self, request):
        """Create new session under a conference (organizer only)."""
        return self._createSessionObject(request)
    @endpoints.method(SESS_GET_REQUEST, SessionForm,
                      path='session/{websafeKey}',
                      http_method='GET', name='getSession')
    def getSession(self, request):
        """Return requested session (by websafeKey).

        Raises:
            endpoints.NotFoundException: key resolves to no Session.
        """
        # get Session object from request; bail if not found
        sess = ndb.Key(urlsafe=request.websafeKey).get()
        if not sess:
            raise endpoints.NotFoundException(
                'No session found with key: %s' %
                request.websafeKey)
        # a Session's parent key is its Conference
        conf = sess.key.parent().get()
        # return SessionForm
        return self._copySessionToForm(sess, conf.name)
@endpoints.method(SESS_CONF_GET_REQUEST, SessionForms,
path='sessions/{websafeConferenceKey}',
http_method='GET', name='getConferenceSessions')
def getConferenceSessions(self, request):
"""Return requested sessions (by websafeConferenceKey)."""
conf_key = ndb.Key(urlsafe=request.websafeConferenceKey)
conf = conf_key.get()
if not conf:
raise endpoints.NotFoundException(
'No conference was found for key: %s' %
request.websafeConferenceKey)
# fetch sessions of conference, if conference exists
sessions = Session.query(ancestor=conf_key)
return SessionForms(
items=[
self._copySessionToForm(
session,
session.key.parent().get().name) for session in sessions])
@endpoints.method(SESS_TYPE_GET_REQUEST, SessionForms,
path='sessions/{websafeConferenceKey}/{typeOfSession}',
http_method='GET', name='getConferenceSessionsByType')
def getConferenceSessionsByType(self, request):
"""Return requested sessions (by websafeConferenceKey)."""
conf_key = ndb.Key(urlsafe=request.websafeConferenceKey)
conf = conf_key.get()
if not conf:
raise endpoints.NotFoundException(
'No conference was found for key: %s' %
request.websafeConferenceKey)
# fetch sessions of conference, if conference exists
# - filter by type
sessions = Session.query(ancestor=conf_key).filter(
Session.typeOfSession == request.typeOfSession)
return SessionForms(
items=[
self._copySessionToForm(
session,
session.key.parent().get().name) for session in sessions])
    @endpoints.method(SESS_SPEAKER_GET_REQUEST, SessionForms,
                      path='speakerSessions/{speaker}',
                      http_method='GET', name='getSessionsBySpeaker')
    def getSessionsBySpeaker(self, request):
        """Returns requested sessions (by speaker), across all conferences."""
        sessions = Session.query(Session.speaker == request.speaker)
        # each session's parent Conference is fetched for its name
        return SessionForms(
            items=[
                self._copySessionToForm(
                    session,
                    session.key.parent().get().name) for session in sessions])
# - - - Wish List - - - - - - - - - - - - - - - - - - - -
# - Based off of Conference Objects: lines 506-593
    @ndb.transactional(xg=True)
    def _wishlistAddition(self, request, reg=True):
        """Add or Remove for selected wishlist.

        Args:
            request: message carrying the session's websafeKey.
            reg: True to add to the wishlist, False to remove.

        Returns:
            BooleanMessage - True on success, False when removing a
            session that was not on the wishlist.
        """
        retval = None
        prof = self._getProfileFromUser()  # get user Profile
        # check if sess exists given websafeConfKey
        # get session; check that it exists
        wsck = request.websafeKey
        sess = ndb.Key(urlsafe=wsck).get()
        if not sess:
            raise endpoints.NotFoundException(
                'No session found with key: %s' % wsck)
        # register
        if reg:
            # check if user already registered otherwise add
            if wsck in prof.sessionWishListKeys:
                raise ConflictException(
                    "You have already added this session to your wishlist.")
            # register sess
            prof.sessionWishListKeys.append(wsck)
            retval = True
        # unregister
        else:
            if wsck in prof.sessionWishListKeys:
                prof.sessionWishListKeys.remove(wsck)
                retval = True
            else:
                retval = False
        # write things back to the datastore & return
        # NOTE(review): sess is never modified here, so sess.put() looks
        # redundant - confirm before removing it.
        prof.put()
        sess.put()
        return BooleanMessage(data=retval)
    @endpoints.method(message_types.VoidMessage, SessionForms,
                      path='wishlist',
                      http_method='GET', name='getSessionsInWishlist')
    def getSessionsInWishlist(self, request):
        """Get list of sessions on the user's wishlist."""
        # get sessions from user profile
        prof = self._getProfileFromUser()  # get user Profile
        sess_keys = [ndb.Key(urlsafe=wsck)
                     for wsck in prof.sessionWishListKeys]
        sessions = ndb.get_multi(sess_keys)
        # each session's parent Conference is fetched for its name
        return SessionForms(
            items=[
                self._copySessionToForm(
                    session,
                    session.key.parent().get().name) for session in sessions])
    # wishlist endpoints share one worker: POST adds, DELETE removes
    @endpoints.method(WISHLIST_GET_REQUEST, BooleanMessage,
                      path='wishlist/{websafeKey}',
                      http_method='POST', name='addSessionToWishlist')
    def addSessionToWishlist(self, request):
        """Register session for selected user's wishlist."""
        return self._wishlistAddition(request)
    @endpoints.method(WISHLIST_GET_REQUEST, BooleanMessage,
                      path='wishlist/{websafeKey}',
                      http_method='DELETE', name='deleteSessionInWishlist')
    def deleteSessionInWishlist(self, request):
        """Unregister session for selected user's wishlist."""
        return self._wishlistAddition(request, reg=False)
# - - - Queries - - - - - - - - - - - - - - - - - - - -
# - Based off of Session Objects: lines 715-771
    @endpoints.method(message_types.VoidMessage, SessionForms,
                      path='sessions/tbd',
                      http_method='GET', name='getTBDSessions')
    def getTBDSessions(self, request):
        """Return requested sessions (by absence of speaker).

        "TBD" is the default assigned by session creation when no
        speaker was supplied.
        """
        sessions = Session.query().filter(Session.speaker == "TBD")
        return SessionForms(
            items=[
                self._copySessionToForm(
                    session,
                    session.key.parent().get().name) for session in sessions])
    @endpoints.method(message_types.VoidMessage, SessionForms,
                      path='sessions/hackathons',
                      http_method='GET', name='getHackathonSessions')
    def getHackathonSessions(self, request):
        """Return requested sessions (hackathons).

        typeOfSession is stored lower-cased at creation, so the literal
        "hackathon" matches regardless of how the type was entered.
        """
        sessions = Session.query().filter(Session.typeOfSession == "hackathon")
        return SessionForms(
            items=[
                self._copySessionToForm(
                    session,
                    session.key.parent().get().name) for session in sessions])
@endpoints.method(message_types.VoidMessage, SessionForms,
path='sessions/query',
http_method='GET', name='getEarlyNonWorkshops')
def getEarlyNonWorkshops(self, request):
"""Return non-workshop sessions before 7 pm"""
sessionQueryTime = datetime.strptime("19:00", "%H:%M").time()
# Find Workshop Sessions
not_workshop_sessions = Session.query().filter(
Session.typeOfSession != "workshop")
sessions = []
# Add sessions in `early_sessions` that are not in `workshop_sessions`
# and add to the array to return them
for session in not_workshop_sessions:
if session.startTime:
if session.startTime < sessionQueryTime:
sessions.append(session)
return SessionForms(
items=[
self._copySessionToForm(
session,
session.key.parent().get().name) for session in sessions])
# - - - Featured Speaker - - - - - - - - - - - - - - - - - - - -
# - Based off of Conference Objects: lines 472-502, and Session objects: lines 707-717
    @staticmethod
    def _cacheFeaturedSpeaker(speaker):
        """Check and Assign Featured Speaker to memcache.

        Invoked via the /tasks/set_featured_speaker task queue URL; the
        speaker is cached only when they have more than one session.
        """
        # query for sessions by speaker
        sessionsBySpeaker = Session.query(Session.speaker == speaker)
        # memcache if more than one session
        if sessionsBySpeaker.count() > 1:
            memcache.set(MEMCACHE_FEATURED_SPEAKER_KEY, speaker)
        # fetch speaker (NOTE(review): the return value appears unused
        # when this runs as a task handler - confirm)
        return StringMessage(data=memcache.get(
            MEMCACHE_FEATURED_SPEAKER_KEY) or "")
    @endpoints.method(message_types.VoidMessage, StringMessage,
                      path='featured_speaker/get',
                      http_method='GET', name='getFeaturedSpeaker')
    def getFeaturedSpeaker(self, request):
        """Return Featured Speaker from memcache ("" when none is set)."""
        return StringMessage(data=memcache.get(
            MEMCACHE_FEATURED_SPEAKER_KEY) or "")
api = endpoints.api_server([ConferenceApi])  # register API
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sparse_image_warp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.image.python.ops import sparse_image_warp
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.training import momentum
class SparseImageWarpTest(test_util.TensorFlowTestCase):
def setUp(self):
np.random.seed(0)
def testGetBoundaryLocations(self):
image_height = 11
image_width = 11
num_points_per_edge = 4
locs = sparse_image_warp._get_boundary_locations(image_height, image_width,
num_points_per_edge)
num_points = locs.shape[0]
self.assertEqual(num_points, 4 + 4 * num_points_per_edge)
locs = [(locs[i, 0], locs[i, 1]) for i in range(num_points)]
for i in (0, image_height - 1):
for j in (0, image_width - 1):
self.assertIn((i, j), locs, '{},{} not in the locations'.format(i, j))
for i in (2, 4, 6, 8):
for j in (0, image_width - 1):
self.assertIn((i, j), locs, '{},{} not in the locations'.format(i, j))
for i in (0, image_height - 1):
for j in (2, 4, 6, 8):
self.assertIn((i, j), locs, '{},{} not in the locations'.format(i, j))
def testGetGridLocations(self):
image_height = 5
image_width = 3
grid = sparse_image_warp._get_grid_locations(image_height, image_width)
for i in range(image_height):
for j in range(image_width):
self.assertEqual(grid[i, j, 0], i)
self.assertEqual(grid[i, j, 1], j)
def testZeroShift(self):
"""Run assertZeroShift for various hyperparameters."""
for order in (1, 2):
for regularization in (0, 0.01):
for num_boundary_points in (0, 1):
self.assertZeroShift(order, regularization, num_boundary_points)
def assertZeroShift(self, order, regularization, num_boundary_points):
"""Check that warping with zero displacements doesn't change the image."""
batch_size = 1
image_height = 4
image_width = 4
channels = 3
image = np.random.uniform(
size=[batch_size, image_height, image_width, channels])
input_image_op = constant_op.constant(np.float32(image))
control_point_locations = [[1., 1.], [2., 2.], [2., 1.]]
control_point_locations = constant_op.constant(
np.float32(np.expand_dims(control_point_locations, 0)))
control_point_displacements = np.zeros(
control_point_locations.shape.as_list())
control_point_displacements = constant_op.constant(
np.float32(control_point_displacements))
(warped_image_op, flow_field) = sparse_image_warp.sparse_image_warp(
input_image_op,
control_point_locations,
control_point_locations + control_point_displacements,
interpolation_order=order,
regularization_weight=regularization,
num_boundary_points=num_boundary_points)
with self.cached_session() as sess:
warped_image, input_image, _ = sess.run(
[warped_image_op, input_image_op, flow_field])
self.assertAllClose(warped_image, input_image)
def testMoveSinglePixel(self):
"""Run assertMoveSinglePixel for various hyperparameters and data types."""
for order in (1, 2):
for num_boundary_points in (1, 2):
for type_to_use in (dtypes.float32, dtypes.float64):
self.assertMoveSinglePixel(order, num_boundary_points, type_to_use)
def assertMoveSinglePixel(self, order, num_boundary_points, type_to_use):
"""Move a single block in a small grid using warping."""
batch_size = 1
image_height = 7
image_width = 7
channels = 3
image = np.zeros([batch_size, image_height, image_width, channels])
image[:, 3, 3, :] = 1.0
input_image_op = constant_op.constant(image, dtype=type_to_use)
# Place a control point at the one white pixel.
control_point_locations = [[3., 3.]]
control_point_locations = constant_op.constant(
np.float32(np.expand_dims(control_point_locations, 0)),
dtype=type_to_use)
# Shift it one pixel to the right.
control_point_displacements = [[0., 1.0]]
control_point_displacements = constant_op.constant(
np.float32(np.expand_dims(control_point_displacements, 0)),
dtype=type_to_use)
(warped_image_op, flow_field) = sparse_image_warp.sparse_image_warp(
input_image_op,
control_point_locations,
control_point_locations + control_point_displacements,
interpolation_order=order,
num_boundary_points=num_boundary_points)
with self.cached_session() as sess:
warped_image, input_image, flow = sess.run(
[warped_image_op, input_image_op, flow_field])
# Check that it moved the pixel correctly.
self.assertAllClose(
warped_image[0, 4, 5, :],
input_image[0, 4, 4, :],
atol=1e-5,
rtol=1e-5)
# Test that there is no flow at the corners.
for i in (0, image_height - 1):
for j in (0, image_width - 1):
self.assertAllClose(
flow[0, i, j, :], np.zeros([2]), atol=1e-5, rtol=1e-5)
def load_image(self, image_file, sess):
image_op = image_ops.decode_png(
io_ops.read_file(image_file), dtype=dtypes.uint8, channels=4)[:, :, 0:3]
return sess.run(image_op)
def testSmileyFace(self):
"""Check warping accuracy by comparing to hardcoded warped images."""
test_data_dir = test.test_src_dir_path('contrib/image/python/'
'kernel_tests/test_data/')
input_file = test_data_dir + 'Yellow_Smiley_Face.png'
with self.cached_session() as sess:
input_image = self.load_image(input_file, sess)
control_points = np.asarray([[64, 59], [180 - 64, 59], [39, 111],
[180 - 39, 111], [90, 143], [58, 134],
[180 - 58, 134]]) # pyformat: disable
control_point_displacements = np.asarray(
[[-10.5, 10.5], [10.5, 10.5], [0, 0], [0, 0], [0, -10], [-20, 10.25],
[10, 10.75]])
control_points_op = constant_op.constant(
np.expand_dims(np.float32(control_points[:, [1, 0]]), 0))
control_point_displacements_op = constant_op.constant(
np.expand_dims(np.float32(control_point_displacements[:, [1, 0]]), 0))
float_image = np.expand_dims(np.float32(input_image) / 255, 0)
input_image_op = constant_op.constant(float_image)
for interpolation_order in (1, 2, 3):
for num_boundary_points in (0, 1, 4):
warp_op, _ = sparse_image_warp.sparse_image_warp(
input_image_op,
control_points_op,
control_points_op + control_point_displacements_op,
interpolation_order=interpolation_order,
num_boundary_points=num_boundary_points)
with self.cached_session() as sess:
warped_image = sess.run(warp_op)
out_image = np.uint8(warped_image[0, :, :, :] * 255)
target_file = (
test_data_dir +
'Yellow_Smiley_Face_Warp-interp' + '-{}-clamp-{}.png'.format(
interpolation_order, num_boundary_points))
target_image = self.load_image(target_file, sess)
# Check that the target_image and out_image difference is no
# bigger than 2 (on a scale of 0-255). Due to differences in
# floating point computation on different devices, the float
# output in warped_image may get rounded to a different int
# than that in the saved png file loaded into target_image.
self.assertAllClose(target_image, out_image, atol=2, rtol=1e-3)
def testThatBackpropRuns(self):
  """Run optimization to ensure that gradients can be computed.

  This is a smoke test: it does not check the optimized values, only that a
  few Momentum steps through sparse_image_warp execute without error.
  """
  batch_size = 1
  image_height = 9
  image_width = 12
  # Trainable variable so gradients must flow back through the warp op.
  image = variables.Variable(
      np.float32(
          np.random.uniform(size=[batch_size, image_height, image_width, 3])))
  control_point_locations = [[3., 3.]]
  control_point_locations = constant_op.constant(
      np.float32(np.expand_dims(control_point_locations, 0)))
  control_point_displacements = [[0.25, -0.5]]
  control_point_displacements = constant_op.constant(
      np.float32(np.expand_dims(control_point_displacements, 0)))
  warped_image, _ = sparse_image_warp.sparse_image_warp(
      image,
      control_point_locations,
      control_point_locations + control_point_displacements,
      num_boundary_points=3)

  loss = math_ops.reduce_mean(math_ops.abs(warped_image - image))
  optimizer = momentum.MomentumOptimizer(0.001, 0.9)
  grad = gradients.gradients(loss, [image])
  # Clip to keep the smoke test numerically tame.
  grad, _ = clip_ops.clip_by_global_norm(grad, 1.0)
  opt_func = optimizer.apply_gradients(zip(grad, [image]))
  init_op = variables.global_variables_initializer()

  with self.cached_session() as sess:
    sess.run(init_op)
    for _ in range(5):
      # A handful of steps is enough to prove gradients are computable.
      sess.run([loss, opt_func])
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  googletest.main()
| |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import re
import sys
import time
from django import forms
from django.contrib import messages
from django.contrib.auth.models import User
from django.db.models import Q
from django.http import HttpResponse, QueryDict
from django.shortcuts import redirect
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from desktop.appmanager import get_apps_dict
from desktop.context_processors import get_app_name
from desktop.lib.paginator import Paginator
from desktop.lib.django_util import JsonResponse
from desktop.lib.django_util import copy_query_dict, format_preserving_redirect, render
from desktop.lib.django_util import login_notrequired, get_desktop_uri_prefix
from desktop.lib.exceptions_renderable import PopupException
from desktop.models import Document
from desktop.lib.parameterization import find_variables
from notebook.models import escape_rows
import beeswax.forms
import beeswax.design
import beeswax.management.commands.beeswax_install_examples
from beeswax import common, data_export, models
from beeswax.models import QueryHistory, SavedQuery, Session
from beeswax.server import dbms
from beeswax.server.dbms import expand_exception, get_query_server_config, QueryServerException
LOG = logging.getLogger(__name__)
def index(request):
  """App landing page: goes straight to the query editor view."""
  return execute_query(request)
"""
Design views
"""
def save_design(request, form, type_, design, explicit_save):
  """
  save_design(request, form, type_, design, explicit_save) -> SavedQuery

  A helper method to save the design:
  * If ``explicit_save``, then we save the data in the current design.
  * If the user clicked the submit button, we do NOT overwrite the current
    design. Instead, we create a new "auto" design (iff the user modified
    the data). This new design is named after the current design, with the
    AUTO_DESIGN_SUFFIX to signify that it's different.

  Need to return a SavedQuery because we may end up with a different one.
  Assumes that form.saveform is the SaveForm, and that it is valid.
  """
  # Raises via the Document permission helpers if the user may not read it.
  authorized_get_design(request, design.id)
  assert form.saveform.is_valid()
  sub_design_form = form  # Beeswax/Impala case

  # Pick the serializer class for the query type; Spark wraps its form
  # differently (the query sub-form is what gets serialized).
  if type_ == models.HQL:
    design_cls = beeswax.design.HQLdesign
  elif type_ == models.IMPALA:
    design_cls = beeswax.design.HQLdesign
  elif type_ == models.SPARK:
    # Imported lazily so beeswax works without the spark app installed.
    from spark.design import SparkDesign
    design_cls = SparkDesign
    sub_design_form = form.query
  else:
    raise ValueError(_('Invalid design type %(type)s') % {'type': type_})

  design_obj = design_cls(sub_design_form, query_type=type_)
  name = form.saveform.cleaned_data['name']
  desc = form.saveform.cleaned_data['desc']

  return _save_design(request.user, design, type_, design_obj, explicit_save, name, desc)
def _save_design(user, design, type_, design_obj, explicit_save, name=None, desc=None):
  """Persist ``design_obj`` onto a SavedQuery and its Document, possibly cloning.

  Explicit save writes into the current design (if the user may write it);
  otherwise a new auto-saved clone is created only when the data changed.
  Returns the SavedQuery actually written, which may differ from ``design``.
  """
  # Design here means SavedQuery
  old_design = design
  new_data = design_obj.dumps()

  # Auto save if (1) the user didn't click "save", and (2) the data is different.
  # Create an history design if the user is executing a shared design.
  # Don't generate an auto-saved design if the user didn't change anything.
  if explicit_save and (not design.doc.exists() or design.doc.get().can_write_or_exception(user)):
    design.name = name
    design.desc = desc
    design.is_auto = False
  elif design_obj != old_design.get_design():
    # Auto save iff the data is different
    if old_design.id is not None:
      # Clone iff the parent design isn't a new unsaved model
      design = old_design.clone(new_owner=user)
      if not old_design.is_auto:
        design.name = old_design.name + models.SavedQuery.AUTO_DESIGN_SUFFIX
    else:
      design.name = models.SavedQuery.DEFAULT_NEW_DESIGN_NAME
    design.is_auto = True

  # NOTE(review): name is truncated to the 64-char model field; presumably
  # matches SavedQuery.name max_length — confirm against the model.
  design.name = design.name[:64]
  design.type = type_
  design.data = new_data

  design.save()

  LOG.info('Saved %s design "%s" (id %s) for %s' % (explicit_save and '' or 'auto ', design.name, design.id, design.owner))

  # Keep the Document metadata in sync, creating the link on first save.
  if design.doc.exists():
    design.doc.update(name=design.name, description=design.desc)
  else:
    Document.objects.link(design, owner=design.owner, extra=design.type, name=design.name, description=design.desc)

  if design.is_auto:
    design.doc.get().add_to_history()

  return design
def delete_design(request):
  """Delete (or trash) the designs selected in the POST body.

  GET renders a confirmation page. POST either sends each design's Document
  to the trash (default) or, with skipTrash, hard-deletes document links and
  the design itself. Non-existent ids abort the whole operation.
  """
  if request.method == 'POST':
    ids = request.POST.getlist('designs_selection')
    designs = dict([(design_id, authorized_get_design(request, design_id, owner_only=True)) for design_id in ids])

    # A None value means an id did not resolve; refuse to delete anything.
    if None in designs.values():
      LOG.error('Cannot delete non-existent design(s) %s' % ','.join([key for key, name in designs.items() if name is None]))
      return list_designs(request)

    for design in designs.values():
      if request.POST.get('skipTrash', 'false') == 'false':
        # Soft delete: recoverable via restore_design.
        design.doc.get().send_to_trash()
      else:
        # Hard delete: remove document links and the SavedQuery row.
        design.doc.all().delete()
        design.delete()
    return redirect(reverse(get_app_name(request) + ':list_designs'))
  else:
    return render('confirm.mako', request, {'url': request.path, 'title': _('Delete design(s)?')})
def restore_design(request):
  """Restore trashed designs selected in the POST body.

  GET renders a confirmation page; POST restores each selected design's
  Document from the trash. Unknown ids abort the whole operation.
  """
  if request.method != 'POST':
    return render('confirm.mako', request, {'url': request.path, 'title': _('Restore design(s)?')})

  selected_ids = request.POST.getlist('designs_selection')
  designs = dict([(design_id, authorized_get_design(request, design_id)) for design_id in selected_ids])

  # Any unresolved id means we restore nothing.
  if None in designs.values():
    LOG.error('Cannot restore non-existent design(s) %s' % ','.join([key for key, name in designs.items() if name is None]))
    return list_designs(request)

  for restored in designs.values():
    restored.doc.get().restore_from_trash()

  return redirect(reverse(get_app_name(request) + ':list_designs'))
def clone_design(request, design_id):
  """Clone a design belonging to any user"""
  design = authorized_get_design(request, design_id)

  if design is None:
    LOG.error('Cannot clone non-existent design %s' % (design_id,))
    return list_designs(request)

  # Clone both the SavedQuery and its Document, reassigning ownership.
  copy = design.clone(request.user)
  copy.save()

  name = copy.name + '-copy'
  design.doc.get().copy(content_object=copy, name=name, owner=request.user)

  messages.info(request, _('Copied design: %(name)s') % {'name': design.name})

  return format_preserving_redirect(request, reverse(get_app_name(request) + ':execute_design', kwargs={'design_id': copy.id}))
def list_designs(request):
  """
  View function for show all saved queries.

  We get here from /beeswax/list_designs?filterargs, with the options being:
    page=<n>    - Controls pagination. Defaults to 1.
    user=<name> - Show design items belonging to a user. Default to all users.
    type=<type> - <type> is "hql", for saved query type. Default to show all.
    sort=<key>  - Sort by the attribute <key>, which is one of:
                    "date", "name", "desc", and "type" (design type)
                  Accepts the form "-date", which sort in descending order.
                  Default to "-date".
    text=<frag> - Search for fragment "frag" in names and descriptions.
  """
  DEFAULT_PAGE_SIZE = 20
  app_name = get_app_name(request)

  # Extract the saved query list. All design-related GET params carry the
  # 'q-' prefix (see _list_designs / _copy_prefix).
  prefix = 'q-'
  querydict_query = _copy_prefix(prefix, request.GET)
  # Manually limit up the user filter.
  querydict_query[ prefix + 'type' ] = app_name
  # Get search filter input if any
  search_filter = request.GET.get('text', None)
  if search_filter is not None:
    querydict_query[ prefix + 'text' ] = search_filter

  page, filter_params = _list_designs(request.user, querydict_query, DEFAULT_PAGE_SIZE, prefix)

  return render('list_designs.mako', request, {
    'page': page,
    'filter_params': filter_params,
    'prefix': prefix,
    'user': request.user,
    # Ids serialized for the client-side selection checkboxes.
    'designs_json': json.dumps([query.id for query in page.object_list])
  })
def list_trashed_designs(request):
  """List designs the user has moved to the trash.

  Mirrors ``list_designs`` (same 'q-' prefixed GET params) but pulls from the
  trashed Documents instead of the available ones.
  """
  DEFAULT_PAGE_SIZE = 20
  app_name= get_app_name(request)
  user = request.user

  # Extract the saved query list.
  prefix = 'q-'
  querydict_query = _copy_prefix(prefix, request.GET)
  # Manually limit up the user filter.
  querydict_query[ prefix + 'type' ] = app_name
  # Get search filter input if any
  search_filter = request.GET.get('text', None)
  if search_filter is not None:
    querydict_query[ prefix + 'text' ] = search_filter

  page, filter_params = _list_designs(user, querydict_query, DEFAULT_PAGE_SIZE, prefix, is_trashed=True)

  return render('list_trashed_designs.mako', request, {
    'page': page,
    'filter_params': filter_params,
    'prefix': prefix,
    'user': request.user,
    'designs_json': json.dumps([query.id for query in page.object_list])
  })
def my_queries(request):
  """
  View a mix of history and saved queries.
  It understands all the GET params in ``list_query_history`` (with a ``h-`` prefix)
  and those in ``list_designs`` (with a ``q-`` prefix). The only thing it disallows
  is the ``user`` filter, since this view only shows what belongs to the user.
  """
  DEFAULT_PAGE_SIZE = 30
  app_name= get_app_name(request)

  # Extract the history list.
  prefix = 'h-'
  querydict_history = _copy_prefix(prefix, request.GET)
  # Manually limit up the user filter.
  querydict_history[ prefix + 'user' ] = request.user
  querydict_history[ prefix + 'type' ] = app_name

  hist_page, hist_filter = _list_query_history(request.user,
                                               querydict_history,
                                               DEFAULT_PAGE_SIZE,
                                               prefix)

  # Extract the saved query list.
  prefix = 'q-'
  querydict_query = _copy_prefix(prefix, request.GET)
  # Manually limit up the user filter.
  querydict_query[ prefix + 'user' ] = request.user
  querydict_query[ prefix + 'type' ] = app_name

  query_page, query_filter = _list_designs(request.user, querydict_query, DEFAULT_PAGE_SIZE, prefix)

  # Merge both filter dicts so the template can rebuild either pager's links.
  filter_params = hist_filter
  filter_params.update(query_filter)

  return render('my_queries.mako', request, {
    'request': request,
    'h_page': hist_page,
    'q_page': query_page,
    'filter_params': filter_params,
    'designs_json': json.dumps([query.id for query in query_page.object_list])
  })
def list_query_history(request):
  """
  View the history of query (for the current user).

  We get here from /beeswax/query_history?filterargs, with the options being:
    page=<n>            - Controls pagination. Defaults to 1.
    user=<name>         - Show history items from a user. Default to current user only.
                          Also accepts ':all' to show all history items.
    type=<type>         - <type> is "beeswax|impala", for design type. Default to show all.
    design_id=<id>      - Show history for this particular design id.
    sort=<key>          - Sort by the attribute <key>, which is one of:
                            "date", "state", "name" (design name), and "type" (design type)
                          Accepts the form "-date", which sort in descending order.
                          Default to "-date".
    auto_query=<bool>   - Show auto generated actions (drop table, read data, etc). Default True

  Returns JSON when format=json is requested, HTML otherwise.
  """
  DEFAULT_PAGE_SIZE = 100
  prefix = 'q-'

  # Superusers may browse everyone's history; others only their own.
  share_queries = request.user.is_superuser

  querydict_query = request.GET.copy()
  if not share_queries:
    querydict_query[prefix + 'user'] = request.user.username

  app_name = get_app_name(request)
  querydict_query[prefix + 'type'] = app_name

  page, filter_params = _list_query_history(request.user, querydict_query, DEFAULT_PAGE_SIZE, prefix)

  # Local renamed from `filter` (shadowed the builtin); `x or ''` replaces the
  # old `x and x or ''`, which did two GET lookups for the same result.
  search_filter = request.GET.get(prefix + 'search') or ''

  if request.GET.get('format') == 'json':
    resp = {
      'queries': [massage_query_history_for_json(app_name, query_history) for query_history in page.object_list]
    }
    return JsonResponse(resp)

  return render('list_history.mako', request, {
    'request': request,
    'page': page,
    'filter_params': filter_params,
    'share_queries': share_queries,
    'prefix': prefix,
    'filter': search_filter,
  })
def massage_query_history_for_json(app_name, query_history):
  """Serialize one QueryHistory row into the dict shape the history JSON API emits."""
  design_id = query_history.design.id
  submitted = query_history.submission_date

  # Failed queries have no results page to link to.
  if query_history.is_failure():
    results_url = ""
  else:
    results_url = reverse(app_name + ':watch_query_history', kwargs={'query_history_id': query_history.id})

  return {
    'id': query_history.id,
    'design_id': design_id,
    'query': escape(query_history.query),
    'timeInMs': time.mktime(submitted.timetuple()),
    'timeFormatted': submitted.strftime("%x %X"),
    'designUrl': reverse(app_name + ':execute_design', kwargs={'design_id': design_id}),
    'resultsUrl': results_url
  }
def download(request, id, format):
  """Stream the results of query history ``id`` in the requested ``format``.

  Any failure is surfaced to the user as a PopupException.
  """
  try:
    query_history = authorized_get_query_history(request, id, must_exist=True)
    db = dbms.get(request.user, query_history.get_query_server_config())
    LOG.debug('Download results for query %s: [ %s ]' % (query_history.server_id, query_history.query))

    return data_export.download(query_history.get_handle(), format, db)
  except Exception, e:
    # Prefer e.message when present (Python 2 exception idiom), else the
    # exception object itself.
    if not hasattr(e, 'message') or not e.message:
      message = e
    else:
      message = e.message
    raise PopupException(message, detail='')
"""
Queries Views
"""
def execute_query(request, design_id=None, query_history_id=None):
  """
  View function for executing an arbitrary query.

  With ``query_history_id`` it re-opens an executed query (choosing a watch,
  results, or expired view); otherwise it opens the editor on ``design_id``
  (or a brand-new design). Renders execute.mako with an 'action' hint that
  tells the page which mode to start in.
  """
  action = 'query'

  if query_history_id:
    query_history = authorized_get_query_history(request, query_history_id, must_exist=True)
    design = query_history.design

    try:
      # Only validate the server handle if one was ever recorded.
      if query_history.server_id and query_history.server_guid:
        handle, state = _get_query_handle_and_state(query_history)

      if 'on_success_url' in request.GET:
        if request.GET.get('on_success_url'):
          action = 'watch-redirect'
        else:
          action = 'watch-results'
      else:
        action = 'editor-results'
    except QueryServerException, e:
      # A stale handle means the server already discarded the results.
      if 'Invalid query handle' in e.message or 'Invalid OperationHandle' in e.message:
        query_history.save_state(QueryHistory.STATE.expired)
        LOG.warn("Invalid query handle", exc_info=sys.exc_info())
        action = 'editor-expired-results'
      else:
        raise e
  else:
    # Check perms.
    authorized_get_design(request, design_id)

    app_name = get_app_name(request)
    query_type = SavedQuery.TYPES_MAPPING[app_name]
    design = safe_get_design(request, query_type, design_id)
    query_history = None

  # Only persisted designs have an attached Document.
  doc = design and design.id and design.doc.get()

  context = {
    'design': design,
    'query': query_history, # Backward
    'query_history': query_history,
    'autocomplete_base_url': reverse(get_app_name(request) + ':api_autocomplete_databases', kwargs={}),
    'autocomplete_base_url_hive': reverse('beeswax:api_autocomplete_databases', kwargs={}),
    'can_edit_name': design and design.id and not design.is_auto,
    'doc_id': doc and doc.id or -1,
    'can_edit': doc and doc.can_write(request.user),
    'action': action,
    'on_success_url': request.GET.get('on_success_url'),
    'has_metastore': 'metastore' in get_apps_dict(request.user)
  }

  return render('execute.mako', request, context)
def view_results(request, id, first_row=0):
  """
  Returns the view for the results of the QueryHistory with the given id.

  The query results MUST be ready.
  To display query results, one should always go through the execute_query view.
  If the result set has has_result_set=False, display an empty result.

  If ``first_row`` is 0, restarts (if necessary) the query read.  Otherwise, just
  spits out a warning if first_row doesn't match the servers conception.
  Multiple readers will produce a confusing interaction here, and that's known.

  It understands the ``context`` GET parameter. (See execute_query().)
  """
  # int() auto-promotes past sys.maxint on Python 2, so the old long() call
  # is unnecessary (and long does not exist on Python 3).
  first_row = int(first_row)
  start_over = (first_row == 0)
  # Placeholder object used when the query has no result set at all.
  results = type('Result', (object,), {
                'rows': 0,
                'columns': [],
                'has_more': False,
                'start_row': 0,
            })
  data = []
  fetch_error = False
  error_message = ''
  log = ''
  columns = []
  app_name = get_app_name(request)

  query_history = authorized_get_query_history(request, id, must_exist=True)
  query_server = query_history.get_query_server_config()
  db = dbms.get(request.user, query_server)

  handle, state = _get_query_handle_and_state(query_history)
  context_param = request.GET.get('context', '')
  query_context = parse_query_context(context_param)

  # Update the status as expired should not be accessible
  expired = state == models.QueryHistory.STATE.expired

  # Retrieve query results or use empty result if no result set
  try:
    if query_server['server_name'] == 'impala' and not handle.has_result_set:
      downloadable = False
    else:
      results = db.fetch(handle, start_over, 100)

      # Materialize and HTML escape results
      data = escape_rows(results.rows())

      # We display the "Download" button only when we know that there are results:
      downloadable = first_row > 0 or data
      log = db.get_log(handle)
      columns = results.data_table.cols()

  except Exception as ex:
    LOG.exception('error fetching results')

    fetch_error = True
    error_message, log = expand_exception(ex, db, handle)

  # Handle errors.  `downloadable` may be unset here, but it is only read on
  # the no-error path below, where the fetch necessarily assigned it.
  error = fetch_error or results is None or expired

  context = {
    'error': error,
    'message': error_message,
    'query': query_history,
    'results': data,
    'columns': columns,
    'expected_first_row': first_row,
    'log': log,
    'hadoop_jobs': app_name != 'impala' and _parse_out_hadoop_jobs(log),
    'query_context': query_context,
    'can_save': False,
    'context_param': context_param,
    'expired': expired,
    'app_name': app_name,
    'next_json_set': None,
    'is_finished': query_history.is_finished()
  }

  if not error:
    download_urls = {}
    if downloadable:
      # `fmt` avoids shadowing the builtin `format`.
      for fmt in common.DL_FORMATS:
        download_urls[fmt] = reverse(app_name + ':download', kwargs=dict(id=str(id), format=fmt))

    results.start_row = first_row

    context.update({
      'id': id,
      'results': data,
      'has_more': results.has_more,
      'next_row': results.start_row + len(data),
      'start_row': results.start_row,
      'expected_first_row': first_row,
      'columns': columns,
      'download_urls': download_urls,
      'can_save': query_history.owner == request.user,
      # The original `('?context=' + context_param or '')` never took the `or`
      # branch: `+` binds tighter than `or`, so '?context=' was always emitted
      # (keeping the query string well-formed before '&format=json'). The dead
      # `or ''` is dropped; behavior is unchanged.
      'next_json_set':
        reverse(get_app_name(request) + ':view_results', kwargs={
            'id': str(id),
            'first_row': results.start_row + len(data)
          }
        )
        + '?context=' + context_param + '&format=json'
    })

  context['columns'] = massage_columns_for_json(columns)
  if 'save_form' in context:
    del context['save_form']
  if 'query' in context:
    del context['query']
  return JsonResponse(context)
def configuration(request):
  """Render the current query-server session properties, with passwords redacted."""
  app_name = get_app_name(request)
  query_server = get_query_server_config(app_name)

  session = Session.objects.get_session(request.user, query_server['server_name'])

  properties = {}
  if session:
    properties = json.loads(session.properties)
    # Redact passwords: replace each value whose key mentions 'password'
    # with a same-length run of asterisks.
    for key in properties:
      if 'password' in key.lower():
        properties[key] = '*' * len(properties[key])

  return render("configuration.mako", request, {'configuration': properties})
"""
Other views
"""
def install_examples(request):
  """Install the sample tables/queries for the current app.

  POST only; returns a JSON {'status': 0|-1, 'message': ...} payload.
  """
  response = {'status': -1, 'message': ''}

  if request.method == 'POST':
    try:
      app_name = get_app_name(request)
      # Reuse the management command so web installs match CLI installs.
      beeswax.management.commands.beeswax_install_examples.Command().handle(app_name=app_name, user=request.user)
      response['status'] = 0
    except Exception, err:
      LOG.exception(err)
      response['message'] = str(err)
  else:
    response['message'] = _('A POST request is required.')

  return JsonResponse(response)
@login_notrequired
def query_done_cb(request, server_id):
  """
  A callback for query completion notification. When the query is done,
  BeeswaxServer notifies us by sending a GET request to this view.

  Marks the query available and, if the user asked for it, e-mails them a
  link to the results. Always responds with a tiny HTML status page.
  """
  message_template = '<html><head></head>%(message)s<body></body></html>'
  message = {'message': 'error'}

  try:
    # NOTE(review): the lookup appends '\n' to server_id — presumably ids are
    # stored with a trailing newline when recorded; confirm before changing.
    query_history = QueryHistory.objects.get(server_id=server_id + '\n')

    # Update the query status
    query_history.set_to_available()

    # Find out details about the query
    if not query_history.notify:
      message['message'] = 'email_notify is false'
      return HttpResponse(message_template % message)

    design = query_history.design
    user = query_history.owner
    subject = _("Beeswax query completed.")

    if design:
      subject += ": %s" % (design.name,)

    link = "%s%s" % \
      (get_desktop_uri_prefix(),
       reverse(get_app_name(request) + ':watch_query_history', kwargs={'query_history_id': query_history.id}))
    body = _("%(subject)s. See the results here: %(link)s\n\nQuery:\n%(query)s") % {
      'subject': subject, 'link': link, 'query': query_history.query
    }

    user.email_user(subject, body)
    message['message'] = 'sent'
  except Exception, ex:
    # Best-effort: log and report, never raise back at the notifier.
    msg = "Failed to send query completion notification via e-mail: %s" % (ex)
    LOG.error(msg)
    message['message'] = msg
  return HttpResponse(message_template % message)
"""
Utils
"""
def massage_columns_for_json(cols):
  """Project column objects to plain dicts ({name, type, comment}) for JSON output.

  A list comprehension replaces the manual append loop (same result, idiomatic).
  """
  return [
    {
      'name': column.name,
      'type': column.type,
      'comment': column.comment
    }
    for column in cols
  ]
def authorized_get_design(request, design_id, owner_only=False, must_exist=False):
  """Fetch a SavedQuery by id, enforcing document permissions.

  Returns None when design_id is None (and must_exist is False) or the design
  is missing. Raises PopupException when must_exist and it is absent; the
  Document permission helpers raise if the user lacks read (or, with
  owner_only, write) access.
  """
  if design_id is None and not must_exist:
    return None

  try:
    design = SavedQuery.objects.get(id=design_id)
  except SavedQuery.DoesNotExist:
    if must_exist:
      raise PopupException(_('Design %(id)s does not exist.') % {'id': design_id})
    else:
      return None

  if owner_only:
    design.doc.get().can_write_or_exception(request.user)
  else:
    design.doc.get().can_read_or_exception(request.user)

  return design
def authorized_get_query_history(request, query_history_id, owner_only=False, must_exist=False):
  """Fetch a QueryHistory by id, enforcing ownership/document permissions.

  Returns None when the id is None (and must_exist is False) or the row is
  missing; raises PopupException on must_exist misses or permission failures.
  """
  if query_history_id is None and not must_exist:
    return None

  try:
    # NOTE(review): this calls QueryHistory.get(...) directly, unlike the
    # SavedQuery.objects.get(...) pattern used above — presumably QueryHistory
    # defines a `get` helper; confirm this is not a missing `.objects`.
    query_history = QueryHistory.get(id=query_history_id)
  except QueryHistory.DoesNotExist:
    if must_exist:
      raise PopupException(_('QueryHistory %(id)s does not exist.') % {'id': query_history_id})
    else:
      return None

  # Some queries don't have a design so are not linked to Document Model permission
  if query_history.design is None or not query_history.design.doc.exists():
    if not request.user.is_superuser and request.user != query_history.owner:
      raise PopupException(_('Permission denied to read QueryHistory %(id)s') % {'id': query_history_id})
  else:
    query_history.design.doc.get().can_read_or_exception(request.user)

  return query_history
def safe_get_design(request, design_type, design_id=None):
  """
  Return a new design, if design_id is None,
  Return the design with the given id and type. If the design is not found,
  display a notification and return a new design.
  """
  existing = None
  if design_id is not None:
    existing = authorized_get_design(request, design_id)

  # Fall back to a fresh, unsaved design owned by the requester.
  return existing if existing is not None else SavedQuery(owner=request.user, type=design_type)
def make_parameterization_form(query_str):
  """
  Creates a django form on the fly with arguments from the
  query.

  Returns the Form class (one required CharField per template variable found
  in ``query_str``), or None when the query has no variables.
  """
  variables = find_variables(query_str)
  if len(variables) > 0:
    class Form(forms.Form):
      # Class-body hack: writing into locals() adds one field per variable.
      # This works only because CPython honors locals() assignment inside a
      # class body (unlike inside a function).
      for name in sorted(variables):
        locals()[name] = forms.CharField(required=True)
    return Form
  else:
    return None
def execute_directly(request, query, query_server=None,
                     design=None, on_success_url=None, on_success_params=None,
                     **kwargs):
  """
  execute_directly(request, query_msg, tablename, design) -> HTTP response for execution

  This method wraps around dbms.execute_query() to take care of the HTTP response
  after the execution.

    query
      The HQL model Query object.

    query_server
      To which Query Server to submit the query.
      Dictionary with keys: ['server_name', 'server_host', 'server_port'].

    design
      The design associated with the query.

    on_success_url
      Where to go after the query is done. The URL handler may expect an option "context" GET
      param. (See ``watch_query``.) For advanced usage, on_success_url can be a function, in
      which case the on complete URL is the return of:
        on_success_url(history_obj) -> URL string
      Defaults to the view results page.

    on_success_params
      Optional params to pass to the on_success_url (in additional to "context").

  Note that this may throw a Beeswax exception.
  """
  if design is not None:
    # Permission check only; raises if the user may not read the design.
    authorized_get_design(request, design.id)

  db = dbms.get(request.user, query_server)
  database = query.query.get('database', 'default')
  db.use(database)

  query_history = db.execute_query(query, design)

  watch_url = reverse(get_app_name(request) + ':watch_query_history', kwargs={'query_history_id': query_history.id})

  # Prepare the GET params for the watch_url
  get_dict = QueryDict(None, mutable=True)

  # (1) on_success_url
  if on_success_url:
    if callable(on_success_url):
      on_success_url = on_success_url(query_history)
    get_dict['on_success_url'] = on_success_url

  # (2) misc
  if on_success_params:
    get_dict.update(on_success_params)

  return format_preserving_redirect(request, watch_url, get_dict)
def _list_designs(user, querydict, page_size, prefix="", is_trashed=False):
  """
  _list_designs(user, querydict, page_size, prefix, is_trashed) -> (page, filter_param)

  A helper to gather the designs page. It understands all the GET params in
  ``list_designs``, by reading keys from the ``querydict`` with the given ``prefix``.
  """
  DEFAULT_SORT = ('-', 'date')                  # Descending date

  SORT_ATTR_TRANSLATION = dict(
    date='last_modified',
    name='name',
    desc='description',
    type='extra',
  )

  # Trash and security
  if is_trashed:
    db_queryset = Document.objects.trashed_docs(SavedQuery, user)
  else:
    db_queryset = Document.objects.available_docs(SavedQuery, user)

  # Filter by user
  filter_username = querydict.get(prefix + 'user')
  if filter_username:
    try:
      db_queryset = db_queryset.filter(owner=User.objects.get(username=filter_username))
    except User.DoesNotExist:
      # Don't care if a bad filter term is provided
      pass

  # Design type (`in` on the dict replaces `.keys()`, which builds a list on Python 2)
  d_type = querydict.get(prefix + 'type')
  if d_type and d_type in SavedQuery.TYPES_MAPPING:
    db_queryset = db_queryset.filter(extra=str(SavedQuery.TYPES_MAPPING[d_type]))

  # Text search
  frag = querydict.get(prefix + 'text')
  if frag:
    db_queryset = db_queryset.filter(Q(name__icontains=frag) | Q(description__icontains=frag))

  # Ordering
  sort_key = querydict.get(prefix + 'sort')
  if sort_key:
    if sort_key[0] == '-':
      sort_dir, sort_attr = '-', sort_key[1:]
    else:
      sort_dir, sort_attr = '', sort_key

    # dict.has_key() is deprecated (and removed in Python 3); use ``in``.
    if sort_attr not in SORT_ATTR_TRANSLATION:
      LOG.warn('Bad parameter to list_designs: sort=%s' % (sort_key,))
      sort_dir, sort_attr = DEFAULT_SORT
  else:
    sort_dir, sort_attr = DEFAULT_SORT

  db_queryset = db_queryset.order_by(sort_dir + SORT_ATTR_TRANSLATION[sort_attr])

  # Only hand-saved designs are listed; auto-saved ones stay hidden.
  designs = [job.content_object for job in db_queryset.all() if job.content_object and not job.content_object.is_auto]

  pagenum = int(querydict.get(prefix + 'page', 1))
  paginator = Paginator(designs, page_size)
  page = paginator.page(pagenum)

  # We need to pass the parameters back to the template to generate links
  keys_to_copy = [ prefix + key for key in ('user', 'type', 'sort', 'text') ]
  filter_params = copy_query_dict(querydict, keys_to_copy)

  return page, filter_params
def _get_query_handle_and_state(query_history):
  """
  Front-end wrapper to handle exceptions. Expects the query to be submitted.

  Returns (handle, state); raises PopupException when either lookup fails.
  """
  handle = query_history.get_handle()
  if handle is None:
    raise PopupException(_("Failed to retrieve query state from the Query Server."))

  server_config = query_history.get_query_server_config()
  state = dbms.get(query_history.owner, server_config).get_state(handle)
  if state is None:
    raise PopupException(_("Failed to contact Server to check query status."))

  return (handle, state)
def parse_query_context(context):
  """
  parse_query_context(context) -> ('table', <table_name>) -or- ('design', <design_obj>)

  Returns None for an empty or malformed context string.
  """
  if not context:
    return None

  parts = context.split(':', 1)
  if len(parts) != 2 or parts[0] not in ('table', 'design'):
    LOG.error("Invalid query context data: %s" % (context,))
    return None

  # Translate a design id into the design object itself.
  if parts[0] == 'design':
    parts[1] = models.SavedQuery.get(int(parts[1]))
  return parts
HADOOP_JOBS_RE = re.compile("Starting Job = ([a-z0-9_]+?),")
def _parse_out_hadoop_jobs(log):
"""
Ideally, Hive would tell us what jobs it has run directly from the Thrift interface.
"""
ret = []
for match in HADOOP_JOBS_RE.finditer(log):
job_id = match.group(1)
if job_id not in ret:
ret.append(job_id)
return ret
def _copy_prefix(prefix, base_dict):
  """Copy keys starting with ``prefix`` into a fresh mutable QueryDict."""
  querydict = QueryDict(None, mutable=True)
  # items() works on dict and QueryDict under both Python 2 and 3; the old
  # iteritems() is Python-2-only.
  for key, val in base_dict.items():
    if key.startswith(prefix):
      querydict[key] = val
  return querydict
def _list_query_history(user, querydict, page_size, prefix=""):
  """
  _list_query_history(user, querydict, page_size, prefix) -> (page, filter_param)

  A helper to gather the history page. It understands all the GET params in
  ``list_query_history``, by reading keys from the ``querydict`` with the
  given ``prefix``.
  """
  DEFAULT_SORT = ('-', 'date')                  # Descending date

  SORT_ATTR_TRANSLATION = dict(
    date='submission_date',
    state='last_state',
    name='design__name',
    type='design__type',
  )

  db_queryset = models.QueryHistory.objects.select_related()

  # Filtering
  #
  # Queries without designs are the ones we submitted on behalf of the user,
  # (e.g. view table data). Exclude those when returning query history.
  if querydict.get(prefix + 'auto_query', 'on') != 'on':
    db_queryset = db_queryset.exclude(design__isnull=False, design__is_auto=True)

  user_filter = querydict.get(prefix + 'user', user.username)
  if user_filter != ':all':
    db_queryset = db_queryset.filter(owner__username=user_filter)

  # Design id
  design_id = querydict.get(prefix + 'design_id')
  if design_id:
    db_queryset = db_queryset.filter(design__id=int(design_id))

  # Search
  search_filter = querydict.get(prefix + 'search')
  if search_filter:
    db_queryset = db_queryset.filter(Q(design__name__icontains=search_filter) | Q(query__icontains=search_filter) | Q(owner__username__icontains=search_filter))

  # Design type (`in` on the dict replaces `.keys()`, which builds a list on Python 2)
  d_type = querydict.get(prefix + 'type')
  if d_type:
    if d_type not in SavedQuery.TYPES_MAPPING:
      LOG.warn('Bad parameter to list_query_history: type=%s' % (d_type,))
    else:
      db_queryset = db_queryset.filter(design__type=SavedQuery.TYPES_MAPPING[d_type])

  # If recent query
  recent = querydict.get('recent')
  if recent:
    db_queryset = db_queryset.filter(is_cleared=False)

  # Ordering
  sort_key = querydict.get(prefix + 'sort')
  if sort_key:
    sort_dir, sort_attr = '', sort_key
    if sort_key[0] == '-':
      sort_dir, sort_attr = '-', sort_key[1:]

    # dict.has_key() is deprecated (and removed in Python 3); use ``in``.
    if sort_attr not in SORT_ATTR_TRANSLATION:
      LOG.warn('Bad parameter to list_query_history: sort=%s' % (sort_key,))
      sort_dir, sort_attr = DEFAULT_SORT
  else:
    sort_dir, sort_attr = DEFAULT_SORT

  db_queryset = db_queryset.order_by(sort_dir + SORT_ATTR_TRANSLATION[sort_attr], '-id')

  # Get the total return count before slicing
  total_count = db_queryset.count()

  # Slicing (must be the last filter applied)
  pagenum = int(querydict.get(prefix + 'page', 1))
  if pagenum < 1:
    pagenum = 1
  db_queryset = db_queryset[ page_size * (pagenum - 1) : page_size * pagenum ]
  paginator = Paginator(db_queryset, page_size, total=total_count)
  page = paginator.page(pagenum)

  # We do slicing ourselves, rather than letting the Paginator handle it, in order to
  # update the last_state on the running queries
  for history in page.object_list:
    _update_query_state(history.get_full_object())

  # We need to pass the parameters back to the template to generate links
  keys_to_copy = [ prefix + key for key in ('user', 'type', 'sort', 'design_id', 'auto_query', 'search') ]
  filter_params = copy_query_dict(querydict, keys_to_copy)

  return page, filter_params
def _update_query_state(query_history):
    """
    Update the last_state for a QueryHistory object. Returns success as True/False.

    This only occurs iff the current last_state is submitted or running, since the other
    states are stable, more-or-less.
    Note that there is a transition from available/failed to expired. That occurs lazily
    when the user attempts to view results that have expired.
    """
    if query_history.last_state <= models.QueryHistory.STATE.running.index:
        try:
            state_enum = dbms.get(query_history.owner, query_history.get_query_server_config()).get_state(query_history.get_handle())
            if state_enum is None:
                # Error was logged at the source
                return False
        except Exception as e:  # 'except X, e' is removed in Python 3; 'as' works from 2.6 on.
            LOG.error(e)
            # An unreachable or erroring backend is recorded as a failed query.
            state_enum = models.QueryHistory.STATE.failed
        query_history.save_state(state_enum)
    return True
def get_db_choices(request):
    """Return (value, label) choice tuples for every database visible to the user."""
    server_config = get_query_server_config(get_app_name(request))
    database_names = dbms.get(request.user, server_config).get_databases()
    return [(name, name) for name in database_names]
# Raw string avoids the invalid '\s' escape (a DeprecationWarning on Python 3.6+,
# a SyntaxError in newer versions).
WHITESPACE = re.compile(r"\s+", re.MULTILINE)


def collapse_whitespace(s):
    """Collapse every run of whitespace in *s* to a single space and strip the ends."""
    return WHITESPACE.sub(" ", s).strip()
| |
import pyactiveresource.connection
from pyactiveresource.activeresource import ActiveResource, ResourceMeta
import pyactiveresource.util as util
import shopify.yamlobjects
import shopify.mixins as mixins
import shopify
import threading
import urllib
import urllib2
import urlparse
import sys
# Store the response from the last request in the connection object
class ShopifyConnection(pyactiveresource.connection.Connection):
    """Connection that remembers the last HTTP response, even on error."""

    response = None

    def _open(self, *args, **kwargs):
        """Issue the request and cache the response on the connection.

        On a ConnectionError the error's attached response is still recorded
        before the exception is re-raised, so callers can inspect status and
        headers of the failed request.
        """
        self.response = None
        try:
            self.response = super(ShopifyConnection, self)._open(*args, **kwargs)
        except pyactiveresource.connection.ConnectionError as err:  # 'as' syntax: Python 2.6+/3 compatible
            self.response = err.response
            raise
        return self.response
# Inherit from pyactiveresource's metaclass in order to use ShopifyConnection
class ShopifyResourceMeta(ResourceMeta):
    """Metaclass holding per-thread connection configuration.

    Every setting (user, password, site, timeout, headers, format) is read
    from a ``threading.local`` on the resource class, falling back to the
    shared class-level default, so different threads can talk to different
    shops.  Setting any of them clears the current thread's cached
    connection, forcing a rebuild on next access.
    """

    @property
    def connection(cls):
        """HTTP connection for the current thread"""
        local = cls._threadlocal
        if not getattr(local, 'connection', None):
            # Make sure these variables are no longer affected by other threads.
            local.user = cls.user
            local.password = cls.password
            local.site = cls.site
            local.timeout = cls.timeout
            local.headers = cls.headers
            local.format = cls.format
            if cls.site is None:
                raise ValueError("No shopify session is active")
            local.connection = ShopifyConnection(
                cls.site, cls.user, cls.password, cls.timeout, cls.format)
        return local.connection

    def get_user(cls):
        return getattr(cls._threadlocal, 'user', ShopifyResource._user)

    def set_user(cls, value):
        # Invalidate this thread's connection so it is rebuilt with the new value.
        cls._threadlocal.connection = None
        ShopifyResource._user = cls._threadlocal.user = value

    user = property(get_user, set_user, None,
                    "The username for HTTP Basic Auth.")

    def get_password(cls):
        return getattr(cls._threadlocal, 'password', ShopifyResource._password)

    def set_password(cls, value):
        cls._threadlocal.connection = None
        ShopifyResource._password = cls._threadlocal.password = value

    password = property(get_password, set_password, None,
                        "The password for HTTP Basic Auth.")

    def get_site(cls):
        return getattr(cls._threadlocal, 'site', ShopifyResource._site)

    def set_site(cls, value):
        cls._threadlocal.connection = None
        ShopifyResource._site = cls._threadlocal.site = value
        if value is not None:
            # Extract embedded "user:password@" credentials from the URL, if present,
            # and promote them to the user/password settings (URL-decoded).
            host = urlparse.urlsplit(value)[1]
            auth_info, host = urllib2.splituser(host)
            if auth_info:
                user, password = urllib2.splitpasswd(auth_info)
                if user:
                    cls.user = urllib.unquote(user)
                if password:
                    cls.password = urllib.unquote(password)

    site = property(get_site, set_site, None,
                    'The base REST site to connect to.')

    def get_timeout(cls):
        return getattr(cls._threadlocal, 'timeout', ShopifyResource._timeout)

    def set_timeout(cls, value):
        cls._threadlocal.connection = None
        ShopifyResource._timeout = cls._threadlocal.timeout = value

    timeout = property(get_timeout, set_timeout, None,
                       'Socket timeout for HTTP requests')

    def get_headers(cls):
        if not hasattr(cls._threadlocal, 'headers'):
            # Copy, so per-thread mutations do not leak into the shared default.
            cls._threadlocal.headers = ShopifyResource._headers.copy()
        return cls._threadlocal.headers

    def set_headers(cls, value):
        cls._threadlocal.headers = value

    headers = property(get_headers, set_headers, None,
                       'The headers sent with HTTP requests')

    def get_format(cls):
        return getattr(cls._threadlocal, 'format', ShopifyResource._format)

    def set_format(cls, value):
        cls._threadlocal.connection = None
        ShopifyResource._format = cls._threadlocal.format = value

    format = property(get_format, set_format, None,
                      'Encoding used for request and responses')

    def get_primary_key(cls):
        return cls._primary_key

    def set_primary_key(cls, value):
        cls._primary_key = value

    primary_key = property(get_primary_key, set_primary_key, None,
                           'Name of attribute that uniquely identies the resource')
class ShopifyResource(ActiveResource, mixins.Countable):
    """Base class for all Shopify REST resources (Python 2 metaclass style)."""

    __metaclass__ = ShopifyResourceMeta
    # Attribute name used as the resource's unique identifier.
    _primary_key = "id"
    # Per-thread connection configuration; see ShopifyResourceMeta.
    _threadlocal = threading.local()
    _headers = { 'User-Agent': 'ShopifyPythonAPI/%s Python/%s' % (shopify.VERSION, sys.version.split(' ', 1)[0]) }

    def __init__(self, attributes=None, prefix_options=None):
        # When the caller supplies only a flat attribute dict, split any prefix
        # options (e.g. parent-resource ids) out of it first.
        if attributes is not None and prefix_options is None:
            prefix_options, attributes = self.__class__._split_options(attributes)
        return super(ShopifyResource, self).__init__(attributes, prefix_options)

    def is_new(self):
        # A resource without an id has not been persisted yet.
        return not self.id

    def _load_attributes_from_response(self, response):
        self._update(self.__class__.format.decode(response.body))

    def __get_id(self):
        return self.attributes.get(self.klass.primary_key)

    def __set_id(self, value):
        self.attributes[self.klass.primary_key] = value

    id = property(__get_id, __set_id, None, 'Value stored in the primary key')

    # Backport changes to _update, to_dict and to_xml from upstream
    # patch to suport loading:
    # https://groups.google.com/forum/#!msg/pyactiveresource/JpE-Qg_pEZc/RlrbQFafk3IJ
    def _update(self, attributes):
        """Load *attributes*, converting nested dicts/lists into resource objects."""
        if not isinstance(attributes, dict):
            return
        for key, value in attributes.items():
            if isinstance(value, dict):
                klass = self._find_class_for(key)
                attr = klass(value)
            elif isinstance(value, list):
                klass = None
                attr = []
                for child in value:
                    if isinstance(child, dict):
                        if klass is None:
                            # Resolve the element class only once per list.
                            klass = self._find_class_for_collection(key)
                        attr.append(klass(child))
                    else:
                        attr.append(child)
            else:
                attr = value
            self.attributes[key] = attr

    def to_dict(self):
        """Return the attributes as plain dicts/lists, recursing into nested resources."""
        values = {}
        for key, value in self.attributes.iteritems():
            if isinstance(value, list):
                new_value = []
                for item in value:
                    if isinstance(item, ActiveResource):
                        new_value.append(item.to_dict())
                    else:
                        new_value.append(item)
                values[key] = new_value
            elif isinstance(value, ActiveResource):
                values[key] = value.to_dict()
            else:
                values[key] = value
        return values

    @staticmethod
    def __to_xml_element(obj, root, dasherize):
        """Recursively build an ElementTree element named *root* from *obj*."""
        root = dasherize and root.replace('_', '-') or root
        root_element = util.ET.Element(root)
        if isinstance(obj, list):
            root_element.set('type', 'array')
            for value in obj:
                # List items are serialized under the singular of the list name.
                root_element.append(ShopifyResource.__to_xml_element(value, util.singularize(root), dasherize))
        elif isinstance(obj, dict):
            for key, value in obj.iteritems():
                root_element.append(ShopifyResource.__to_xml_element(value, key, dasherize))
        else:
            util.serialize(obj, root_element)
        return root_element

    def to_xml(self, root=None, header=True, pretty=False, dasherize=True):
        """Serialize the resource to an XML string.

        :param root: root element name; defaults to the singular resource name
        :param header: prepend the XML declaration when True
        :param pretty: pretty-print the output when True
        :param dasherize: convert underscores in tag names to dashes when True
        """
        if not root:
            root = self._singular
        root_element = ShopifyResource.__to_xml_element(self.to_dict(), root, dasherize)
        if pretty:
            # NOTE(review): xml_pretty_format is not defined or imported in this
            # module -- verify where it is expected to come from.
            xml_pretty_format(root_element)
        xml_data = util.ET.tostring(root_element)
        if header:
            return util.XML_HEADER + '\n' + xml_data
        return xml_data

    @classmethod
    def activate_session(cls, session):
        """Configure class-level connection settings from a Session object."""
        cls.site = session.site
        if not session.legacy:
            # Token-based (OAuth) sessions replace HTTP Basic Auth.
            cls.user = None
            cls.password = None
            cls.headers['X-Shopify-Access-Token'] = session.token

    @classmethod
    def clear_session(cls):
        """Reset all connection configuration for this class."""
        cls.site = None
        cls.user = None
        cls.password = None
        if 'X-Shopify-Access-Token' in cls.headers:
            del cls.headers['X-Shopify-Access-Token']
| |
from __future__ import absolute_import, division, unicode_literals
from datetime import timedelta
from operator import attrgetter
import requests
from basic_site.models import UniquelySlugable
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.db.models import Count
from django.shortcuts import get_object_or_404, render
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from modelcluster.fields import ParentalKey
from wagtail.contrib.wagtailroutablepage.models import RoutablePageMixin, route
from wagtail.wagtailadmin.edit_handlers import (FieldPanel, InlinePanel,
MultiFieldPanel, ObjectList,
PageChooserPanel,
StreamFieldPanel,
TabbedInterface)
from wagtail.wagtailcore.fields import RichTextField, StreamField
from wagtail.wagtailcore.models import Orderable, Page
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailsearch import index
from wagtail.wagtailsnippets.edit_handlers import SnippetChooserPanel
from wagtail.wagtailsnippets.models import register_snippet
from people.models import ContributorPage
from . import fields as article_fields
@python_2_unicode_compatible
class Colour(models.Model):
    """A named colour stored as a '#rrggbb' hex string."""

    name = models.CharField(max_length=100)
    hex_value = models.CharField(max_length=7)

    def rgb(self):
        """Return the colour as comma-separated decimal channels, e.g. '255, 0, 128'."""
        hex_pairs = (self.hex_value[offset:offset + 2] for offset in (1, 3, 5))
        return ', '.join(str(int(pair, 16)) for pair in hex_pairs)

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        """Normalise the stored value so it always carries a leading '#'."""
        if not self.hex_value.startswith("#"):
            self.hex_value = "#{}".format(self.hex_value)
        super(Colour, self).save(*args, **kwargs)

    class Meta:
        ordering = ['name', ]

register_snippet(Colour)
@python_2_unicode_compatible
class FontStyle(models.Model):
    """Reusable text styling (size, line height, colour) selected in the admin."""

    name = models.CharField(max_length=1024)
    font_size = models.FloatField(default=1, help_text="The size of the fonts in ems.")
    line_size = models.FloatField(default=100, help_text="The line height as a percentage.")
    text_colour = models.ForeignKey(
        Colour,
        default=1,  # assumes a Colour with pk=1 exists -- TODO confirm fixtures/migrations
        null=True,
        on_delete=models.SET_NULL
    )

    # Admin edit panels for this snippet.
    panels = [
        FieldPanel('name'),
        FieldPanel('font_size'),
        FieldPanel('line_size'),
        FieldPanel('text_colour'),
    ]

    def __str__(self):
        return self.name

register_snippet(FontStyle)
class ArticleListPage(Page):
    """Listing page that paginates live articles, newest first."""

    subpage_types = ['ArticlePage',
                     'ChapteredArticlePage',
                     ]
    articles_per_page = models.IntegerField(default=20)
    filter_for_visualizations = models.BooleanField(default=False)

    @property
    def subpages(self):
        """Live articles ordered newest-first, optionally only visualizations."""
        queryset = ArticlePage.objects.live()
        if self.filter_for_visualizations:
            queryset = queryset.filter(visualization=True)
        return queryset.order_by('-first_published_at')

    def get_context(self, request):
        """Add a paginated 'articles' entry to the template context."""
        paginator = Paginator(self.subpages, self.articles_per_page)
        requested_page = request.GET.get('page')
        try:
            article_page = paginator.page(requested_page)
        except PageNotAnInteger:
            article_page = paginator.page(1)
        except EmptyPage:
            article_page = paginator.page(paginator.num_pages)
        context = super(ArticleListPage, self).get_context(request)
        context['articles'] = article_page
        return context

    content_panels = Page.content_panels + [
        FieldPanel('articles_per_page'),
        FieldPanel('filter_for_visualizations'),
    ]
class ExternalArticleListPage(Page):
    """Listing page that paginates externally hosted articles, newest first."""

    subpage_types = ['ExternalArticlePage']
    articles_per_page = models.IntegerField(default=20)

    @property
    def subpages(self):
        """Live external articles under this page, newest first."""
        return ExternalArticlePage.objects.live().descendant_of(self).order_by('-first_published_at')

    def get_context(self, request):
        """Add a paginated 'articles' entry to the template context."""
        paginator = Paginator(self.subpages, self.articles_per_page)
        try:
            article_page = paginator.page(request.GET.get('page'))
        except PageNotAnInteger:
            article_page = paginator.page(1)
        except EmptyPage:
            article_page = paginator.page(paginator.num_pages)
        context = super(ExternalArticleListPage, self).get_context(request)
        context['articles'] = article_page
        return context

    content_panels = Page.content_panels + [
        FieldPanel('articles_per_page'),
    ]
@python_2_unicode_compatible
class Topic(UniquelySlugable):
    """A subject tag that articles and series can be grouped under."""

    name = models.CharField(max_length=1024)

    def __str__(self):
        return self.name

    class Meta:
        ordering = ["name", ]

register_snippet(Topic)

# Admin edit panels for the Topic snippet (assigned after registration).
Topic.panels = [
    FieldPanel("name"),
]
class TopicListPage(RoutablePageMixin, Page):
    """Routable page serving a topic index plus per-topic article listings."""

    @property
    def topics(self):
        """The 25 most-referenced topics, returned in alphabetical order."""
        usage = Count('article_links') + Count('articles') + Count('series')
        most_used = Topic.objects.annotate(num_articles=usage).order_by("-num_articles")[:25]
        return sorted(most_used, key=attrgetter('name'))

    @route(r'^$', name="topic_list")
    def topics_list(self, request):
        """Render the index of popular topics."""
        return render(request, "articles/topic_list_page.html", {"self": self})

    @route(r'^([\w-]+)/$', name="topic")
    def topic_view(self, request, topic_slug):
        """Render all live articles tagged with the topic (primary or secondary)."""
        topic = get_object_or_404(Topic, slug=topic_slug)
        tagged = models.Q(primary_topic=topic) | models.Q(topic_links__topic=topic)
        articles = ArticlePage.objects.live().filter(tagged).order_by('-first_published_at').distinct()
        return render(request, "articles/topic_page.html", {
            "self": self,
            "topic": topic,
            "articles": articles,
        })
class ArticleCategoryManager(models.Manager):
    """Manager providing natural-key (slug) lookups for (de)serialization."""

    def get_by_natural_key(self, slug):
        # The natural key is the unique slug (see ArticleCategory.natural_key).
        return self.get(slug=slug)
@python_2_unicode_compatible
class ArticleCategory(UniquelySlugable):
    """Editorial category for articles; referenced by slug as its natural key."""

    objects = ArticleCategoryManager()
    name = models.CharField(max_length=1024)

    class Meta:
        verbose_name_plural = "Article Categories"
        ordering = ['name', ]

    def natural_key(self):
        return (self.slug, )

    def __str__(self):
        return self.name

register_snippet(ArticleCategory)
class Promotable(models.Model):
    """Mixin flags controlling how a page is promoted on listing pages."""

    sticky = models.BooleanField(default=False)
    editors_pick = models.BooleanField(default=False)

    class Meta:
        abstract = True
class Sharelinks(models.Model):
    """Mixin that caches Twitter/Facebook share counts for a page.

    Counts are refreshed at most once every ten minutes. Fetch failures keep
    the previously cached value so a dead share-count service never breaks
    page rendering.
    """

    cached_twitter_count = models.IntegerField(default=0)
    cached_facebook_count = models.IntegerField(default=0)
    cached_last_updated = models.DateTimeField(blank=True, null=True)

    def _fetch_count(self, url, key):
        """Fetch *url* and return the integer under *key*, or None on any failure."""
        try:
            response = requests.get(url, timeout=5)
            return response.json()[key]
        except (requests.RequestException, ValueError, KeyError):
            # Network error, non-JSON body, or missing key: keep the old value.
            return None

    def update_cache(self):
        """Refresh cached share counts if the ten-minute window has elapsed."""
        if not self.cached_last_updated or (timezone.now() - self.cached_last_updated) > timedelta(minutes=10):
            count = self._fetch_count(
                'https://cdn.api.twitter.com/1/urls/count.json?url=http://opencanada.org' + self.url,
                'count')
            if count is not None:
                self.cached_twitter_count = count
            shares = self._fetch_count(
                'https://graph.facebook.com/?id=http://opencanada.org' + self.url,
                'shares')
            if shares is not None:
                self.cached_facebook_count = shares
            # Stamp even on failure so a dead endpoint is not hit on every request.
            self.cached_last_updated = timezone.now()
            self.save()

    @property
    def twitter_count(self):
        self.update_cache()
        return self.cached_twitter_count

    @property
    def facebook_count(self):
        self.update_cache()
        return self.cached_facebook_count

    class Meta:
        abstract = True
@python_2_unicode_compatible
class FeatureStyle(models.Model):
    """Layout description for a featured item (grid size, image, text overlay)."""

    name = models.CharField(max_length=100)
    number_of_columns = models.IntegerField(default=1)
    number_of_rows = models.IntegerField(default=1)
    include_image = models.BooleanField(default=False)
    overlay_text = models.BooleanField(default=False)

    def __str__(self):
        return self.name

register_snippet(FeatureStyle)
class FeatureStyleFields(models.Model):
    """Mixin with the fields needed to render a page as a styled feature."""

    feature_style = models.ForeignKey(
        FeatureStyle,
        default=2,  # assumes a FeatureStyle with pk=2 exists -- TODO confirm fixtures/migrations
        null=True,
        on_delete=models.SET_NULL
    )
    image_overlay_color = models.ForeignKey(
        Colour,
        default=1,  # assumes a Colour with pk=1 exists -- TODO confirm fixtures/migrations
        null=True,
        on_delete=models.SET_NULL
    )
    image_overlay_opacity = models.PositiveIntegerField(
        validators=[MinValueValidator(0), MaxValueValidator(100)],
        default=30,
        help_text="Set the value from 0 (Solid overlay, original image not visible) to 100 (No overlay, original image completely visible)"
    )
    font_style = models.ForeignKey(
        'articles.FontStyle',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )

    def opacity(self):
        # True division: this module imports division from __future__,
        # so e.g. 30 / 100 == 0.3 even on Python 2.
        return self.image_overlay_opacity / 100

    class Meta:
        abstract = True
class ArticlePage(Page, FeatureStyleFields, Promotable, Sharelinks):
    """A single article, with topics, authors, category and feature styling."""

    excerpt = RichTextField(blank=True, default="")
    body = article_fields.BodyField()
    main_image = models.ForeignKey(
        'images.AttributedImage',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    primary_topic = models.ForeignKey(
        'articles.Topic',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='articles'
    )
    category = models.ForeignKey(
        'articles.ArticleCategory',
        related_name='%(class)s',
        on_delete=models.SET_NULL,
        null=True,
        default=1  # assumes an ArticleCategory with pk=1 exists -- TODO confirm fixtures/migrations
    )
    include_author_block = models.BooleanField(default=True)
    include_main_image = models.BooleanField(default=True)
    visualization = models.BooleanField(default=False)

    # The get_* callables below expose related objects' names to the search index.
    search_fields = Page.search_fields + (
        index.SearchField('excerpt', partial_match=True),
        index.SearchField('body', partial_match=True),
        index.SearchField('get_primary_topic_name', partial_match=True),
        index.SearchField('get_category_name', partial_match=True),
        index.SearchField('get_topic_names', partial_match=True),
        index.SearchField('get_author_names', partial_match=True),
    )

    def get_primary_topic_name(self):
        """Primary topic name for search indexing ("" when unset)."""
        if self.primary_topic:
            return self.primary_topic.name
        return ""

    def get_category_name(self):
        """Category name for search indexing ("" when unset)."""
        if self.category:
            return self.category.name
        return ""

    def get_topic_names(self):
        """Newline-joined secondary topic names for search indexing."""
        return '\n'.join([link.topic.name if link.topic else "" for link in self.topic_links.all()])

    def get_author_names(self):
        """Newline-joined author names for search indexing."""
        return '\n'.join([author_link.author.full_name if author_link.author else "" for author_link in self.author_links.all()])

    @property
    def authors(self):
        # Note: entries may be None when an author page was deleted (FK is SET_NULL).
        return [link.author for link in self.author_links.all()]

    @property
    def series_articles(self):
        """List of (series_page, other articles in that series) tuples."""
        related_series_data = []
        for link in self.series_links.all():
            series_page = link.series
            series_articles = series_page.articles
            # NOTE(review): list.remove raises ValueError if this article is
            # absent from its own series' article list -- verify the links
            # are always symmetric.
            series_articles.remove(self)
            related_series_data.append((series_page, series_articles))
        return related_series_data

    @property
    def topics(self):
        """All topics (primary + secondary), de-duplicated and sorted by name."""
        primary_topic = self.primary_topic
        all_topics = [link.topic for link in self.topic_links.all()]
        if primary_topic:
            all_topics.append(primary_topic)
        all_topics = list(set(all_topics))
        if len(all_topics) > 0:
            all_topics.sort(key=attrgetter('name'))
        return all_topics

    def related_articles(self, number):
        """Return up to *number* related live articles.

        Selection cascades until the quota is met: same primary topic,
        then shared secondary topics, then shared authors, and finally
        the most recent articles overall.
        """
        included = [self.id]
        articles = ArticlePage.objects.live().filter(primary_topic=self.primary_topic).exclude(id=self.id).distinct().order_by('-first_published_at')[:number]
        article_list = list(articles.all())
        included.extend([article.id for article in articles.all()])
        current_total = len(article_list)
        if current_total < number:
            # still don't have enough, so pick using secondary topics
            topics = Topic.objects.filter(article_links__article=self)
            additional_articles = ArticlePage.objects.live().filter(primary_topic__in=topics).exclude(id__in=included).distinct().order_by('-first_published_at')[:number - current_total]
            article_list.extend(additional_articles.all())
            current_total = len(article_list)
            included.extend([article.id for article in additional_articles.all()])
        if current_total < number:
            # pick articles written by the same authors
            authors = ContributorPage.objects.live().filter(article_links__article=self)
            additional_articles = ArticlePage.objects.live().filter(author_links__author__in=authors).exclude(id__in=included).distinct().order_by('-first_published_at')[:number - current_total]
            article_list.extend(additional_articles.all())
            current_total = len(article_list)
            included.extend([article.id for article in additional_articles.all()])
        if current_total < number:
            # still don't have enough, so just pick the most recent
            additional_articles = ArticlePage.objects.live().exclude(id__in=included).order_by('-first_published_at')[:number - current_total]
            article_list.extend(additional_articles.all())
        return article_list

    content_panels = Page.content_panels + [
        FieldPanel('excerpt'),
        InlinePanel('author_links', label="Authors"),
        ImageChooserPanel('main_image'),
        StreamFieldPanel('body'),
        SnippetChooserPanel('primary_topic', Topic),
        InlinePanel('topic_links', label="Secondary Topics"),
        SnippetChooserPanel('category', ArticleCategory),
        FieldPanel('visualization'),
    ]

    promote_panels = Page.promote_panels + [
        MultiFieldPanel(
            [
                FieldPanel('sticky'),
                FieldPanel('editors_pick'),
                FieldPanel('feature_style'),
                MultiFieldPanel(
                    [
                        FieldPanel('image_overlay_opacity'),
                        SnippetChooserPanel('image_overlay_color', Colour),
                        SnippetChooserPanel("font_style", FontStyle),
                    ],
                    heading="Image Overlay Settings"
                )
            ],
            heading="Featuring Settings"
        ),
    ]

    style_panels = [
        MultiFieldPanel(
            [
                FieldPanel('include_main_image'),
                FieldPanel('include_author_block'),
            ],
            heading="Sections"
        )
    ]

    edit_handler = TabbedInterface([
        ObjectList(content_panels, heading='Content'),
        ObjectList(style_panels, heading='Page Style Options'),
        ObjectList(promote_panels, heading='Promote'),
        ObjectList(Page.settings_panels, heading='Settings', classname="settings"),
    ])
class ChapteredArticlePage(ArticlePage):
    """An ArticlePage variant split into chapters, with citations and end notes."""

    chapters = article_fields.ChapterField(blank=True, null=True)
    works_cited = StreamField(
        block_types=[
            ('citation', article_fields.CitationBlock()),
        ],
        blank=True, null=True
    )
    end_notes = StreamField(
        block_types=[
            ('end_note', article_fields.EndNoteBlock()),
        ],
        blank=True, null=True
    )

# Content panels are assigned after the class definition: the parent's panel
# list is rebuilt with the chapter/citation/end-note fields appended.
ChapteredArticlePage.content_panels = Page.content_panels + [
    FieldPanel('excerpt'),
    InlinePanel('author_links', label="Authors"),
    ImageChooserPanel('main_image'),
    StreamFieldPanel('body'),
    SnippetChooserPanel('primary_topic', Topic),
    InlinePanel('topic_links', label="Secondary Topics"),
    SnippetChooserPanel('category', ArticleCategory),
    StreamFieldPanel('chapters'),
    StreamFieldPanel('works_cited'),
    StreamFieldPanel('end_notes'),
    # InlinePanel('chapters', label="Chapters"),
]
#
# class Chapter(models.Model):
# heading = models.CharField(max_length=512, blank=True)
# body = article_fields.BodyField(blank=True, null=True)
#
# content_panels = [
# FieldPanel('heading'),
# StreamFieldPanel('body'),
# ]
#
# class Meta:
# abstract = True
#
#
# class ArticleChapter(Orderable, Chapter):
# page = ParentalKey(ChapteredArticlePage, related_name='chapters')
@python_2_unicode_compatible
class Source(models.Model):
    """An external publication that hosts ExternalArticlePage content."""

    name = models.CharField(max_length=100)
    website = models.URLField(max_length=255)
    logo = models.ForeignKey(
        'images.AttributedImage',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )

    def __str__(self):
        return self.name

register_snippet(Source)

# Admin edit panels for the Source snippet (assigned after registration).
Source.panels = [
    FieldPanel('name'),
    FieldPanel('website'),
    ImageChooserPanel('logo'),
]
@python_2_unicode_compatible
class ExternalArticlePage(Page, FeatureStyleFields, Promotable):
    """A teaser page for an article hosted on an external site."""

    body = RichTextField()
    website_link = models.URLField(max_length=255)
    main_image = models.ForeignKey(
        'images.AttributedImage',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    source = models.ForeignKey(
        'Source',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )

    def __str__(self):
        return "{}".format(
            self.title
        )

    # NOTE(review): 'source' here indexes the FK field itself, unlike the other
    # page types which index a get_*_name callable -- confirm this is intended.
    search_fields = Page.search_fields + (
        index.SearchField('body', partial_match=True),
        index.SearchField('source', partial_match=True),
    )

    def get_source_name(self):
        """Source publication name ("" when unset)."""
        if self.source:
            return self.source.name
        else:
            return ""

    content_panels = Page.content_panels + [
        FieldPanel("body"),
        FieldPanel("website_link"),
        SnippetChooserPanel('source', Source),
        ImageChooserPanel('main_image'),
    ]
@python_2_unicode_compatible
class ArticleTopicLink(models.Model):
    """Link between an article and one of its secondary topics."""

    topic = models.ForeignKey(
        "Topic",
        related_name='article_links'
    )
    article = ParentalKey(
        "ArticlePage",
        related_name='topic_links'
    )

    def __str__(self):
        return "{} - {}".format(
            self.article.title,
            self.topic.name
        )

    panels = [
        SnippetChooserPanel('topic', Topic),
    ]
@python_2_unicode_compatible
class ArticleAuthorLink(Orderable, models.Model):
    """Ordered link between an article and one of its authors."""

    author = models.ForeignKey(
        "people.ContributorPage",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='article_links'
    )
    article = ParentalKey(
        "ArticlePage",
        related_name='author_links'
    )

    def __str__(self):
        # The author FK is nullable (on_delete=SET_NULL), so guard against a
        # deleted author instead of raising AttributeError on author.full_name.
        author_name = self.author.full_name if self.author else "(deleted author)"
        return "{} - {}".format(self.article.title, author_name)

    panels = [
        PageChooserPanel('author', 'people.ContributorPage'),
    ]
class SeriesListPage(Page):
    """Listing page that paginates its child series pages, newest first."""

    subpage_types = ['SeriesPage']
    series_per_page = models.IntegerField(default=5)

    @property
    def subpages(self):
        """Live series pages under this page, newest first."""
        return SeriesPage.objects.live().descendant_of(self).order_by('-first_published_at')

    def get_context(self, request):
        """Add a paginated 'series_list' entry to the template context."""
        paginator = Paginator(self.subpages, self.series_per_page)
        try:
            series_page = paginator.page(request.GET.get('page'))
        except PageNotAnInteger:
            series_page = paginator.page(1)
        except EmptyPage:
            series_page = paginator.page(paginator.num_pages)
        context = super(SeriesListPage, self).get_context(request)
        context['series_list'] = series_page
        return context

    content_panels = Page.content_panels + [
        FieldPanel('series_per_page')
    ]
class SeriesArticleLink(Orderable, models.Model):
    """Ordered link placing an article inside a series, with optional overrides."""

    override_image = models.ForeignKey(
        'images.AttributedImage',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
        help_text="This field is optional. If not provided, the image will be "
                  "pulled from the article page automatically. This field "
                  "allows you to override the automatic image."
    )
    override_text = RichTextField(
        blank=True,
        default="",
        help_text="This field is optional. If not provided, the text will be "
                  "pulled from the article page automatically. This field "
                  "allows you to override the automatic text."
    )
    article = models.ForeignKey(
        "ArticlePage",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='series_links'
    )
    series = ParentalKey(
        "SeriesPage",
        related_name='related_article_links'
    )

    panels = [
        PageChooserPanel("article", 'articles.ArticlePage'),
        FieldPanel("override_text"),
        ImageChooserPanel("override_image"),
    ]
class SeriesPage(Page, FeatureStyleFields, Promotable, Sharelinks):
    """A curated series of articles with its own landing page."""

    subtitle = RichTextField(blank=True, default="")
    body = article_fields.BodyField(blank=True, default="")
    main_image = models.ForeignKey(
        'images.AttributedImage',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    primary_topic = models.ForeignKey(
        'articles.Topic',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='series'
    )

    search_fields = Page.search_fields + (
        index.SearchField('subtitle', partial_match=True),
        index.SearchField('body', partial_match=True),
        index.SearchField('get_primary_topic_name', partial_match=True),
        index.SearchField('get_topic_names', partial_match=True),
    )

    include_main_image = models.BooleanField(default=True)

    def get_primary_topic_name(self):
        """Primary topic name for search indexing ("" when unset)."""
        if self.primary_topic:
            return self.primary_topic.name
        else:
            # Bug fix: a bare "" expression here used to fall through and
            # return None; now explicitly return the empty string.
            return ""

    def get_topic_names(self):
        """Newline-joined topic names for search indexing."""
        return '\n'.join([topic.name if topic else "" for topic in self.topics])

    def get_author_names(self):
        """Newline-joined author names for search indexing."""
        return '\n'.join([author.full_name if author else "" for author in self.authors])

    @property
    def articles(self):
        """Linked articles with per-series text/image overrides applied."""
        article_list = []
        for article_link in self.related_article_links.all():
            if article_link.article:
                article_link.article.override_text = article_link.override_text
                article_link.article.override_image = article_link.override_image
                article_list.append(article_link.article)
        return article_list

    @property
    def authors(self):
        """All authors of the linked articles, sorted by last name."""
        author_list = []
        for article_link in self.related_article_links.all():
            # (A duplicated nested 'if article_link.article' check was removed.)
            if article_link.article:
                for author_link in article_link.article.author_links.all():
                    # Guard against deleted authors (FK is SET_NULL): a None
                    # entry would break the attrgetter sort below.
                    if author_link.author:
                        author_list.append(author_link.author)
        author_list.sort(key=attrgetter('last_name'))
        return author_list

    @property
    def topics(self):
        """All topics from the series and its articles, de-duplicated and name-sorted."""
        all_topics = []
        if self.primary_topic:
            all_topics.append(self.primary_topic)
        for article_link in self.related_article_links.all():
            if article_link.article:
                all_topics.extend(article_link.article.topics)
        all_topics = list(set(all_topics))
        if all_topics:
            all_topics.sort(key=attrgetter('name'))
        return all_topics

    def related_articles(self, number):
        """Return up to *number* related live articles.

        Starts with articles sharing the series' primary topic, then pulls
        each member article's own related articles until the quota is met.
        """
        articles = list(ArticlePage.objects.live().filter(primary_topic=self.primary_topic).distinct().order_by('-first_published_at')[:number])
        current_total = len(articles)
        for article in self.articles:
            if current_total < number:
                articles.extend(list(article.related_articles(number)))
                articles = list(set(articles))[:number]
                current_total = len(articles)
            else:
                return articles
        return articles

    content_panels = Page.content_panels + [
        FieldPanel('subtitle'),
        ImageChooserPanel('main_image'),
        StreamFieldPanel('body'),
        InlinePanel('related_article_links', label="Articles"),
        SnippetChooserPanel('primary_topic', Topic),
    ]

    promote_panels = Page.promote_panels + [
        MultiFieldPanel(
            [
                FieldPanel('sticky'),
                FieldPanel('editors_pick'),
                FieldPanel('feature_style'),
                MultiFieldPanel(
                    [
                        FieldPanel('image_overlay_opacity'),
                        SnippetChooserPanel('image_overlay_color', Colour),
                        SnippetChooserPanel("font_style", FontStyle),
                    ],
                    heading="Image Overlay Settings"
                )
            ],
            heading="Featuring Settings"
        )
    ]

    style_panels = [
        MultiFieldPanel(
            [
                FieldPanel('include_main_image'),
            ],
            heading="Sections"
        )
    ]

    edit_handler = TabbedInterface([
        ObjectList(content_panels, heading='Content'),
        ObjectList(style_panels, heading='Page Style Options'),
        ObjectList(promote_panels, heading='Promote'),
        ObjectList(Page.settings_panels, heading='Settings', classname="settings"),
    ])
@python_2_unicode_compatible
class Headline(FeatureStyleFields):
    """Historical record of which item was featured on a page, and when."""

    containing_page = models.ForeignKey(
        'wagtailcore.Page',
        related_name='historic_headlines'
    )
    featured_item = models.ForeignKey(
        'wagtailcore.Page',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    # start_date is stamped automatically on creation; end_date is nullable --
    # presumably filled in when the headline is replaced (verify in the views).
    start_date = models.DateTimeField(auto_now_add=True)
    end_date = models.DateTimeField(null=True)

    def __str__(self):
        return "{}".format(self.id)
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Basic support for evaluating XPath expressions against streams.
>>> from genshi.input import XML
>>> doc = XML('''<doc>
... <items count="4">
... <item status="new">
... <summary>Foo</summary>
... </item>
... <item status="closed">
... <summary>Bar</summary>
... </item>
... <item status="closed" resolution="invalid">
... <summary>Baz</summary>
... </item>
... <item status="closed" resolution="fixed">
... <summary>Waz</summary>
... </item>
... </items>
... </doc>''')
>>> print doc.select('items/item[@status="closed" and '
... '(@resolution="invalid" or not(@resolution))]/summary/text()')
BarBaz
Because the XPath engine operates on markup streams (as opposed to tree
structures), it only implements a subset of the full XPath 1.0 language.
"""
from math import ceil, floor
import operator
import re
from genshi.core import Stream, Attrs, Namespace, QName
from genshi.core import START, END, TEXT, START_NS, END_NS, COMMENT, PI, \
START_CDATA, END_CDATA
__all__ = ['Path', 'PathSyntaxError']
__docformat__ = 'restructuredtext en'
class Axis(object):
    """Defines constants for the various supported XPath axes."""

    ATTRIBUTE = 'attribute'
    CHILD = 'child'
    DESCENDANT = 'descendant'
    DESCENDANT_OR_SELF = 'descendant-or-self'
    SELF = 'self'

    @classmethod
    def forname(cls, name):
        """Return the axis constant for the given name, or `None` if no such
        axis was defined.
        """
        attr_name = name.upper().replace('-', '_')
        return getattr(cls, attr_name, None)
# Module-level aliases for the Axis constants, for convenient importing.
ATTRIBUTE = Axis.ATTRIBUTE
CHILD = Axis.CHILD
DESCENDANT = Axis.DESCENDANT
DESCENDANT_OR_SELF = Axis.DESCENDANT_OR_SELF
SELF = Axis.SELF
class Path(object):
    """Implements basic XPath support on streams.
    Instances of this class represent a "compiled" XPath expression, and provide
    methods for testing the path against a stream, as well as extracting a
    substream matching that path.
    """
    def __init__(self, text, filename=None, lineno=-1):
        """Create the path object from a string.
        :param text: the path expression
        :param filename: the name of the file in which the path expression was
                         found (used in error messages)
        :param lineno: the line on which the expression was found
        """
        # Keep the original expression text for debugging; the parsed form is
        # a list of alternative location paths, one per "|" union operand.
        self.source = text
        self.paths = PathParser(text, filename, lineno).parse()
    def __repr__(self):
        # Reconstruct a normalized "axis::nodetest[predicate]/..." string for
        # each united path, joined with "|".
        paths = []
        for path in self.paths:
            steps = []
            for axis, nodetest, predicates in path:
                steps.append('%s::%s' % (axis, nodetest))
                for predicate in predicates:
                    steps[-1] += '[%s]' % predicate
            paths.append('/'.join(steps))
        return '<%s "%s">' % (self.__class__.__name__, '|'.join(paths))
    def select(self, stream, namespaces=None, variables=None):
        """Returns a substream of the given stream that matches the path.
        If there are no matches, this method returns an empty stream.
        >>> from genshi.input import XML
        >>> xml = XML('<root><elem><child>Text</child></elem></root>')
        >>> print Path('.//child').select(xml)
        <child>Text</child>
        >>> print Path('.//child/text()').select(xml)
        Text
        :param stream: the stream to select from
        :param namespaces: (optional) a mapping of namespace prefixes to URIs
        :param variables: (optional) a mapping of variable names to values
        :return: the substream matching the path, or an empty stream
        :rtype: `Stream`
        """
        if namespaces is None:
            namespaces = {}
        if variables is None:
            variables = {}
        stream = iter(stream)
        def _generate():
            test = self.test()
            for event in stream:
                result = test(event, namespaces, variables)
                if result is True:
                    # A START event matched: emit the entire element subtree,
                    # while keeping the test's depth tracking up to date.
                    yield event
                    if event[0] is START:
                        depth = 1
                        while depth > 0:
                            subevent = stream.next()
                            if subevent[0] is START:
                                depth += 1
                            elif subevent[0] is END:
                                depth -= 1
                            yield subevent
                            test(subevent, namespaces, variables,
                                 updateonly=True)
                elif result:
                    # Non-boolean results (e.g. TEXT events or attribute
                    # values) are yielded directly as the match.
                    yield result
        return Stream(_generate(),
                      serializer=getattr(stream, 'serializer', None))
    def test(self, ignore_context=False):
        """Returns a function that can be used to track whether the path matches
        a specific stream event.
        The function returned expects the positional arguments ``event``,
        ``namespaces`` and ``variables``. The first is a stream event, while the
        latter two are a mapping of namespace prefixes to URIs, and a mapping
        of variable names to values, respectively. In addition, the function
        accepts an ``updateonly`` keyword argument that default to ``False``. If
        it is set to ``True``, the function only updates its internal state,
        but does not perform any tests or return a result.
        If the path matches the event, the function returns the match (for
        example, a `START` or `TEXT` event.) Otherwise, it returns ``None``.
        >>> from genshi.input import XML
        >>> xml = XML('<root><elem><child id="1"/></elem><child id="2"/></root>')
        >>> test = Path('child').test()
        >>> for event in xml:
        ...     if test(event, {}, {}):
        ...         print event[0], repr(event[1])
        START (QName(u'child'), Attrs([(QName(u'id'), u'2')]))
        :param ignore_context: if `True`, the path is interpreted like a pattern
                               in XSLT, meaning for example that it will match
                               at any depth
        :return: a function that can be used to test individual events in a
                 stream against the path
        :rtype: ``function``
        """
        # Per united path, track a 5-tuple of mutable state:
        # (steps, number of steps, stack of step cursors (one per open
        # element), cutoff depth list, per-step counters for numeric
        # predicates).
        paths = [(p, len(p), [0], [], [0] * len(p)) for p in [
            (ignore_context and [_DOTSLASHSLASH] or []) + p for p in self.paths
        ]]
        def _test(event, namespaces, variables, updateonly=False):
            kind, data, pos = event[:3]
            retval = None
            for steps, size, cursors, cutoff, counter in paths:
                # Manage the stack that tells us "where we are" in the stream
                if kind is END:
                    if cursors:
                        cursors.pop()
                    continue
                elif kind is START:
                    cursors.append(cursors and cursors[-1] or 0)
                elif kind is START_NS or kind is END_NS \
                        or kind is START_CDATA or kind is END_CDATA:
                    # Namespace/CDATA markers never participate in matching
                    continue
                if updateonly or retval or not cursors:
                    continue
                cursor = cursors[-1]
                depth = len(cursors)
                # Elements deeper than the cutoff can no longer match
                if cutoff and depth + int(kind is not START) > cutoff[0]:
                    continue
                ctxtnode = not ignore_context and kind is START \
                                              and depth == 2
                matched = None
                while 1:
                    # Fetch the next location step
                    axis, nodetest, predicates = steps[cursor]
                    # If this is the start event for the context node, and the
                    # axis of the location step doesn't include the current
                    # element, skip the test
                    if ctxtnode and (axis is CHILD or axis is DESCENDANT):
                        break
                    # Is this the last step of the location path?
                    last_step = cursor + 1 == size
                    # Perform the actual node test
                    matched = nodetest(kind, data, pos, namespaces, variables)
                    # The node test matched
                    if matched:
                        # Check all the predicates for this step
                        if predicates:
                            for predicate in predicates:
                                pretval = predicate(kind, data, pos, namespaces,
                                                    variables)
                                if type(pretval) is float: # FIXME <- need to
                                                           # check this for
                                                           # other types that
                                                           # can be coerced to
                                                           # float
                                    # Numeric predicates are positional:
                                    # [2] matches the 2nd node for this step
                                    counter[cursor] += 1
                                    if counter[cursor] != int(pretval):
                                        pretval = False
                                if not pretval:
                                    matched = None
                                    break
                        # Both the node test and the predicates matched
                        if matched:
                            if last_step:
                                if not ctxtnode or kind is not START \
                                        or axis is ATTRIBUTE or axis is SELF:
                                    retval = matched
                            elif not ctxtnode or axis is SELF \
                                    or axis is DESCENDANT_OR_SELF:
                                # Advance to the next location step
                                cursor += 1
                                cursors[-1] = cursor
                            cutoff[:] = []
                    if kind is START:
                        if last_step and not (axis is DESCENDANT or
                                              axis is DESCENDANT_OR_SELF):
                            cutoff[:] = [depth]
                        elif steps[cursor][0] is ATTRIBUTE:
                            # If the axis of the next location step is the
                            # attribute axis, we need to move on to processing
                            # that step without waiting for the next markup
                            # event
                            continue
                    # We're done with this step if it's the last step or the
                    # axis isn't "self"
                    if not matched or last_step or not (
                            axis is SELF or axis is DESCENDANT_OR_SELF):
                        break
                    if ctxtnode and axis is DESCENDANT_OR_SELF:
                        ctxtnode = False
                if (retval or not matched) and kind is START and \
                        not (axis is DESCENDANT or axis is DESCENDANT_OR_SELF):
                    # If this step is not a closure, it cannot be matched until
                    # the current element is closed... so we need to move the
                    # cursor back to the previous closure and retest that
                    # against the current element
                    backsteps = [(i, k, d, p) for i, (k, d, p)
                                 in enumerate(steps[:cursor])
                                 if k is DESCENDANT or k is DESCENDANT_OR_SELF]
                    backsteps.reverse()
                    for cursor, axis, nodetest, predicates in backsteps:
                        if nodetest(kind, data, pos, namespaces, variables):
                            cutoff[:] = []
                            break
                    cursors[-1] = cursor
            return retval
        return _test
class PathSyntaxError(Exception):
    """Exception raised when an XPath expression is syntactically incorrect."""
    def __init__(self, message, filename=None, lineno=-1, offset=-1):
        """Initialize the error.

        :param message: the error description
        :param filename: name of the file containing the bad expression
        :param lineno: line number of the expression in that file
        :param offset: column offset of the error (currently informational)
        """
        # Embed the source location in the message when a filename is known
        text = message
        if filename:
            text = '%s (%s, line %d)' % (message, filename, lineno)
        Exception.__init__(self, text)
        self.filename = filename
        self.lineno = lineno
        self.offset = offset
class PathParser(object):
    """Tokenizes and parses an XPath expression."""
    _QUOTES = (("'", "'"), ('"', '"'))
    _TOKENS = ('::', ':', '..', '.', '//', '/', '[', ']', '()', '(', ')', '@',
               '=', '!=', '!', '|', ',', '>=', '>', '<=', '<', '$')
    _tokenize = re.compile('("[^"]*")|(\'[^\']*\')|((?:\d+)?\.\d+)|(%s)|([^%s\s]+)|\s+' % (
        '|'.join([re.escape(t) for t in _TOKENS]),
        ''.join([re.escape(t[0]) for t in _TOKENS]))).findall
    def __init__(self, text, filename=None, lineno=-1):
        """Tokenize the expression and prepare for parsing.

        :param text: the path expression
        :param filename: name of the file containing the expression (used in
                         error messages)
        :param lineno: line number of the expression (used in error messages)
        """
        self.filename = filename
        self.lineno = lineno
        # Each regex match yields one group per token class; exactly one of
        # them is non-empty (whitespace matches produce all-empty groups,
        # which filter() drops).
        self.tokens = filter(None, [dqstr or sqstr or number or token or name
                                    for dqstr, sqstr, number, token, name in
                                    self._tokenize(text)])
        self.pos = 0
    # Tokenizer
    at_end = property(lambda self: self.pos == len(self.tokens) - 1)
    cur_token = property(lambda self: self.tokens[self.pos])
    def next_token(self):
        """Advance to the next token and return it."""
        self.pos += 1
        return self.tokens[self.pos]
    def peek_token(self):
        """Return the next token without consuming it, or `None` at the end."""
        if not self.at_end:
            return self.tokens[self.pos + 1]
        return None
    # Recursive descent parser
    def parse(self):
        """Parses the XPath expression and returns a list of location path
        tests.
        For union expressions (such as `*|text()`), this function returns one
        test for each operand in the union. For patch expressions that don't
        use the union operator, the function always returns a list of size 1.
        Each path test in turn is a sequence of tests that correspond to the
        location steps, each tuples of the form `(axis, testfunc, predicates)`
        """
        paths = [self._location_path()]
        while self.cur_token == '|':
            self.next_token()
            paths.append(self._location_path())
        if not self.at_end:
            raise PathSyntaxError('Unexpected token %r after end of expression'
                                  % self.cur_token, self.filename, self.lineno)
        return paths
    def _location_path(self):
        """Parse one relative location path: a '/'-separated list of steps."""
        steps = []
        while True:
            if self.cur_token.startswith('/'):
                if self.cur_token == '//':
                    # '//' implies an intervening descendant-or-self step
                    steps.append((DESCENDANT_OR_SELF, NodeTest(), []))
                elif not steps:
                    raise PathSyntaxError('Absolute location paths not '
                                          'supported', self.filename,
                                          self.lineno)
                self.next_token()
            axis, nodetest, predicates = self._location_step()
            if not axis:
                # The default axis is "child"
                axis = CHILD
            steps.append((axis, nodetest, predicates))
            if self.at_end or not self.cur_token.startswith('/'):
                break
        return steps
    def _location_step(self):
        """Parse a single location step: optional axis, a node test, and any
        number of predicates.

        :return: an `(axis, nodetest, predicates)` tuple; `axis` is `None`
                 when no explicit axis was given
        """
        if self.cur_token == '@':
            axis = ATTRIBUTE
            self.next_token()
        elif self.cur_token == '.':
            axis = SELF
        elif self.cur_token == '..':
            raise PathSyntaxError('Unsupported axis "parent"', self.filename,
                                  self.lineno)
        elif self.peek_token() == '::':
            axis = Axis.forname(self.cur_token)
            if axis is None:
                # Bug fix: report the unrecognized axis name (the current
                # token) -- the original interpolated `axis`, which is always
                # None on this branch, and misspelled "Unsupported".
                raise PathSyntaxError('Unsupported axis "%s"' % self.cur_token,
                                      self.filename, self.lineno)
            self.next_token()
            self.next_token()
        else:
            axis = None
        nodetest = self._node_test(axis or CHILD)
        predicates = []
        while self.cur_token == '[':
            predicates.append(self._predicate())
        return axis, nodetest, predicates
    def _node_test(self, axis=None):
        """Parse a node test: a node type test such as `text()`, a namespace-
        qualified name, a wildcard, or a plain local name."""
        test = prefix = None
        next_token = self.peek_token()
        if next_token in ('(', '()'): # Node type test
            test = self._node_type()
        elif next_token == ':': # Namespace prefix
            prefix = self.cur_token
            self.next_token()
            localname = self.next_token()
            if localname == '*':
                test = QualifiedPrincipalTypeTest(axis, prefix)
            else:
                test = QualifiedNameTest(axis, prefix, localname)
        else: # Name test
            if self.cur_token == '*':
                test = PrincipalTypeTest(axis)
            elif self.cur_token == '.':
                test = NodeTest()
            else:
                test = LocalNameTest(axis, self.cur_token)
        if not self.at_end:
            self.next_token()
        return test
    def _node_type(self):
        """Parse a node type test such as `node()`, `text()`, `comment()` or
        `processing-instruction("target")`."""
        name = self.cur_token
        self.next_token()
        args = []
        if self.cur_token != '()':
            # The processing-instruction() function optionally accepts the
            # name of the PI as argument, which must be a literal string
            self.next_token() # (
            if self.cur_token != ')':
                string = self.cur_token
                if (string[0], string[-1]) in self._QUOTES:
                    string = string[1:-1]
                args.append(string)
        cls = _nodetest_map.get(name)
        if not cls:
            raise PathSyntaxError('%s() not allowed here' % name, self.filename,
                                  self.lineno)
        return cls(*args)
    def _predicate(self):
        """Parse a `[...]` predicate expression."""
        assert self.cur_token == '['
        self.next_token()
        expr = self._or_expr()
        if self.cur_token != ']':
            raise PathSyntaxError('Expected "]" to close predicate, '
                                  'but found "%s"' % self.cur_token,
                                  self.filename, self.lineno)
        if not self.at_end:
            self.next_token()
        return expr
    def _or_expr(self):
        """Parse an `or` expression (lowest precedence)."""
        expr = self._and_expr()
        while self.cur_token == 'or':
            self.next_token()
            expr = OrOperator(expr, self._and_expr())
        return expr
    def _and_expr(self):
        """Parse an `and` expression."""
        expr = self._equality_expr()
        while self.cur_token == 'and':
            self.next_token()
            expr = AndOperator(expr, self._equality_expr())
        return expr
    def _equality_expr(self):
        """Parse an equality (`=` / `!=`) expression."""
        expr = self._relational_expr()
        while self.cur_token in ('=', '!='):
            op = _operator_map[self.cur_token]
            self.next_token()
            expr = op(expr, self._relational_expr())
        return expr
    def _relational_expr(self):
        """Parse a relational (`>`, `>=`, `<`, `<=`) expression."""
        expr = self._sub_expr()
        # Bug fix: the original tuple listed '>=' twice and omitted '<=', so
        # "less than or equal" comparisons could never be parsed.
        while self.cur_token in ('>', '>=', '<', '<='):
            op = _operator_map[self.cur_token]
            self.next_token()
            expr = op(expr, self._sub_expr())
        return expr
    def _sub_expr(self):
        """Parse a parenthesized sub-expression, or fall through to a primary
        expression."""
        token = self.cur_token
        if token != '(':
            return self._primary_expr()
        self.next_token()
        expr = self._or_expr()
        if self.cur_token != ')':
            raise PathSyntaxError('Expected ")" to close sub-expression, '
                                  'but found "%s"' % self.cur_token,
                                  self.filename, self.lineno)
        self.next_token()
        return expr
    def _primary_expr(self):
        """Parse a primary expression: a string or number literal, a variable
        reference (`$name`), a function call, or a node test."""
        token = self.cur_token
        if len(token) > 1 and (token[0], token[-1]) in self._QUOTES:
            self.next_token()
            return StringLiteral(token[1:-1])
        elif token[0].isdigit() or token[0] == '.':
            self.next_token()
            return NumberLiteral(as_float(token))
        elif token == '$':
            token = self.next_token()
            self.next_token()
            return VariableReference(token)
        elif not self.at_end and self.peek_token().startswith('('):
            return self._function_call()
        else:
            axis = None
            if token == '@':
                axis = ATTRIBUTE
                self.next_token()
            return self._node_test(axis)
    def _function_call(self):
        """Parse a function call with its (possibly empty) argument list."""
        name = self.cur_token
        if self.next_token() == '()':
            args = []
        else:
            assert self.cur_token == '('
            self.next_token()
            args = [self._or_expr()]
            while self.cur_token == ',':
                self.next_token()
                args.append(self._or_expr())
            if not self.cur_token == ')':
                raise PathSyntaxError('Expected ")" to close function argument '
                                      'list, but found "%s"' % self.cur_token,
                                      self.filename, self.lineno)
        self.next_token()
        cls = _function_map.get(name)
        if not cls:
            raise PathSyntaxError('Unsupported function "%s"' % name,
                                  self.filename, self.lineno)
        return cls(*args)
# Type coercion
def as_scalar(value):
    """Convert value to a scalar. If a single element Attrs() object is passed
    the value of the single attribute will be returned."""
    if isinstance(value, Attrs):
        # Attrs entries are (name, value) pairs; unwrap the lone value
        assert len(value) == 1
        return value[0][1]
    else:
        return value
def as_float(value):
    """Coerce ``value`` to a float, unwrapping single-attribute Attrs first."""
    # FIXME - if value is a bool it will be coerced to 0.0 and consequently
    # compared as a float. This is probably not ideal.
    return float(as_scalar(value))
def as_long(value):
    """Coerce ``value`` to a ``long`` integer (Python 2), unwrapping
    single-attribute Attrs first."""
    return long(as_scalar(value))
def as_string(value):
    """Coerce ``value`` to a unicode string; the XPath boolean ``false``
    maps to the empty string."""
    scalar = as_scalar(value)
    if scalar is False:
        return u''
    return unicode(scalar)
def as_bool(value):
    """Coerce ``value`` to a boolean, unwrapping single-attribute Attrs first."""
    return bool(as_scalar(value))
# Node tests
class PrincipalTypeTest(object):
    """Node test that matches any event with the given principal type."""
    __slots__ = ['principal_type']
    def __init__(self, principal_type):
        self.principal_type = principal_type
    def __call__(self, kind, data, pos, namespaces, variables):
        if kind is not START:
            return None
        if self.principal_type is ATTRIBUTE:
            # Match the element's attribute collection, or nothing when empty
            return data[1] or None
        return True
    def __repr__(self):
        return '*'
class QualifiedPrincipalTypeTest(object):
    """Node test that matches any event with the given principal type in a
    specific namespace."""
    __slots__ = ['principal_type', 'prefix']
    def __init__(self, principal_type, prefix):
        self.principal_type = principal_type
        self.prefix = prefix
    def __call__(self, kind, data, pos, namespaces, variables):
        # Resolve the prefix against the in-scope namespace mappings
        namespace = Namespace(namespaces.get(self.prefix))
        if kind is not START:
            return None
        if self.principal_type is ATTRIBUTE and data[1]:
            # Keep only the attributes that live in the target namespace
            in_ns = [(name, value) for name, value in data[1]
                     if name in namespace]
            return Attrs(in_ns) or None
        return data[0] in namespace
    def __repr__(self):
        return '%s:*' % self.prefix
class LocalNameTest(object):
    """Node test that matches any event with the given principal type and
    local name.
    """
    __slots__ = ['principal_type', 'name']
    def __init__(self, principal_type, name):
        self.principal_type = principal_type
        self.name = name
    def __call__(self, kind, data, pos, namespaces, variables):
        if kind is not START:
            return None
        if self.principal_type is ATTRIBUTE and self.name in data[1]:
            # Return a single-entry Attrs with the matched attribute
            return Attrs([(self.name, data[1].get(self.name))])
        return data[0].localname == self.name
    def __repr__(self):
        return self.name
class QualifiedNameTest(object):
    """Node test that matches any event with the given principal type and
    qualified name.
    """
    __slots__ = ['principal_type', 'prefix', 'name']
    def __init__(self, principal_type, prefix, name):
        self.principal_type = principal_type
        self.prefix = prefix
        self.name = name
    def __call__(self, kind, data, pos, namespaces, variables):
        # Build the fully-qualified name from the current prefix mapping
        qname = QName('%s}%s' % (namespaces.get(self.prefix), self.name))
        if kind is not START:
            return None
        if self.principal_type is ATTRIBUTE and qname in data[1]:
            return Attrs([(self.name, data[1].get(self.name))])
        return data[0] == qname
    def __repr__(self):
        return '%s:%s' % (self.prefix, self.name)
class CommentNodeTest(object):
    """Node test that matches any comment events."""
    __slots__ = []
    def __call__(self, kind, data, pos, namespaces, variables):
        # True exactly when the event is a COMMENT
        return kind is COMMENT
    def __repr__(self):
        return 'comment()'
class NodeTest(object):
    """Node test that matches any node."""
    __slots__ = []
    def __call__(self, kind, data, pos, namespaces, variables):
        # Element starts match as booleans; any other event is returned
        # whole, so it becomes the match result itself.
        if kind is START:
            return True
        return kind, data, pos
    def __repr__(self):
        return 'node()'
class ProcessingInstructionNodeTest(object):
    """Node test that matches any processing instruction event."""
    __slots__ = ['target']
    def __init__(self, target=None):
        self.target = target
    def __call__(self, kind, data, pos, namespaces, variables):
        # Match every PI, or only PIs with the configured target name
        return kind is PI and (not self.target or data[0] == self.target)
    def __repr__(self):
        if self.target:
            return 'processing-instruction("%s")' % self.target
        return 'processing-instruction()'
class TextNodeTest(object):
    """Node test that matches any text event."""
    __slots__ = []
    def __call__(self, kind, data, pos, namespaces, variables):
        # True exactly when the event is a TEXT node
        return kind is TEXT
    def __repr__(self):
        return 'text()'
# Maps node-type names as written in path expressions to the classes
# implementing the corresponding node test.
_nodetest_map = {'comment': CommentNodeTest, 'node': NodeTest,
                 'processing-instruction': ProcessingInstructionNodeTest,
                 'text': TextNodeTest}
# Functions
class Function(object):
    """Base class for function nodes in XPath expressions.

    Subclasses are callables invoked as ``func(kind, data, pos, namespaces,
    variables)`` for each stream event being tested.
    """
class BooleanFunction(Function):
    """The `boolean` function, which converts its argument to a boolean
    value.
    """
    __slots__ = ['expr']
    def __init__(self, expr):
        self.expr = expr
    def __call__(self, kind, data, pos, namespaces, variables):
        # Evaluate the wrapped expression, then coerce to bool
        return as_bool(self.expr(kind, data, pos, namespaces, variables))
    def __repr__(self):
        return 'boolean(%r)' % self.expr
class CeilingFunction(Function):
    """The `ceiling` function, which returns the smallest integer value that
    is not less than the given number.

    (The previous docstring said "nearest lower integer", which describes
    `floor`, not `ceil`.)
    """
    __slots__ = ['number']
    def __init__(self, number):
        self.number = number
    def __call__(self, kind, data, pos, namespaces, variables):
        # Evaluate the argument expression, coerce to float, round up
        number = self.number(kind, data, pos, namespaces, variables)
        return ceil(as_float(number))
    def __repr__(self):
        return 'ceiling(%r)' % self.number
class ConcatFunction(Function):
    """The `concat` function, which concatenates (joins) the variable number of
    strings it gets as arguments.
    """
    __slots__ = ['exprs']
    def __init__(self, *exprs):
        self.exprs = exprs
    def __call__(self, kind, data, pos, namespaces, variables):
        # Evaluate each argument expression in order and join the results
        return u''.join([as_string(expr(kind, data, pos, namespaces, variables))
                         for expr in self.exprs])
    def __repr__(self):
        return 'concat(%s)' % ', '.join([repr(expr) for expr in self.exprs])
class ContainsFunction(Function):
    """The `contains` function, which returns whether a string contains a given
    substring.
    """
    __slots__ = ['string1', 'string2']
    def __init__(self, string1, string2):
        self.string1 = string1
        self.string2 = string2
    def __call__(self, kind, data, pos, namespaces, variables):
        haystack = as_string(self.string1(kind, data, pos, namespaces, variables))
        needle = as_string(self.string2(kind, data, pos, namespaces, variables))
        return needle in haystack
    def __repr__(self):
        return 'contains(%r, %r)' % (self.string1, self.string2)
class MatchesFunction(Function):
    """The `matches` function, which returns whether a string matches a regular
    expression.
    """
    # 'flags' added to __slots__: the original assigned self.flags without
    # declaring it (it only worked because the base class has no __slots__).
    __slots__ = ['string1', 'string2', 'flags']
    # XPath flag characters -> `re` module flag constants
    flag_mapping = {'s': re.S, 'm': re.M, 'i': re.I, 'x': re.X}
    def __init__(self, string1, string2, flags=''):
        self.string1 = string1
        self.string2 = string2
        self.flags = self._map_flags(flags)
    def __call__(self, kind, data, pos, namespaces, variables):
        string1 = as_string(self.string1(kind, data, pos, namespaces, variables))
        string2 = as_string(self.string2(kind, data, pos, namespaces, variables))
        return re.search(string2, string1, self.flags)
    def _map_flags(self, flags):
        """Combine the given flag characters into `re` flags, always including
        `re.U` (Unicode matching).

        Bug fix: the original referenced the non-existent ``self.flag_map``
        (the class attribute is ``flag_mapping``), raising AttributeError
        whenever any flags were actually supplied.
        """
        result = re.U
        for flag in flags:
            result |= self.flag_mapping[flag]
        return result
    def __repr__(self):
        # Bug fix: previously misreported itself as contains()
        return 'matches(%r, %r)' % (self.string1, self.string2)
class FalseFunction(Function):
    """The `false` function, which always returns the boolean `false` value."""
    __slots__ = []
    def __call__(self, kind, data, pos, namespaces, variables):
        # Constant function: the event is ignored entirely
        return False
    def __repr__(self):
        return 'false()'
class FloorFunction(Function):
    """The `floor` function, which returns the largest integer value that is
    not greater than the given number.

    (The previous docstring called this the "`ceiling` function ... nearest
    higher integer" -- both the name and the direction were wrong.)
    """
    __slots__ = ['number']
    def __init__(self, number):
        self.number = number
    def __call__(self, kind, data, pos, namespaces, variables):
        # Evaluate the argument expression, coerce to float, round down
        number = self.number(kind, data, pos, namespaces, variables)
        return floor(as_float(number))
    def __repr__(self):
        return 'floor(%r)' % self.number
class LocalNameFunction(Function):
    """The `local-name` function, which returns the local name of the current
    element.
    """
    __slots__ = []
    def __call__(self, kind, data, pos, namespaces, variables):
        # Only element starts have a name; other events yield None
        if kind is not START:
            return None
        return data[0].localname
    def __repr__(self):
        return 'local-name()'
class NameFunction(Function):
    """The `name` function, which returns the qualified name of the current
    element.
    """
    __slots__ = []
    def __call__(self, kind, data, pos, namespaces, variables):
        # Only element starts have a name; other events yield None
        if kind is not START:
            return None
        return data[0]
    def __repr__(self):
        return 'name()'
class NamespaceUriFunction(Function):
    """The `namespace-uri` function, which returns the namespace URI of the
    current element.
    """
    __slots__ = []
    def __call__(self, kind, data, pos, namespaces, variables):
        # Only element starts carry a namespace; other events yield None
        if kind is not START:
            return None
        return data[0].namespace
    def __repr__(self):
        return 'namespace-uri()'
class NotFunction(Function):
    """The `not` function, which returns the negated boolean value of its
    argument.
    """
    __slots__ = ['expr']
    def __init__(self, expr):
        self.expr = expr
    def __call__(self, kind, data, pos, namespaces, variables):
        result = self.expr(kind, data, pos, namespaces, variables)
        return not as_bool(result)
    def __repr__(self):
        return 'not(%s)' % self.expr
class NormalizeSpaceFunction(Function):
    """The `normalize-space` function, which removes leading and trailing
    whitespace in the given string, and replaces multiple adjacent whitespace
    characters inside the string with a single space.
    """
    __slots__ = ['expr']
    # Substitution callable collapsing runs of 2+ whitespace characters
    _normalize = re.compile(r'\s{2,}').sub
    def __init__(self, expr):
        self.expr = expr
    def __call__(self, kind, data, pos, namespaces, variables):
        string = self.expr(kind, data, pos, namespaces, variables)
        # Strip the ends first, then collapse interior whitespace runs
        return self._normalize(' ', as_string(string).strip())
    def __repr__(self):
        return 'normalize-space(%s)' % repr(self.expr)
class NumberFunction(Function):
    """The `number` function that converts its argument to a number."""
    __slots__ = ['expr']
    def __init__(self, expr):
        self.expr = expr
    def __call__(self, kind, data, pos, namespaces, variables):
        # Evaluate the wrapped expression, then coerce to float
        return as_float(self.expr(kind, data, pos, namespaces, variables))
    def __repr__(self):
        return 'number(%r)' % self.expr
class RoundFunction(Function):
    """The `round` function, which returns the nearest integer number for the
    given number.
    """
    __slots__ = ['number']
    def __init__(self, number):
        self.number = number
    def __call__(self, kind, data, pos, namespaces, variables):
        value = self.number(kind, data, pos, namespaces, variables)
        return round(as_float(value))
    def __repr__(self):
        return 'round(%r)' % self.number
class StartsWithFunction(Function):
    """The `starts-with` function that returns whether one string starts with
    a given substring.
    """
    __slots__ = ['string1', 'string2']
    def __init__(self, string1, string2):
        self.string1 = string1
        self.string2 = string2
    def __call__(self, kind, data, pos, namespaces, variables):
        string = as_string(self.string1(kind, data, pos, namespaces, variables))
        prefix = as_string(self.string2(kind, data, pos, namespaces, variables))
        return string.startswith(prefix)
    def __repr__(self):
        return 'starts-with(%r, %r)' % (self.string1, self.string2)
class StringLengthFunction(Function):
    """The `string-length` function that returns the length of the given
    string.
    """
    __slots__ = ['expr']
    def __init__(self, expr):
        self.expr = expr
    def __call__(self, kind, data, pos, namespaces, variables):
        # Coerce the evaluated expression to a string, then measure it
        return len(as_string(self.expr(kind, data, pos, namespaces, variables)))
    def __repr__(self):
        return 'string-length(%r)' % self.expr
class SubstringFunction(Function):
    """The `substring` function that returns the part of a string that starts
    at the given offset, and optionally limited to the given length.
    """
    __slots__ = ['string', 'start', 'length']
    def __init__(self, string, start, length=None):
        self.string = string
        self.start = start
        self.length = length
    def __call__(self, kind, data, pos, namespaces, variables):
        string = self.string(kind, data, pos, namespaces, variables)
        start = self.start(kind, data, pos, namespaces, variables)
        length = 0
        if self.length is not None:
            length = self.length(kind, data, pos, namespaces, variables)
        # NOTE(review): this slice treats `start` as 0-based and uses
        # `length` as a count trimmed off the *end* of the string, which
        # does not match XPath 1.0 substring() semantics (1-based start,
        # length counted from start).  Preserved as-is -- confirm whether
        # this behavior is intentional before changing it.
        return string[as_long(start):len(as_string(string)) - as_long(length)]
    def __repr__(self):
        if self.length is not None:
            return 'substring(%r, %r, %r)' % (self.string, self.start,
                                              self.length)
        else:
            return 'substring(%r, %r)' % (self.string, self.start)
class SubstringAfterFunction(Function):
    """The `substring-after` function that returns the part of a string that
    is found after the given substring.
    """
    __slots__ = ['string1', 'string2']
    def __init__(self, string1, string2):
        self.string1 = string1
        self.string2 = string2
    def __call__(self, kind, data, pos, namespaces, variables):
        string1 = as_string(self.string1(kind, data, pos, namespaces, variables))
        string2 = as_string(self.string2(kind, data, pos, namespaces, variables))
        index = string1.find(string2)
        if index < 0:
            # Substring not present: XPath defines the result as ""
            return u''
        return string1[index + len(string2):]
    def __repr__(self):
        return 'substring-after(%r, %r)' % (self.string1, self.string2)
class SubstringBeforeFunction(Function):
    """The `substring-before` function that returns the part of a string that
    is found before the given substring.
    """
    __slots__ = ['string1', 'string2']
    def __init__(self, string1, string2):
        self.string1 = string1
        self.string2 = string2
    def __call__(self, kind, data, pos, namespaces, variables):
        string1 = as_string(self.string1(kind, data, pos, namespaces, variables))
        string2 = as_string(self.string2(kind, data, pos, namespaces, variables))
        index = string1.find(string2)
        if index >= 0:
            return string1[:index]
        # Substring not present: XPath defines the result as ""
        return u''
    def __repr__(self):
        # Bug fix: previously misreported itself as substring-after()
        return 'substring-before(%r, %r)' % (self.string1, self.string2)
class TranslateFunction(Function):
    """The `translate` function that translates a set of characters in a
    string to target set of characters.
    """
    __slots__ = ['string', 'fromchars', 'tochars']
    def __init__(self, string, fromchars, tochars):
        self.string = string
        self.fromchars = fromchars
        self.tochars = tochars
    def __call__(self, kind, data, pos, namespaces, variables):
        string = as_string(self.string(kind, data, pos, namespaces, variables))
        fromchars = as_string(self.fromchars(kind, data, pos, namespaces, variables))
        tochars = as_string(self.tochars(kind, data, pos, namespaces, variables))
        # Build a unicode translation table mapping code point to code point
        table = dict([(ord(f), ord(t)) for f, t in zip(fromchars, tochars)])
        return string.translate(table)
    def __repr__(self):
        return 'translate(%r, %r, %r)' % (self.string, self.fromchars,
                                          self.tochars)
class TrueFunction(Function):
    """The `true` function, which always returns the boolean `true` value."""
    __slots__ = []
    def __call__(self, kind, data, pos, namespaces, variables):
        # Constant function: the event is ignored entirely
        return True
    def __repr__(self):
        return 'true()'
# Maps XPath function names to the classes implementing them; used by
# PathParser._function_call() to instantiate function nodes.
_function_map = {'boolean': BooleanFunction, 'ceiling': CeilingFunction,
                 'concat': ConcatFunction, 'contains': ContainsFunction,
                 'matches': MatchesFunction, 'false': FalseFunction, 'floor':
                 FloorFunction, 'local-name': LocalNameFunction, 'name':
                 NameFunction, 'namespace-uri': NamespaceUriFunction,
                 'normalize-space': NormalizeSpaceFunction, 'not': NotFunction,
                 'number': NumberFunction, 'round': RoundFunction,
                 'starts-with': StartsWithFunction, 'string-length':
                 StringLengthFunction, 'substring': SubstringFunction,
                 'substring-after': SubstringAfterFunction, 'substring-before':
                 SubstringBeforeFunction, 'translate': TranslateFunction,
                 'true': TrueFunction}
# Literals & Variables
class Literal(object):
    """Abstract base class for literal nodes (string and number literals, and
    variable references)."""
class StringLiteral(Literal):
    """A string literal node."""
    __slots__ = ['text']
    def __init__(self, text):
        self.text = text
    def __call__(self, kind, data, pos, namespaces, variables):
        # Literals evaluate to their constant value regardless of the event
        return self.text
    def __repr__(self):
        return '"%s"' % self.text
class NumberLiteral(Literal):
    """A number literal node."""
    __slots__ = ['number']
    def __init__(self, number):
        self.number = number
    def __call__(self, kind, data, pos, namespaces, variables):
        # Literals evaluate to their constant value regardless of the event
        return self.number
    def __repr__(self):
        return str(self.number)
class VariableReference(Literal):
    """A variable reference node."""
    __slots__ = ['name']
    def __init__(self, name):
        self.name = name
    def __call__(self, kind, data, pos, namespaces, variables):
        # Unbound variables evaluate to None rather than raising
        return variables.get(self.name)
    def __repr__(self):
        return str(self.name)
# Operators
class AndOperator(object):
    """The boolean operator `and`."""
    __slots__ = ['lval', 'rval']
    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval
    def __call__(self, kind, data, pos, namespaces, variables):
        # Short-circuit: the right operand is only evaluated when the left
        # side is true
        if not as_bool(self.lval(kind, data, pos, namespaces, variables)):
            return False
        return as_bool(self.rval(kind, data, pos, namespaces, variables))
    def __repr__(self):
        return '%s and %s' % (self.lval, self.rval)
class EqualsOperator(object):
    """The equality operator `=`."""
    __slots__ = ['lval', 'rval']
    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval
    def __call__(self, kind, data, pos, namespaces, variables):
        # Compare the scalar values of both evaluated operands
        return as_scalar(self.lval(kind, data, pos, namespaces, variables)) \
            == as_scalar(self.rval(kind, data, pos, namespaces, variables))
    def __repr__(self):
        return '%s=%s' % (self.lval, self.rval)
class NotEqualsOperator(object):
    """The equality operator `!=`."""
    __slots__ = ['lval', 'rval']
    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval
    def __call__(self, kind, data, pos, namespaces, variables):
        # Compare the scalar values of both evaluated operands
        return as_scalar(self.lval(kind, data, pos, namespaces, variables)) \
            != as_scalar(self.rval(kind, data, pos, namespaces, variables))
    def __repr__(self):
        return '%s!=%s' % (self.lval, self.rval)
class OrOperator(object):
    """The boolean operator `or`."""
    __slots__ = ['lval', 'rval']
    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval
    def __call__(self, kind, data, pos, namespaces, variables):
        # Short-circuit: the right operand is only evaluated when the left
        # side is false
        if as_bool(self.lval(kind, data, pos, namespaces, variables)):
            return True
        return as_bool(self.rval(kind, data, pos, namespaces, variables))
    def __repr__(self):
        return '%s or %s' % (self.lval, self.rval)
class GreaterThanOperator(object):
    """The relational operator `>` (greater than)."""
    __slots__ = ['lval', 'rval']
    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval
    def __call__(self, kind, data, pos, namespaces, variables):
        # Both operands are compared numerically, per XPath semantics
        left = as_float(self.lval(kind, data, pos, namespaces, variables))
        right = as_float(self.rval(kind, data, pos, namespaces, variables))
        return left > right
    def __repr__(self):
        return '%s>%s' % (self.lval, self.rval)
class GreaterThanOrEqualOperator(object):
    """The relational operator `>=` (greater than or equal)."""
    __slots__ = ['lval', 'rval']
    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval
    def __call__(self, kind, data, pos, namespaces, variables):
        # Both operands are compared numerically, per XPath semantics
        left = as_float(self.lval(kind, data, pos, namespaces, variables))
        right = as_float(self.rval(kind, data, pos, namespaces, variables))
        return left >= right
    def __repr__(self):
        return '%s>=%s' % (self.lval, self.rval)
class LessThanOperator(object):
    """The relational operator `<` (less than)."""
    __slots__ = ['lval', 'rval']
    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval
    def __call__(self, kind, data, pos, namespaces, variables):
        # Both operands are compared numerically, per XPath semantics
        left = as_float(self.lval(kind, data, pos, namespaces, variables))
        right = as_float(self.rval(kind, data, pos, namespaces, variables))
        return left < right
    def __repr__(self):
        return '%s<%s' % (self.lval, self.rval)
class LessThanOrEqualOperator(object):
    """The relational operator `<=` (less than or equal)."""
    __slots__ = ['lval', 'rval']
    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval
    def __call__(self, kind, data, pos, namespaces, variables):
        # Both operands are compared numerically, per XPath semantics
        left = as_float(self.lval(kind, data, pos, namespaces, variables))
        right = as_float(self.rval(kind, data, pos, namespaces, variables))
        return left <= right
    def __repr__(self):
        return '%s<=%s' % (self.lval, self.rval)
# Maps operator tokens to the classes implementing those operators.
# Bug fix: the original dict literal spelled the '<=' key as '>=', so the
# duplicate key silently replaced GreaterThanOrEqualOperator with
# LessThanOrEqualOperator and left '<=' unmapped entirely.
_operator_map = {'=': EqualsOperator, '!=': NotEqualsOperator,
                 '>': GreaterThanOperator, '>=': GreaterThanOrEqualOperator,
                 '<': LessThanOperator, '<=': LessThanOrEqualOperator}
# Pseudo location step prepended to every path when tests are built with
# ignore_context=True: a "descendant-or-self::*" step, as implied by a
# leading ".//" in an expression.
_DOTSLASHSLASH = (DESCENDANT_OR_SELF, PrincipalTypeTest(None), ())
| |
"""
GWR is tested against results from GWR4
"""
import unittest
import pickle as pk
from crankshaft.regression.gwr.gwr import GWR, FBGWR
from crankshaft.regression.gwr.sel_bw import Sel_BW
from crankshaft.regression.gwr.diagnostics import get_AICc, get_AIC, get_BIC, get_CV
from crankshaft.regression.glm.family import Gaussian, Poisson, Binomial
import numpy as np
import pysal
class TestGWRGaussian(unittest.TestCase):
    """Validate Gaussian-family GWR estimates against GWR4 reference output."""

    def setUp(self):
        """Load the Georgia data, GWR4 result files and pickled fixtures."""
        data = pysal.open(pysal.examples.get_path('GData_utm.csv'))
        # Materialize as a list: zip() is a one-shot iterator on Python 3,
        # and test_Prediction consumes the coordinates twice.
        self.coords = list(zip(data.by_col('X'), data.by_col('Y')))
        self.y = np.array(data.by_col('PctBach')).reshape((-1,1))
        rural = np.array(data.by_col('PctRural')).reshape((-1,1))
        pov = np.array(data.by_col('PctPov')).reshape((-1,1))
        black = np.array(data.by_col('PctBlack')).reshape((-1,1))
        self.X = np.hstack([rural, pov, black])
        self.BS_F = pysal.open(pysal.examples.get_path('georgia_BS_F_listwise.csv'))
        self.BS_NN = pysal.open(pysal.examples.get_path('georgia_BS_NN_listwise.csv'))
        self.GS_F = pysal.open(pysal.examples.get_path('georgia_GS_F_listwise.csv'))
        self.GS_NN = pysal.open(pysal.examples.get_path('georgia_GS_NN_listwise.csv'))
        # Open pickles in binary mode (required for pickle data on Python 3
        # and on Windows) and close the handles instead of leaking them.
        with open(pysal.examples.get_path('FB.p'), 'rb') as fh:
            self.FB = pk.load(fh)
        with open(pysal.examples.get_path('XB.p'), 'rb') as fh:
            self.XB = pk.load(fh)
        with open(pysal.examples.get_path('err.p'), 'rb') as fh:
            self.err = pk.load(fh)

    def test_BS_F(self):
        """Bisquare kernel, fixed bandwidth, vs. GWR4 listwise output."""
        est_Int = self.BS_F.by_col(' est_Intercept')
        se_Int = self.BS_F.by_col(' se_Intercept')
        t_Int = self.BS_F.by_col(' t_Intercept')
        est_rural = self.BS_F.by_col(' est_PctRural')
        se_rural = self.BS_F.by_col(' se_PctRural')
        t_rural = self.BS_F.by_col(' t_PctRural')
        est_pov = self.BS_F.by_col(' est_PctPov')
        se_pov = self.BS_F.by_col(' se_PctPov')
        t_pov = self.BS_F.by_col(' t_PctPov')
        est_black = self.BS_F.by_col(' est_PctBlack')
        se_black = self.BS_F.by_col(' se_PctBlack')
        t_black = self.BS_F.by_col(' t_PctBlack')
        yhat = self.BS_F.by_col(' yhat')
        res = np.array(self.BS_F.by_col(' residual'))
        std_res = np.array(self.BS_F.by_col(' std_residual')).reshape((-1,1))
        localR2 = np.array(self.BS_F.by_col(' localR2')).reshape((-1,1))
        inf = np.array(self.BS_F.by_col(' influence')).reshape((-1,1))
        cooksD = np.array(self.BS_F.by_col(' CooksD')).reshape((-1,1))
        model = GWR(self.coords, self.y, self.X, bw=209267.689, fixed=True)
        rslt = model.fit()
        AICc = get_AICc(rslt)
        AIC = get_AIC(rslt)
        BIC = get_BIC(rslt)
        CV = get_CV(rslt)
        # assertAlmostEqual: the 'assertAlmostEquals' alias is deprecated
        # and removed in Python 3.12.
        self.assertAlmostEqual(np.floor(AICc), 894.0)
        self.assertAlmostEqual(np.floor(AIC), 890.0)
        self.assertAlmostEqual(np.floor(BIC), 944.0)
        self.assertAlmostEqual(np.round(CV,2), 18.25)
        np.testing.assert_allclose(est_Int, rslt.params[:,0], rtol=1e-04)
        np.testing.assert_allclose(se_Int, rslt.bse[:,0], rtol=1e-04)
        np.testing.assert_allclose(t_Int, rslt.tvalues[:,0], rtol=1e-04)
        np.testing.assert_allclose(est_rural, rslt.params[:,1], rtol=1e-04)
        np.testing.assert_allclose(se_rural, rslt.bse[:,1], rtol=1e-04)
        np.testing.assert_allclose(t_rural, rslt.tvalues[:,1], rtol=1e-04)
        np.testing.assert_allclose(est_pov, rslt.params[:,2], rtol=1e-04)
        np.testing.assert_allclose(se_pov, rslt.bse[:,2], rtol=1e-04)
        np.testing.assert_allclose(t_pov, rslt.tvalues[:,2], rtol=1e-04)
        np.testing.assert_allclose(est_black, rslt.params[:,3], rtol=1e-02)
        np.testing.assert_allclose(se_black, rslt.bse[:,3], rtol=1e-02)
        np.testing.assert_allclose(t_black, rslt.tvalues[:,3], rtol=1e-02)
        np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-05)
        np.testing.assert_allclose(res, rslt.resid_response, rtol=1e-04)
        np.testing.assert_allclose(std_res, rslt.std_res, rtol=1e-04)
        np.testing.assert_allclose(localR2, rslt.localR2, rtol=1e-05)
        np.testing.assert_allclose(inf, rslt.influ, rtol=1e-04)
        np.testing.assert_allclose(cooksD, rslt.cooksD, rtol=1e-00)

    def test_BS_NN(self):
        """Bisquare kernel, adaptive (nearest-neighbor) bandwidth."""
        est_Int = self.BS_NN.by_col(' est_Intercept')
        se_Int = self.BS_NN.by_col(' se_Intercept')
        t_Int = self.BS_NN.by_col(' t_Intercept')
        est_rural = self.BS_NN.by_col(' est_PctRural')
        se_rural = self.BS_NN.by_col(' se_PctRural')
        t_rural = self.BS_NN.by_col(' t_PctRural')
        est_pov = self.BS_NN.by_col(' est_PctPov')
        se_pov = self.BS_NN.by_col(' se_PctPov')
        t_pov = self.BS_NN.by_col(' t_PctPov')
        est_black = self.BS_NN.by_col(' est_PctBlack')
        se_black = self.BS_NN.by_col(' se_PctBlack')
        t_black = self.BS_NN.by_col(' t_PctBlack')
        yhat = self.BS_NN.by_col(' yhat')
        res = np.array(self.BS_NN.by_col(' residual'))
        std_res = np.array(self.BS_NN.by_col(' std_residual')).reshape((-1,1))
        localR2 = np.array(self.BS_NN.by_col(' localR2')).reshape((-1,1))
        inf = np.array(self.BS_NN.by_col(' influence')).reshape((-1,1))
        cooksD = np.array(self.BS_NN.by_col(' CooksD')).reshape((-1,1))
        model = GWR(self.coords, self.y, self.X, bw=90.000, fixed=False)
        rslt = model.fit()
        AICc = get_AICc(rslt)
        AIC = get_AIC(rslt)
        BIC = get_BIC(rslt)
        CV = get_CV(rslt)
        self.assertAlmostEqual(np.floor(AICc), 896.0)
        self.assertAlmostEqual(np.floor(AIC), 892.0)
        self.assertAlmostEqual(np.floor(BIC), 941.0)
        self.assertAlmostEqual(np.around(CV, 2), 19.19)
        np.testing.assert_allclose(est_Int, rslt.params[:,0], rtol=1e-04)
        np.testing.assert_allclose(se_Int, rslt.bse[:,0], rtol=1e-04)
        np.testing.assert_allclose(t_Int, rslt.tvalues[:,0], rtol=1e-04)
        np.testing.assert_allclose(est_rural, rslt.params[:,1], rtol=1e-04)
        np.testing.assert_allclose(se_rural, rslt.bse[:,1], rtol=1e-04)
        np.testing.assert_allclose(t_rural, rslt.tvalues[:,1], rtol=1e-04)
        np.testing.assert_allclose(est_pov, rslt.params[:,2], rtol=1e-04)
        np.testing.assert_allclose(se_pov, rslt.bse[:,2], rtol=1e-04)
        np.testing.assert_allclose(t_pov, rslt.tvalues[:,2], rtol=1e-04)
        np.testing.assert_allclose(est_black, rslt.params[:,3], rtol=1e-02)
        np.testing.assert_allclose(se_black, rslt.bse[:,3], rtol=1e-02)
        np.testing.assert_allclose(t_black, rslt.tvalues[:,3], rtol=1e-02)
        np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-05)
        np.testing.assert_allclose(res, rslt.resid_response, rtol=1e-04)
        np.testing.assert_allclose(std_res, rslt.std_res, rtol=1e-04)
        np.testing.assert_allclose(localR2, rslt.localR2, rtol=1e-05)
        np.testing.assert_allclose(inf, rslt.influ, rtol=1e-04)
        np.testing.assert_allclose(cooksD, rslt.cooksD, rtol=1e-00)

    def test_GS_F(self):
        """Gaussian kernel, fixed bandwidth."""
        est_Int = self.GS_F.by_col(' est_Intercept')
        se_Int = self.GS_F.by_col(' se_Intercept')
        t_Int = self.GS_F.by_col(' t_Intercept')
        est_rural = self.GS_F.by_col(' est_PctRural')
        se_rural = self.GS_F.by_col(' se_PctRural')
        t_rural = self.GS_F.by_col(' t_PctRural')
        est_pov = self.GS_F.by_col(' est_PctPov')
        se_pov = self.GS_F.by_col(' se_PctPov')
        t_pov = self.GS_F.by_col(' t_PctPov')
        est_black = self.GS_F.by_col(' est_PctBlack')
        se_black = self.GS_F.by_col(' se_PctBlack')
        t_black = self.GS_F.by_col(' t_PctBlack')
        yhat = self.GS_F.by_col(' yhat')
        res = np.array(self.GS_F.by_col(' residual'))
        std_res = np.array(self.GS_F.by_col(' std_residual')).reshape((-1,1))
        localR2 = np.array(self.GS_F.by_col(' localR2')).reshape((-1,1))
        inf = np.array(self.GS_F.by_col(' influence')).reshape((-1,1))
        cooksD = np.array(self.GS_F.by_col(' CooksD')).reshape((-1,1))
        model = GWR(self.coords, self.y, self.X, bw=87308.298,
                    kernel='gaussian', fixed=True)
        rslt = model.fit()
        AICc = get_AICc(rslt)
        AIC = get_AIC(rslt)
        BIC = get_BIC(rslt)
        CV = get_CV(rslt)
        self.assertAlmostEqual(np.floor(AICc), 895.0)
        self.assertAlmostEqual(np.floor(AIC), 890.0)
        self.assertAlmostEqual(np.floor(BIC), 943.0)
        self.assertAlmostEqual(np.around(CV, 2), 18.21)
        np.testing.assert_allclose(est_Int, rslt.params[:,0], rtol=1e-04)
        np.testing.assert_allclose(se_Int, rslt.bse[:,0], rtol=1e-04)
        np.testing.assert_allclose(t_Int, rslt.tvalues[:,0], rtol=1e-04)
        np.testing.assert_allclose(est_rural, rslt.params[:,1], rtol=1e-04)
        np.testing.assert_allclose(se_rural, rslt.bse[:,1], rtol=1e-04)
        np.testing.assert_allclose(t_rural, rslt.tvalues[:,1], rtol=1e-04)
        np.testing.assert_allclose(est_pov, rslt.params[:,2], rtol=1e-04)
        np.testing.assert_allclose(se_pov, rslt.bse[:,2], rtol=1e-04)
        np.testing.assert_allclose(t_pov, rslt.tvalues[:,2], rtol=1e-04)
        np.testing.assert_allclose(est_black, rslt.params[:,3], rtol=1e-02)
        np.testing.assert_allclose(se_black, rslt.bse[:,3], rtol=1e-02)
        np.testing.assert_allclose(t_black, rslt.tvalues[:,3], rtol=1e-02)
        np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-05)
        np.testing.assert_allclose(res, rslt.resid_response, rtol=1e-04)
        np.testing.assert_allclose(std_res, rslt.std_res, rtol=1e-04)
        np.testing.assert_allclose(localR2, rslt.localR2, rtol=1e-05)
        np.testing.assert_allclose(inf, rslt.influ, rtol=1e-04)
        np.testing.assert_allclose(cooksD, rslt.cooksD, rtol=1e-00)

    def test_GS_NN(self):
        """Gaussian kernel, adaptive (nearest-neighbor) bandwidth."""
        est_Int = self.GS_NN.by_col(' est_Intercept')
        se_Int = self.GS_NN.by_col(' se_Intercept')
        t_Int = self.GS_NN.by_col(' t_Intercept')
        est_rural = self.GS_NN.by_col(' est_PctRural')
        se_rural = self.GS_NN.by_col(' se_PctRural')
        t_rural = self.GS_NN.by_col(' t_PctRural')
        est_pov = self.GS_NN.by_col(' est_PctPov')
        se_pov = self.GS_NN.by_col(' se_PctPov')
        t_pov = self.GS_NN.by_col(' t_PctPov')
        est_black = self.GS_NN.by_col(' est_PctBlack')
        se_black = self.GS_NN.by_col(' se_PctBlack')
        t_black = self.GS_NN.by_col(' t_PctBlack')
        yhat = self.GS_NN.by_col(' yhat')
        res = np.array(self.GS_NN.by_col(' residual'))
        std_res = np.array(self.GS_NN.by_col(' std_residual')).reshape((-1,1))
        localR2 = np.array(self.GS_NN.by_col(' localR2')).reshape((-1,1))
        inf = np.array(self.GS_NN.by_col(' influence')).reshape((-1,1))
        cooksD = np.array(self.GS_NN.by_col(' CooksD')).reshape((-1,1))
        model = GWR(self.coords, self.y, self.X, bw=49.000,
                    kernel='gaussian', fixed=False)
        rslt = model.fit()
        AICc = get_AICc(rslt)
        AIC = get_AIC(rslt)
        BIC = get_BIC(rslt)
        CV = get_CV(rslt)
        self.assertAlmostEqual(np.floor(AICc), 896)
        self.assertAlmostEqual(np.floor(AIC), 894.0)
        self.assertAlmostEqual(np.floor(BIC), 922.0)
        self.assertAlmostEqual(np.around(CV, 2), 17.91)
        np.testing.assert_allclose(est_Int, rslt.params[:,0], rtol=1e-04)
        np.testing.assert_allclose(se_Int, rslt.bse[:,0], rtol=1e-04)
        np.testing.assert_allclose(t_Int, rslt.tvalues[:,0], rtol=1e-04)
        np.testing.assert_allclose(est_rural, rslt.params[:,1], rtol=1e-04)
        np.testing.assert_allclose(se_rural, rslt.bse[:,1], rtol=1e-04)
        np.testing.assert_allclose(t_rural, rslt.tvalues[:,1], rtol=1e-04)
        np.testing.assert_allclose(est_pov, rslt.params[:,2], rtol=1e-04)
        np.testing.assert_allclose(se_pov, rslt.bse[:,2], rtol=1e-04)
        np.testing.assert_allclose(t_pov, rslt.tvalues[:,2], rtol=1e-04)
        np.testing.assert_allclose(est_black, rslt.params[:,3], rtol=1e-02)
        np.testing.assert_allclose(se_black, rslt.bse[:,3], rtol=1e-02)
        np.testing.assert_allclose(t_black, rslt.tvalues[:,3], rtol=1e-02)
        np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-05)
        np.testing.assert_allclose(res, rslt.resid_response, rtol=1e-04)
        np.testing.assert_allclose(std_res, rslt.std_res, rtol=1e-04)
        np.testing.assert_allclose(localR2, rslt.localR2, rtol=1e-05)
        np.testing.assert_allclose(inf, rslt.influ, rtol=1e-04)
        np.testing.assert_allclose(cooksD, rslt.cooksD, rtol=1e-00)

    def test_FBGWR(self):
        """Flexible-bandwidth GWR vs. pickled reference arrays."""
        model = FBGWR(self.coords, self.y, self.X, [157.0, 65.0, 52.0],
                      XB=self.XB, err=self.err, constant=False)
        rslt = model.fit()
        np.testing.assert_allclose(rslt.predy, self.FB['predy'], atol=1e-07)
        np.testing.assert_allclose(rslt.params, self.FB['params'], atol=1e-07)
        np.testing.assert_allclose(rslt.resid_response, self.FB['u'], atol=1e-05)
        np.testing.assert_almost_equal(rslt.resid_ss, 6339.3497144025841)

    def test_Prediction(self):
        """Out-of-sample prediction on the last 10 observations."""
        coords = np.array(self.coords)
        index = np.arange(len(self.y))
        test = index[-10:]
        X_test = self.X[test]
        coords_test = list(coords[test])
        model = GWR(self.coords, self.y, self.X, 93, family=Gaussian(),
                    fixed=False, kernel='bisquare')
        results = model.predict(coords_test, X_test)
        # Reference values produced by GWR4 for the 10 held-out points.
        params = np.array([22.77198, -0.10254, -0.215093, -0.01405,
                           19.10531, -0.094177, -0.232529, 0.071913,
                           19.743421, -0.080447, -0.30893, 0.083206,
                           17.505759, -0.078919, -0.187955, 0.051719,
                           27.747402, -0.165335, -0.208553, 0.004067,
                           26.210627, -0.138398, -0.360514, 0.072199,
                           18.034833, -0.077047, -0.260556, 0.084319,
                           28.452802, -0.163408, -0.14097, -0.063076,
                           22.353095, -0.103046, -0.226654, 0.002992,
                           18.220508, -0.074034, -0.309812, 0.108636]).reshape((10,4))
        np.testing.assert_allclose(params, results.params, rtol=1e-03)
        bse = np.array([2.080166, 0.021462, 0.102954, 0.049627,
                        2.536355, 0.022111, 0.123857, 0.051917,
                        1.967813, 0.019716, 0.102562, 0.054918,
                        2.463219, 0.021745, 0.110297, 0.044189,
                        1.556056, 0.019513, 0.12764, 0.040315,
                        1.664108, 0.020114, 0.131208, 0.041613,
                        2.5835, 0.021481, 0.113158, 0.047243,
                        1.709483, 0.019752, 0.116944, 0.043636,
                        1.958233, 0.020947, 0.09974, 0.049821,
                        2.276849, 0.020122, 0.107867, 0.047842]).reshape((10,4))
        np.testing.assert_allclose(bse, results.bse, rtol=1e-03)
        tvalues = np.array([10.947193, -4.777659, -2.089223, -0.283103,
                            7.532584, -4.259179, -1.877395, 1.385161,
                            10.033179, -4.080362, -3.012133, 1.515096,
                            7.106862, -3.629311, -1.704079, 1.17042,
                            17.831878, -8.473156, -1.633924, 0.100891,
                            15.750552, -6.880725, -2.74765, 1.734978,
                            6.980774, -3.586757, -2.302575, 1.784818,
                            16.644095, -8.273001, -1.205451, -1.445501,
                            11.414933, -4.919384, -2.272458, 0.060064,
                            8.00251, -3.679274, -2.872176, 2.270738]).reshape((10,4))
        np.testing.assert_allclose(tvalues, results.tvalues, rtol=1e-03)
        localR2 = np.array([[ 0.53068693],
                            [ 0.59582647],
                            [ 0.59700925],
                            [ 0.45769954],
                            [ 0.54634509],
                            [ 0.5494828 ],
                            [ 0.55159604],
                            [ 0.55634237],
                            [ 0.53903842],
                            [ 0.55884954]])
        np.testing.assert_allclose(localR2, results.localR2, rtol=1e-05)
class TestGWRPoisson(unittest.TestCase):
    """Validate Poisson-family GWR estimates against GWR4 reference output."""

    def setUp(self):
        """Load the Tokyo mortality data and GWR4 result files."""
        data = pysal.open(pysal.examples.get_path('Tokyomortality.csv'), mode='Ur')
        # Materialize as a list: zip() is a one-shot iterator on Python 3
        # and the coordinates are consumed by every test method.
        self.coords = list(zip(data.by_col('X_CENTROID'), data.by_col('Y_CENTROID')))
        self.y = np.array(data.by_col('db2564')).reshape((-1,1))
        self.off = np.array(data.by_col('eb2564')).reshape((-1,1))
        OCC = np.array(data.by_col('OCC_TEC')).reshape((-1,1))
        OWN = np.array(data.by_col('OWNH')).reshape((-1,1))
        POP = np.array(data.by_col('POP65')).reshape((-1,1))
        UNEMP = np.array(data.by_col('UNEMP')).reshape((-1,1))
        self.X = np.hstack([OCC,OWN,POP,UNEMP])
        self.BS_F = pysal.open(pysal.examples.get_path('tokyo_BS_F_listwise.csv'))
        self.BS_NN = pysal.open(pysal.examples.get_path('tokyo_BS_NN_listwise.csv'))
        self.GS_F = pysal.open(pysal.examples.get_path('tokyo_GS_F_listwise.csv'))
        self.GS_NN = pysal.open(pysal.examples.get_path('tokyo_GS_NN_listwise.csv'))
        self.BS_NN_OFF = pysal.open(pysal.examples.get_path('tokyo_BS_NN_OFF_listwise.csv'))

    def test_BS_F(self):
        """Bisquare kernel, fixed bandwidth."""
        est_Int = self.BS_F.by_col(' est_Intercept')
        se_Int = self.BS_F.by_col(' se_Intercept')
        t_Int = self.BS_F.by_col(' t_Intercept')
        est_OCC = self.BS_F.by_col(' est_OCC_TEC')
        se_OCC = self.BS_F.by_col(' se_OCC_TEC')
        t_OCC = self.BS_F.by_col(' t_OCC_TEC')
        est_OWN = self.BS_F.by_col(' est_OWNH')
        se_OWN = self.BS_F.by_col(' se_OWNH')
        t_OWN = self.BS_F.by_col(' t_OWNH')
        est_POP = self.BS_F.by_col(' est_POP65')
        se_POP = self.BS_F.by_col(' se_POP65')
        t_POP = self.BS_F.by_col(' t_POP65')
        est_UNEMP = self.BS_F.by_col(' est_UNEMP')
        se_UNEMP = self.BS_F.by_col(' se_UNEMP')
        t_UNEMP = self.BS_F.by_col(' t_UNEMP')
        yhat = self.BS_F.by_col(' yhat')
        pdev = np.array(self.BS_F.by_col(' localpdev')).reshape((-1,1))
        model = GWR(self.coords, self.y, self.X, bw=26029.625, family=Poisson(),
                    kernel='bisquare', fixed=True)
        rslt = model.fit()
        AICc = get_AICc(rslt)
        AIC = get_AIC(rslt)
        BIC = get_BIC(rslt)
        # assertAlmostEqual: the 'assertAlmostEquals' alias is deprecated
        # and removed in Python 3.12.
        self.assertAlmostEqual(np.floor(AICc), 13294.0)
        self.assertAlmostEqual(np.floor(AIC), 13247.0)
        self.assertAlmostEqual(np.floor(BIC), 13485.0)
        np.testing.assert_allclose(est_Int, rslt.params[:,0], rtol=1e-05)
        np.testing.assert_allclose(se_Int, rslt.bse[:,0], rtol=1e-03)
        np.testing.assert_allclose(t_Int, rslt.tvalues[:,0], rtol=1e-03)
        np.testing.assert_allclose(est_OCC, rslt.params[:,1], rtol=1e-04)
        np.testing.assert_allclose(se_OCC, rslt.bse[:,1], rtol=1e-02)
        np.testing.assert_allclose(t_OCC, rslt.tvalues[:,1], rtol=1e-02)
        np.testing.assert_allclose(est_OWN, rslt.params[:,2], rtol=1e-04)
        np.testing.assert_allclose(se_OWN, rslt.bse[:,2], rtol=1e-03)
        np.testing.assert_allclose(t_OWN, rslt.tvalues[:,2], rtol=1e-03)
        np.testing.assert_allclose(est_POP, rslt.params[:,3], rtol=1e-04)
        np.testing.assert_allclose(se_POP, rslt.bse[:,3], rtol=1e-02)
        np.testing.assert_allclose(t_POP, rslt.tvalues[:,3], rtol=1e-02)
        np.testing.assert_allclose(est_UNEMP, rslt.params[:,4], rtol=1e-04)
        np.testing.assert_allclose(se_UNEMP, rslt.bse[:,4], rtol=1e-02)
        np.testing.assert_allclose(t_UNEMP, rslt.tvalues[:,4], rtol=1e-02)
        np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-05)
        np.testing.assert_allclose(pdev, rslt.pDev, rtol=1e-05)

    def test_BS_NN(self):
        """Bisquare kernel, adaptive (nearest-neighbor) bandwidth."""
        est_Int = self.BS_NN.by_col(' est_Intercept')
        se_Int = self.BS_NN.by_col(' se_Intercept')
        t_Int = self.BS_NN.by_col(' t_Intercept')
        est_OCC = self.BS_NN.by_col(' est_OCC_TEC')
        se_OCC = self.BS_NN.by_col(' se_OCC_TEC')
        t_OCC = self.BS_NN.by_col(' t_OCC_TEC')
        est_OWN = self.BS_NN.by_col(' est_OWNH')
        se_OWN = self.BS_NN.by_col(' se_OWNH')
        t_OWN = self.BS_NN.by_col(' t_OWNH')
        est_POP = self.BS_NN.by_col(' est_POP65')
        se_POP = self.BS_NN.by_col(' se_POP65')
        t_POP = self.BS_NN.by_col(' t_POP65')
        est_UNEMP = self.BS_NN.by_col(' est_UNEMP')
        se_UNEMP = self.BS_NN.by_col(' se_UNEMP')
        t_UNEMP = self.BS_NN.by_col(' t_UNEMP')
        yhat = self.BS_NN.by_col(' yhat')
        pdev = np.array(self.BS_NN.by_col(' localpdev')).reshape((-1,1))
        model = GWR(self.coords, self.y, self.X, bw=50, family=Poisson(),
                    kernel='bisquare', fixed=False)
        rslt = model.fit()
        AICc = get_AICc(rslt)
        AIC = get_AIC(rslt)
        BIC = get_BIC(rslt)
        self.assertAlmostEqual(np.floor(AICc), 13285)
        self.assertAlmostEqual(np.floor(AIC), 13259.0)
        self.assertAlmostEqual(np.floor(BIC), 13442.0)
        np.testing.assert_allclose(est_Int, rslt.params[:,0], rtol=1e-04)
        np.testing.assert_allclose(se_Int, rslt.bse[:,0], rtol=1e-02)
        np.testing.assert_allclose(t_Int, rslt.tvalues[:,0], rtol=1e-02)
        np.testing.assert_allclose(est_OCC, rslt.params[:,1], rtol=1e-03)
        np.testing.assert_allclose(se_OCC, rslt.bse[:,1], rtol=1e-02)
        np.testing.assert_allclose(t_OCC, rslt.tvalues[:,1], rtol=1e-02)
        np.testing.assert_allclose(est_OWN, rslt.params[:,2], rtol=1e-04)
        np.testing.assert_allclose(se_OWN, rslt.bse[:,2], rtol=1e-02)
        np.testing.assert_allclose(t_OWN, rslt.tvalues[:,2], rtol=1e-02)
        np.testing.assert_allclose(est_POP, rslt.params[:,3], rtol=1e-03)
        np.testing.assert_allclose(se_POP, rslt.bse[:,3], rtol=1e-02)
        np.testing.assert_allclose(t_POP, rslt.tvalues[:,3], rtol=1e-02)
        np.testing.assert_allclose(est_UNEMP, rslt.params[:,4], rtol=1e-04)
        np.testing.assert_allclose(se_UNEMP, rslt.bse[:,4], rtol=1e-02)
        np.testing.assert_allclose(t_UNEMP, rslt.tvalues[:,4], rtol=1e-02)
        np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-04)
        np.testing.assert_allclose(pdev, rslt.pDev, rtol=1e-05)

    def test_BS_NN_Offset(self):
        """Bisquare kernel, adaptive bandwidth, with an offset variable."""
        est_Int = self.BS_NN_OFF.by_col(' est_Intercept')
        se_Int = self.BS_NN_OFF.by_col(' se_Intercept')
        t_Int = self.BS_NN_OFF.by_col(' t_Intercept')
        est_OCC = self.BS_NN_OFF.by_col(' est_OCC_TEC')
        se_OCC = self.BS_NN_OFF.by_col(' se_OCC_TEC')
        t_OCC = self.BS_NN_OFF.by_col(' t_OCC_TEC')
        est_OWN = self.BS_NN_OFF.by_col(' est_OWNH')
        se_OWN = self.BS_NN_OFF.by_col(' se_OWNH')
        t_OWN = self.BS_NN_OFF.by_col(' t_OWNH')
        est_POP = self.BS_NN_OFF.by_col(' est_POP65')
        se_POP = self.BS_NN_OFF.by_col(' se_POP65')
        t_POP = self.BS_NN_OFF.by_col(' t_POP65')
        est_UNEMP = self.BS_NN_OFF.by_col(' est_UNEMP')
        se_UNEMP = self.BS_NN_OFF.by_col(' se_UNEMP')
        t_UNEMP = self.BS_NN_OFF.by_col(' t_UNEMP')
        yhat = self.BS_NN_OFF.by_col(' yhat')
        pdev = np.array(self.BS_NN_OFF.by_col(' localpdev')).reshape((-1,1))
        model = GWR(self.coords, self.y, self.X, bw=100, offset=self.off, family=Poisson(),
                    kernel='bisquare', fixed=False)
        rslt = model.fit()
        AICc = get_AICc(rslt)
        AIC = get_AIC(rslt)
        BIC = get_BIC(rslt)
        self.assertAlmostEqual(np.floor(AICc), 367.0)
        self.assertAlmostEqual(np.floor(AIC), 361.0)
        self.assertAlmostEqual(np.floor(BIC), 451.0)
        np.testing.assert_allclose(est_Int, rslt.params[:,0], rtol=1e-02,
                                   atol=1e-02)
        np.testing.assert_allclose(se_Int, rslt.bse[:,0], rtol=1e-02, atol=1e-02)
        np.testing.assert_allclose(t_Int, rslt.tvalues[:,0], rtol=1e-01,
                                   atol=1e-02)
        np.testing.assert_allclose(est_OCC, rslt.params[:,1], rtol=1e-03,
                                   atol=1e-02)
        np.testing.assert_allclose(se_OCC, rslt.bse[:,1], rtol=1e-02, atol=1e-02)
        np.testing.assert_allclose(t_OCC, rslt.tvalues[:,1], rtol=1e-01,
                                   atol=1e-02)
        np.testing.assert_allclose(est_OWN, rslt.params[:,2], rtol=1e-04,
                                   atol=1e-02)
        np.testing.assert_allclose(se_OWN, rslt.bse[:,2], rtol=1e-02, atol=1e-02)
        np.testing.assert_allclose(t_OWN, rslt.tvalues[:,2], rtol=1e-01,
                                   atol=1e-02)
        np.testing.assert_allclose(est_POP, rslt.params[:,3], rtol=1e-03,
                                   atol=1e-02)
        np.testing.assert_allclose(se_POP, rslt.bse[:,3], rtol=1e-02, atol=1e-02)
        np.testing.assert_allclose(t_POP, rslt.tvalues[:,3], rtol=1e-01,
                                   atol=1e-02)
        np.testing.assert_allclose(est_UNEMP, rslt.params[:,4], rtol=1e-04,
                                   atol=1e-02)
        np.testing.assert_allclose(se_UNEMP, rslt.bse[:,4], rtol=1e-02,
                                   atol=1e-02)
        np.testing.assert_allclose(t_UNEMP, rslt.tvalues[:,4], rtol=1e-01,
                                   atol=1e-02)
        np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-03, atol=1e-02)
        np.testing.assert_allclose(pdev, rslt.pDev, rtol=1e-04, atol=1e-02)

    def test_GS_F(self):
        """Gaussian kernel, fixed bandwidth."""
        est_Int = self.GS_F.by_col(' est_Intercept')
        se_Int = self.GS_F.by_col(' se_Intercept')
        t_Int = self.GS_F.by_col(' t_Intercept')
        est_OCC = self.GS_F.by_col(' est_OCC_TEC')
        se_OCC = self.GS_F.by_col(' se_OCC_TEC')
        t_OCC = self.GS_F.by_col(' t_OCC_TEC')
        est_OWN = self.GS_F.by_col(' est_OWNH')
        se_OWN = self.GS_F.by_col(' se_OWNH')
        t_OWN = self.GS_F.by_col(' t_OWNH')
        est_POP = self.GS_F.by_col(' est_POP65')
        se_POP = self.GS_F.by_col(' se_POP65')
        t_POP = self.GS_F.by_col(' t_POP65')
        est_UNEMP = self.GS_F.by_col(' est_UNEMP')
        se_UNEMP = self.GS_F.by_col(' se_UNEMP')
        t_UNEMP = self.GS_F.by_col(' t_UNEMP')
        yhat = self.GS_F.by_col(' yhat')
        pdev = np.array(self.GS_F.by_col(' localpdev')).reshape((-1,1))
        model = GWR(self.coords, self.y, self.X, bw=8764.474, family=Poisson(),
                    kernel='gaussian', fixed=True)
        rslt = model.fit()
        AICc = get_AICc(rslt)
        AIC = get_AIC(rslt)
        BIC = get_BIC(rslt)
        self.assertAlmostEqual(np.floor(AICc), 11283.0)
        self.assertAlmostEqual(np.floor(AIC), 11211.0)
        self.assertAlmostEqual(np.floor(BIC), 11497.0)
        np.testing.assert_allclose(est_Int, rslt.params[:,0], rtol=1e-03)
        np.testing.assert_allclose(se_Int, rslt.bse[:,0], rtol=1e-02)
        np.testing.assert_allclose(t_Int, rslt.tvalues[:,0], rtol=1e-02)
        np.testing.assert_allclose(est_OCC, rslt.params[:,1], rtol=1e-03)
        np.testing.assert_allclose(se_OCC, rslt.bse[:,1], rtol=1e-02)
        np.testing.assert_allclose(t_OCC, rslt.tvalues[:,1], rtol=1e-02)
        np.testing.assert_allclose(est_OWN, rslt.params[:,2], rtol=1e-03)
        np.testing.assert_allclose(se_OWN, rslt.bse[:,2], rtol=1e-02)
        np.testing.assert_allclose(t_OWN, rslt.tvalues[:,2], rtol=1e-02)
        np.testing.assert_allclose(est_POP, rslt.params[:,3], rtol=1e-02)
        np.testing.assert_allclose(se_POP, rslt.bse[:,3], rtol=1e-02)
        np.testing.assert_allclose(t_POP, rslt.tvalues[:,3], rtol=1e-02)
        np.testing.assert_allclose(est_UNEMP, rslt.params[:,4], rtol=1e-02)
        np.testing.assert_allclose(se_UNEMP, rslt.bse[:,4], rtol=1e-02)
        np.testing.assert_allclose(t_UNEMP, rslt.tvalues[:,4], rtol=1e-02)
        np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-04)
        np.testing.assert_allclose(pdev, rslt.pDev, rtol=1e-05)

    def test_GS_NN(self):
        """Gaussian kernel, adaptive (nearest-neighbor) bandwidth."""
        est_Int = self.GS_NN.by_col(' est_Intercept')
        se_Int = self.GS_NN.by_col(' se_Intercept')
        t_Int = self.GS_NN.by_col(' t_Intercept')
        est_OCC = self.GS_NN.by_col(' est_OCC_TEC')
        se_OCC = self.GS_NN.by_col(' se_OCC_TEC')
        t_OCC = self.GS_NN.by_col(' t_OCC_TEC')
        est_OWN = self.GS_NN.by_col(' est_OWNH')
        se_OWN = self.GS_NN.by_col(' se_OWNH')
        t_OWN = self.GS_NN.by_col(' t_OWNH')
        est_POP = self.GS_NN.by_col(' est_POP65')
        se_POP = self.GS_NN.by_col(' se_POP65')
        t_POP = self.GS_NN.by_col(' t_POP65')
        est_UNEMP = self.GS_NN.by_col(' est_UNEMP')
        se_UNEMP = self.GS_NN.by_col(' se_UNEMP')
        t_UNEMP = self.GS_NN.by_col(' t_UNEMP')
        yhat = self.GS_NN.by_col(' yhat')
        pdev = np.array(self.GS_NN.by_col(' localpdev')).reshape((-1,1))
        model = GWR(self.coords, self.y, self.X, bw=50, family=Poisson(),
                    kernel='gaussian', fixed=False)
        rslt = model.fit()
        AICc = get_AICc(rslt)
        AIC = get_AIC(rslt)
        BIC = get_BIC(rslt)
        self.assertAlmostEqual(np.floor(AICc), 21070.0)
        self.assertAlmostEqual(np.floor(AIC), 21069.0)
        self.assertAlmostEqual(np.floor(BIC), 21111.0)
        np.testing.assert_allclose(est_Int, rslt.params[:,0], rtol=1e-04)
        np.testing.assert_allclose(se_Int, rslt.bse[:,0], rtol=1e-02)
        np.testing.assert_allclose(t_Int, rslt.tvalues[:,0], rtol=1e-02)
        np.testing.assert_allclose(est_OCC, rslt.params[:,1], rtol=1e-03)
        np.testing.assert_allclose(se_OCC, rslt.bse[:,1], rtol=1e-02)
        np.testing.assert_allclose(t_OCC, rslt.tvalues[:,1], rtol=1e-02)
        np.testing.assert_allclose(est_OWN, rslt.params[:,2], rtol=1e-04)
        np.testing.assert_allclose(se_OWN, rslt.bse[:,2], rtol=1e-02)
        np.testing.assert_allclose(t_OWN, rslt.tvalues[:,2], rtol=1e-02)
        np.testing.assert_allclose(est_POP, rslt.params[:,3], rtol=1e-02)
        np.testing.assert_allclose(se_POP, rslt.bse[:,3], rtol=1e-02)
        np.testing.assert_allclose(t_POP, rslt.tvalues[:,3], rtol=1e-02)
        np.testing.assert_allclose(est_UNEMP, rslt.params[:,4], rtol=1e-02)
        np.testing.assert_allclose(se_UNEMP, rslt.bse[:,4], rtol=1e-02)
        np.testing.assert_allclose(t_UNEMP, rslt.tvalues[:,4], rtol=1e-02)
        np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-04)
        np.testing.assert_allclose(pdev, rslt.pDev, rtol=1e-05)
class TestGWRBinomial(unittest.TestCase):
def setUp(self):
data = pysal.open(pysal.examples.get_path('landslides.csv'))
self.coords = zip(data.by_col('X'), data.by_col('Y'))
self.y = np.array(data.by_col('Landslid')).reshape((-1,1))
ELEV = np.array(data.by_col('Elev')).reshape((-1,1))
SLOPE = np.array(data.by_col('Slope')).reshape((-1,1))
SIN = np.array(data.by_col('SinAspct')).reshape((-1,1))
COS = np.array(data.by_col('CosAspct')).reshape((-1,1))
SOUTH = np.array(data.by_col('AbsSouth')).reshape((-1,1))
DIST = np.array(data.by_col('DistStrm')).reshape((-1,1))
self.X = np.hstack([ELEV, SLOPE, SIN, COS, SOUTH, DIST])
self.BS_F = pysal.open(pysal.examples.get_path('clearwater_BS_F_listwise.csv'))
self.BS_NN = pysal.open(pysal.examples.get_path('clearwater_BS_NN_listwise.csv'))
self.GS_F = pysal.open(pysal.examples.get_path('clearwater_GS_F_listwise.csv'))
self.GS_NN = pysal.open(pysal.examples.get_path('clearwater_GS_NN_listwise.csv'))
def test_BS_F(self):
est_Int = self.BS_F.by_col(' est_Intercept')
se_Int = self.BS_F.by_col(' se_Intercept')
t_Int = self.BS_F.by_col(' t_Intercept')
est_elev = self.BS_F.by_col(' est_Elev')
se_elev = self.BS_F.by_col(' se_Elev')
t_elev = self.BS_F.by_col(' t_Elev')
est_slope = self.BS_F.by_col(' est_Slope')
se_slope = self.BS_F.by_col(' se_Slope')
t_slope = self.BS_F.by_col(' t_Slope')
est_sin = self.BS_F.by_col(' est_SinAspct')
se_sin = self.BS_F.by_col(' se_SinAspct')
t_sin = self.BS_F.by_col(' t_SinAspct')
est_cos = self.BS_F.by_col(' est_CosAspct')
se_cos = self.BS_F.by_col(' se_CosAspct')
t_cos = self.BS_F.by_col(' t_CosAspct')
est_south = self.BS_F.by_col(' est_AbsSouth')
se_south = self.BS_F.by_col(' se_AbsSouth')
t_south = self.BS_F.by_col(' t_AbsSouth')
est_strm = self.BS_F.by_col(' est_DistStrm')
se_strm = self.BS_F.by_col(' se_DistStrm')
t_strm = self.BS_F.by_col(' t_DistStrm')
yhat = self.BS_F.by_col(' yhat')
pdev = np.array(self.BS_F.by_col(' localpdev')).reshape((-1,1))
model = GWR(self.coords, self.y, self.X, bw=19642.170, family=Binomial(),
kernel='bisquare', fixed=True)
rslt = model.fit()
AICc = get_AICc(rslt)
AIC = get_AIC(rslt)
BIC = get_BIC(rslt)
self.assertAlmostEquals(np.floor(AICc), 275.0)
self.assertAlmostEquals(np.floor(AIC), 271.0)
self.assertAlmostEquals(np.floor(BIC), 349.0)
np.testing.assert_allclose(est_Int, rslt.params[:,0], rtol=1e-00)
np.testing.assert_allclose(se_Int, rslt.bse[:,0], rtol=1e-00)
np.testing.assert_allclose(t_Int, rslt.tvalues[:,0], rtol=1e-00)
np.testing.assert_allclose(est_elev, rslt.params[:,1], rtol=1e-00)
np.testing.assert_allclose(se_elev, rslt.bse[:,1], rtol=1e-00)
np.testing.assert_allclose(t_elev, rslt.tvalues[:,1], rtol=1e-00)
np.testing.assert_allclose(est_slope, rslt.params[:,2], rtol=1e-00)
np.testing.assert_allclose(se_slope, rslt.bse[:,2], rtol=1e-00)
np.testing.assert_allclose(t_slope, rslt.tvalues[:,2], rtol=1e-00)
np.testing.assert_allclose(est_sin, rslt.params[:,3], rtol=1e01)
np.testing.assert_allclose(se_sin, rslt.bse[:,3], rtol=1e01)
np.testing.assert_allclose(t_sin, rslt.tvalues[:,3], rtol=1e01)
np.testing.assert_allclose(est_cos, rslt.params[:,4], rtol=1e01)
np.testing.assert_allclose(se_cos, rslt.bse[:,4], rtol=1e01)
np.testing.assert_allclose(t_cos, rslt.tvalues[:,4], rtol=1e01)
np.testing.assert_allclose(est_south, rslt.params[:,5], rtol=1e01)
np.testing.assert_allclose(se_south, rslt.bse[:,5], rtol=1e01)
np.testing.assert_allclose(t_south, rslt.tvalues[:,5], rtol=1e01)
np.testing.assert_allclose(est_strm, rslt.params[:,6], rtol=1e02)
np.testing.assert_allclose(se_strm, rslt.bse[:,6], rtol=1e01)
np.testing.assert_allclose(t_strm, rslt.tvalues[:,6], rtol=1e02)
np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-01)
#This test fails - likely due to compound rounding errors
#Has been tested using statsmodels.family calculations and
#code from Jing's python version, which both yield the same
#np.testing.assert_allclose(pdev, rslt.pDev, rtol=1e-05)
def test_BS_NN(self):
est_Int = self.BS_NN.by_col(' est_Intercept')
se_Int = self.BS_NN.by_col(' se_Intercept')
t_Int = self.BS_NN.by_col(' t_Intercept')
est_elev = self.BS_NN.by_col(' est_Elev')
se_elev = self.BS_NN.by_col(' se_Elev')
t_elev = self.BS_NN.by_col(' t_Elev')
est_slope = self.BS_NN.by_col(' est_Slope')
se_slope = self.BS_NN.by_col(' se_Slope')
t_slope = self.BS_NN.by_col(' t_Slope')
est_sin = self.BS_NN.by_col(' est_SinAspct')
se_sin = self.BS_NN.by_col(' se_SinAspct')
t_sin = self.BS_NN.by_col(' t_SinAspct')
est_cos = self.BS_NN.by_col(' est_CosAspct')
se_cos = self.BS_NN.by_col(' se_CosAspct')
t_cos = self.BS_NN.by_col(' t_CosAspct')
est_south = self.BS_NN.by_col(' est_AbsSouth')
se_south = self.BS_NN.by_col(' se_AbsSouth')
t_south = self.BS_NN.by_col(' t_AbsSouth')
est_strm = self.BS_NN.by_col(' est_DistStrm')
se_strm = self.BS_NN.by_col(' se_DistStrm')
t_strm = self.BS_NN.by_col(' t_DistStrm')
yhat = self.BS_NN.by_col(' yhat')
pdev = self.BS_NN.by_col(' localpdev')
model = GWR(self.coords, self.y, self.X, bw=158, family=Binomial(),
kernel='bisquare', fixed=False)
rslt = model.fit()
AICc = get_AICc(rslt)
AIC = get_AIC(rslt)
BIC = get_BIC(rslt)
self.assertAlmostEquals(np.floor(AICc), 277.0)
self.assertAlmostEquals(np.floor(AIC), 271.0)
self.assertAlmostEquals(np.floor(BIC), 358.0)
np.testing.assert_allclose(est_Int, rslt.params[:,0], rtol=1e-00)
np.testing.assert_allclose(se_Int, rslt.bse[:,0], rtol=1e-00)
np.testing.assert_allclose(t_Int, rslt.tvalues[:,0], rtol=1e-00)
np.testing.assert_allclose(est_elev, rslt.params[:,1], rtol=1e-00)
np.testing.assert_allclose(se_elev, rslt.bse[:,1], rtol=1e-00)
np.testing.assert_allclose(t_elev, rslt.tvalues[:,1], rtol=1e-00)
np.testing.assert_allclose(est_slope, rslt.params[:,2], rtol=1e-00)
np.testing.assert_allclose(se_slope, rslt.bse[:,2], rtol=1e-00)
np.testing.assert_allclose(t_slope, rslt.tvalues[:,2], rtol=1e-00)
np.testing.assert_allclose(est_sin, rslt.params[:,3], rtol=1e01)
np.testing.assert_allclose(se_sin, rslt.bse[:,3], rtol=1e01)
np.testing.assert_allclose(t_sin, rslt.tvalues[:,3], rtol=1e01)
np.testing.assert_allclose(est_cos, rslt.params[:,4], rtol=1e01)
np.testing.assert_allclose(se_cos, rslt.bse[:,4], rtol=1e01)
np.testing.assert_allclose(t_cos, rslt.tvalues[:,4], rtol=1e01)
np.testing.assert_allclose(est_south, rslt.params[:,5], rtol=1e01)
np.testing.assert_allclose(se_south, rslt.bse[:,5], rtol=1e01)
np.testing.assert_allclose(t_south, rslt.tvalues[:,5], rtol=1e01)
np.testing.assert_allclose(est_strm, rslt.params[:,6], rtol=1e03)
np.testing.assert_allclose(se_strm, rslt.bse[:,6], rtol=1e01)
np.testing.assert_allclose(t_strm, rslt.tvalues[:,6], rtol=1e03)
np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-01)
#This test fails - likely due to compound rounding errors
#Has been tested using statsmodels.family calculations and
#code from Jing's python version, which both yield the same
#np.testing.assert_allclose(pdev, rslt.pDev, rtol=1e-05)
def test_GS_F(self):
    """Golden-master test: binomial GWR with a fixed gaussian kernel.

    Fits a GWR model (bw=8929.061, family=Binomial, kernel='gaussian',
    fixed bandwidth) and compares parameters, standard errors, t-values
    and fitted values against the stored GS_F reference table.
    """
    # reference columns from the comparison table (note the leading
    # space in the column names as stored in the CSV)
    est_Int = self.GS_F.by_col(' est_Intercept')
    se_Int = self.GS_F.by_col(' se_Intercept')
    t_Int = self.GS_F.by_col(' t_Intercept')
    est_elev = self.GS_F.by_col(' est_Elev')
    se_elev = self.GS_F.by_col(' se_Elev')
    t_elev = self.GS_F.by_col(' t_Elev')
    est_slope = self.GS_F.by_col(' est_Slope')
    se_slope = self.GS_F.by_col(' se_Slope')
    t_slope = self.GS_F.by_col(' t_Slope')
    est_sin = self.GS_F.by_col(' est_SinAspct')
    se_sin = self.GS_F.by_col(' se_SinAspct')
    t_sin = self.GS_F.by_col(' t_SinAspct')
    est_cos = self.GS_F.by_col(' est_CosAspct')
    se_cos = self.GS_F.by_col(' se_CosAspct')
    t_cos = self.GS_F.by_col(' t_CosAspct')
    est_south = self.GS_F.by_col(' est_AbsSouth')
    se_south = self.GS_F.by_col(' se_AbsSouth')
    t_south = self.GS_F.by_col(' t_AbsSouth')
    est_strm = self.GS_F.by_col(' est_DistStrm')
    se_strm = self.GS_F.by_col(' se_DistStrm')
    t_strm = self.GS_F.by_col(' t_DistStrm')
    yhat = self.GS_F.by_col(' yhat')
    pdev = self.GS_F.by_col(' localpdev')

    model = GWR(self.coords, self.y, self.X, bw=8929.061, family=Binomial(),
                kernel='gaussian', fixed=True)
    rslt = model.fit()

    AICc = get_AICc(rslt)
    AIC = get_AIC(rslt)
    BIC = get_BIC(rslt)

    # FIX: assertAlmostEquals is a deprecated alias of assertAlmostEqual
    # and was removed in Python 3.12.
    self.assertAlmostEqual(np.floor(AICc), 276.0)
    self.assertAlmostEqual(np.floor(AIC), 272.0)
    self.assertAlmostEqual(np.floor(BIC), 341.0)
    # tolerances are deliberately loose; the reference implementation
    # differs from this one by rounding behaviour
    np.testing.assert_allclose(est_Int, rslt.params[:,0], rtol=1e-00)
    np.testing.assert_allclose(se_Int, rslt.bse[:,0], rtol=1e-00)
    np.testing.assert_allclose(t_Int, rslt.tvalues[:,0], rtol=1e-00)
    np.testing.assert_allclose(est_elev, rslt.params[:,1], rtol=1e-00)
    np.testing.assert_allclose(se_elev, rslt.bse[:,1], rtol=1e-00)
    np.testing.assert_allclose(t_elev, rslt.tvalues[:,1], rtol=1e-00)
    np.testing.assert_allclose(est_slope, rslt.params[:,2], rtol=1e-00)
    np.testing.assert_allclose(se_slope, rslt.bse[:,2], rtol=1e-00)
    np.testing.assert_allclose(t_slope, rslt.tvalues[:,2], rtol=1e-00)
    np.testing.assert_allclose(est_sin, rslt.params[:,3], rtol=1e01)
    np.testing.assert_allclose(se_sin, rslt.bse[:,3], rtol=1e01)
    np.testing.assert_allclose(t_sin, rslt.tvalues[:,3], rtol=1e01)
    np.testing.assert_allclose(est_cos, rslt.params[:,4], rtol=1e01)
    np.testing.assert_allclose(se_cos, rslt.bse[:,4], rtol=1e01)
    np.testing.assert_allclose(t_cos, rslt.tvalues[:,4], rtol=1e01)
    np.testing.assert_allclose(est_south, rslt.params[:,5], rtol=1e01)
    np.testing.assert_allclose(se_south, rslt.bse[:,5], rtol=1e01)
    np.testing.assert_allclose(t_south, rslt.tvalues[:,5], rtol=1e01)
    np.testing.assert_allclose(est_strm, rslt.params[:,6], rtol=1e02)
    np.testing.assert_allclose(se_strm, rslt.bse[:,6], rtol=1e01)
    np.testing.assert_allclose(t_strm, rslt.tvalues[:,6], rtol=1e02)
    np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-01)
    #This test fails - likely due to compound rounding errors
    #Has been tested using statsmodels.family calculations and
    #code from Jing's python version, which both yield the same
    #np.testing.assert_allclose(pdev, rslt.pDev, rtol=1e-05)
def test_GS_NN(self):
    """Golden-master test: binomial GWR with an adaptive gaussian kernel.

    Fits a GWR model (bw=64 nearest neighbours, family=Binomial,
    kernel='gaussian', adaptive bandwidth) and compares parameters,
    standard errors, t-values and fitted values against the stored
    GS_NN reference table.
    """
    # reference columns from the comparison table (note the leading
    # space in the column names as stored in the CSV)
    est_Int = self.GS_NN.by_col(' est_Intercept')
    se_Int = self.GS_NN.by_col(' se_Intercept')
    t_Int = self.GS_NN.by_col(' t_Intercept')
    est_elev = self.GS_NN.by_col(' est_Elev')
    se_elev = self.GS_NN.by_col(' se_Elev')
    t_elev = self.GS_NN.by_col(' t_Elev')
    est_slope = self.GS_NN.by_col(' est_Slope')
    se_slope = self.GS_NN.by_col(' se_Slope')
    t_slope = self.GS_NN.by_col(' t_Slope')
    est_sin = self.GS_NN.by_col(' est_SinAspct')
    se_sin = self.GS_NN.by_col(' se_SinAspct')
    t_sin = self.GS_NN.by_col(' t_SinAspct')
    est_cos = self.GS_NN.by_col(' est_CosAspct')
    se_cos = self.GS_NN.by_col(' se_CosAspct')
    t_cos = self.GS_NN.by_col(' t_CosAspct')
    est_south = self.GS_NN.by_col(' est_AbsSouth')
    se_south = self.GS_NN.by_col(' se_AbsSouth')
    t_south = self.GS_NN.by_col(' t_AbsSouth')
    est_strm = self.GS_NN.by_col(' est_DistStrm')
    se_strm = self.GS_NN.by_col(' se_DistStrm')
    t_strm = self.GS_NN.by_col(' t_DistStrm')
    yhat = self.GS_NN.by_col(' yhat')
    pdev = self.GS_NN.by_col(' localpdev')

    model = GWR(self.coords, self.y, self.X, bw=64, family=Binomial(),
                kernel='gaussian', fixed=False)
    rslt = model.fit()

    AICc = get_AICc(rslt)
    AIC = get_AIC(rslt)
    BIC = get_BIC(rslt)

    # FIX: assertAlmostEquals is a deprecated alias of assertAlmostEqual
    # and was removed in Python 3.12.
    self.assertAlmostEqual(np.floor(AICc), 276.0)
    self.assertAlmostEqual(np.floor(AIC), 273.0)
    self.assertAlmostEqual(np.floor(BIC), 331.0)
    # tolerances are deliberately loose; the reference implementation
    # differs from this one by rounding behaviour
    np.testing.assert_allclose(est_Int, rslt.params[:,0], rtol=1e-00)
    np.testing.assert_allclose(se_Int, rslt.bse[:,0], rtol=1e-00)
    np.testing.assert_allclose(t_Int, rslt.tvalues[:,0], rtol=1e-00)
    np.testing.assert_allclose(est_elev, rslt.params[:,1], rtol=1e-00)
    np.testing.assert_allclose(se_elev, rslt.bse[:,1], rtol=1e-00)
    np.testing.assert_allclose(t_elev, rslt.tvalues[:,1], rtol=1e-00)
    np.testing.assert_allclose(est_slope, rslt.params[:,2], rtol=1e-00)
    np.testing.assert_allclose(se_slope, rslt.bse[:,2], rtol=1e-00)
    np.testing.assert_allclose(t_slope, rslt.tvalues[:,2], rtol=1e-00)
    np.testing.assert_allclose(est_sin, rslt.params[:,3], rtol=1e01)
    np.testing.assert_allclose(se_sin, rslt.bse[:,3], rtol=1e01)
    np.testing.assert_allclose(t_sin, rslt.tvalues[:,3], rtol=1e01)
    np.testing.assert_allclose(est_cos, rslt.params[:,4], rtol=1e01)
    np.testing.assert_allclose(se_cos, rslt.bse[:,4], rtol=1e01)
    np.testing.assert_allclose(t_cos, rslt.tvalues[:,4], rtol=1e01)
    np.testing.assert_allclose(est_south, rslt.params[:,5], rtol=1e01)
    np.testing.assert_allclose(se_south, rslt.bse[:,5], rtol=1e01)
    np.testing.assert_allclose(t_south, rslt.tvalues[:,5], rtol=1e01)
    np.testing.assert_allclose(est_strm, rslt.params[:,6], rtol=1e02)
    np.testing.assert_allclose(se_strm, rslt.bse[:,6], rtol=1e01)
    np.testing.assert_allclose(t_strm, rslt.tvalues[:,6], rtol=1e02)
    np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-00)
    #This test fails - likely due to compound rounding errors
    #Has been tested using statsmodels.family calculations and
    #code from Jing's python version, which both yield the same
    #np.testing.assert_allclose(pdev, rslt.pDev, rtol=1e-05)
# Run the test suite when executed as a script.
# NOTE(review): relies on `unittest` being imported at module level —
# the import is not visible in this chunk; confirm it exists.
if __name__ == '__main__':
    unittest.main()
| |
#!/usr/bin/env python
"""
runtests.py [OPTIONS] [-- ARGS]
Run tests, building the project first.
Examples::
$ python runtests.py
$ python runtests.py -t {SAMPLE_TEST}
$ python runtests.py --ipython
$ python runtests.py --python somescript.py
Run a debugger:
$ gdb --args python runtests.py [...other args...]
Generate C code coverage listing under build/lcov/:
(requires http://ltp.sourceforge.net/coverage/lcov.php)
$ python runtests.py --gcov [...other args...]
$ python runtests.py --lcov-html
"""
#
# This is a generic test runner script for projects using Numpy's test
# framework. Change the following values to adapt to your project:
#
PROJECT_MODULE = "skmonaco"
PROJECT_ROOT_FILES = ['skmonaco', 'LICENSE.txt', 'setup.py']
SAMPLE_TEST = "skmonaco/tests/test_uniform.py:TestMCQuad.test_const_1d"
SAMPLE_SUBMODULE = ""

# Directories searched ahead of PATH so compiler caches (ccache/f90cache)
# are picked up when building extensions.
EXTRA_PATH = ['/usr/lib/ccache', '/usr/lib/f90cache',
              '/usr/local/lib/ccache', '/usr/local/lib/f90cache']

# ---------------------------------------------------------------------

# __doc__ is None under "python -OO" (docstrings are stripped); fall back
# to a short notice, otherwise substitute SAMPLE_TEST etc. into the usage
# text at the top of the file.
if __doc__ is None:
    __doc__ = "Run without -OO if you want usage info"
else:
    __doc__ = __doc__.format(**globals())

import sys
import os

# In case we are run from the source directory, we don't want to import the
# project from there:
sys.path.pop(0)

import shutil
import subprocess
import time
import imp
from argparse import ArgumentParser, REMAINDER

# Absolute path of the directory containing this script.
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
def main(argv):
    """Parse options, optionally build the project, and dispatch.

    Depending on the flags this either generates lcov HTML, starts an
    interactive Python/IPython/shell session with PYTHONPATH pointing at
    the freshly built package, or runs the (nose-based) test suite from
    inside ``build/test``. Exits the process with the test result status.
    """
    parser = ArgumentParser(usage=__doc__.lstrip())
    parser.add_argument("--verbose", "-v", action="count", default=1,
                        help="more verbosity")
    parser.add_argument("--no-build", "-n", action="store_true", default=False,
                        help="do not build the project (use system installed version)")
    parser.add_argument("--build-only", "-b", action="store_true", default=False,
                        help="just build, do not run any tests")
    parser.add_argument("--doctests", action="store_true", default=False,
                        help="Run doctests in module")
    parser.add_argument("--coverage", action="store_true", default=False,
                        help=("report coverage of project code. HTML output goes "
                              "under build/coverage"))
    parser.add_argument("--gcov", action="store_true", default=False,
                        help=("enable C code coverage via gcov (requires GCC). "
                              "gcov output goes to build/**/*.gc*"))
    parser.add_argument("--lcov-html", action="store_true", default=False,
                        help=("produce HTML for C code coverage information "
                              "from a previous run with --gcov. "
                              "HTML output goes to build/lcov/"))
    parser.add_argument("--mode", "-m", default="fast",
                        help="'fast', 'full', or something that could be "
                             "passed to nosetests -A [default: fast]")
    #parser.add_argument("--submodule", "-s", default=None,
    #help="Submodule whose tests to run (cluster, constants, ...)")
    parser.add_argument("--pythonpath", "-p", default=None,
                        help="Paths to prepend to PYTHONPATH")
    parser.add_argument("--tests", "-t", action='append',
                        help="Specify tests to run")
    parser.add_argument("--python", action="store_true",
                        help="Start a Python shell with PYTHONPATH set")
    parser.add_argument("--ipython", "-i", action="store_true",
                        help="Start IPython shell with PYTHONPATH set")
    parser.add_argument("--shell", action="store_true",
                        help="Start Unix shell with PYTHONPATH set")
    parser.add_argument("--debug", "-g", action="store_true",
                        help="Debug build")
    parser.add_argument("--show-build-log", action="store_true",
                        help="Show build output rather than using a log file")
    parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
                        help="Arguments to pass to Nose, Python or shell")
    args = parser.parse_args(argv)

    if args.lcov_html:
        # generate C code coverage output
        lcov_generate()
        sys.exit(0)

    if args.pythonpath:
        # prepend user-supplied paths, preserving their given order
        for p in reversed(args.pythonpath.split(os.pathsep)):
            sys.path.insert(0, p)

    if args.gcov:
        gcov_reset_counters()

    if not args.no_build:
        # build into build/testenv and make it importable for this process
        # and for any child processes (via PYTHONPATH)
        site_dir = build_project(args)
        sys.path.insert(0, site_dir)
        os.environ['PYTHONPATH'] = site_dir

    # everything after a literal "--" is forwarded verbatim
    extra_argv = args.args[:]
    if extra_argv and extra_argv[0] == '--':
        extra_argv = extra_argv[1:]

    if args.python:
        if extra_argv:
            # Don't use subprocess, since we don't want to include the
            # current path in PYTHONPATH.
            sys.argv = extra_argv
            with open(extra_argv[0], 'r') as f:
                script = f.read()
            # NOTE(review): `imp` is deprecated; `types.ModuleType` would be
            # the modern replacement — confirm minimum Python version first.
            sys.modules['__main__'] = imp.new_module('__main__')
            ns = dict(__name__='__main__',
                      __file__=extra_argv[0])
            exec_(script, ns)
            sys.exit(0)
        else:
            import code
            code.interact()
            sys.exit(0)

    if args.ipython:
        import IPython
        IPython.embed(user_ns={})
        sys.exit(0)

    if args.shell:
        shell = os.environ.get('SHELL', 'sh')
        print("Spawning a Unix shell...")
        # execv replaces this process; exit(1) is only reached on failure
        os.execv(shell, [shell] + extra_argv)
        sys.exit(1)

    if args.coverage:
        # remove a stale HTML coverage report before regenerating it
        dst_dir = os.path.join(ROOT_DIR, 'build', 'coverage')
        fn = os.path.join(dst_dir, 'coverage_html.js')
        if os.path.isdir(dst_dir) and os.path.isfile(fn):
            shutil.rmtree(dst_dir)
        extra_argv += ['--cover-html',
                       '--cover-html-dir='+dst_dir]

    test_dir = os.path.join(ROOT_DIR, 'build', 'test')

    if args.build_only:
        sys.exit(0)
    #elif args.submodule:
        #modname = PROJECT_MODULE + '.' + args.submodule
        #try:
            #__import__(modname)
            #test = sys.modules[modname].test
        #except (ImportError, KeyError, AttributeError):
            #print("Cannot run tests for %s" % modname)
            #sys.exit(2)
    elif args.tests:
        def fix_test_path(x):
            # fix up test path: make file parts relative to test_dir,
            # keeping any ":TestClass.test_method" suffix intact
            p = x.split(':')
            p[0] = os.path.relpath(os.path.abspath(p[0]),
                                   test_dir)
            return ':'.join(p)

        tests = [fix_test_path(x) for x in args.tests]

        def test(*a, **kw):
            # run the first test target via numpy's Tester, forwarding the
            # remaining targets through nose's extra arguments
            extra_argv = kw.pop('extra_argv', ())
            extra_argv = extra_argv + tests[1:]
            kw['extra_argv'] = extra_argv
            from numpy.testing import Tester
            return Tester(tests[0]).test(*a, **kw)
    else:
        __import__(PROJECT_MODULE)
        test = sys.modules[PROJECT_MODULE].test

    # Run the tests under build/test
    try:
        shutil.rmtree(test_dir)
    except OSError:
        pass
    try:
        os.makedirs(test_dir)
    except OSError:
        pass

    cwd = os.getcwd()
    try:
        os.chdir(test_dir)
        result = test(args.mode,
                      verbose=args.verbose,
                      extra_argv=extra_argv,
                      doctests=args.doctests,
                      coverage=args.coverage)
    finally:
        os.chdir(cwd)

    if result.wasSuccessful():
        sys.exit(0)
    else:
        sys.exit(1)
def build_project(args):
    """
    Build a dev version of the project.

    Runs ``setup.py build install --prefix=build/testenv`` in a child
    process, optionally with gcov/debug flags, streaming or logging the
    build output. Exits the process on failure.

    Returns
    -------
    site_dir
        site-packages directory where it was installed
    """
    # sanity-check we are running from a source checkout
    root_ok = [os.path.exists(os.path.join(ROOT_DIR, fn))
               for fn in PROJECT_ROOT_FILES]
    if not all(root_ok):
        print("To build the project, run runtests.py in "
              "git checkout or unpacked source")
        sys.exit(1)

    dst_dir = os.path.join(ROOT_DIR, 'build', 'testenv')

    env = dict(os.environ)
    cmd = [sys.executable, 'setup.py']

    # Always use ccache, if installed
    env['PATH'] = os.pathsep.join(EXTRA_PATH + env.get('PATH', '').split(os.pathsep))

    if args.debug or args.gcov:
        # assume everyone uses gcc/gfortran
        env['OPT'] = '-O0 -ggdb'
        env['FOPT'] = '-O0 -ggdb'
        if args.gcov:
            import distutils.sysconfig
            cvars = distutils.sysconfig.get_config_vars()
            env['OPT'] = '-O0 -ggdb'
            env['FOPT'] = '-O0 -ggdb'
            env['CC'] = cvars['CC'] + ' --coverage'
            env['CXX'] = cvars['CXX'] + ' --coverage'
            env['F77'] = 'gfortran --coverage '
            env['F90'] = 'gfortran --coverage '
            env['LDSHARED'] = cvars['LDSHARED'] + ' --coverage'
            env['LDFLAGS'] = " ".join(cvars['LDSHARED'].split()[1:]) + ' --coverage'

    cmd += ["build"]
    cmd += ['install', '--prefix=' + dst_dir]

    # FIX: log_filename was previously assigned twice (once here and once
    # again in the else branch below); keep a single assignment, since the
    # failure path at the bottom reads it in either case.
    log_filename = os.path.join(ROOT_DIR, 'build.log')

    if args.show_build_log:
        ret = subprocess.call(cmd, env=env, cwd=ROOT_DIR)
    else:
        print("Building, see build.log...")
        with open(log_filename, 'w') as log:
            p = subprocess.Popen(cmd, env=env, stdout=log, stderr=log,
                                 cwd=ROOT_DIR)

            # Wait for it to finish, and print something to indicate the
            # process is alive, but only if the log file has grown (to
            # allow continuous integration environments kill a hanging
            # process accurately if it produces no output)
            last_blip = time.time()
            last_log_size = os.stat(log_filename).st_size
            while p.poll() is None:
                time.sleep(0.5)
                if time.time() - last_blip > 60:
                    log_size = os.stat(log_filename).st_size
                    if log_size > last_log_size:
                        print(" ... build in progress")
                        last_blip = time.time()
                        last_log_size = log_size

            ret = p.wait()

    if ret == 0:
        print("Build OK")
    else:
        if not args.show_build_log:
            # surface the captured log before bailing out
            with open(log_filename, 'r') as f:
                print(f.read())
            print("Build failed!")
        sys.exit(1)

    from distutils.sysconfig import get_python_lib
    site_dir = get_python_lib(prefix=dst_dir, plat_specific=True)
    return site_dir
#
# GCOV support
#
def gcov_reset_counters():
    """Delete stale gcov data files under build/ so coverage starts fresh."""
    print("Removing previous GCOV .gcda files...")
    build_dir = os.path.join(ROOT_DIR, 'build')
    for dirpath, _subdirs, names in os.walk(build_dir):
        for name in names:
            # gcov writes .gcda (and, historically, .da) counter files
            if name.endswith(('.gcda', '.da')):
                os.unlink(os.path.join(dirpath, name))
#
# LCOV support
#
# Destination of lcov's captured coverage data and of the generated
# HTML report, respectively.
LCOV_OUTPUT_FILE = os.path.join(ROOT_DIR, 'build', 'lcov.out')
LCOV_HTML_DIR = os.path.join(ROOT_DIR, 'build', 'lcov')
def lcov_generate():
    """Capture gcov data with lcov and render an HTML report in build/lcov."""
    # start from a clean slate; missing previous outputs are fine
    try:
        os.unlink(LCOV_OUTPUT_FILE)
    except OSError:
        pass
    try:
        shutil.rmtree(LCOV_HTML_DIR)
    except OSError:
        pass

    print("Capturing lcov info...")
    capture_cmd = ['lcov', '-q', '-c',
                   '-d', os.path.join(ROOT_DIR, 'build'),
                   '-b', ROOT_DIR,
                   '--output-file', LCOV_OUTPUT_FILE]
    subprocess.call(capture_cmd)

    print("Generating lcov HTML output...")
    genhtml_cmd = ['genhtml', '-q', LCOV_OUTPUT_FILE,
                   '--output-directory', LCOV_HTML_DIR,
                   '--legend', '--highlight']
    if subprocess.call(genhtml_cmd) != 0:
        print("genhtml failed!")
    else:
        print("HTML output generated under build/lcov/")
#
# Python 3 support
#
# exec_(code, globs, locs): version-independent wrapper around exec.
# On Python 3 it is simply the builtin; on Python 2 a shim is defined
# (the inner exec is hidden in a string so this file still parses on 3).
if sys.version_info[0] >= 3:
    import builtins
    exec_ = getattr(builtins, "exec")
else:
    def exec_(code, globs=None, locs=None):
        """Execute code in a namespace."""
        if globs is None:
            # default to the caller's globals/locals
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            # break the reference cycle frame -> locals -> frame
            del frame
        elif locs is None:
            locs = globs
        exec("""exec code in globs, locs""")
if __name__ == "__main__":
main(argv=sys.argv[1:])
| |
import os
import math
from PySide6 import QtGui, QtCore, QtWidgets
from Settings import ZOOMANCHOR, SCALEINC, MINZOOM, MAXZOOM, \
MARKERSIZE, RUBBERBANDSIZE, VIEWSTYLE
# put constraints on rubberband zoom (relative rectangle width):
# clamp the configured value into the interval [0.05, 1.0]
RUBBERBANDSIZE = max(0.05, min(RUBBERBANDSIZE, 1.0))
class GraphicsView(QtWidgets.QGraphicsView):
    """The graphics view is the canvas where airfoils are drawn upon.

    Its coordinates are in pixels or "physical" coordinates.

    Attributes:
        origin (QPoint): stores location of mouse press
        parent (QMainWindow): mainwindow instance
        rubberband (QRubberBand): an instance of the custom rubberband class
            used for zooming and selecting
        sceneview (QRectF): stores current view in scene coordinates
    """

    def __init__(self, parent=None, scene=None):
        """Default settings for graphicsview instance.

        Args:
            parent (QMainWindow, optional): mainwindow instance
            scene (QGraphicsScene, optional): scene displayed by this view
        """
        super().__init__(scene)
        self.parent = parent
        self._leftMousePressed = False

        # allow drops from drag and drop
        self.setAcceptDrops(True)

        # use custom rubberband
        self.rubberband = RubberBand(QtWidgets.QRubberBand.Rectangle, self)

        # needed for correct mouse wheel zoom
        # otherwise mouse anchor is wrong; it would use (0, 0)
        self.setInteractive(True)

        # set QGraphicsView attributes
        self.setRenderHints(QtGui.QPainter.Antialiasing |
                            QtGui.QPainter.TextAntialiasing)
        self.setViewportUpdateMode(QtWidgets.QGraphicsView.FullViewportUpdate)
        self.setResizeAnchor(QtWidgets.QGraphicsView.AnchorViewCenter)

        # view behaviour when zooming
        if ZOOMANCHOR == 'mouse':
            # point under mouse pointer stays fixed during zoom
            self.setTransformationAnchor(
                QtWidgets.QGraphicsView.AnchorUnderMouse)
        else:
            # view center stays fixed during zoom
            self.setTransformationAnchor(
                QtWidgets.QGraphicsView.AnchorViewCenter)

        self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)

        # normally (0, 0) is upperleft corner of view
        # swap y-axis in order to make (0, 0) lower left
        # and y-axis pointing upwards
        self.scale(1, -1)

        # cache view to be able to keep it during resize
        self.getSceneFromView()

        # set background style and color for view
        self.setBackground(VIEWSTYLE)

    def setBackground(self, styletype):
        """Switches between gradient and simple background using style sheets.

        border-color (in HTML) works only if border-style is set.
        """
        if styletype == 'gradient':
            style = """
            border-style:solid; border-color: lightgrey;
            border-width: 1px; background-color: QLinearGradient(x1: 0.0, y1: 0.0,
            x2: 0.0, y2: 1.0, stop: 0.3 white, stop: 1.0 #263a5a);
            """
            # if more stops are needed
            # stop: 0.3 white, stop: 0.6 #4b73b4, stop: 1.0 #263a5a); } """)
        else:
            style = ("""
            border-style:solid; border-color: lightgrey; \
            border-width: 1px; background-color: white;""")
        self.setStyleSheet(style)

    def resizeEvent(self, event):
        """Re-implement QGraphicsView's resizeEvent handler."""
        # call corresponding base class method
        super().resizeEvent(event)
        # scrollbars need to be switched off when calling fitinview from
        # within resize event otherwise strange recursion can occur
        # FIX: the keyword was misspelled "aspectRadioMode"; the Qt
        # parameter is "aspectRatioMode" — pass it positionally
        self.fitInView(self.sceneview, QtCore.Qt.KeepAspectRatio)

    def mousePressEvent(self, event):
        """Re-implement QGraphicsView's mousePressEvent handler."""
        # status of CTRL key
        ctrl = event.modifiers() == QtCore.Qt.ControlModifier

        # if a mouse event happens in the graphics view
        # put the keyboard focus to the view as well
        self.setFocus()

        self.origin = event.pos()

        # do rubberband zoom only with left mouse button
        if event.button() == QtCore.Qt.LeftButton:
            self._leftMousePressed = True
            self._dragPos = event.pos()
            if ctrl:
                self.setCursor(QtCore.Qt.ClosedHandCursor)
            else:
                # initiate rubberband origin and size (zero at first)
                self.rubberband.setGeometry(QtCore.QRect(self.origin,
                                                         QtCore.QSize()))
                # show, even at zero size
                # allows to check later using isVisible()
                self.rubberband.show()

        # call corresponding base class method
        super().mousePressEvent(event)

    def mouseMoveEvent(self, event):
        """Re-implement QGraphicsView's mouseMoveEvent handler."""
        # if a mouse event happens in the graphics view
        # put the keyboard focus to the view as well
        self.setFocus()

        # status of CTRL key
        ctrl = event.modifiers() == QtCore.Qt.ControlModifier

        # pan the view with the left mouse button and CTRL down
        if self._leftMousePressed and ctrl:
            self.setCursor(QtCore.Qt.ClosedHandCursor)
            newPos = event.pos()
            diff = newPos - self._dragPos
            self._dragPos = newPos
            # this actually does the pan
            # no matter if scroll bars are displayed or not
            self.horizontalScrollBar().setValue(
                self.horizontalScrollBar().value() - diff.x())
            self.verticalScrollBar().setValue(
                self.verticalScrollBar().value() - diff.y())

        if self.rubberband.isVisible() and not ctrl:
            self.setInteractive(False)
            self.rubberband.setGeometry(
                QtCore.QRect(self.origin, event.pos()).normalized())

        # call corresponding base class method
        super().mouseMoveEvent(event)

    def mouseReleaseEvent(self, event):
        """Re-implement QGraphicsView's mouseReleaseEvent handler."""
        self._leftMousePressed = False
        self.setCursor(QtCore.Qt.ArrowCursor)

        # do zoom wrt to rect of rubberband
        if self.rubberband.isVisible():
            self.rubberband.hide()
            rect = self.rubberband.geometry()
            rectf = self.mapToScene(rect).boundingRect()
            # zoom the selected rectangle (works on scene coordinates)
            # zoom rect must be at least 5% of view width to allow zoom
            if self.rubberband.allow_zoom:
                # FIX: the keyword was misspelled "aspectRadioMode"; the Qt
                # parameter is "aspectRatioMode" — pass it positionally
                self.fitInView(rectf, QtCore.Qt.KeepAspectRatio)
                # rescale markers during zoom
                # i.e. keep them constant size
                self.adjustMarkerSize()
            # reset to True, so that mouse wheel zoom anchor works
            self.setInteractive(True)

        # reset ScrollHandDrag if it was active
        if self.dragMode() == QtWidgets.QGraphicsView.ScrollHandDrag:
            self.setDragMode(QtWidgets.QGraphicsView.NoDrag)

        # call corresponding base class method
        super().mouseReleaseEvent(event)

    def wheelEvent(self, event):
        """Re-implement QGraphicsView's wheelEvent handler."""
        f = SCALEINC
        # wheelevent.angleDelta() returns a QPoint instance
        # the angle increment of the wheel is stored on the .y() attribute
        angledelta = event.angleDelta().y()
        if math.copysign(1, angledelta) > 0:
            f = 1.0 / SCALEINC
        self.scaleView(f)

        # DO NOT CONTINUE HANDLING EVENTS HERE!!!
        # this would destroy the mouse anchor
        # call corresponding base class method
        # super().wheelEvent(event)

    def keyPressEvent(self, event):
        """Re-implement QGraphicsView's keyPressEvent handler."""
        key = event.key()

        if key == QtCore.Qt.Key_Plus or key == QtCore.Qt.Key_PageDown:
            f = SCALEINC
            # if scaling with the keys, do not use mouse as zoom anchor
            anchor = self.transformationAnchor()
            self.setTransformationAnchor(
                QtWidgets.QGraphicsView.AnchorViewCenter)
            self.scaleView(f)
            self.setTransformationAnchor(anchor)
            if key == QtCore.Qt.Key_PageDown:
                # return here so that later base class is NOT called
                # because QAbstractScrollArea would otherwise handle
                # the event and do something we do not want
                return
        elif key == QtCore.Qt.Key_Minus or key == QtCore.Qt.Key_PageUp:
            f = 1.0 / SCALEINC
            # if scaling with the keys, do not use mouse as zoom anchor
            anchor = self.transformationAnchor()
            self.setTransformationAnchor(
                QtWidgets.QGraphicsView.AnchorViewCenter)
            self.scaleView(f)
            self.setTransformationAnchor(anchor)
            if key == QtCore.Qt.Key_PageUp:
                # return here so that later base class is NOT called
                # because QAbstractScrollArea would otherwise handle
                # the event and do something we do not want
                return
        elif key == QtCore.Qt.Key_Home:
            self.parent.slots.onViewAll()
        elif key == QtCore.Qt.Key_Delete:
            # removes all selected airfoils
            self.parent.slots.removeAirfoil()

        # call corresponding base class method
        super().keyPressEvent(event)

    def keyReleaseEvent(self, event):
        """Re-implement QGraphicsView's keyReleaseEvent handler."""
        # call corresponding base class method
        super().keyReleaseEvent(event)

    def dragEnterEvent(self, event):
        """Accept drag events that carry URLs (e.g. files from a browser)."""
        if event.mimeData().hasUrls():
            event.accept()
        else:
            event.ignore()

    def dragLeaveEvent(self, event):
        """Nothing to clean up when a drag leaves the view."""
        pass

    def dragMoveEvent(self, event):
        """Keep accepting the drag while it moves over the view."""
        if event.mimeData().hasUrls():
            if event.mimeData().hasText():
                event.setDropAction(QtCore.Qt.CopyAction)
                event.accept()
        else:
            event.ignore()

    def dropEvent(self, event):
        """Load every dropped local file as an airfoil."""
        for url in event.mimeData().urls():
            path = url.toLocalFile()
            if os.path.isfile(path):
                self.parent.slots.loadAirfoil(path, comment='#')

    def scaleView(self, factor):
        """Scale the view by *factor*, respecting MINZOOM/MAXZOOM limits."""
        # check if zoom limits are exceeded
        # m11 = x-scaling
        sx = self.transform().m11()
        too_big = sx > MAXZOOM and factor > 1.0
        too_small = sx < MINZOOM and factor < 1.0

        if too_big or too_small:
            return

        # do the actual zooming
        self.scale(factor, factor)

        # rescale markers during zoom, i.e. keep them constant size
        self.adjustMarkerSize()

        # cache view to be able to keep it during resize
        self.getSceneFromView()

    def adjustMarkerSize(self):
        """Adjust marker size during zoom. Marker items are circles
        which are otherwise affected by zoom. Using MARKERSIZE from
        Settings a fixed markersize (e.g. 3 pixels) can be kept.
        This method imitates the behaviour of pen.setCosmetic()
        """
        if not self.parent.airfoil:
            return

        # markers are drawn in GraphicsItem using scene coordinates
        # in order to keep them constant size, also when zooming
        # a fixed pixel size (MARKERSIZE from settings) is mapped to
        # scene coordinates
        # depending on the zoom, this leads to always different
        # scene coordinates
        # map a square with side length of MARKERSIZE to the scene coords
        mappedMarker = self.mapToScene(
            QtCore.QRect(0, 0, MARKERSIZE, MARKERSIZE))
        mappedMarkerWidth = mappedMarker.boundingRect().width()

        if self.parent.airfoil.contourPolygon:
            markers = self.parent.airfoil.polygonMarkers
            x, y = self.parent.airfoil.raw_coordinates
            for i, marker in enumerate(markers):
                # in case of circle, args is a QRectF
                marker.args = [QtCore.QRectF(x[i] - mappedMarkerWidth,
                                             y[i] - mappedMarkerWidth,
                                             2. * mappedMarkerWidth,
                                             2. * mappedMarkerWidth)]

        # if self.parent.airfoil.contourSpline:
        if hasattr(self.parent.airfoil, 'contourSpline'):
            markers = self.parent.airfoil.splineMarkers
            x, y = self.parent.airfoil.spline_data[0]
            for i, marker in enumerate(markers):
                # in case of circle, args is a QRectF
                marker.args = [QtCore.QRectF(x[i] - mappedMarkerWidth,
                                             y[i] - mappedMarkerWidth,
                                             2. * mappedMarkerWidth,
                                             2. * mappedMarkerWidth)]

    def getSceneFromView(self):
        """Cache view to be able to keep it during resize."""
        # map view rectangle to scene coordinates
        polygon = self.mapToScene(self.rect())

        # sceneview describes the rectangle which is currently
        # being viewed in scene coordinates
        # this is needed during resizing to be able to keep the view
        self.sceneview = QtCore.QRectF(polygon[0], polygon[2])

    def contextMenuEvent(self, event):
        """Creates popup menu for the graphicsview."""
        menu = QtWidgets.QMenu(self)

        fitairfoil = menu.addAction('Fit airfoil in view')
        fitairfoil.setShortcut('CTRL+f')
        fitall = menu.addAction('Fit all items in view')
        fitall.setShortcut('HOME, CTRL+SHIFT+f')
        menu.addSeparator()
        delitems = menu.addAction('Delete airfoil')
        delitems.setShortcut('Del')
        menu.addSeparator()
        togglebg = menu.addAction('Toggle background')
        togglebg.setShortcut('CTRL+b')

        # exec_() is a deprecated alias in Qt for Python 6; use exec()
        action = menu.exec(self.mapToGlobal(event.pos()))

        if action == togglebg:
            self.parent.slots.onBackground()
        elif action == fitairfoil:
            self.parent.slots.fitAirfoilInView()
        elif action == fitall:
            self.parent.slots.onViewAll()
        # remove all selected items from the scene
        elif action == delitems:
            self.parent.slots.removeAirfoil()

        # call corresponding base class method
        super().contextMenuEvent(event)
class RubberBand(QtWidgets.QRubberBand):
    """Rubberband with custom painting and a zoom-permission flag.

    Based on: http://stackoverflow.com/questions/25642618

    Attributes:
        view: the graphics view this rubberband belongs to
        allow_zoom (bool): True once the dragged rectangle is large
            enough (relative to the view) to trigger a zoom
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # second positional argument is the owning graphics view
        self.view = args[1]

        # pen (outline) and brush (filling) used by paintEvent
        self.pen = QtGui.QPen()
        self.pen.setStyle(QtCore.Qt.DotLine)
        self.pen.setColor(QtGui.QColor(80, 80, 100))
        self.brush = QtGui.QBrush()
        self.brush.setColor(QtGui.QColor(20, 20, 80, 30))
        # self.brush.setStyle(QtCore.Qt.NoBrush)
        self.brush.setStyle(QtCore.Qt.SolidPattern)

        # set style selectively for the rubberband like that
        # see: http://stackoverflow.com/questions/25642618
        # required as opacity might not work
        # NOTE: opacity removed here
        self.setStyle(QtWidgets.QStyleFactory.create('windowsvista'))

        # set boolean for allowing zoom
        self.allow_zoom = False

    def paintEvent(self, event):
        painter = QtGui.QPainter(self)
        self.pen.setColor(QtGui.QColor(80, 80, 100))
        self.pen.setWidthF(1.5)
        self.pen.setStyle(QtCore.Qt.DotLine)

        rect = event.rect()
        # zoom rect must cover at least RUBBERBANDSIZE of the view in
        # both directions before a zoom is allowed
        big_enough = (rect.width() >= RUBBERBANDSIZE * self.view.width() and
                      rect.height() >= RUBBERBANDSIZE * self.view.height())

        if big_enough:
            # indicate a zoomable selection via the fill color
            self.brush.setColor(QtGui.QColor(10, 30, 140, 45))
            self.brush.setStyle(QtCore.Qt.SolidPattern)
        else:
            self.brush.setStyle(QtCore.Qt.NoBrush)

        self.allow_zoom = big_enough

        painter.setBrush(self.brush)
        painter.setPen(self.pen)
        painter.drawRect(rect)
| |
#!/usr/bin/env python
#@author: Prahlad Yeri
#@description: Small daemon to create a wifi hotspot on linux
#@license: MIT
import sys
import os
import argparse
import cli
import json
import socket
import platform
import time
class Proto:
    """Bare attribute container, used as an anonymous struct/namespace."""
    pass

# struct to hold startup parameters
const = Proto()
#global const = Proto() #struct to hold startup parameters
#const.debug = False
#const.verbose = False
#const.command = 'start'
#const.argv = None

stores = Proto() #struct to dump misc variables
stores.running = False  # True while the hotspot is up
def validate_ip(addr):
    """Return True if *addr* is an IPv4 address accepted by inet_aton."""
    try:
        socket.inet_aton(addr)
    except socket.error:
        return False  # Not legal
    return True  # legal
def configure():
    """Interactively gather hotspot settings and write config files.

    Detects the wifi interface (via iwconfig) and the internet-facing
    interface (via ifconfig), prompts for IP, SSID and password, renders
    run.dat into run.conf for hostapd and dumps all choices to
    hotspotd.json. Sets the module globals wlan, ppp, IP and Netmask.
    """
    global wlan, ppp, IP, Netmask
    #CHECK WHETHER WIFI IS SUPPORTED OR NOT
    print 'Verifying connections'
    wlan=''
    ppp=''
    s=cli.execute_shell('iwconfig')
    if s!=None:
        lines = s.splitlines()
        #print 'and it is:' + s
        for line in lines:
            # interface lines start in column 0; skip indented detail
            # lines and hostapd's monitor interface ("mon.<iface>")
            if not line.startswith(' ') and not line.startswith('mon.') and 'IEEE 802.11' in line:
                wlan=line.split(' ')[0]
                print 'Wifi interface found: ' + wlan
    if wlan=='':
        print 'Wireless interface could not be found on your device.'
        return
    #print 'Verifying Internet connections'
    s=cli.execute_shell('ifconfig')
    lines = s.splitlines()
    iface=[]
    for line in lines:
        # collect all non-wifi, non-loopback interfaces as candidates
        if not line.startswith(' ') and not line.startswith(wlan) and not line.startswith('lo') and not line.startswith('mon.') and len(line)>0:
            iface.append(line.split(' ')[0])
            #print 'f::' + line
    if len(iface)==0:
        print 'No network nic could be found on your deivce to interface with the LAN'
    elif len(iface)==1:
        ppp=iface[0]
        print 'Network interface found: ' + ppp
    else:
        # several candidates: let the user pick one by number
        rniface=range(len(iface))
        s=''
        while True:
            for i in rniface:
                print i, iface[i]
            try: s = int(input("Enter number for internet supplying NIC :"))
            except: continue
            if s not in rniface:
                continue
            ppp=iface[s]
            break
    # prompt until a syntactically valid IP is given (empty -> default)
    while True:
        IP= raw_input('Enter an IP address for your ap [192.168.45.1] :')
        #except: continue
        #print type(IP)
        #sys.exit(0)
        if IP==None or IP=='':
            IP='192.168.45.1'
        if not validate_ip(IP): continue
        break
    Netmask='255.255.255.0'
    #CONFIGURE SSID, PASSWORD, ETC.
    SSID=raw_input('Enter SSID [joe_ssid] :')
    if SSID=='': SSID='joe_ssid'
    password=raw_input('Enter 10 digit password [1234567890] :')
    if password=='': password='1234567890'
    # render the hostapd template (run.dat) into run.conf
    f = open('run.dat','r')
    lout=[]
    for line in f.readlines():
        lout.append(line.replace('<SSID>',SSID).replace('<PASS>',password))
    f.close()
    f = open('run.conf','w')
    f.writelines(lout)
    f.close()
    print 'created hostapd configuration: run.conf'
    # persist all settings for later runs
    dc = {'wlan': wlan, 'inet':ppp, 'ip':IP, 'netmask':Netmask, 'SSID':SSID, 'password':password}
    json.dump(dc, open('hotspotd.json','wb'))
    print dc
    print 'Configuration saved'
#CHECK WIFI DRIVERS AND ISSUE WARNINGS
def check_dependencies():
    """Verify that the hostapd and dnsmasq executables are installed.

    Returns:
        bool: True if both are present on the system, False otherwise
        (with a message printed for the missing one).
    """
    #CHECK FOR DEPENDENCIES
    if len(cli.check_sysfile('hostapd'))==0:
        print 'hostapd executable not found. Make sure you have installed hostapd.'
        return False
    elif len(cli.check_sysfile('dnsmasq'))==0:
        print 'dnsmasq executable not found. Make sure you have installed dnsmasq.'
        return False
    else:
        return True
def check_interfaces():
    """Check that both configured interfaces show up in ifconfig output.

    Reads the module globals ``wlan`` (wifi) and ``ppp`` (internet NIC).

    Returns:
        bool: True if both interfaces are present, False otherwise
        (with a message printed for the missing one).
    """
    global wlan, ppp
    print 'Verifying interfaces'
    s=cli.execute_shell('ifconfig')
    lines = s.splitlines()
    bwlan = False
    bppp = False
    for line in lines:
        # interface names appear at column 0 of ifconfig output
        if not line.startswith(' ') and len(line)>0:
            text=line.split(' ')[0]
            if text.startswith(wlan):
                bwlan = True
            elif text.startswith(ppp):
                bppp = True
    if not bwlan:
        print wlan + ' interface was not found. Make sure your wifi is on.'
        return False
    elif not bppp:
        print ppp + ' interface was not found. Make sure you are connected to the internet.'
        return False
    else:
        print 'done.'
        return True
def pre_start():
try:
# oper = platform.linux_distribution()
# if oper[0].lower()=='ubuntu' and oper[2].lower()=='trusty':
# trusty patch
# print 'applying hostapd workaround for ubuntu trusty.'
#29-12-2014: Rather than patching individual distros, lets make it a default.
result = cli.execute_shell('nmcli radio wifi off')
if "error" in result.lower():
cli.execute_shell('nmcli nm wifi off')
cli.execute_shell('rfkill unblock wlan')
cli.execute_shell('sleep 1')
print 'done.'
except:
pass
def start_router():
    # Start the access point: assign the static IP to the wifi interface,
    # NAT it to the internet-facing NIC via iptables, then launch dnsmasq
    # (DHCP) and hostapd (the AP itself).
    if not check_dependencies():
        return
    elif not check_interfaces():
        return
    pre_start()
    s = 'ifconfig ' + wlan + ' up ' + IP + ' netmask ' + Netmask
    print 'created interface: mon.' + wlan + ' on IP: ' + IP
    r = cli.execute_shell(s)
    cli.writelog(r)
    #cli.writelog('sleeping for 2 seconds.')
    print 'wait..'
    cli.execute_shell('sleep 2')
    # First three octets of the AP IP, used to build the DHCP range below.
    i = IP.rindex('.')
    ipparts = IP[0:i]
    #stop dnsmasq if already running.
    if cli.is_process_running('dnsmasq') > 0:
        print 'stopping dnsmasq'
        cli.execute_shell('killall dnsmasq')
    #stop hostapd if already running.
    if cli.is_process_running('hostapd') > 0:
        print 'stopping hostapd'
        cli.execute_shell('killall hostapd')
    #enable forwarding in sysctl.
    print 'enabling forward in sysctl.'
    r = cli.set_sysctl('net.ipv4.ip_forward', '1')
    print r.strip()
    #enable forwarding in iptables.
    print 'creating NAT using iptables: ' + wlan + '<->' + ppp
    cli.execute_shell('iptables -P FORWARD ACCEPT')
    #add iptables rules to create the NAT.
    # Flush any previous NAT chains first so repeated starts do not stack rules.
    cli.execute_shell('iptables --table nat --delete-chain')
    cli.execute_shell('iptables --table nat -F')
    r = cli.execute_shell('iptables --table nat -X')
    if len(r.strip()) > 0: print r.strip()
    cli.execute_shell('iptables -t nat -A POSTROUTING -o ' + ppp + ' -j MASQUERADE')
    cli.execute_shell('iptables -A FORWARD -i ' + ppp + ' -o ' + wlan + ' -j ACCEPT -m state --state RELATED,ESTABLISHED')
    cli.execute_shell('iptables -A FORWARD -i ' + wlan + ' -o ' + ppp + ' -j ACCEPT')
    #allow traffic to/from wlan
    cli.execute_shell('iptables -A OUTPUT --out-interface ' + wlan + ' -j ACCEPT')
    cli.execute_shell('iptables -A INPUT --in-interface ' + wlan + ' -j ACCEPT')
    #start dnsmasq
    # DHCP range .20-.100 on the AP subnet with 4 hour leases.
    s = 'dnsmasq --dhcp-authoritative --interface=' + wlan + ' --dhcp-range=' + ipparts + '.20,' + ipparts + '.100,' + Netmask + ',4h'
    print 'running dnsmasq'
    r = cli.execute_shell(s)
    cli.writelog(r)
    #~ f = open(os.getcwd() + '/hostapd.tem','r')
    #~ lout=[]
    #~ for line in f.readlines():
    #~ lout.append(line.replace('<SSID>',SSID).replace('<PASS>',password))
    #~
    #~ f.close()
    #~ f = open(os.getcwd() + '/hostapd.conf','w')
    #~ f.writelines(lout)
    #~ f.close()
    #writelog('created: ' + os.getcwd() + '/hostapd.conf')
    #start hostapd
    #s = 'hostapd -B ' + os.path.abspath('run.conf')
    # run.conf was generated from run.dat by configure() with SSID/password.
    s = 'hostapd -B ' + os.getcwd() + '/run.conf'
    cli.writelog('running hostapd')
    #cli.writelog('sleeping for 2 seconds.')
    cli.writelog('wait..')
    cli.execute_shell('sleep 2')
    r = cli.execute_shell(s)
    cli.writelog(r)
    print 'hotspot is running.'
    return
def stop_router():
    # Tear down the access point and undo the networking changes made by
    # start_router(): interface, dnsmasq, iptables rules and ip_forward.
    #bring down the interface
    cli.execute_shell('ifconfig mon.' + wlan + ' down')
    #TODO: Find some workaround. killing hostapd brings down the wlan0 interface in ifconfig.
    #~ #stop hostapd
    #~ if cli.is_process_running('hostapd')>0:
    #~ cli.writelog('stopping hostapd')
    #~ cli.execute_shell('pkill hostapd')
    #stop dnsmasq
    if cli.is_process_running('dnsmasq') > 0:
        cli.writelog('stopping dnsmasq')
        cli.execute_shell('killall dnsmasq')
    #disable forwarding in iptables.
    cli.writelog('disabling forward rules in iptables.')
    cli.execute_shell('iptables -P FORWARD DROP')
    #delete iptables rules that were added for wlan traffic.
    if wlan != None:
        cli.execute_shell('iptables -D OUTPUT --out-interface ' + wlan + ' -j ACCEPT')
        cli.execute_shell('iptables -D INPUT --in-interface ' + wlan + ' -j ACCEPT')
    # Flush the NAT rules created by start_router().
    cli.execute_shell('iptables --table nat --delete-chain')
    cli.execute_shell('iptables --table nat -F')
    cli.execute_shell('iptables --table nat -X')
    #disable forwarding in sysctl.
    cli.writelog('disabling forward in sysctl.')
    r = cli.set_sysctl('net.ipv4.ip_forward', '0')
    print r.strip()
    #cli.execute_shell('ifconfig ' + wlan + ' down' + IP + ' netmask ' + Netmask)
    #cli.execute_shell('ip addr flush ' + wlan)
    print 'hotspot has stopped.'
    return
def main(args):
    # Entry point: load (or interactively create) the saved configuration,
    # then dispatch on args.command (configure / start / stop).
    global wlan, ppp, IP, Netmask
    scpath = os.path.realpath(__file__)
    realdir = os.path.dirname(scpath)
    # Work from the script's own directory so run.dat / run.conf /
    # hotspotd.json resolve regardless of the caller's cwd.
    os.chdir(realdir)
    #print 'changed directory to ' + os.path.dirname(scpath)
    #if an instance is already running, then quit
    #const.verbose = args.verbose
    #const.command = args.command
    #const.blocking = args.blocking
    #const.argv = [os.getcwd() + '/server.py'] + sys.argv[1:]
    cli.arguments = args #initialize
    newconfig = False
    # First run: no saved configuration yet, ask the user interactively.
    if not os.path.exists('hotspotd.json'):
        configure()
        newconfig = True
    if len(cli.check_sysfile('hostapd')) == 0:
        print "hostapd is not installed on your system.This package will not work without it.To install it, try 'sudo apt-get install hostapd' or http://wireless.kernel.org/en/users/Documentation/hostapd after this installation gets over."
        time.sleep(2)
    # Load the persisted configuration into the module-level globals used
    # by start_router()/stop_router().
    dc = json.load(open('hotspotd.json'))
    wlan = dc['wlan']
    ppp = dc['inet']
    IP = dc['ip']
    Netmask = dc['netmask']
    SSID = dc['SSID']
    password = dc['password']
    if args.command == 'configure':
        # Skip re-configuring when the config was just created above.
        if not newconfig: configure()
    elif args.command == 'stop':
        stop_router()
    elif args.command == 'start':
        # Both daemons already up means the hotspot is running.
        if (cli.is_process_running('hostapd') != 0 and cli.is_process_running('dnsmasq') != 0):
            print 'hotspot is already running.'
        else:
            start_router()
| |
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import copy
import inspect
from itertools import product
import imp
import logging
import os
from pprint import pformat
import sys
import time
import traceback
import unittest
import json
# project
from checks import AgentCheck
from config import get_checksd_path, get_sdk_integrations_path
from utils.debug import get_check # noqa - FIXME 5.5.0 AgentCheck tests should not use this
from utils.hostname import get_hostname
from utils.platform import get_os
log = logging.getLogger('tests')
def _is_sdk():
return "SDK_TESTING" in os.environ
def _load_sdk_module(name):
    """Import the `check` module of an SDK integration, caching the result.

    The module is registered in sys.modules under "_<name>", so repeated
    loads of the same integration return the cached module object.
    """
    sdk_path = get_sdk_integrations_path(get_os())
    module_path = os.path.join(sdk_path, name)
    sdk_module_name = "_{}".format(name)
    # Already imported once: reuse the cached module.
    if sdk_module_name in sys.modules:
        return sys.modules[sdk_module_name]
    # Make both the integrations root and the integration's own directory
    # importable before resolving the module.
    if sdk_path not in sys.path:
        sys.path.append(sdk_path)
    if module_path not in sys.path:
        sys.path.append(module_path)
    fd, filename, desc = imp.find_module('check', [module_path])
    module = imp.load_module("_{}".format(name), fd, filename, desc)
    # imp.find_module opened the file; close it once loading is done.
    if fd:
        fd.close()
    # module = __import__(module_name, fromlist=['check'])
    return module
def get_check_class(name):
    """Import the check module `name` and return its AgentCheck subclass.

    The scan keeps the last *direct* AgentCheck subclass seen but stops
    immediately at the first deeper descendant (a class that inherits
    AgentCheck only indirectly). Returns None when the module defines no
    AgentCheck subclass.
    """
    if not _is_sdk():
        checksd_path = get_checksd_path(get_os())
        # Make the checks.d directory importable before __import__.
        if checksd_path not in sys.path:
            sys.path.append(checksd_path)
        check_module = __import__(name)
    else:
        check_module = _load_sdk_module(name)
    check_class = None
    classes = inspect.getmembers(check_module, inspect.isclass)
    for _, clsmember in classes:
        # Skip the AgentCheck base itself, re-exported by the module's imports.
        if clsmember == AgentCheck:
            continue
        if issubclass(clsmember, AgentCheck):
            check_class = clsmember
            if AgentCheck in clsmember.__bases__:
                # Direct subclass: remember it but keep scanning for a more
                # derived candidate.
                continue
            else:
                break
    return check_class
def load_class(check_name, class_name):
    """
    Retrieve a class with the given name within the given check module.
    """
    # Resolve the module either from the SDK integrations tree or from the
    # agent's checks.d directory.
    if _is_sdk():
        check_module = _load_sdk_module(check_name)
    else:
        checksd_path = get_checksd_path(get_os())
        if checksd_path not in sys.path:
            sys.path.append(checksd_path)
        check_module = __import__(check_name)
    for member_name, member in inspect.getmembers(check_module, inspect.isclass):
        if member_name == class_name:
            return member
    raise Exception(u"Unable to import class {0} from the check module.".format(class_name))
def load_check(name, config, agentConfig):
    """Locate, import and instantiate the check named `name`.

    Builds the check from `config` ('init_config' and 'instances') and
    `agentConfig`. Raises when the module defines no AgentCheck subclass or
    when the class still uses the old (pre-instances) constructor API.
    """
    if not _is_sdk():
        checksd_path = agentConfig.get('additional_checksd', get_checksd_path(get_os()))
        # find (in checksd_path) and load the check module
        fd, filename, desc = imp.find_module(name, [checksd_path])
        check_module = imp.load_module(name, fd, filename, desc)
    else:
        check_module = _load_sdk_module(name)  # parent module
    # Pick the check's AgentCheck subclass: keep scanning past direct
    # subclasses, stop at the first deeper descendant found.
    check_class = None
    classes = inspect.getmembers(check_module, inspect.isclass)
    for _, clsmember in classes:
        if clsmember == AgentCheck:
            continue
        if issubclass(clsmember, AgentCheck):
            check_class = clsmember
            if AgentCheck in clsmember.__bases__:
                continue
            else:
                break
    if check_class is None:
        raise Exception("Unable to import check %s. Missing a class that inherits AgentCheck" % name)
    init_config = config.get('init_config', {})
    instances = config.get('instances')
    agentConfig['checksd_hostname'] = get_hostname(agentConfig)
    # init the check class
    try:
        return check_class(name, init_config, agentConfig, instances=instances)
    except TypeError as e:
        # Old-API checks have a different constructor signature.
        raise Exception("Check is using old API, {0}".format(e))
    except Exception:
        raise
class Fixtures(object):
    """Helpers to locate and read fixture files for integration tests."""

    @staticmethod
    def integration_name():
        """Derive the integration name from the test file on the call stack."""
        for frame in inspect.stack():
            # frame[1] is the source file path of that stack entry.
            base = os.path.basename(frame[1])
            if 'test_' in base:
                # "test_<name>.py" -> "<name>": strip the prefix and ".py".
                return base[5:-3]
        raise Exception('No integration test file in stack')

    @staticmethod
    def directory(sdk_dir=None):
        """Return the fixtures directory, honoring an explicit sdk_dir."""
        if sdk_dir:
            return os.path.join(sdk_dir, 'fixtures')
        return os.path.join(os.path.dirname(__file__), 'fixtures',
                            Fixtures.integration_name())

    @staticmethod
    def file(file_name, sdk_dir=None):
        """Full path of the named fixture file."""
        return os.path.join(Fixtures.directory(sdk_dir), file_name)

    @staticmethod
    def read_file(file_name, string_escape=True, sdk_dir=None):
        """Read a fixture file as unicode, optionally string-escape decoding it."""
        with open(Fixtures.file(file_name, sdk_dir)) as fixture:
            contents = fixture.read()
        if string_escape:
            contents = contents.decode('string-escape')
        return contents.decode("utf-8")

    @staticmethod
    def read_json_file(file_name, string_escape=True, sdk_dir=None):
        """Read a fixture file and parse it as JSON."""
        return json.loads(Fixtures.read_file(file_name, string_escape=string_escape, sdk_dir=sdk_dir))
class AgentCheckTest(unittest.TestCase):
DEFAULT_AGENT_CONFIG = {
'version': '0.1',
'api_key': 'toto'
}
def __init__(self, *args, **kwargs):
super(AgentCheckTest, self).__init__(*args, **kwargs)
if not hasattr(self, 'CHECK_NAME'):
raise Exception("You must define CHECK_NAME")
self.check = None
def is_travis(self):
return "TRAVIS" in os.environ
def load_check(self, config, agent_config=None):
agent_config = agent_config or self.DEFAULT_AGENT_CONFIG
self.check = load_check(self.CHECK_NAME, config, agent_config)
def load_class(self, name):
"""
Retrieve a class with the given name among the check module.
"""
return load_class(self.CHECK_NAME, name)
# Helper function when testing rates
def run_check_twice(self, config, agent_config=None, mocks=None,
force_reload=False):
self.run_check(config, agent_config, mocks, force_reload)
time.sleep(1)
self.run_check(config, agent_config, mocks)
def run_check_n(self, config, agent_config=None, mocks=None,
force_reload=False, repeat=1, sleep=1):
for i in xrange(repeat):
if not i:
self.run_check(config, agent_config, mocks, force_reload)
else:
self.run_check(config, agent_config, mocks)
time.sleep(sleep)
def run_check(self, config, agent_config=None, mocks=None, force_reload=False):
# If not loaded already, do it!
if self.check is None or force_reload:
self.load_check(config, agent_config=agent_config)
if mocks is not None:
for func_name, mock in mocks.iteritems():
if not hasattr(self.check, func_name):
continue
else:
setattr(self.check, func_name, mock)
error = None
for instance in self.check.instances:
try:
# Deepcopy needed to avoid weird duplicate tagging situations
# ie the check edits the tags of the instance, problematic if
# run twice
self.check.check(copy.deepcopy(instance))
# FIXME: This should be called within the `run` method only
self.check._roll_up_instance_metadata()
except Exception as e:
# Catch error before re-raising it to be able to get service_checks
print "Exception {0} during check".format(e)
print traceback.format_exc()
error = e
self.metrics = self.check.get_metrics()
self.events = self.check.get_events()
self.service_checks = self.check.get_service_checks()
self.service_metadata = []
self.warnings = self.check.get_warnings()
# clean {} service_metadata (otherwise COVERAGE fails for nothing)
for metadata in self.check.get_service_metadata():
if metadata:
self.service_metadata.append(metadata)
if error is not None:
raise error # pylint: disable=E0702
def print_current_state(self):
log.debug("""++++++++ CURRENT STATE ++++++++
METRICS
{metrics}
EVENTS
{events}
SERVICE CHECKS
{sc}
SERVICE METADATA
{sm}
WARNINGS
{warnings}
++++++++++++++++++++++++++++""".format(
metrics=pformat(self.metrics),
events=pformat(self.events),
sc=pformat(self.service_checks),
sm=pformat(self.service_metadata),
warnings=pformat(self.warnings)
))
def _generate_coverage_metrics(self, data, indice=None):
total = len(data)
tested = 0
untested = []
for d in data:
if (indice and d[indice] or d).get('tested'):
tested += 1
else:
untested.append(d)
if total == 0:
coverage = 100.0
else:
coverage = 100.0 * tested / total
return tested, total, coverage, untested
def coverage_report(self):
tested_metrics, total_metrics, coverage_metrics, untested_metrics = \
self._generate_coverage_metrics(self.metrics, indice=3)
tested_sc, total_sc, coverage_sc, untested_sc = \
self._generate_coverage_metrics(self.service_checks)
tested_sm, total_sm, coverage_sm, untested_sm = \
self._generate_coverage_metrics(self.service_metadata)
tested_events, total_events, coverage_events, untested_events = \
self._generate_coverage_metrics(self.events)
coverage = """Coverage
========================================
METRICS
Tested {tested_metrics}/{total_metrics} ({coverage_metrics}%)
UNTESTED: {untested_metrics}
EVENTS
Tested {tested_events}/{total_events} ({coverage_events}%)
UNTESTED: {untested_events}
SERVICE CHECKS
Tested {tested_sc}/{total_sc} ({coverage_sc}%)
UNTESTED: {untested_sc}
SERVICE METADATA
Tested {tested_sm}/{total_sm} ({coverage_sm}%)
UNTESTED: {untested_sm}
========================================"""
log.info(coverage.format(
tested_metrics=tested_metrics,
total_metrics=total_metrics,
coverage_metrics=coverage_metrics,
untested_metrics=pformat(untested_metrics),
tested_sc=tested_sc,
total_sc=total_sc,
coverage_sc=coverage_sc,
untested_sc=pformat(untested_sc),
tested_sm=tested_sm,
total_sm=total_sm,
coverage_sm=coverage_sm,
untested_sm=pformat(untested_sm),
tested_events=tested_events,
total_events=total_events,
coverage_events=coverage_events,
untested_events=pformat(untested_events),
))
if not os.getenv('NO_COVERAGE'):
self.assertEquals(coverage_metrics, 100.0)
self.assertEquals(coverage_events, 100.0)
self.assertEquals(coverage_sc, 100.0)
self.assertEquals(coverage_sm, 100.0)
def _candidates_size_assert(self, candidates, count=None, at_least=1):
try:
if count is not None:
self.assertEquals(
len(candidates), count,
"Needed exactly %d candidates, got %d" % (count, len(candidates))
)
else:
self.assertTrue(
len(candidates) >= at_least,
"Needed at least %d candidates, got %d" % (at_least, len(candidates))
)
except AssertionError:
self.print_current_state()
raise
def assertMetric(self, metric_name, value=None, tags=None, count=None,
at_least=1, hostname=None, device_name=None, metric_type=None):
candidates = []
for m_name, ts, val, mdata in self.metrics:
if m_name == metric_name:
if value is not None and val != value:
continue
if tags is not None and sorted(tags) != sorted(mdata.get("tags", [])):
continue
if hostname is not None and mdata['hostname'] != hostname:
continue
if device_name is not None and mdata['device_name'] != device_name:
continue
if metric_type is not None and mdata['type'] != metric_type:
continue
candidates.append((m_name, ts, val, mdata))
try:
self._candidates_size_assert(candidates, count=count, at_least=at_least)
except AssertionError:
log.error("Candidates size assertion for {0} (value: {1}, tags: {2}, "
"count: {3}, at_least: {4}, hostname: {5}) failed"
.format(metric_name, value, tags, count, at_least, hostname))
raise
for mtuple in self.metrics:
for cmtuple in candidates:
if mtuple == cmtuple:
mtuple[3]['tested'] = True
log.debug("{0} FOUND !".format(metric_name))
def assertMetricTagPrefix(self, metric_name, tag_prefix, count=None, at_least=1):
log.debug("Looking for a tag starting with `{0}:` on metric {1}"
.format(tag_prefix, metric_name))
if count is not None:
log.debug(" * should have exactly {0} data points".format(count))
elif at_least is not None:
log.debug(" * should have at least {0} data points".format(at_least))
candidates = []
for m_name, ts, val, mdata in self.metrics:
if m_name == metric_name:
gtags = [t for t in mdata['tags'] if t.startswith(tag_prefix)]
if not gtags:
continue
candidates.append((m_name, ts, val, mdata))
try:
self._candidates_size_assert(candidates, count=count)
except AssertionError:
log.error("Candidates size assertion for {0} (tag_prefix: {1}, "
"count: {2}, at_least: {3}) failed".format(metric_name,
tag_prefix,
count,
at_least))
raise
for mtuple in self.metrics:
for cmtuple in candidates:
if mtuple == cmtuple:
mtuple[3]['tested'] = True
log.debug("{0} FOUND !".format(metric_name))
def assertMetricTag(self, metric_name, tag, count=None, at_least=1):
log.debug("Looking for tag {0} on metric {1}".format(tag, metric_name))
if count is not None:
log.debug(" * should have exactly {0} data points".format(count))
elif at_least is not None:
log.debug(" * should have at least {0} data points".format(at_least))
candidates = []
for m_name, ts, val, mdata in self.metrics:
if m_name == metric_name:
gtags = [t for t in mdata['tags'] if t == tag]
if not gtags:
continue
candidates.append((m_name, ts, val, mdata))
try:
self._candidates_size_assert(candidates, count=count)
except AssertionError:
log.error("Candidates size assertion for {0} (tag: {1}, count={2},"
" at_least={3}) failed".format(metric_name, tag, count, at_least))
raise
for mtuple in self.metrics:
for cmtuple in candidates:
if mtuple == cmtuple:
mtuple[3]['tested'] = True
log.debug("{0} FOUND !".format(metric_name))
def assertServiceMetadata(self, meta_keys, count=None, at_least=1):
log.debug("Looking for service metadata with keys {0}".format(meta_keys))
if count is not None:
log.debug(" * should be defined for exactly {0} instances".format(count))
elif at_least is not None:
log.debug(" * should be defined for at least {0} instances".format(at_least))
candidates = []
for sm in self.service_metadata:
if sorted(sm.keys()) != sorted(meta_keys):
continue
candidates.append(sm)
try:
self._candidates_size_assert(candidates, count=count, at_least=at_least)
except AssertionError:
log.error("Candidates size assertion for service metadata with keys {0}"
" (count: {1}, at_least: {2}) failed".format(meta_keys, count, at_least))
raise
for sm in self.service_metadata:
for csm in candidates:
if sm == csm:
sm['tested'] = True
log.debug("Service metadata FOUND !")
def assertServiceCheck(self, service_check_name, status=None, tags=None,
count=None, at_least=1):
log.debug("Looking for service check {0}".format(service_check_name))
if status is not None:
log.debug(" * with status {0}".format(status))
if tags is not None:
log.debug(" * tagged with {0}".format(tags))
if count is not None:
log.debug(" * should have exactly {0} statuses".format(count))
elif at_least is not None:
log.debug(" * should have at least {0} statuses".format(at_least))
candidates = []
for sc in self.service_checks:
if sc['check'] == service_check_name:
if status is not None and sc['status'] != status:
continue
if tags is not None and sorted(tags) != sorted(sc.get("tags")):
continue
candidates.append(sc)
try:
self._candidates_size_assert(candidates, count=count, at_least=at_least)
except AssertionError:
log.error("Candidates size assertion for {0} (status: {1}, "
"tags: {2}, count: {3}, at_least: {4}) failed".format(service_check_name,
status,
tags,
count,
at_least))
raise
for sc in self.service_checks:
for csc in candidates:
if sc == csc:
sc['tested'] = True
log.debug("{0} FOUND !".format(service_check_name))
def assertServiceCheckOK(self, service_check_name, tags=None, count=None, at_least=1):
self.assertServiceCheck(service_check_name,
status=AgentCheck.OK,
tags=tags,
count=count,
at_least=at_least)
def assertServiceCheckWarning(self, service_check_name, tags=None, count=None, at_least=1):
self.assertServiceCheck(service_check_name,
status=AgentCheck.WARNING,
tags=tags,
count=count,
at_least=at_least)
def assertServiceCheckCritical(self, service_check_name, tags=None, count=None, at_least=1):
self.assertServiceCheck(service_check_name,
status=AgentCheck.CRITICAL,
tags=tags,
count=count,
at_least=at_least)
def assertServiceCheckUnknown(self, service_check_name, tags=None, count=None, at_least=1):
self.assertServiceCheck(service_check_name,
status=AgentCheck.UNKNOWN,
tags=tags,
count=count,
at_least=at_least)
def assertIn(self, first, second):
self.assertTrue(first in second, "{0} not in {1}".format(first, second))
def assertNotIn(self, first, second):
self.assertTrue(first not in second, "{0} in {1}".format(first, second))
def assertWarning(self, warning, count=None, at_least=1, exact_match=True):
log.debug("Looking for warning {0}".format(warning))
if count is not None:
log.debug(" * should have exactly {0} statuses".format(count))
elif at_least is not None:
log.debug(" * should have at least {0} statuses".format(count))
if exact_match:
candidates = [w for w in self.warnings if w == warning]
else:
candidates = [w for w in self.warnings if warning in w]
try:
self._candidates_size_assert(candidates, count=count, at_least=at_least)
except AssertionError:
log.error("Candidates size assertion for {0}, count: {1}, "
"at_least: {2}) failed".format(warning, count, at_least))
raise
log.debug("{0} FOUND !".format(warning))
# Potential kwargs: aggregation_key, alert_type, event_type,
# msg_title, source_type_name
def assertEvent(self, msg_text, count=None, at_least=1, exact_match=True,
tags=None, **kwargs):
log.debug("Looking for event {0}".format(msg_text))
if tags is not None:
log.debug(" * tagged with {0}".format(tags))
for name, value in kwargs.iteritems():
if value is not None:
log.debug(" * with {0} {1}".format(name, value))
if count is not None:
log.debug(" * should have exactly {0} events".format(count))
elif at_least is not None:
log.debug(" * should have at least {0} events".format(count))
candidates = []
for e in self.events:
if exact_match and msg_text != e['msg_text'] or \
not exact_match and msg_text not in e['msg_text']:
continue
if tags and set(tags) != set(e['tags']):
continue
for name, value in kwargs.iteritems():
if e[name] != value:
break
else:
candidates.append(e)
try:
self._candidates_size_assert(candidates, count=count, at_least=at_least)
except AssertionError:
log.error("Candidates size assertion for {0}, count: {1}, "
"at_least: {2}) failed".format(msg_text, count, at_least))
raise
for ev, ec in product(self.events, candidates):
if ec == ev:
ev['tested'] = True
log.debug("{0} FOUND !".format(msg_text))
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras training and evaluation routines for eager execution.
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager.backprop import GradientTape
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def _eager_loss_fn(outputs, targets, loss_fn, output_name):
  """Apply `loss_fn` to one output under a "<output_name>_loss" name scope."""
  scope_name = output_name + '_loss'
  with backend.name_scope(scope_name):
    result = loss_fn(targets, outputs)
  return result
def _eager_metrics_fn(model, outputs, targets, sample_weights=None, masks=None):
  """Calculates the metrics for each output of the given model.

  Arguments:
      model: The model on which metrics are being calculated.
      outputs: The outputs of the given model.
      targets: The predictions or targets of the given model.
      sample_weights: Optional list of sample weights for each output.
      masks: Optional list of masks for each output.

  Returns:
      Returns the metric results for each output of the model.
  """
  outputs = nest.flatten(outputs)
  targets = nest.flatten(targets)
  # Invoke all(weighted and unweighted) metrics.
  metric_results = []
  if targets:
    # Insert None values corresponding to the targets that need to be skipped
    # on the model.
    if len(model._targets) != len(targets):
      # Align the target list with model._targets: skipped targets become
      # None, remaining entries are consumed in order.
      new_targets = [
          None if t is None else targets.pop(0) for t in model._targets
      ]
      targets = new_targets

    metric_results = model._handle_metrics(
        outputs,
        targets=targets,
        sample_weights=sample_weights,
        masks=masks,
        return_weighted_and_unweighted_metrics=True,
        skip_target_masks=model._prepare_skip_target_masks())

  # Add metric results from the `add_metric` metrics.
  metric_results.extend([
      m.result()
      for m in model.metrics
      if m not in model._compile_metric_functions
  ])
  return metric_results
def _model_loss(model,
                inputs,
                targets,
                output_loss_metrics=None,
                sample_weights=None,
                training=False):
  """Calculates the loss for a given model.

  Arguments:
      model: The model on which metrics are being calculated.
      inputs: Either a dictionary of inputs to the model or a list of input
        arrays.
      targets: List of target arrays.
      output_loss_metrics: List of metrics that are used to aggregated output
        loss values.
      sample_weights: Optional list of sample weight arrays.
      training: Whether the model should be run in inference or training mode.

  Returns:
      Returns the model output, total loss, loss value calculated using the
      specified loss function and masks for each output. The total loss
      includes regularization losses and applies masking and sample weighting
      to the loss value.
  """
  # TODO(psv): Dedup code here with graph mode prepare_total_loss() fn.
  # Used to keep track of the total loss value (stateless).
  # eg., total_loss = loss_weight_1 * output_1_loss_fn(...) +
  #                   loss_weight_2 * output_2_loss_fn(...) +
  #                   layer losses.
  total_loss = 0
  kwargs = {}
  if model._expects_training_arg:
    kwargs['training'] = training
  # Single-input models are called with the bare tensor, not a 1-list.
  if len(inputs) == 1 and not isinstance(inputs, dict):
    inputs = inputs[0]

  # Allow mixed `NumPy` and `EagerTensor` input here.
  if any(
      isinstance(input_t, (np.ndarray, float, int))
      for input_t in nest.flatten(inputs)):
    inputs = nest.map_structure(ops.convert_to_tensor_v2_with_dispatch, inputs)

  outs = model(inputs, **kwargs)
  outs = nest.flatten(outs)

  if targets:
    targets = training_utils.cast_if_floating_dtype_and_mismatch(targets, outs)
  # TODO(sallymatson/psv): check if we should do same mismatch fix for weights
  if sample_weights:
    sample_weights = [
        training_utils.cast_if_floating_dtype(
            ops.convert_to_tensor_v2_with_dispatch(val))
        if val is not None else None for val in sample_weights
    ]

  # Per-output Keras masks set by layers during the forward pass (if any).
  masks = [getattr(t, '_keras_mask', None) for t in outs]
  targets = nest.flatten(targets)

  # Used to keep track of individual output losses.
  output_losses = []

  with backend.name_scope('loss'):
    loss_fns = [
        loss_fn for loss_fn in model.loss_functions if loss_fn is not None
    ]
    custom_losses = model.losses  # Regularization losses

    if not loss_fns and not custom_losses:
      if training:
        raise ValueError('The model cannot be trained '
                         'because it has no loss to optimize.')
      else:
        raise ValueError('The model cannot be evaluated '
                         'because it has no loss to compute.')

    for i, loss_fn in enumerate(loss_fns):
      weights = sample_weights[i] if sample_weights else None
      mask = masks[i]
      with backend.name_scope(model.output_names[i] + '_loss'):
        if mask is not None:
          mask = math_ops.cast(mask, outs[i].dtype)
          # Update weights with mask.
          if weights is None:
            weights = mask
          else:
            # Update dimensions of weights to match with mask if possible.
            weights = math_ops.cast(weights, outs[i].dtype)
            mask, _, weights = (
                losses_utils.squeeze_or_expand_dimensions(
                    mask, sample_weight=weights))
            weights *= mask

        if hasattr(loss_fn, 'reduction'):
          per_sample_losses = loss_fn.call(targets[i], outs[i])
          weighted_losses = losses_utils.compute_weighted_loss(
              per_sample_losses,
              sample_weight=weights,
              reduction=losses_utils.ReductionV2.NONE)
          loss_reduction = loss_fn.reduction

          # `AUTO` loss reduction defaults to `SUM_OVER_BATCH_SIZE` for all
          # compile use cases.
          if loss_reduction == losses_utils.ReductionV2.AUTO:
            loss_reduction = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE

          # Compute the stateless loss value.
          output_loss = losses_utils.reduce_weighted_loss(
              weighted_losses, reduction=loss_reduction)
        else:
          # Compute the stateless loss value for a custom loss class.
          # Here we assume that the class takes care of loss reduction
          # because if this class returns a vector value we cannot
          # differentiate between use case where a custom optimizer
          # expects a vector loss value vs unreduced per-sample loss value.
          output_loss = loss_fn(targets[i], outs[i], sample_weight=weights)
          loss_reduction = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE

        # If the number of outputs is 1 then we don't append the loss metric
        # associated with each model output. When there are multiple outputs
        # associated with a model, each output's loss is calculated and
        # returned as part of the loss_metrics.
        if len(model.outputs) > 1:
          # Keep track of the stateful output loss result.
          output_losses.append(output_loss_metrics[i](output_loss))

        # Scale output loss for distribution. For custom losses we assume
        # reduction was mean.
        if loss_reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE:
          output_loss = losses_utils.scale_loss_for_distribution(output_loss)
      total_loss += model._loss_weights_list[i] * output_loss

    # Add regularization losses
    if custom_losses:
      total_loss += losses_utils.scale_loss_for_distribution(
          math_ops.add_n(custom_losses))
  return outs, total_loss, output_losses, masks
def _process_single_batch(model,
                          inputs,
                          targets,
                          output_loss_metrics=None,
                          sample_weights=None,
                          training=False):
  """Calculate the loss and gradient for one input batch.

     The model weights are updated if training is set to True.

  Arguments:
      model: Model whose loss has to be calculated.
      inputs: List of input arrays.
      targets: List of target arrays.
      output_loss_metrics: List of metrics that are used to aggregated output
        loss values.
      sample_weights: Optional list of sample weight arrays.
      training: The boolean represents if the weights of the model are updated.
              'fit' methods will set this to True while 'evaluate' methods will
              set this to False.

  Returns:
      output of the model, total loss, the loss and the mask
      associated with each output.

  Raises:
      ValueError: If the model has no loss to optimize.
  """
  with backend.eager_learning_phase_scope(1 if training else 0), \
      training_utils.RespectCompiledTrainableState(model):
    with GradientTape() as tape:
      outs, total_loss, output_losses, masks = (
          _model_loss(
              model,
              inputs,
              targets,
              output_loss_metrics=output_loss_metrics,
              sample_weights=sample_weights,
              training=training))
      if isinstance(model.optimizer, loss_scale_optimizer.LossScaleOptimizer):
        # Scale the loss before differentiating; gradients are unscaled
        # again below before being applied.
        scaled_total_loss = model.optimizer.get_scaled_loss(total_loss)
      else:
        scaled_total_loss = total_loss
    if training:
      trainable_weights = model.trainable_weights
      if trainable_weights:
        # TODO(tanzheny) b/132690565: Provide mechanism for user to override
        # model.train_on_batch.
        if hasattr(model, '_backwards'):
          model._backwards(tape, scaled_total_loss)
        else:
          grads = tape.gradient(scaled_total_loss, trainable_weights)
          if isinstance(model.optimizer,
                        loss_scale_optimizer.LossScaleOptimizer):
            grads = model.optimizer.get_unscaled_gradients(grads)
          model.optimizer.apply_gradients(zip(grads, trainable_weights))
      else:
        logging.warning('The list of trainable weights is empty. Make sure that'
                        ' you are not setting model.trainable to False before '
                        'compiling the model.')
    return outs, total_loss, output_losses, masks
def train_on_batch(model,
                   inputs,
                   targets,
                   sample_weights=None,
                   output_loss_metrics=None):
  """Calculates the loss and gradient updates for one input batch.

  Arguments:
      model: Model whose loss has to be calculated.
      inputs: Input batch data.
      targets: Target batch data.
      sample_weights: Sample weight batch data.
      output_loss_metrics: List of metrics that are used to aggregated output
        loss values.

  Returns:
      Dict with three items:
        'total_loss': list with a single tensor for overall loss,
        'output_losses': list of tensors for loss corresponding to each of the
          model output. Could be a empty list when model has only one output.
        'metrics': list of tensors for metric specified.
  """
  # Align input dtypes with what the model expects before running the step.
  inputs = training_utils.cast_to_model_input_dtypes(inputs, model)
  outs, total_loss, output_losses, masks = _process_single_batch(
      model,
      inputs,
      targets,
      sample_weights=sample_weights,
      training=True,
      output_loss_metrics=output_loss_metrics)
  model_outputs = outs if isinstance(outs, list) else [outs]
  metric_values = _eager_metrics_fn(
      model, model_outputs, targets, sample_weights=sample_weights, masks=masks)
  return {
      'total_loss': nest.flatten(total_loss),
      'output_losses': output_losses,
      'metrics': metric_values,
  }
def test_on_batch(model,
                  inputs,
                  targets,
                  sample_weights=None,
                  output_loss_metrics=None):
  """Calculates the loss for one input batch.

  Arguments:
      model: Model whose loss has to be calculated.
      inputs: Input batch data.
      targets: Target batch data.
      sample_weights: Sample weight batch data.
      output_loss_metrics: List of metrics that are used to aggregated output
        loss values.

  Returns:
      Dict with three items:
        'total_loss': single tensor for overall loss,
        'output_losses': list of tensors for loss corresponding to each of the
          model output. Could be a empty list when model has only one output.
        'metrics': list of tensors for metric specified.
  """
  # Align input dtypes with what the model expects before evaluating.
  inputs = training_utils.cast_to_model_input_dtypes(inputs, model)
  # Evaluation runs with learning phase 0 so layers use inference behavior;
  # no weights are touched here.
  with backend.eager_learning_phase_scope(0):
    outs, total_loss, output_losses, masks = _model_loss(
        model,
        inputs,
        targets,
        sample_weights=sample_weights,
        training=False,
        output_loss_metrics=output_loss_metrics)
  model_outputs = outs if isinstance(outs, list) else [outs]
  metric_values = _eager_metrics_fn(
      model, model_outputs, targets, sample_weights=sample_weights, masks=masks)
  return {
      'total_loss': nest.flatten(total_loss),
      'output_losses': output_losses,
      'metrics': metric_values,
  }
| |
# Copyright 2014-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import shutil
import os
import textwrap
import typing as T
import collections
from . import build
from . import coredata
from . import environment
from . import mesonlib
from . import mintro
from . import mlog
from .ast import AstIDGenerator
from .mesonlib import MachineChoice, OptionKey
if T.TYPE_CHECKING:
import argparse
from .coredata import UserOption
def add_arguments(parser: 'argparse.ArgumentParser') -> None:
    """Register the options understood by `meson configure` on *parser*."""
    coredata.register_builtin_arguments(parser)
    parser.add_argument('builddir', nargs='?', default='.')
    parser.add_argument(
        '--clearcache',
        action='store_true',
        default=False,
        help='Clear cached state (e.g. found dependencies)')
def make_lower_case(val: T.Any) -> T.Union[str, T.List[T.Any]]:  # T.Any because of recursion...
    """Stringify *val* for display.

    Booleans become 'true'/'false', lists are converted element-wise
    (recursively); everything else is passed through str() unchanged.
    """
    if isinstance(val, list):
        return [make_lower_case(item) for item in val]
    if isinstance(val, bool):
        return str(val).lower()
    return str(val)
class ConfException(mesonlib.MesonException):
    """Error raised by the `meson configure` implementation (e.g. when the
    given directory is neither a build nor a source directory)."""
    pass
class Conf:
    """Implements `meson configure`: view and modify a build's options.

    When pointed at a configured build directory the persisted coredata is
    loaded and options may be changed; when pointed at an unconfigured
    source directory the project is introspected instead and only default
    values are shown (``default_values_only``).
    """
    def __init__(self, build_dir):
        self.build_dir = os.path.abspath(os.path.realpath(build_dir))
        # Accept a path to (or named) meson.build and use its directory.
        if 'meson.build' in [os.path.basename(self.build_dir), self.build_dir]:
            self.build_dir = os.path.dirname(self.build_dir)
        self.build = None
        self.max_choices_line_length = 60
        # Parallel arrays forming the output columns; rows are appended by
        # _add_line() and rendered together by print_aligned().
        self.name_col = []
        self.value_col = []
        self.choices_col = []
        self.descr_col = []
        # XXX: is there a case where this can actually remain false?
        self.has_choices = False
        self.all_subprojects: T.Set[str] = set()
        self.yielding_options: T.Set[OptionKey] = set()

        if os.path.isdir(os.path.join(self.build_dir, 'meson-private')):
            # Configured build directory: load its persisted state.
            self.build = build.load(self.build_dir)
            self.source_dir = self.build.environment.get_source_dir()
            self.coredata = coredata.load(self.build_dir)
            self.default_values_only = False
        elif os.path.isfile(os.path.join(self.build_dir, environment.build_filename)):
            # Unconfigured source directory: introspect for defaults only.
            # Make sure that log entries in other parts of meson don't interfere with the JSON output
            mlog.disable()
            self.source_dir = os.path.abspath(os.path.realpath(self.build_dir))
            intr = mintro.IntrospectionInterpreter(self.source_dir, '', 'ninja', visitors = [AstIDGenerator()])
            intr.analyze()
            # Re-enable logging just in case
            mlog.enable()
            self.coredata = intr.coredata
            self.default_values_only = True
        else:
            raise ConfException(f'Directory {build_dir} is neither a Meson build directory nor a project source directory.')

    def clear_cache(self):
        # Drop cached dependency lookups; they are re-detected on the next
        # reconfigure.
        self.coredata.clear_deps_cache()

    def set_options(self, options):
        self.coredata.set_options(options)

    def save(self):
        """Persist modified options back to the build directory."""
        # Do nothing when using introspection
        if self.default_values_only:
            return
        # Only called if something has changed so overwrite unconditionally.
        coredata.save(self.coredata, self.build_dir)
        # We don't write the build file because any changes to it
        # are erased when Meson is executed the next time, i.e. when
        # Ninja is run.

    def print_aligned(self) -> None:
        """Do the actual printing.

        This prints the generated output in an aligned, pretty form. it aims
        for a total width of 160 characters, but will use whatever the tty
        reports it's value to be. Though this is much wider than the standard
        80 characters of terminals, and even than the newer 120, compressing
        it to those lengths makes the output hard to read.

        Each column will have a specific width, and will be line wrapped.
        """
        total_width = shutil.get_terminal_size(fallback=(160, 0))[0]
        _col = max(total_width // 5, 20)
        four_column = (_col, _col, _col, total_width - (3 * _col))
        # In this case we don't have the choices field, so we can redistribute
        # the extra 40 characters to val and desc
        three_column = (_col, _col * 2, total_width // 2)
        for line in zip(self.name_col, self.value_col, self.choices_col, self.descr_col):
            if not any(line):
                # A fully-empty row is a vertical spacer.
                print('')
                continue

            # This is a header, like `Subproject foo:`,
            # We just want to print that and get on with it
            if line[0] and not any(line[1:]):
                print(line[0])
                continue

            # wrap will take a long string, and create a list of strings no
            # longer than the size given. Then that list can be zipped into, to
            # print each line of the output, such the that columns are printed
            # to the right width, row by row.
            if self.has_choices:
                name = textwrap.wrap(line[0], four_column[0])
                val = textwrap.wrap(line[1], four_column[1])
                choice = textwrap.wrap(line[2], four_column[2])
                desc = textwrap.wrap(line[3], four_column[3])
                for l in itertools.zip_longest(name, val, choice, desc, fillvalue=''):
                    # We must use the length modifier here to get even rows, as
                    # `textwrap.wrap` will only shorten, not lengthen each item
                    print('{:{widths[0]}} {:{widths[1]}} {:{widths[2]}} {}'.format(*l, widths=four_column))
            else:
                name = textwrap.wrap(line[0], three_column[0])
                val = textwrap.wrap(line[1], three_column[1])
                desc = textwrap.wrap(line[3], three_column[2])
                for l in itertools.zip_longest(name, val, desc, fillvalue=''):
                    print('{:{widths[0]}} {:{widths[1]}} {}'.format(*l, widths=three_column))

    def split_options_per_subproject(self, options: 'coredata.KeyedOptionDictType') -> T.Dict[str, T.Dict[str, 'UserOption']]:
        """Group options by subproject name ('' is the main project).

        Also records which subproject options yield to a main-project option
        of the same name, so print_options() can show them as inherited.
        """
        result: T.Dict[str, T.Dict[str, 'UserOption']] = {}
        for k, o in options.items():
            subproject = k.subproject
            if k.subproject:
                k = k.as_root()
                if o.yielding and k in options:
                    self.yielding_options.add(k)
            self.all_subprojects.add(subproject)
            result.setdefault(subproject, {})[str(k)] = o
        return result

    def _add_line(self, name: OptionKey, value, choices, descr) -> None:
        # Append one row to the four column arrays.
        # NOTE(review): relies on self.print_margin, which is only assigned in
        # add_section(); print_conf() always calls add_section() first —
        # confirm before calling _add_line() through another path.
        self.name_col.append(' ' * self.print_margin + str(name))
        self.value_col.append(value)
        self.choices_col.append(choices)
        self.descr_col.append(descr)

    def add_option(self, name, descr, value, choices):
        """Append one option row, wrapping long choice lists across rows."""
        if isinstance(value, list):
            value = '[{}]'.format(', '.join(make_lower_case(value)))
        else:
            value = make_lower_case(value)

        if choices:
            self.has_choices = True
            if isinstance(choices, list):
                choices_list = make_lower_case(choices)
                current = '['
                while choices_list:
                    i = choices_list.pop(0)
                    if len(current) + len(i) >= self.max_choices_line_length:
                        # Flush the partial choices row; continuation rows
                        # leave the other columns blank.
                        self._add_line(name, value, current + ',', descr)
                        name = ''
                        value = ''
                        descr = ''
                        current = ' '
                    if len(current) > 1:
                        current += ', '
                    current += i
                choices = current + ']'
            else:
                choices = make_lower_case(choices)
        else:
            choices = ''

        self._add_line(name, value, choices, descr)

    def add_title(self, title):
        """Append a table header row followed by a dashed underline row."""
        titles = {'descr': 'Description', 'value': 'Current Value', 'choices': 'Possible Values'}
        if self.default_values_only:
            titles['value'] = 'Default Value'
        self._add_line('', '', '', '')
        self._add_line(title, titles['value'], titles['choices'], titles['descr'])
        self._add_line('-' * len(title), '-' * len(titles['value']), '-' * len(titles['choices']), '-' * len(titles['descr']))

    def add_section(self, section):
        """Append a section header row; subsequent rows are indented."""
        self.print_margin = 0
        self._add_line('', '', '', '')
        self._add_line(section + ':', '', '', '')
        self.print_margin = 2

    def print_options(self, title: str, options: 'coredata.KeyedOptionDictType') -> None:
        """Queue a titled table of options for printing (no-op if empty)."""
        if not options:
            return
        if title:
            self.add_title(title)
        for k, o in sorted(options.items()):
            printable_value = o.printable_value()
            if k in self.yielding_options:
                printable_value = '<inherited from main project>'
            self.add_option(k, o.description, printable_value, o.choices)

    def print_conf(self):
        """Categorize all options and print them grouped by section."""
        def print_default_values_warning():
            mlog.warning('The source directory instead of the build directory was specified.')
            mlog.warning('Only the default values for the project are printed, and all command line parameters are ignored.')

        if self.default_values_only:
            print_default_values_warning()
            print('')

        print('Core properties:')
        print(' Source dir', self.source_dir)
        if not self.default_values_only:
            print(' Build dir ', self.build_dir)

        dir_option_names = set(coredata.BUILTIN_DIR_OPTIONS)
        test_option_names = {OptionKey('errorlogs'),
                             OptionKey('stdsplit')}

        dir_options: 'coredata.KeyedOptionDictType' = {}
        test_options: 'coredata.KeyedOptionDictType' = {}
        core_options: 'coredata.KeyedOptionDictType' = {}
        module_options: T.Dict[str, 'coredata.KeyedOptionDictType'] = collections.defaultdict(dict)
        # Bucket every option into exactly one of the categories above.
        for k, v in self.coredata.options.items():
            if k in dir_option_names:
                dir_options[k] = v
            elif k in test_option_names:
                test_options[k] = v
            elif k.module:
                # Ignore module options if we did not use that module during
                # configuration.
                if self.build and k.module not in self.build.modules:
                    continue
                module_options[k.module][k] = v
            elif k.is_builtin():
                core_options[k] = v

        host_core_options = self.split_options_per_subproject({k: v for k, v in core_options.items() if k.machine is MachineChoice.HOST})
        build_core_options = self.split_options_per_subproject({k: v for k, v in core_options.items() if k.machine is MachineChoice.BUILD})
        host_compiler_options = self.split_options_per_subproject({k: v for k, v in self.coredata.options.items() if k.is_compiler() and k.machine is MachineChoice.HOST})
        build_compiler_options = self.split_options_per_subproject({k: v for k, v in self.coredata.options.items() if k.is_compiler() and k.machine is MachineChoice.BUILD})
        project_options = self.split_options_per_subproject({k: v for k, v in self.coredata.options.items() if k.is_project()})
        # Build-machine option tables are only relevant when cross compiling.
        show_build_options = self.default_values_only or self.build.environment.is_cross_build()

        self.add_section('Main project options')
        self.print_options('Core options', host_core_options[''])
        if show_build_options:
            self.print_options('', build_core_options[''])
        self.print_options('Backend options', {str(k): v for k, v in self.coredata.options.items() if k.is_backend()})
        self.print_options('Base options', {str(k): v for k, v in self.coredata.options.items() if k.is_base()})
        self.print_options('Compiler options', host_compiler_options.get('', {}))
        if show_build_options:
            self.print_options('', build_compiler_options.get('', {}))
        for mod, mod_options in module_options.items():
            self.print_options(f'{mod} module options', mod_options)
        self.print_options('Directories', dir_options)
        self.print_options('Testing options', test_options)
        self.print_options('Project options', project_options.get('', {}))
        for subproject in sorted(self.all_subprojects):
            if subproject == '':
                continue
            self.add_section('Subproject ' + subproject)
            if subproject in host_core_options:
                self.print_options('Core options', host_core_options[subproject])
            if subproject in build_core_options and show_build_options:
                self.print_options('', build_core_options[subproject])
            if subproject in host_compiler_options:
                self.print_options('Compiler options', host_compiler_options[subproject])
            if subproject in build_compiler_options and show_build_options:
                self.print_options('', build_compiler_options[subproject])
            if subproject in project_options:
                self.print_options('Project options', project_options[subproject])
        self.print_aligned()

        # Print the warning twice so that the user shouldn't be able to miss it
        if self.default_values_only:
            print('')
            print_default_values_warning()

        self.print_nondefault_buildtype_options()

    def print_nondefault_buildtype_options(self):
        """Warn about options whose value differs from the buildtype default."""
        mismatching = self.coredata.get_nondefault_buildtype_args()
        if not mismatching:
            return
        print("\nThe following option(s) have a different value than the build type default\n")
        print(' current default')
        for m in mismatching:
            print(f'{m[0]:21}{m[1]:10}{m[2]:10}')
def run(options):
    """Entry point for `meson configure`. Returns a process exit code."""
    coredata.parse_cmd_line_options(options)
    builddir = os.path.abspath(os.path.realpath(options.builddir))
    conf = None
    try:
        conf = Conf(builddir)
        if conf.default_values_only:
            # Introspection mode: nothing can be changed, just display.
            conf.print_conf()
            return 0

        needs_save = False
        if options.cmd_line_options:
            conf.set_options(options.cmd_line_options)
            coredata.update_cmd_line_file(builddir, options)
            needs_save = True
        elif options.clearcache:
            conf.clear_cache()
            needs_save = True
        else:
            # No modification requested; display the current configuration.
            conf.print_conf()
        if needs_save:
            conf.save()
            mintro.update_build_options(conf.coredata, conf.build.environment.info_dir)
            mintro.write_meson_info_file(conf.build, [])
    except ConfException as e:
        print('Meson configurator encountered an error:')
        if conf is not None and conf.build is not None:
            mintro.write_meson_info_file(conf.build, [e])
        raise e
    return 0
| |
# -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JSON gsutil Cloud API implementation for Google Cloud Storage."""
from __future__ import absolute_import
import httplib
import json
import os
import socket
import ssl
import time
from apitools.base.py import credentials_lib
from apitools.base.py import encoding
from apitools.base.py import exceptions as apitools_exceptions
from apitools.base.py import http_wrapper as apitools_http_wrapper
from apitools.base.py import transfer as apitools_transfer
from apitools.base.py.util import CalculateWaitForRetry
import boto
from boto import config
from gcs_oauth2_boto_plugin import oauth2_helper
import httplib2
from oauth2client import devshell
from oauth2client import multistore_file
from gslib.cloud_api import AccessDeniedException
from gslib.cloud_api import ArgumentException
from gslib.cloud_api import BadRequestException
from gslib.cloud_api import CloudApi
from gslib.cloud_api import NotEmptyException
from gslib.cloud_api import NotFoundException
from gslib.cloud_api import PreconditionException
from gslib.cloud_api import Preconditions
from gslib.cloud_api import ResumableDownloadException
from gslib.cloud_api import ResumableUploadAbortException
from gslib.cloud_api import ResumableUploadException
from gslib.cloud_api import ResumableUploadStartOverException
from gslib.cloud_api import ServiceException
from gslib.cloud_api_helper import ValidateDstObjectMetadata
from gslib.cred_types import CredTypes
from gslib.exception import CommandException
from gslib.gcs_json_media import BytesTransferredContainer
from gslib.gcs_json_media import DownloadCallbackConnectionClassFactory
from gslib.gcs_json_media import HttpWithDownloadStream
from gslib.gcs_json_media import HttpWithNoRetries
from gslib.gcs_json_media import UploadCallbackConnectionClassFactory
from gslib.gcs_json_media import WrapDownloadHttpRequest
from gslib.gcs_json_media import WrapUploadHttpRequest
from gslib.no_op_credentials import NoOpCredentials
from gslib.progress_callback import ProgressCallbackWithBackoff
from gslib.project_id import PopulateProjectId
from gslib.third_party.storage_apitools import storage_v1_client as apitools_client
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.tracker_file import DeleteTrackerFile
from gslib.tracker_file import GetRewriteTrackerFilePath
from gslib.tracker_file import HashRewriteParameters
from gslib.tracker_file import ReadRewriteTrackerFile
from gslib.tracker_file import WriteRewriteTrackerFile
from gslib.translation_helper import CreateBucketNotFoundException
from gslib.translation_helper import CreateObjectNotFoundException
from gslib.translation_helper import DEFAULT_CONTENT_TYPE
from gslib.translation_helper import REMOVE_CORS_CONFIG
from gslib.util import GetBotoConfigFileList
from gslib.util import GetCertsFile
from gslib.util import GetCredentialStoreFilename
from gslib.util import GetGceCredentialCacheFilename
from gslib.util import GetJsonResumableChunkSize
from gslib.util import GetMaxRetryDelay
from gslib.util import GetNewHttp
from gslib.util import GetNumRetries
# Implementation supports only 'gs' URLs, so provider is unused.
# pylint: disable=unused-argument
DEFAULT_GCS_JSON_VERSION = 'v1'
NUM_BUCKETS_PER_LIST_PAGE = 1000
NUM_OBJECTS_PER_LIST_PAGE = 1000
TRANSLATABLE_APITOOLS_EXCEPTIONS = (apitools_exceptions.HttpError,
apitools_exceptions.StreamExhausted,
apitools_exceptions.TransferError,
apitools_exceptions.TransferInvalidError)
# TODO: Distribute these exceptions better through apitools and here.
# Right now, apitools is configured not to handle any exceptions on
# uploads/downloads.
# oauth2_client tries to JSON-decode the response, which can result
# in a ValueError if the response was invalid. Until that is fixed in
# oauth2_client, need to handle it here.
HTTP_TRANSFER_EXCEPTIONS = (apitools_exceptions.TransferRetryError,
apitools_exceptions.BadStatusCodeError,
# TODO: Honor retry-after headers.
apitools_exceptions.RetryAfterError,
apitools_exceptions.RequestError,
httplib.BadStatusLine,
httplib.IncompleteRead,
httplib.ResponseNotReady,
httplib2.ServerNotFoundError,
socket.error,
socket.gaierror,
socket.timeout,
ssl.SSLError,
ValueError)
_VALIDATE_CERTIFICATES_503_MESSAGE = (
"""Service Unavailable. If you have recently changed
https_validate_certificates from True to False in your boto configuration
file, please delete any cached access tokens in your filesystem (at %s)
and try again.""" % GetCredentialStoreFilename())
class GcsJsonApi(CloudApi):
"""Google Cloud Storage JSON implementation of gsutil Cloud API."""
  def __init__(self, bucket_storage_uri_class, logger, provider=None,
               credentials=None, debug=0):
    """Performs necessary setup for interacting with Google Cloud Storage.

    Args:
      bucket_storage_uri_class: Unused.
      logger: logging.logger for outputting log messages.
      provider: Unused. This implementation supports only Google Cloud Storage.
      credentials: Credentials to be used for interacting with Google Cloud
                   Storage.
      debug: Debug level for the API implementation (0..3).
    """
    # TODO: Plumb host_header for perfdiag / test_perfdiag.
    # TODO: Add jitter to apitools' http_wrapper retry mechanism.
    super(GcsJsonApi, self).__init__(bucket_storage_uri_class, logger,
                                     provider='gs', debug=debug)
    no_op_credentials = False
    if not credentials:
      # Nothing passed in; discover credentials from boto config/environment,
      # falling back to anonymous (no-op) credentials.
      loaded_credentials = self._CheckAndGetCredentials(logger)
      if not loaded_credentials:
        loaded_credentials = NoOpCredentials()
        no_op_credentials = True
    else:
      if isinstance(credentials, NoOpCredentials):
        no_op_credentials = True

    self.credentials = credentials or loaded_credentials
    self.certs_file = GetCertsFile()
    self.http = GetNewHttp()
    self.http_base = 'https://'
    gs_json_host = config.get('Credentials', 'gs_json_host', None)
    self.host_base = gs_json_host or 'www.googleapis.com'

    if not gs_json_host:
      # Setting only the XML-API host would silently be ignored here; make
      # the misconfiguration an explicit error.
      gs_host = config.get('Credentials', 'gs_host', None)
      if gs_host:
        raise ArgumentException(
            'JSON API is selected but gs_json_host is not configured, '
            'while gs_host is configured to %s. Please also configure '
            'gs_json_host and gs_json_port to match your desired endpoint.'
            % gs_host)

    gs_json_port = config.get('Credentials', 'gs_json_port', None)

    if not gs_json_port:
      # Same check as above, for the port.
      gs_port = config.get('Credentials', 'gs_port', None)
      if gs_port:
        raise ArgumentException(
            'JSON API is selected but gs_json_port is not configured, '
            'while gs_port is configured to %s. Please also configure '
            'gs_json_host and gs_json_port to match your desired endpoint.'
            % gs_port)
      self.host_port = ''
    else:
      self.host_port = ':' + config.get('Credentials', 'gs_json_port')

    self.api_version = config.get('GSUtil', 'json_api_version',
                                  DEFAULT_GCS_JSON_VERSION)
    self.url_base = (self.http_base + self.host_base + self.host_port + '/' +
                     'storage/' + self.api_version + '/')

    # Key the cached-token store by API version so tokens for different
    # versions don't collide.
    self.credentials.set_store(
        multistore_file.get_credential_storage_custom_string_key(
            GetCredentialStoreFilename(), self.api_version))

    self.num_retries = GetNumRetries()

    log_request = (debug >= 3)
    log_response = (debug >= 3)

    self.api_client = apitools_client.StorageV1(
        url=self.url_base, http=self.http, log_request=log_request,
        log_response=log_response, credentials=self.credentials,
        version=self.api_version)
    self.api_client.num_retries = self.num_retries

    if no_op_credentials:
      # This API key is not secret and is used to identify gsutil during
      # anonymous requests.
      self.api_client.AddGlobalParam('key',
                                     u'AIzaSyDnacJHrKma0048b13sh8cgxNUwulubmJM')
  def _CheckAndGetCredentials(self, logger):
    """Returns configured credentials, or None if none are configured.

    Tries, in order: OAuth2 user account, OAuth2 service account, GCE
    service account, Cloud Shell (devshell). Raises CommandException when
    more than one credential type is explicitly configured; re-raises any
    authentication failure after logging a hint naming the failing type.
    """
    configured_cred_types = []
    try:
      if self._HasOauth2UserAccountCreds():
        configured_cred_types.append(CredTypes.OAUTH2_USER_ACCOUNT)
      if self._HasOauth2ServiceAccountCreds():
        configured_cred_types.append(CredTypes.OAUTH2_SERVICE_ACCOUNT)
      if len(configured_cred_types) > 1:
        # We only allow one set of configured credentials. Otherwise, we're
        # choosing one arbitrarily, which can be very confusing to the user
        # (e.g., if only one is authorized to perform some action) and can
        # also mask errors.
        # Because boto merges config files, GCE credentials show up by default
        # for GCE VMs. We don't want to fail when a user creates a boto file
        # with their own credentials, so in this case we'll use the OAuth2
        # user credentials.
        failed_cred_type = None
        raise CommandException(
            ('You have multiple types of configured credentials (%s), which is '
             'not supported. One common way this happens is if you run gsutil '
             'config to create credentials and later run gcloud auth, and '
             'create a second set of credentials. Your boto config path is: '
             '%s. For more help, see "gsutil help creds".')
            % (configured_cred_types, GetBotoConfigFileList()))
      # failed_cred_type is reassigned before each attempt so the except
      # block below can report which credential type actually failed.
      failed_cred_type = CredTypes.OAUTH2_USER_ACCOUNT
      user_creds = self._GetOauth2UserAccountCreds()
      failed_cred_type = CredTypes.OAUTH2_SERVICE_ACCOUNT
      service_account_creds = self._GetOauth2ServiceAccountCreds()
      failed_cred_type = CredTypes.GCE
      gce_creds = self._GetGceCreds()
      failed_cred_type = CredTypes.DEVSHELL
      devshell_creds = self._GetDevshellCreds()
      return user_creds or service_account_creds or gce_creds or devshell_creds
    except:  # pylint: disable=bare-except
      # If we didn't actually try to authenticate because there were multiple
      # types of configured credentials, don't emit this warning.
      if failed_cred_type:
        if os.environ.get('CLOUDSDK_WRAPPER') == '1':
          # Running under gcloud's wrapper; point the user at gcloud auth.
          logger.warn(
              'Your "%s" credentials are invalid. Please run\n'
              ' $ gcloud auth login', failed_cred_type)
        else:
          logger.warn(
              'Your "%s" credentials are invalid. For more help, see '
              '"gsutil help creds", or re-run the gsutil config command (see '
              '"gsutil help config").', failed_cred_type)

      # If there's any set of configured credentials, we'll fail if they're
      # invalid, rather than silently falling back to anonymous config (as
      # boto does). That approach leads to much confusion if users don't
      # realize their credentials are invalid.
      raise
  def _HasOauth2ServiceAccountCreds(self):
    """Returns True if the boto config has a gs_service_key_file entry."""
    return config.has_option('Credentials', 'gs_service_key_file')
  def _HasOauth2UserAccountCreds(self):
    """Returns True if the boto config has a gs_oauth2_refresh_token entry."""
    return config.has_option('Credentials', 'gs_oauth2_refresh_token')
  def _HasGceCreds(self):
    """Returns True if the boto config has a GoogleCompute service_account."""
    return config.has_option('GoogleCompute', 'service_account')
  def _GetOauth2ServiceAccountCreds(self):
    """Returns OAuth2 service account credentials, or None if unconfigured."""
    # Implicitly returns None when no service account key is configured.
    if self._HasOauth2ServiceAccountCreds():
      return oauth2_helper.OAuth2ClientFromBotoConfig(
          boto.config,
          cred_type=CredTypes.OAUTH2_SERVICE_ACCOUNT).GetCredentials()
  def _GetOauth2UserAccountCreds(self):
    """Returns OAuth2 user account credentials, or None if unconfigured."""
    # Implicitly returns None when no refresh token is configured.
    if self._HasOauth2UserAccountCreds():
      return oauth2_helper.OAuth2ClientFromBotoConfig(
          boto.config).GetCredentials()
  def _GetGceCreds(self):
    """Returns GCE service account credentials, or None if unavailable."""
    if self._HasGceCreds():
      try:
        return credentials_lib.GceAssertionCredentials(
            cache_filename=GetGceCredentialCacheFilename())
      except apitools_exceptions.ResourceUnavailableError, e:
        # A VM whose configured service account does not exist is treated as
        # "no GCE credentials" rather than a hard failure.
        if 'service account' in str(e) and 'does not exist' in str(e):
          return None
        raise
def _GetDevshellCreds(self):
try:
return devshell.DevshellCredentials()
except devshell.NoDevshellServer:
return None
except:
raise
  def _GetNewDownloadHttp(self, download_stream):
    """Returns an Http object whose responses are written to download_stream."""
    return GetNewHttp(http_class=HttpWithDownloadStream, stream=download_stream)
  def _GetNewUploadHttp(self):
    """Returns an upload-safe Http object (by disabling httplib2 retries)."""
    return GetNewHttp(http_class=HttpWithNoRetries)
  def GetBucket(self, bucket_name, provider=None, fields=None):
    """See CloudApi class for function doc strings."""
    projection = (apitools_messages.StorageBucketsGetRequest
                  .ProjectionValueValuesEnum.full)
    apitools_request = apitools_messages.StorageBucketsGetRequest(
        bucket=bucket_name, projection=projection)
    global_params = apitools_messages.StandardQueryParameters()
    if fields:
      # Restrict the response to the requested fields (partial response).
      global_params.fields = ','.join(set(fields))

    # Here and in list buckets, we have no way of knowing
    # whether we requested a field and didn't get it because it didn't exist
    # or because we didn't have permission to access it.
    try:
      return self.api_client.buckets.Get(apitools_request,
                                         global_params=global_params)
    except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
      self._TranslateExceptionAndRaise(e, bucket_name=bucket_name)
  def PatchBucket(self, bucket_name, metadata, canned_acl=None,
                  canned_def_acl=None, preconditions=None, provider=None,
                  fields=None):
    """See CloudApi class for function doc strings."""
    projection = (apitools_messages.StorageBucketsPatchRequest
                  .ProjectionValueValuesEnum.full)
    bucket_metadata = metadata

    if not preconditions:
      preconditions = Preconditions()

    # For blank metadata objects, we need to explicitly call
    # them out to apitools so it will send/erase them.
    apitools_include_fields = []
    for metadata_field in ('metadata', 'lifecycle', 'logging', 'versioning',
                           'website'):
      attr = getattr(bucket_metadata, metadata_field, None)
      # An attribute that is present but serializes to an empty message is a
      # request to clear that configuration on the bucket.
      if attr and not encoding.MessageToDict(attr):
        setattr(bucket_metadata, metadata_field, None)
        apitools_include_fields.append(metadata_field)

    if bucket_metadata.cors and bucket_metadata.cors == REMOVE_CORS_CONFIG:
      # The REMOVE_CORS_CONFIG sentinel asks for the CORS config to be erased.
      bucket_metadata.cors = []
      apitools_include_fields.append('cors')

    predefined_acl = None
    if canned_acl:
      # Must null out existing ACLs to apply a canned ACL.
      apitools_include_fields.append('acl')
      predefined_acl = (
          apitools_messages.StorageBucketsPatchRequest.
          PredefinedAclValueValuesEnum(
              self._BucketCannedAclToPredefinedAcl(canned_acl)))

    predefined_def_acl = None
    if canned_def_acl:
      # Must null out existing default object ACLs to apply a canned ACL.
      apitools_include_fields.append('defaultObjectAcl')
      predefined_def_acl = (
          apitools_messages.StorageBucketsPatchRequest.
          PredefinedDefaultObjectAclValueValuesEnum(
              self._ObjectCannedAclToPredefinedAcl(canned_def_acl)))

    apitools_request = apitools_messages.StorageBucketsPatchRequest(
        bucket=bucket_name, bucketResource=bucket_metadata,
        projection=projection,
        ifMetagenerationMatch=preconditions.meta_gen_match,
        predefinedAcl=predefined_acl,
        predefinedDefaultObjectAcl=predefined_def_acl)
    global_params = apitools_messages.StandardQueryParameters()
    if fields:
      global_params.fields = ','.join(set(fields))
    with self.api_client.IncludeFields(apitools_include_fields):
      try:
        return self.api_client.buckets.Patch(apitools_request,
                                             global_params=global_params)
      except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
        self._TranslateExceptionAndRaise(e)
  def CreateBucket(self, bucket_name, project_id=None, metadata=None,
                   provider=None, fields=None):
    """See CloudApi class for function doc strings."""
    projection = (apitools_messages.StorageBucketsInsertRequest
                  .ProjectionValueValuesEnum.full)
    if not metadata:
      metadata = apitools_messages.Bucket()
    metadata.name = bucket_name

    # The service expects upper-case location and storage class values.
    if metadata.location:
      metadata.location = metadata.location.upper()
    if metadata.storageClass:
      metadata.storageClass = metadata.storageClass.upper()

    project_id = PopulateProjectId(project_id)

    apitools_request = apitools_messages.StorageBucketsInsertRequest(
        bucket=metadata, project=project_id, projection=projection)
    global_params = apitools_messages.StandardQueryParameters()
    if fields:
      global_params.fields = ','.join(set(fields))
    try:
      return self.api_client.buckets.Insert(apitools_request,
                                            global_params=global_params)
    except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
      self._TranslateExceptionAndRaise(e, bucket_name=bucket_name)
  def DeleteBucket(self, bucket_name, preconditions=None, provider=None):
    """See CloudApi class for function doc strings."""
    if not preconditions:
      preconditions = Preconditions()

    apitools_request = apitools_messages.StorageBucketsDeleteRequest(
        bucket=bucket_name, ifMetagenerationMatch=preconditions.meta_gen_match)

    try:
      self.api_client.buckets.Delete(apitools_request)
    except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
      if isinstance(
          self._TranslateApitoolsException(e, bucket_name=bucket_name),
          NotEmptyException):
        # If bucket is not empty, check to see if versioning is enabled and
        # signal that in the exception if it is.
        bucket_metadata = self.GetBucket(bucket_name,
                                         fields=['versioning'])
        if bucket_metadata.versioning and bucket_metadata.versioning.enabled:
          raise NotEmptyException('VersionedBucketNotEmpty',
                                  status=e.status_code)
      # Otherwise translate and raise the original error.
      self._TranslateExceptionAndRaise(e, bucket_name=bucket_name)
def ListBuckets(self, project_id=None, provider=None, fields=None):
"""See CloudApi class for function doc strings."""
projection = (apitools_messages.StorageBucketsListRequest
.ProjectionValueValuesEnum.full)
project_id = PopulateProjectId(project_id)
apitools_request = apitools_messages.StorageBucketsListRequest(
project=project_id, maxResults=NUM_BUCKETS_PER_LIST_PAGE,
projection=projection)
global_params = apitools_messages.StandardQueryParameters()
if fields:
if 'nextPageToken' not in fields:
fields.add('nextPageToken')
global_params.fields = ','.join(set(fields))
try:
bucket_list = self.api_client.buckets.List(apitools_request,
global_params=global_params)
except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e)
for bucket in self._YieldBuckets(bucket_list):
yield bucket
while bucket_list.nextPageToken:
apitools_request = apitools_messages.StorageBucketsListRequest(
project=project_id, pageToken=bucket_list.nextPageToken,
maxResults=NUM_BUCKETS_PER_LIST_PAGE, projection=projection)
try:
bucket_list = self.api_client.buckets.List(apitools_request,
global_params=global_params)
except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e)
for bucket in self._YieldBuckets(bucket_list):
yield bucket
def _YieldBuckets(self, bucket_list):
"""Yields buckets from a list returned by apitools."""
if bucket_list.items:
for bucket in bucket_list.items:
yield bucket
  def ListObjects(self, bucket_name, prefix=None, delimiter=None,
                  all_versions=None, provider=None, fields=None):
    """Lists objects (and prefixes) in a bucket, transparently paging.

    See CloudApi class for function doc strings.

    Yields:
      CloudApi.CsObjectOrPrefix wrappers; within each page, objects are
      yielded before prefixes (see _YieldObjectsAndPrefixes).
    """
    projection = (apitools_messages.StorageObjectsListRequest
                  .ProjectionValueValuesEnum.full)
    apitools_request = apitools_messages.StorageObjectsListRequest(
        bucket=bucket_name, prefix=prefix, delimiter=delimiter,
        versions=all_versions, projection=projection,
        maxResults=NUM_OBJECTS_PER_LIST_PAGE)
    global_params = apitools_messages.StandardQueryParameters()
    if fields:
      # Work on a copy; the caller may have passed a list or tuple.
      fields = set(fields)
      if 'nextPageToken' not in fields:
        # nextPageToken must be requested to drive the pagination loop below.
        fields.add('nextPageToken')
      global_params.fields = ','.join(fields)
    try:
      object_list = self.api_client.objects.List(apitools_request,
                                                 global_params=global_params)
    except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
      self._TranslateExceptionAndRaise(e, bucket_name=bucket_name)
    for object_or_prefix in self._YieldObjectsAndPrefixes(object_list):
      yield object_or_prefix
    # Keep requesting pages until the service stops returning a page token.
    while object_list.nextPageToken:
      apitools_request = apitools_messages.StorageObjectsListRequest(
          bucket=bucket_name, prefix=prefix, delimiter=delimiter,
          versions=all_versions, projection=projection,
          pageToken=object_list.nextPageToken,
          maxResults=NUM_OBJECTS_PER_LIST_PAGE)
      try:
        object_list = self.api_client.objects.List(apitools_request,
                                                   global_params=global_params)
      except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
        self._TranslateExceptionAndRaise(e, bucket_name=bucket_name)
      for object_or_prefix in self._YieldObjectsAndPrefixes(object_list):
        yield object_or_prefix
def _YieldObjectsAndPrefixes(self, object_list):
if object_list.items:
for cloud_obj in object_list.items:
yield CloudApi.CsObjectOrPrefix(cloud_obj,
CloudApi.CsObjectOrPrefixType.OBJECT)
if object_list.prefixes:
for prefix in object_list.prefixes:
yield CloudApi.CsObjectOrPrefix(prefix,
CloudApi.CsObjectOrPrefixType.PREFIX)
def GetObjectMetadata(self, bucket_name, object_name, generation=None,
provider=None, fields=None):
"""See CloudApi class for function doc strings."""
projection = (apitools_messages.StorageObjectsGetRequest
.ProjectionValueValuesEnum.full)
if generation:
generation = long(generation)
apitools_request = apitools_messages.StorageObjectsGetRequest(
bucket=bucket_name, object=object_name, projection=projection,
generation=generation)
global_params = apitools_messages.StandardQueryParameters()
if fields:
global_params.fields = ','.join(set(fields))
try:
return self.api_client.objects.Get(apitools_request,
global_params=global_params)
except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, bucket_name=bucket_name,
object_name=object_name,
generation=generation)
  def GetObjectMedia(
      self, bucket_name, object_name, download_stream,
      provider=None, generation=None, object_size=None,
      download_strategy=CloudApi.DownloadStrategy.ONE_SHOT, start_byte=0,
      end_byte=None, progress_callback=None, serialization_data=None,
      digesters=None):
    """Downloads object data into download_stream.

    See CloudApi class for function doc strings.

    Raises:
      ArgumentException: if progress_callback is requested but the total
        download size cannot be determined from object_size or
        serialization_data.
    """
    # This implementation will get the object metadata first if we don't pass it
    # in via serialization_data.
    if generation:
      generation = long(generation)
    # Determine the total size: prefer the size recorded in serialization_data
    # (a resumed download), falling back to the caller-supplied object_size.
    outer_total_size = object_size
    if serialization_data:
      outer_total_size = json.loads(serialization_data)['total_size']
    if progress_callback:
      if outer_total_size is None:
        raise ArgumentException('Download size is required when callbacks are '
                                'requested for a download, but no size was '
                                'provided.')
      # Report initial progress immediately.
      progress_callback(0, outer_total_size)
    bytes_downloaded_container = BytesTransferredContainer()
    bytes_downloaded_container.bytes_transferred = start_byte
    # Build an HTTP connection class that reports progress and feeds digesters
    # as bytes arrive, and install it on a dedicated download http object.
    callback_class_factory = DownloadCallbackConnectionClassFactory(
        bytes_downloaded_container, total_size=outer_total_size,
        progress_callback=progress_callback, digesters=digesters)
    download_http_class = callback_class_factory.GetConnectionClass()
    download_http = self._GetNewDownloadHttp(download_stream)
    download_http.connections = {'https': download_http_class}
    authorized_download_http = self.credentials.authorize(download_http)
    WrapDownloadHttpRequest(authorized_download_http)
    if serialization_data:
      # Resuming: reconstruct the apitools Download from its saved state.
      apitools_download = apitools_transfer.Download.FromData(
          download_stream, serialization_data, self.api_client.http,
          num_retries=self.num_retries)
    else:
      # Fresh download; transfer is started explicitly by the helpers below.
      apitools_download = apitools_transfer.Download.FromStream(
          download_stream, auto_transfer=False, total_size=object_size,
          num_retries=self.num_retries)
    apitools_download.bytes_http = authorized_download_http
    apitools_request = apitools_messages.StorageObjectsGetRequest(
        bucket=bucket_name, object=object_name, generation=generation)
    try:
      if download_strategy == CloudApi.DownloadStrategy.RESUMABLE:
        # Disable retries in apitools. We will handle them explicitly here.
        apitools_download.retry_func = (
            apitools_http_wrapper.RethrowExceptionHandler)
        return self._PerformResumableDownload(
            bucket_name, object_name, download_stream, apitools_request,
            apitools_download, bytes_downloaded_container,
            generation=generation, start_byte=start_byte, end_byte=end_byte,
            serialization_data=serialization_data)
      else:
        return self._PerformDownload(
            bucket_name, object_name, download_stream, apitools_request,
            apitools_download, generation=generation, start_byte=start_byte,
            end_byte=end_byte, serialization_data=serialization_data)
    except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
      self._TranslateExceptionAndRaise(e, bucket_name=bucket_name,
                                       object_name=object_name,
                                       generation=generation)
  def _PerformResumableDownload(
      self, bucket_name, object_name, download_stream, apitools_request,
      apitools_download, bytes_downloaded_container, generation=None,
      start_byte=0, end_byte=None, serialization_data=None):
    """Runs _PerformDownload with retry/resume handling.

    Each retry resumes from download_stream's current position, and the
    retry budget is reset whenever a retry makes forward progress.

    Returns:
      The value returned by _PerformDownload (the content encoding).

    Raises:
      ResumableDownloadException: after num_retries consecutive failures
        without forward progress.
    """
    retries = 0
    last_progress_byte = start_byte
    while retries <= self.num_retries:
      try:
        return self._PerformDownload(
            bucket_name, object_name, download_stream, apitools_request,
            apitools_download, generation=generation, start_byte=start_byte,
            end_byte=end_byte, serialization_data=serialization_data)
      except HTTP_TRANSFER_EXCEPTIONS, e:
        # Resume from however many bytes made it into the stream.
        start_byte = download_stream.tell()
        bytes_downloaded_container.bytes_transferred = start_byte
        if start_byte > last_progress_byte:
          # We've made progress, so allow a fresh set of retries.
          last_progress_byte = start_byte
          retries = 0
        retries += 1
        if retries > self.num_retries:
          raise ResumableDownloadException(
              'Transfer failed after %d retries. Final exception: %s' %
              (self.num_retries, str(e)))
        time.sleep(CalculateWaitForRetry(retries, max_wait=GetMaxRetryDelay()))
        self.logger.debug(
            'Retrying download from byte %s after exception: %s',
            start_byte, str(e))
        # The connection may be in a bad state after the failure; rebuild it
        # before the next attempt.
        apitools_http_wrapper.RebuildHttpConnections(
            apitools_download.bytes_http)
  def _PerformDownload(
      self, bucket_name, object_name, download_stream, apitools_request,
      apitools_download, generation=None, start_byte=0, end_byte=None,
      serialization_data=None):
    """Issues a single download attempt via apitools.

    If serialization_data is absent, first performs an objects.Get to
    initialize the download state. Returns apitools_download.encoding (the
    content encoding of the downloaded object).
    """
    if not serialization_data:
      try:
        self.api_client.objects.Get(apitools_request,
                                    download=apitools_download)
      except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
        self._TranslateExceptionAndRaise(e, bucket_name=bucket_name,
                                         object_name=object_name,
                                         generation=generation)
    # Disable apitools' default print callbacks.
    def _NoOpCallback(unused_response, unused_download_object):
      pass
    # TODO: If we have a resumable download with accept-encoding:gzip
    # on a object that is compressible but not in gzip form in the cloud,
    # on-the-fly compression will gzip the object. In this case if our
    # download breaks, future requests will ignore the range header and just
    # return the object (gzipped) in its entirety. Ideally, we would unzip
    # the bytes that we have locally and send a range request without
    # accept-encoding:gzip so that we can download only the (uncompressed) bytes
    # that we don't yet have.
    # Since bytes_http is created in this function, we don't get the
    # user-agent header from api_client's http automatically.
    additional_headers = {
        'accept-encoding': 'gzip',
        'user-agent': self.api_client.user_agent
    }
    if start_byte or end_byte:
      # Partial download: request only the byte range we still need.
      apitools_download.GetRange(additional_headers=additional_headers,
                                 start=start_byte, end=end_byte)
    else:
      # Full download, streamed chunk by chunk.
      apitools_download.StreamInChunks(
          callback=_NoOpCallback, finish_callback=_NoOpCallback,
          additional_headers=additional_headers)
    return apitools_download.encoding
  def PatchObjectMetadata(self, bucket_name, object_name, metadata,
                          canned_acl=None, generation=None, preconditions=None,
                          provider=None, fields=None):
    """Patches object metadata, optionally applying a canned ACL.

    See CloudApi class for function doc strings.
    """
    projection = (apitools_messages.StorageObjectsPatchRequest
                  .ProjectionValueValuesEnum.full)
    if not preconditions:
      # No preconditions supplied; use an empty (no-op) set.
      preconditions = Preconditions()
    if generation:
      generation = long(generation)
    predefined_acl = None
    apitools_include_fields = []
    if canned_acl:
      # Must null out existing ACLs to apply a canned ACL.
      apitools_include_fields.append('acl')
      predefined_acl = (
          apitools_messages.StorageObjectsPatchRequest.
          PredefinedAclValueValuesEnum(
              self._ObjectCannedAclToPredefinedAcl(canned_acl)))
    apitools_request = apitools_messages.StorageObjectsPatchRequest(
        bucket=bucket_name, object=object_name, objectResource=metadata,
        generation=generation, projection=projection,
        ifGenerationMatch=preconditions.gen_match,
        ifMetagenerationMatch=preconditions.meta_gen_match,
        predefinedAcl=predefined_acl)
    global_params = apitools_messages.StandardQueryParameters()
    if fields:
      global_params.fields = ','.join(set(fields))
    try:
      # IncludeFields forces the listed fields (e.g. a nulled-out 'acl') to be
      # serialized in the request even though they are empty.
      with self.api_client.IncludeFields(apitools_include_fields):
        return self.api_client.objects.Patch(apitools_request,
                                             global_params=global_params)
    except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
      self._TranslateExceptionAndRaise(e, bucket_name=bucket_name,
                                       object_name=object_name,
                                       generation=generation)
  def _UploadObject(self, upload_stream, object_metadata, canned_acl=None,
                    size=None, preconditions=None, provider=None, fields=None,
                    serialization_data=None, tracker_callback=None,
                    progress_callback=None,
                    apitools_strategy=apitools_transfer.SIMPLE_UPLOAD,
                    total_size=0):
    # pylint: disable=g-doc-args
    """Upload implementation. Cloud API arguments, plus two more.

    Additional args:
      apitools_strategy: SIMPLE_UPLOAD or RESUMABLE_UPLOAD.
      total_size: Total size of the upload; None if it is unknown (streaming).

    Returns:
      Uploaded object metadata.
    """
    # pylint: enable=g-doc-args
    ValidateDstObjectMetadata(object_metadata)
    predefined_acl = None
    if canned_acl:
      predefined_acl = (
          apitools_messages.StorageObjectsInsertRequest.
          PredefinedAclValueValuesEnum(
              self._ObjectCannedAclToPredefinedAcl(canned_acl)))
    bytes_uploaded_container = BytesTransferredContainer()
    if progress_callback and size:
      # A known size overrides total_size; report initial progress now.
      total_size = size
      progress_callback(0, size)
    # Build an HTTP connection class that reports upload progress as bytes are
    # sent, and install it on a dedicated upload http object.
    callback_class_factory = UploadCallbackConnectionClassFactory(
        bytes_uploaded_container, total_size=total_size,
        progress_callback=progress_callback)
    upload_http = self._GetNewUploadHttp()
    upload_http_class = callback_class_factory.GetConnectionClass()
    upload_http.connections = {'http': upload_http_class,
                               'https': upload_http_class}
    authorized_upload_http = self.credentials.authorize(upload_http)
    WrapUploadHttpRequest(authorized_upload_http)
    # Since bytes_http is created in this function, we don't get the
    # user-agent header from api_client's http automatically.
    additional_headers = {
        'user-agent': self.api_client.user_agent
    }
    try:
      content_type = None
      apitools_request = None
      global_params = None
      if not serialization_data:
        # This is a new upload, set up initial upload state.
        content_type = object_metadata.contentType
        if not content_type:
          content_type = DEFAULT_CONTENT_TYPE
        if not preconditions:
          preconditions = Preconditions()
        apitools_request = apitools_messages.StorageObjectsInsertRequest(
            bucket=object_metadata.bucket, object=object_metadata,
            ifGenerationMatch=preconditions.gen_match,
            ifMetagenerationMatch=preconditions.meta_gen_match,
            predefinedAcl=predefined_acl)
        global_params = apitools_messages.StandardQueryParameters()
        if fields:
          global_params.fields = ','.join(set(fields))
      if apitools_strategy == apitools_transfer.SIMPLE_UPLOAD:
        # One-shot upload.
        apitools_upload = apitools_transfer.Upload(
            upload_stream, content_type, total_size=size, auto_transfer=True,
            num_retries=self.num_retries)
        apitools_upload.strategy = apitools_strategy
        apitools_upload.bytes_http = authorized_upload_http
        return self.api_client.objects.Insert(
            apitools_request,
            upload=apitools_upload,
            global_params=global_params)
      else:  # Resumable upload.
        return self._PerformResumableUpload(
            upload_stream, authorized_upload_http, content_type, size,
            serialization_data, apitools_strategy, apitools_request,
            global_params, bytes_uploaded_container, tracker_callback,
            additional_headers, progress_callback)
    except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
      self._TranslateExceptionAndRaise(e, bucket_name=object_metadata.bucket,
                                       object_name=object_metadata.name)
  def _PerformResumableUpload(
      self, upload_stream, authorized_upload_http, content_type, size,
      serialization_data, apitools_strategy, apitools_request, global_params,
      bytes_uploaded_container, tracker_callback, addl_headers,
      progress_callback):
    """Performs a resumable upload with explicit retry handling.

    Starts (or resumes, when serialization_data is set) an apitools resumable
    upload, then streams bytes until the upload completes, retrying on
    transfer failures. The retry budget is reset whenever a retry makes
    forward progress (the server acknowledges more bytes).

    Returns:
      The processed Insert response (uploaded object metadata).

    Raises:
      ResumableUploadException: after num_retries consecutive failures
        without progress, or when resumable state cannot be refreshed.
      A translated resumable-upload exception for recognized apitools errors.
    """
    try:
      if serialization_data:
        # Resuming an existing upload.
        apitools_upload = apitools_transfer.Upload.FromData(
            upload_stream, serialization_data, self.api_client.http,
            num_retries=self.num_retries)
        apitools_upload.chunksize = GetJsonResumableChunkSize()
        apitools_upload.bytes_http = authorized_upload_http
      else:
        # New resumable upload.
        apitools_upload = apitools_transfer.Upload(
            upload_stream, content_type, total_size=size,
            chunksize=GetJsonResumableChunkSize(), auto_transfer=False,
            num_retries=self.num_retries)
        apitools_upload.strategy = apitools_strategy
        apitools_upload.bytes_http = authorized_upload_http
        # This initial Insert establishes the resumable upload session.
        self.api_client.objects.Insert(
            apitools_request,
            upload=apitools_upload,
            global_params=global_params)
      # Disable retries in apitools. We will handle them explicitly here.
      apitools_upload.retry_func = (
          apitools_http_wrapper.RethrowExceptionHandler)
      # Disable apitools' default print callbacks.
      def _NoOpCallback(unused_response, unused_upload_object):
        pass
      # If we're resuming an upload, apitools has at this point received
      # from the server how many bytes it already has. Update our
      # callback class with this information.
      bytes_uploaded_container.bytes_transferred = apitools_upload.progress
      if tracker_callback:
        # Persist the upload state so a later invocation can resume it.
        tracker_callback(json.dumps(apitools_upload.serialization_data))
      retries = 0
      last_progress_byte = apitools_upload.progress
      while retries <= self.num_retries:
        try:
          # TODO: On retry, this will seek to the bytes that the server has,
          # causing the hash to be recalculated. Make HashingFileUploadWrapper
          # save a digest according to json_resumable_chunk_size.
          if size:
            # If size is known, we can send it all in one request and avoid
            # making a round-trip per chunk.
            http_response = apitools_upload.StreamMedia(
                callback=_NoOpCallback, finish_callback=_NoOpCallback,
                additional_headers=addl_headers)
          else:
            # Otherwise it's a streaming request and we need to ensure that we
            # send the bytes in chunks so that we can guarantee that we never
            # need to seek backwards more than our buffer (and also that the
            # chunks are aligned to 256KB).
            http_response = apitools_upload.StreamInChunks(
                callback=_NoOpCallback, finish_callback=_NoOpCallback,
                additional_headers=addl_headers)
          processed_response = self.api_client.objects.ProcessHttpResponse(
              self.api_client.objects.GetMethodConfig('Insert'), http_response)
          if size is None and progress_callback:
            # Make final progress callback; total size should now be known.
            # This works around the fact the send function counts header bytes.
            # However, this will make the progress appear to go slightly
            # backwards at the end.
            progress_callback(apitools_upload.total_size,
                              apitools_upload.total_size)
          return processed_response
        except HTTP_TRANSFER_EXCEPTIONS, e:
          apitools_http_wrapper.RebuildHttpConnections(
              apitools_upload.bytes_http)
          # Inner loop: ask the server how many bytes it has so we can
          # resume from the right offset. This itself may fail and is
          # retried against the same retry budget.
          while retries <= self.num_retries:
            try:
              # TODO: Simulate the refresh case in tests. Right now, our
              # mocks are not complex enough to simulate a failure.
              apitools_upload.RefreshResumableUploadState()
              start_byte = apitools_upload.progress
              bytes_uploaded_container.bytes_transferred = start_byte
              break
            except HTTP_TRANSFER_EXCEPTIONS, e2:
              apitools_http_wrapper.RebuildHttpConnections(
                  apitools_upload.bytes_http)
              retries += 1
              if retries > self.num_retries:
                raise ResumableUploadException(
                    'Transfer failed after %d retries. Final exception: %s' %
                    (self.num_retries, e2))
              time.sleep(
                  CalculateWaitForRetry(retries, max_wait=GetMaxRetryDelay()))
          if start_byte > last_progress_byte:
            # We've made progress, so allow a fresh set of retries.
            last_progress_byte = start_byte
            retries = 0
          else:
            retries += 1
            if retries > self.num_retries:
              raise ResumableUploadException(
                  'Transfer failed after %d retries. Final exception: %s' %
                  (self.num_retries, e))
            time.sleep(
                CalculateWaitForRetry(retries, max_wait=GetMaxRetryDelay()))
          self.logger.debug(
              'Retrying upload from byte %s after exception: %s.',
              start_byte, str(e))
    except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
      resumable_ex = self._TranslateApitoolsResumableUploadException(e)
      if resumable_ex:
        raise resumable_ex
      else:
        raise
def UploadObject(self, upload_stream, object_metadata, canned_acl=None,
size=None, preconditions=None, progress_callback=None,
provider=None, fields=None):
"""See CloudApi class for function doc strings."""
return self._UploadObject(
upload_stream, object_metadata, canned_acl=canned_acl,
size=size, preconditions=preconditions,
progress_callback=progress_callback, fields=fields,
apitools_strategy=apitools_transfer.SIMPLE_UPLOAD)
def UploadObjectStreaming(self, upload_stream, object_metadata,
canned_acl=None, preconditions=None,
progress_callback=None, provider=None,
fields=None):
"""See CloudApi class for function doc strings."""
# Streaming indicated by not passing a size.
# Resumable capabilities are present up to the resumable chunk size using
# a buffered stream.
return self._UploadObject(
upload_stream, object_metadata, canned_acl=canned_acl,
preconditions=preconditions, progress_callback=progress_callback,
fields=fields, apitools_strategy=apitools_transfer.RESUMABLE_UPLOAD,
total_size=None)
def UploadObjectResumable(
self, upload_stream, object_metadata, canned_acl=None, preconditions=None,
provider=None, fields=None, size=None, serialization_data=None,
tracker_callback=None, progress_callback=None):
"""See CloudApi class for function doc strings."""
return self._UploadObject(
upload_stream, object_metadata, canned_acl=canned_acl,
preconditions=preconditions, fields=fields, size=size,
serialization_data=serialization_data,
tracker_callback=tracker_callback, progress_callback=progress_callback,
apitools_strategy=apitools_transfer.RESUMABLE_UPLOAD)
  def CopyObject(self, src_obj_metadata, dst_obj_metadata, src_generation=None,
                 canned_acl=None, preconditions=None, progress_callback=None,
                 max_bytes_per_call=None, provider=None, fields=None):
    """Copies an object server-side via the Rewrite API, resuming if possible.

    See CloudApi class for function doc strings.
    """
    ValidateDstObjectMetadata(dst_obj_metadata)
    predefined_acl = None
    if canned_acl:
      predefined_acl = (
          apitools_messages.StorageObjectsRewriteRequest.
          DestinationPredefinedAclValueValuesEnum(
              self._ObjectCannedAclToPredefinedAcl(canned_acl)))
    if src_generation:
      src_generation = long(src_generation)
    if not preconditions:
      preconditions = Preconditions()
    projection = (apitools_messages.StorageObjectsRewriteRequest.
                  ProjectionValueValuesEnum.full)
    global_params = apitools_messages.StandardQueryParameters()
    if fields:
      # Rewrite returns the resultant object under the 'resource' field.
      new_fields = set(['done', 'objectSize', 'rewriteToken',
                        'totalBytesRewritten'])
      for field in fields:
        new_fields.add('resource/' + field)
      global_params.fields = ','.join(set(new_fields))
    # Check to see if we are resuming a rewrite.
    tracker_file_name = GetRewriteTrackerFilePath(
        src_obj_metadata.bucket, src_obj_metadata.name, dst_obj_metadata.bucket,
        dst_obj_metadata.name, 'JSON')
    # The tracker file is only honored if the rewrite parameters are
    # identical; otherwise ReadRewriteTrackerFile yields no token.
    rewrite_params_hash = HashRewriteParameters(
        src_obj_metadata, dst_obj_metadata, projection,
        src_generation=src_generation, gen_match=preconditions.gen_match,
        meta_gen_match=preconditions.meta_gen_match,
        canned_acl=predefined_acl, fields=global_params.fields,
        max_bytes_per_call=max_bytes_per_call)
    resume_rewrite_token = ReadRewriteTrackerFile(tracker_file_name,
                                                  rewrite_params_hash)
    progress_cb_with_backoff = None
    try:
      last_bytes_written = 0L
      # Issue Rewrite calls until the service reports the copy is done.
      while True:
        apitools_request = apitools_messages.StorageObjectsRewriteRequest(
            sourceBucket=src_obj_metadata.bucket,
            sourceObject=src_obj_metadata.name,
            destinationBucket=dst_obj_metadata.bucket,
            destinationObject=dst_obj_metadata.name,
            projection=projection, object=dst_obj_metadata,
            sourceGeneration=src_generation,
            ifGenerationMatch=preconditions.gen_match,
            ifMetagenerationMatch=preconditions.meta_gen_match,
            destinationPredefinedAcl=predefined_acl,
            rewriteToken=resume_rewrite_token,
            maxBytesRewrittenPerCall=max_bytes_per_call)
        rewrite_response = self.api_client.objects.Rewrite(
            apitools_request, global_params=global_params)
        bytes_written = long(rewrite_response.totalBytesRewritten)
        if progress_callback and not progress_cb_with_backoff:
          # Lazily created so we can use the object size from the response.
          progress_cb_with_backoff = ProgressCallbackWithBackoff(
              long(rewrite_response.objectSize), progress_callback)
        if progress_cb_with_backoff:
          # Report only the delta since the previous call.
          progress_cb_with_backoff.Progress(
              bytes_written - last_bytes_written)
        if rewrite_response.done:
          break
        elif not resume_rewrite_token:
          # Save the token and make a tracker file if they don't already exist.
          resume_rewrite_token = rewrite_response.rewriteToken
          WriteRewriteTrackerFile(tracker_file_name, rewrite_params_hash,
                                  rewrite_response.rewriteToken)
        last_bytes_written = bytes_written
      # Rewrite complete; the tracker file is no longer needed.
      DeleteTrackerFile(tracker_file_name)
      return rewrite_response.resource
    except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
      self._TranslateExceptionAndRaise(e, bucket_name=dst_obj_metadata.bucket,
                                       object_name=dst_obj_metadata.name)
def DeleteObject(self, bucket_name, object_name, preconditions=None,
generation=None, provider=None):
"""See CloudApi class for function doc strings."""
if not preconditions:
preconditions = Preconditions()
if generation:
generation = long(generation)
apitools_request = apitools_messages.StorageObjectsDeleteRequest(
bucket=bucket_name, object=object_name, generation=generation,
ifGenerationMatch=preconditions.gen_match,
ifMetagenerationMatch=preconditions.meta_gen_match)
try:
return self.api_client.objects.Delete(apitools_request)
except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, bucket_name=bucket_name,
object_name=object_name,
generation=generation)
  def ComposeObject(self, src_objs_metadata, dst_obj_metadata,
                    preconditions=None, provider=None, fields=None):
    """Composes source objects into a single destination object.

    See CloudApi class for function doc strings.

    NOTE: mutates dst_obj_metadata in place, clearing its name and bucket
    fields (they are carried in the request's destinationBucket/Object
    parameters instead).
    """
    ValidateDstObjectMetadata(dst_obj_metadata)
    dst_obj_name = dst_obj_metadata.name
    dst_obj_metadata.name = None
    dst_bucket_name = dst_obj_metadata.bucket
    dst_obj_metadata.bucket = None
    if not dst_obj_metadata.contentType:
      dst_obj_metadata.contentType = DEFAULT_CONTENT_TYPE
    if not preconditions:
      preconditions = Preconditions()
    global_params = apitools_messages.StandardQueryParameters()
    if fields:
      global_params.fields = ','.join(set(fields))
    src_objs_compose_request = apitools_messages.ComposeRequest(
        sourceObjects=src_objs_metadata, destination=dst_obj_metadata)
    apitools_request = apitools_messages.StorageObjectsComposeRequest(
        composeRequest=src_objs_compose_request,
        destinationBucket=dst_bucket_name,
        destinationObject=dst_obj_name,
        ifGenerationMatch=preconditions.gen_match,
        ifMetagenerationMatch=preconditions.meta_gen_match)
    try:
      return self.api_client.objects.Compose(apitools_request,
                                             global_params=global_params)
    except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
      # We can't be sure which object was missing in the 404 case.
      if isinstance(e, apitools_exceptions.HttpError) and e.status_code == 404:
        raise NotFoundException('One of the source objects does not exist.')
      else:
        self._TranslateExceptionAndRaise(e)
def WatchBucket(self, bucket_name, address, channel_id, token=None,
provider=None, fields=None):
"""See CloudApi class for function doc strings."""
projection = (apitools_messages.StorageObjectsWatchAllRequest
.ProjectionValueValuesEnum.full)
channel = apitools_messages.Channel(address=address, id=channel_id,
token=token, type='WEB_HOOK')
apitools_request = apitools_messages.StorageObjectsWatchAllRequest(
bucket=bucket_name, channel=channel, projection=projection)
global_params = apitools_messages.StandardQueryParameters()
if fields:
global_params.fields = ','.join(set(fields))
try:
return self.api_client.objects.WatchAll(apitools_request,
global_params=global_params)
except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, bucket_name=bucket_name)
def StopChannel(self, channel_id, resource_id, provider=None):
"""See CloudApi class for function doc strings."""
channel = apitools_messages.Channel(id=channel_id, resourceId=resource_id)
try:
self.api_client.channels.Stop(channel)
except TRANSLATABLE_APITOOLS_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e)
def _BucketCannedAclToPredefinedAcl(self, canned_acl_string):
"""Translates the input string to a bucket PredefinedAcl string.
Args:
canned_acl_string: Canned ACL string.
Returns:
String that can be used as a query parameter with the JSON API. This
corresponds to a flavor of *PredefinedAclValueValuesEnum and can be
used as input to apitools requests that affect bucket access controls.
"""
# XML : JSON
translation_dict = {
None: None,
'authenticated-read': 'authenticatedRead',
'private': 'private',
'project-private': 'projectPrivate',
'public-read': 'publicRead',
'public-read-write': 'publicReadWrite'
}
if canned_acl_string in translation_dict:
return translation_dict[canned_acl_string]
raise ArgumentException('Invalid canned ACL %s' % canned_acl_string)
def _ObjectCannedAclToPredefinedAcl(self, canned_acl_string):
"""Translates the input string to an object PredefinedAcl string.
Args:
canned_acl_string: Canned ACL string.
Returns:
String that can be used as a query parameter with the JSON API. This
corresponds to a flavor of *PredefinedAclValueValuesEnum and can be
used as input to apitools requests that affect object access controls.
"""
# XML : JSON
translation_dict = {
None: None,
'authenticated-read': 'authenticatedRead',
'bucket-owner-read': 'bucketOwnerRead',
'bucket-owner-full-control': 'bucketOwnerFullControl',
'private': 'private',
'project-private': 'projectPrivate',
'public-read': 'publicRead'
}
if canned_acl_string in translation_dict:
return translation_dict[canned_acl_string]
raise ArgumentException('Invalid canned ACL %s' % canned_acl_string)
  def _TranslateExceptionAndRaise(self, e, bucket_name=None, object_name=None,
                                  generation=None):
    """Translates an HTTP exception and raises the translated or original value.

    Args:
      e: Any Exception.
      bucket_name: Optional bucket name in request that caused the exception.
      object_name: Optional object name in request that caused the exception.
      generation: Optional generation in request that caused the exception.

    Raises:
      Translated CloudApi exception, or the original exception if it was not
      translatable.
    """
    translated_exception = self._TranslateApitoolsException(
        e, bucket_name=bucket_name, object_name=object_name,
        generation=generation)
    if translated_exception:
      raise translated_exception
    else:
      # Not translatable; re-raise the original exception with its traceback.
      # (Must be called from within an except block for the bare raise.)
      raise
def _GetMessageFromHttpError(self, http_error):
if isinstance(http_error, apitools_exceptions.HttpError):
if getattr(http_error, 'content', None):
try:
json_obj = json.loads(http_error.content)
if 'error' in json_obj and 'message' in json_obj['error']:
return json_obj['error']['message']
except Exception: # pylint: disable=broad-except
# If we couldn't decode anything, just leave the message as None.
pass
  def _TranslateApitoolsResumableUploadException(
      self, e, bucket_name=None, object_name=None, generation=None):
    """Translates an apitools exception for a resumable upload.

    Args:
      e: Exception raised during a resumable upload.
      bucket_name: Optional bucket name for the request that failed.
      object_name: Optional object name for the request that failed.
      generation: Optional generation for the request that failed.

    Returns:
      A ResumableUpload*Exception or ServiceException for recognized errors;
      None otherwise (the caller then falls back to re-raising).
    """
    if isinstance(e, apitools_exceptions.HttpError):
      message = self._GetMessageFromHttpError(e)
      if (e.status_code == 503 and
          self.http.disable_ssl_certificate_validation):
        # Special-cased 503 when certificate validation is disabled; see
        # _VALIDATE_CERTIFICATES_503_MESSAGE for the user-facing guidance.
        return ServiceException(_VALIDATE_CERTIFICATES_503_MESSAGE,
                                status=e.status_code)
      elif e.status_code >= 500:
        # 5xx: retryable server-side failure.
        return ResumableUploadException(
            message or 'Server Error', status=e.status_code)
      elif e.status_code == 429:
        # 429: retryable rate limiting.
        return ResumableUploadException(
            message or 'Too Many Requests', status=e.status_code)
      elif e.status_code == 410:
        # 410: the upload session is gone; the upload must start over.
        return ResumableUploadStartOverException(
            message or 'Bad Request', status=e.status_code)
      elif e.status_code >= 400:
        # Other 4xx: abort the upload entirely.
        return ResumableUploadAbortException(
            message or 'Bad Request', status=e.status_code)
    if isinstance(e, apitools_exceptions.StreamExhausted):
      return ResumableUploadAbortException(e.message)
    if (isinstance(e, apitools_exceptions.TransferError) and
        ('Aborting transfer' in e.message or
         'Not enough bytes in stream' in e.message or
         'additional bytes left in stream' in e.message)):
      return ResumableUploadAbortException(e.message)
  def _TranslateApitoolsException(self, e, bucket_name=None, object_name=None,
                                  generation=None):
    """Translates apitools exceptions into their gsutil Cloud Api equivalents.

    Args:
      e: Any exception in TRANSLATABLE_APITOOLS_EXCEPTIONS.
      bucket_name: Optional bucket name in request that caused the exception.
      object_name: Optional object name in request that caused the exception.
      generation: Optional generation in request that caused the exception.

    Returns:
      CloudStorageApiServiceException for translatable exceptions, None
      otherwise.
    """
    if isinstance(e, apitools_exceptions.HttpError):
      message = self._GetMessageFromHttpError(e)
      if e.status_code == 400:
        # It is possible that the Project ID is incorrect. Unfortunately the
        # JSON API does not give us much information about what part of the
        # request was bad.
        return BadRequestException(message or 'Bad Request',
                                   status=e.status_code)
      elif e.status_code == 401:
        if 'Login Required' in str(e):
          return AccessDeniedException(
              message or 'Access denied: login required.',
              status=e.status_code)
      elif e.status_code == 403:
        # 403 covers several distinct failure modes; match on the server's
        # message text to produce the most helpful gsutil exception.
        if 'The account for the specified project has been disabled' in str(e):
          return AccessDeniedException(message or 'Account disabled.',
                                       status=e.status_code)
        elif 'Daily Limit for Unauthenticated Use Exceeded' in str(e):
          return AccessDeniedException(
              message or 'Access denied: quota exceeded. '
              'Is your project ID valid?',
              status=e.status_code)
        elif 'The bucket you tried to delete was not empty.' in str(e):
          return NotEmptyException('BucketNotEmpty (%s)' % bucket_name,
                                   status=e.status_code)
        elif ('The bucket you tried to create requires domain ownership '
              'verification.' in str(e)):
          return AccessDeniedException(
              'The bucket you tried to create requires domain ownership '
              'verification. Please see '
              'https://developers.google.com/storage/docs/bucketnaming'
              '?hl=en#verification for more details.', status=e.status_code)
        elif 'User Rate Limit Exceeded' in str(e):
          return AccessDeniedException('Rate limit exceeded. Please retry this '
                                       'request later.', status=e.status_code)
        elif 'Access Not Configured' in str(e):
          return AccessDeniedException(
              'Access Not Configured. Please go to the Google Developers '
              'Console (https://cloud.google.com/console#/project) for your '
              'project, select APIs and Auth and enable the '
              'Google Cloud Storage JSON API.',
              status=e.status_code)
        else:
          return AccessDeniedException(message or e.message,
                                       status=e.status_code)
      elif e.status_code == 404:
        # Choose the most specific not-found exception available.
        if bucket_name:
          if object_name:
            return CreateObjectNotFoundException(e.status_code, self.provider,
                                                 bucket_name, object_name,
                                                 generation=generation)
          return CreateBucketNotFoundException(e.status_code, self.provider,
                                               bucket_name)
        return NotFoundException(e.message, status=e.status_code)
      elif e.status_code == 409 and bucket_name:
        if 'The bucket you tried to delete was not empty.' in str(e):
          return NotEmptyException('BucketNotEmpty (%s)' % bucket_name,
                                   status=e.status_code)
        return ServiceException(
            'Bucket %s already exists.' % bucket_name, status=e.status_code)
      elif e.status_code == 412:
        return PreconditionException(message, status=e.status_code)
      elif (e.status_code == 503 and
            not self.http.disable_ssl_certificate_validation):
        # Special-cased 503; see _VALIDATE_CERTIFICATES_503_MESSAGE for the
        # user-facing guidance.
        return ServiceException(_VALIDATE_CERTIFICATES_503_MESSAGE,
                                status=e.status_code)
      # Any other HTTP status falls through to a generic service error.
      return ServiceException(message, status=e.status_code)
    elif isinstance(e, apitools_exceptions.TransferInvalidError):
      return ServiceException('Transfer invalid (possible encoding error: %s)'
                              % str(e))
| |
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright 2014 International Business Machines Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
IPMI power manager driver.
Uses the 'ipmitool' command (http://ipmitool.sourceforge.net/) to remotely
manage hardware. This includes setting the boot device, getting a
serial-over-LAN console, and controlling the power state of the machine.
NOTE THAT CERTAIN DISTROS MAY INSTALL openipmi BY DEFAULT, INSTEAD OF ipmitool,
WHICH PROVIDES DIFFERENT COMMAND-LINE OPTIONS AND *IS NOT SUPPORTED* BY THIS
DRIVER.
"""
import contextlib
import os
import re
import tempfile
import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import excutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers.modules import console_utils
from ironic.openstack.common import log as logging
from ironic.openstack.common import loopingcall
# Global oslo.config handle.  The ipmi options themselves are declared in
# the ipminative driver module and shared with this module via import_opt.
CONF = cfg.CONF
CONF.import_opt('retry_timeout',
                'ironic.drivers.modules.ipminative',
                group='ipmi')
CONF.import_opt('min_command_interval',
                'ironic.drivers.modules.ipminative',
                group='ipmi')
LOG = logging.getLogger(__name__)
# Privilege levels accepted by the ipmitool '-L' option.
VALID_PRIV_LEVELS = ['ADMINISTRATOR', 'CALLBACK', 'OPERATOR', 'USER']
# driver_info fields every node using this driver must define.
REQUIRED_PROPERTIES = {
    'ipmi_address': _("IP address or hostname of the node. Required.")
}
# driver_info fields a node may optionally define.
OPTIONAL_PROPERTIES = {
    'ipmi_password': _("password. Optional."),
    'ipmi_priv_level': _("privilege level; default is ADMINISTRATOR. One of "
                         "%s. Optional.") % ', '.join(VALID_PRIV_LEVELS),
    'ipmi_username': _("username; default is NULL user. Optional."),
    'ipmi_bridging': _("bridging_type; default is \"no\". One of \"single\", "
                       "\"dual\", \"no\". Optional."),
    'ipmi_transit_channel': _("transit channel for bridged request. Required "
                              "only if ipmi_bridging is set to \"dual\"."),
    'ipmi_transit_address': _("transit address for bridged request. Required "
                              "only if ipmi_bridging is set to \"dual\"."),
    'ipmi_target_channel': _("destination channel for bridged request. "
                             "Required only if ipmi_bridging is set to "
                             "\"single\" or \"dual\"."),
    'ipmi_target_address': _("destination address for bridged request. "
                             "Required only if ipmi_bridging is set "
                             "to \"single\" or \"dual\"."),
    'ipmi_local_address': _("local IPMB address for bridged requests. "
                            "Used only if ipmi_bridging is set "
                            "to \"single\" or \"dual\". Optional.")
}
# Union of the two sets above; this is what get_properties() reports.
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
# Extra field needed only by the shellinabox console interface.
CONSOLE_PROPERTIES = {
    'ipmi_terminal_port': _("node's UDP port to connect to. Only required for "
                            "console access.")
}
# Mapping of driver_info bridging keys to the ipmitool flags they become.
BRIDGING_OPTIONS = [('local_address', '-m'),
                    ('transit_channel', '-B'), ('transit_address', '-T'),
                    ('target_channel', '-b'), ('target_address', '-t')]
# Timestamp of the last command sent to each BMC address; used to throttle
# traffic to fragile BMCs (see _exec_ipmitool).
LAST_CMD_TIME = {}
# Tri-state capability flags for the local ipmitool binary.  None means
# "not probed yet"; see _check_option_support()/_is_option_supported().
TIMING_SUPPORT = None
SINGLE_BRIDGE_SUPPORT = None
DUAL_BRIDGE_SUPPORT = None
# Probe command lines used to detect whether the installed ipmitool
# understands each optional feature ('-h' keeps the probe side-effect free).
ipmitool_command_options = {
    'timing': ['ipmitool', '-N', '0', '-R', '0', '-h'],
    'single_bridge': ['ipmitool', '-m', '0', '-b', '0', '-t', '0', '-h'],
    'dual_bridge': ['ipmitool', '-m', '0', '-b', '0', '-t', '0',
                    '-B', '0', '-T', '0', '-h']}
# Note(TheJulia): This string is hardcoded in ipmitool's lanplus driver
# and is substituted in return for the error code received from the IPMI
# controller.  As of 1.8.15, no internationalization support appears to
# be in ipmitool which means the string should always be returned in this
# form regardless of locale.
IPMITOOL_RETRYABLE_FAILURES = ['insufficient resources for session']
def _check_option_support(options):
    """Probe the local ipmitool binary for support of specific options.

    Updates the module-level support flags so that any driver interface
    class in this module can consult them later.  Intended to be invoked
    only from the __init__ method of such classes.

    :param options: list of ipmitool option names to probe
    :raises: OSError if the ipmitool binary cannot be executed
    """
    for option in options:
        if _is_option_supported(option) is not None:
            # Already probed earlier; the cached answer stands.
            continue
        try:
            utils.execute(*ipmitool_command_options[option])
        except processutils.ProcessExecutionError:
            # The local ipmitool binary rejected the option.
            _is_option_supported(option, False)
        else:
            # The installed ipmitool recognizes the option.
            _is_option_supported(option, True)
def _is_option_supported(option, is_supported=None):
    """Report (and optionally record) support for an ipmitool option.

    :param option: one of 'timing', 'single_bridge' or 'dual_bridge'
    :param is_supported: optional Boolean.  When given, it is stored in
        the corresponding module-level flag, but only if that flag has
        not been assigned a value yet.
    :returns: True if the option is supported, False if it is not, or
        None when support has not been determined yet
    """
    global SINGLE_BRIDGE_SUPPORT
    global DUAL_BRIDGE_SUPPORT
    global TIMING_SUPPORT
    if option == 'timing':
        if TIMING_SUPPORT is None and is_supported is not None:
            TIMING_SUPPORT = is_supported
        return TIMING_SUPPORT
    if option == 'single_bridge':
        if SINGLE_BRIDGE_SUPPORT is None and is_supported is not None:
            SINGLE_BRIDGE_SUPPORT = is_supported
        return SINGLE_BRIDGE_SUPPORT
    if option == 'dual_bridge':
        if DUAL_BRIDGE_SUPPORT is None and is_supported is not None:
            DUAL_BRIDGE_SUPPORT = is_supported
        return DUAL_BRIDGE_SUPPORT
def _console_pwfile_path(uuid):
"""Return the file path for storing the ipmi password for a console."""
file_name = "%(uuid)s.pw" % {'uuid': uuid}
return os.path.join(tempfile.gettempdir(), file_name)
@contextlib.contextmanager
def _make_password_file(password):
    """Makes a temporary file that contains the password.

    The file is deleted from disk automatically when the context manager
    exits (NamedTemporaryFile unlinks on close).

    :param password: the password
    :returns: the absolute pathname of the temporary file (yielded)
    :raises: PasswordFileFailedToCreate from creating or writing to the
        temporary file
    """
    f = None
    try:
        f = tempfile.NamedTemporaryFile(mode='w', dir=CONF.tempdir)
        f.write(str(password))
        f.flush()
    except (IOError, OSError) as exc:
        # Creation/write failures are translated to a driver exception;
        # close first so the partial file is removed from disk.
        if f is not None:
            f.close()
        raise exception.PasswordFileFailedToCreate(error=exc)
    except Exception:
        # Any other failure: still clean up, but let the original
        # exception propagate untouched.
        if f is not None:
            f.close()
        raise
    try:
        # NOTE(jlvillal): This yield can not be in the try/except block above
        # because an exception by the caller of this function would then get
        # changed to a PasswordFileFailedToCreate exception which would mislead
        # about the problem and its cause.
        yield f.name
    finally:
        if f is not None:
            f.close()
def _parse_driver_info(node):
    """Gets the parameters required for ipmitool to access the node.

    :param node: the Node of interest.
    :returns: dictionary of parameters (address, credentials, bridging
        settings, console port, privilege level and node uuid).
    :raises: InvalidParameterValue when an invalid value is specified
    :raises: MissingParameterValue when a required ipmi parameter is missing.
    """
    info = node.driver_info or {}
    bridging_types = ['single', 'dual']
    missing_info = [key for key in REQUIRED_PROPERTIES if not info.get(key)]
    if missing_info:
        raise exception.MissingParameterValue(_(
            "Missing the following IPMI credentials in node's"
            " driver_info: %s.") % missing_info)
    address = info.get('ipmi_address')
    username = info.get('ipmi_username')
    password = info.get('ipmi_password')
    port = info.get('ipmi_terminal_port')
    priv_level = info.get('ipmi_priv_level', 'ADMINISTRATOR')
    bridging_type = info.get('ipmi_bridging', 'no')
    local_address = info.get('ipmi_local_address')
    transit_channel = info.get('ipmi_transit_channel')
    transit_address = info.get('ipmi_transit_address')
    target_channel = info.get('ipmi_target_channel')
    target_address = info.get('ipmi_target_address')
    # The console port may arrive as a string from the API; normalize it.
    if port:
        try:
            port = int(port)
        except ValueError:
            raise exception.InvalidParameterValue(_(
                "IPMI terminal port is not an integer."))
    # check if ipmi_bridging has proper value
    if bridging_type == 'no':
        # if bridging is not selected, then set all bridging params to None
        (local_address, transit_channel, transit_address, target_channel,
         target_address) = (None,) * 5
    elif bridging_type in bridging_types:
        # check if the particular bridging option is supported on host
        if not _is_option_supported('%s_bridge' % bridging_type):
            raise exception.InvalidParameterValue(_(
                "Value for ipmi_bridging is provided as %s, but IPMI "
                "bridging is not supported by the IPMI utility installed "
                "on host. Ensure ipmitool version is > 1.8.11"
            ) % bridging_type)
        # ensure that all the required parameters are provided
        params_undefined = [param for param, value in [
            ("ipmi_target_channel", target_channel),
            ('ipmi_target_address', target_address)] if value is None]
        if bridging_type == 'dual':
            # dual bridging additionally requires the transit parameters
            params_undefined2 = [param for param, value in [
                ("ipmi_transit_channel", transit_channel),
                ('ipmi_transit_address', transit_address)
            ] if value is None]
            params_undefined.extend(params_undefined2)
        else:
            # if single bridging was selected, set dual bridge params to None
            transit_channel = transit_address = None
        # If the required parameters were not provided,
        # raise an exception
        if params_undefined:
            raise exception.MissingParameterValue(_(
                "%(param)s not provided") % {'param': params_undefined})
    else:
        raise exception.InvalidParameterValue(_(
            "Invalid value for ipmi_bridging: %(bridging_type)s,"
            " the valid value can be one of: %(bridging_types)s"
        ) % {'bridging_type': bridging_type,
             'bridging_types': bridging_types + ['no']})
    if priv_level not in VALID_PRIV_LEVELS:
        valid_priv_lvls = ', '.join(VALID_PRIV_LEVELS)
        raise exception.InvalidParameterValue(_(
            "Invalid privilege level value:%(priv_level)s, the valid value"
            " can be one of %(valid_levels)s") %
            {'priv_level': priv_level, 'valid_levels': valid_priv_lvls})
    return {
        'address': address,
        'username': username,
        'password': password,
        'port': port,
        'uuid': node.uuid,
        'priv_level': priv_level,
        'local_address': local_address,
        'transit_channel': transit_channel,
        'transit_address': transit_address,
        'target_channel': target_channel,
        'target_address': target_address
    }
def _exec_ipmitool(driver_info, command):
    """Execute the ipmitool command.

    This uses the lanplus interface to communicate with the BMC device driver.
    Retryable BMC failures (see IPMITOOL_RETRYABLE_FAILURES) are retried
    until CONF.ipmi.retry_timeout elapses or the try budget is exhausted,
    and commands to the same BMC address are spaced at least
    CONF.ipmi.min_command_interval seconds apart.

    :param driver_info: the ipmitool parameters for accessing a node.
    :param command: the ipmitool command to be executed.
    :returns: (stdout, stderr) from executing the command.
    :raises: PasswordFileFailedToCreate from creating or writing to the
        temporary file.
    :raises: processutils.ProcessExecutionError from executing the command.
    """
    args = ['ipmitool',
            '-I',
            'lanplus',
            '-H',
            driver_info['address'],
            '-L', driver_info['priv_level']
            ]
    if driver_info['username']:
        args.append('-U')
        args.append(driver_info['username'])
    for name, option in BRIDGING_OPTIONS:
        if driver_info[name] is not None:
            args.append(option)
            args.append(driver_info[name])
    # specify retry timing more precisely, if supported
    num_tries = max(
        (CONF.ipmi.retry_timeout // CONF.ipmi.min_command_interval), 1)
    if _is_option_supported('timing'):
        args.append('-R')
        args.append(str(num_tries))
        args.append('-N')
        args.append(str(CONF.ipmi.min_command_interval))
    end_time = (time.time() + CONF.ipmi.retry_timeout)
    while True:
        num_tries = num_tries - 1
        # NOTE(deva): ensure that no communications are sent to a BMC more
        # often than once every min_command_interval seconds.
        time_till_next_poll = CONF.ipmi.min_command_interval - (
            time.time() - LAST_CMD_TIME.get(driver_info['address'], 0))
        if time_till_next_poll > 0:
            time.sleep(time_till_next_poll)
        # Rebuild the argument list on every attempt so that the password
        # file options appended below do not accumulate across retries.
        cmd_args = args[:]
        # 'ipmitool' command will prompt password if there is no '-f'
        # option, we set it to '\0' to write a password file to support
        # empty password
        with _make_password_file(
            driver_info['password'] or '\0'
        ) as pw_file:
            cmd_args.append('-f')
            cmd_args.append(pw_file)
            cmd_args.extend(command.split(" "))
            try:
                out, err = utils.execute(*cmd_args)
                return out, err
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception() as ctxt:
                    # Retry only for errors ipmitool reports as transient.
                    err_list = [x for x in IPMITOOL_RETRYABLE_FAILURES
                                if x in e.message]
                    if ((time.time() > end_time) or
                        (num_tries == 0) or
                        not err_list):
                        LOG.error(_LE('IPMI Error while attempting '
                                      '"%(cmd)s" for node %(node)s. '
                                      'Error: %(error)s'),
                                  {
                                      'node': driver_info['uuid'],
                                      'cmd': e.cmd,
                                      'error': e
                                  })
                    else:
                        # Transient error with budget remaining: suppress
                        # the re-raise and loop to retry.
                        ctxt.reraise = False
                        LOG.warning(_LW('IPMI Error encountered, retrying '
                                        '"%(cmd)s" for node %(node)s. '
                                        'Error: %(error)s'),
                                    {
                                        'node': driver_info['uuid'],
                                        'cmd': e.cmd,
                                        'error': e
                                    })
            finally:
                # Record the send time for the per-BMC rate limiting above.
                LAST_CMD_TIME[driver_info['address']] = time.time()
def _sleep_time(iter):
"""Return the time-to-sleep for the n'th iteration of a retry loop.
This implementation increases exponentially.
:param iter: iteration number
:returns: number of seconds to sleep
"""
if iter <= 1:
return 1
return iter ** 2
def _set_and_wait(target_state, driver_info):
    """Helper function for DynamicLoopingCall.

    This method changes the power state and polls the BMC until the desired
    power state is reached, or CONF.ipmi.retry_timeout would be exceeded by
    the next iteration.

    This method assumes the caller knows the current power state and does not
    check it prior to changing the power state. Most BMCs should be fine, but
    if a driver is concerned, the state should be checked prior to calling
    this method.

    :param target_state: desired power state
    :param driver_info: the ipmitool parameters for accessing a node.
    :returns: one of ironic.common.states
    """
    # NOTE(review): assumes target_state is POWER_ON or POWER_OFF; any other
    # value would leave state_name unbound and fail inside _wait -- confirm
    # callers never pass anything else.
    if target_state == states.POWER_ON:
        state_name = "on"
    elif target_state == states.POWER_OFF:
        state_name = "off"
    def _wait(mutable):
        try:
            # Only issue power change command once
            if mutable['iter'] < 0:
                _exec_ipmitool(driver_info, "power %s" % state_name)
            else:
                mutable['power'] = _power_status(driver_info)
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError,
                exception.IPMIFailure):
            # Log failures but keep trying
            LOG.warning(_LW("IPMI power %(state)s failed for node %(node)s."),
                        {'state': state_name, 'node': driver_info['uuid']})
        finally:
            mutable['iter'] += 1
        if mutable['power'] == target_state:
            raise loopingcall.LoopingCallDone()
        sleep_time = _sleep_time(mutable['iter'])
        if (sleep_time + mutable['total_time']) > CONF.ipmi.retry_timeout:
            # Stop if the next loop would exceed maximum retry_timeout
            LOG.error(_LE('IPMI power %(state)s timed out after '
                          '%(tries)s retries on node %(node_id)s.'),
                      {'state': state_name, 'tries': mutable['iter'],
                       'node_id': driver_info['uuid']})
            mutable['power'] = states.ERROR
            raise loopingcall.LoopingCallDone()
        else:
            mutable['total_time'] += sleep_time
            return sleep_time
    # Use mutable objects so the looped method can change them.
    # Start 'iter' from -1 so that the first two checks are one second apart.
    status = {'power': None, 'iter': -1, 'total_time': 0}
    timer = loopingcall.DynamicLoopingCall(_wait, status)
    timer.start().wait()
    return status['power']
def _power_on(driver_info):
    """Turn the power ON for this node.

    Blocks until the state change is confirmed or the retry timeout expires.

    :param driver_info: the ipmitool parameters for accessing a node.
    :returns: one of ironic.common.states POWER_ON or ERROR.
    :raises: IPMIFailure on an error from ipmitool (from _power_status call).
    """
    return _set_and_wait(states.POWER_ON, driver_info)
def _power_off(driver_info):
    """Turn the power OFF for this node.

    Blocks until the state change is confirmed or the retry timeout expires.

    :param driver_info: the ipmitool parameters for accessing a node.
    :returns: one of ironic.common.states POWER_OFF or ERROR.
    :raises: IPMIFailure on an error from ipmitool (from _power_status call).
    """
    return _set_and_wait(states.POWER_OFF, driver_info)
def _power_status(driver_info):
    """Get the power status for a node.

    :param driver_info: the ipmitool access parameters for a node.
    :returns: one of ironic.common.states POWER_OFF, POWER_ON or ERROR.
    :raises: IPMIFailure on an error from ipmitool.
    """
    cmd = "power status"
    try:
        out, _err = _exec_ipmitool(driver_info, cmd)
    except (exception.PasswordFileFailedToCreate,
            processutils.ProcessExecutionError) as e:
        LOG.warning(_LW("IPMI power status failed for node %(node_id)s with "
                        "error: %(error)s."),
                    {'node_id': driver_info['uuid'], 'error': e})
        raise exception.IPMIFailure(cmd=cmd)
    # ipmitool prints exactly one of these lines on success; anything
    # else is treated as an unknown/error state.
    if out == "Chassis Power is on\n":
        return states.POWER_ON
    if out == "Chassis Power is off\n":
        return states.POWER_OFF
    return states.ERROR
def _process_sensor(sensor_data):
sensor_data_fields = sensor_data.split('\n')
sensor_data_dict = {}
for field in sensor_data_fields:
if not field:
continue
kv_value = field.split(':')
if len(kv_value) != 2:
continue
sensor_data_dict[kv_value[0].strip()] = kv_value[1].strip()
return sensor_data_dict
def _get_sensor_type(node, sensor_data_dict):
# Have only three sensor type name IDs: 'Sensor Type (Analog)'
# 'Sensor Type (Discrete)' and 'Sensor Type (Threshold)'
for key in ('Sensor Type (Analog)', 'Sensor Type (Discrete)',
'Sensor Type (Threshold)'):
try:
return sensor_data_dict[key].split(' ', 1)[0]
except KeyError:
continue
raise exception.FailedToParseSensorData(
node=node.uuid,
error=(_("parse ipmi sensor data failed, unknown sensor type"
" data: %(sensors_data)s"), {'sensors_data': sensor_data_dict}))
def _parse_ipmi_sensors_data(node, sensors_data):
    """Parse the IPMI sensors data and format to the dict grouping by type.

    We run 'ipmitool' command with 'sdr -v' options, which can return sensor
    details in human-readable format, we need to format them to JSON string
    dict-based data for Ceilometer Collector which can be sent as payload
    out via notification bus and consumed by Ceilometer Collector.

    :param node: the Node the sensor data belongs to.
    :param sensors_data: the sensor data returned by ipmitool command.
    :returns: the sensor data with JSON format, grouped by sensor type.
    :raises: FailedToParseSensorData when error encountered during parsing.
    """
    sensors_data_dict = {}
    if not sensors_data:
        return sensors_data_dict
    # 'sdr -v' separates sensors with blank lines; parse each stanza.
    sensors_data_array = sensors_data.split('\n\n')
    for sensor_data in sensors_data_array:
        sensor_data_dict = _process_sensor(sensor_data)
        if not sensor_data_dict:
            continue
        sensor_type = _get_sensor_type(node, sensor_data_dict)
        # ignore the sensors which has no current 'Sensor Reading' data
        if 'Sensor Reading' in sensor_data_dict:
            sensors_data_dict.setdefault(sensor_type,
                {})[sensor_data_dict['Sensor ID']] = sensor_data_dict
    # get nothing, no valid sensor data
    if not sensors_data_dict:
        raise exception.FailedToParseSensorData(
            node=node.uuid,
            error=(_("parse ipmi sensor data failed, get nothing with input"
                " data: %(sensors_data)s") % {'sensors_data': sensors_data}))
    return sensors_data_dict
@task_manager.require_exclusive_lock
def send_raw(task, raw_bytes):
    """Send raw bytes to the BMC. Bytes should be a string of bytes.

    :param task: a TaskManager instance.
    :param raw_bytes: a string of raw bytes to send, e.g. '0x00 0x01'
    :raises: IPMIFailure on an error from ipmitool.
    :raises: MissingParameterValue if a required parameter is missing.
    :raises: InvalidParameterValue when an invalid value is specified.
    """
    node_uuid = task.node.uuid
    LOG.debug('Sending node %(node)s raw bytes %(bytes)s',
              {'bytes': raw_bytes, 'node': node_uuid})
    driver_info = _parse_driver_info(task.node)
    cmd = 'raw %s' % raw_bytes
    try:
        out, err = _exec_ipmitool(driver_info, cmd)
        LOG.debug('send raw bytes returned stdout: %(stdout)s, stderr:'
                  ' %(stderr)s', {'stdout': out, 'stderr': err})
    except (exception.PasswordFileFailedToCreate,
            processutils.ProcessExecutionError) as e:
        # Translate low-level failures into the driver-level exception
        # callers of vendor passthru expect.
        LOG.exception(_LE('IPMI "raw bytes" failed for node %(node_id)s '
                          'with error: %(error)s.'),
                      {'node_id': node_uuid, 'error': e})
        raise exception.IPMIFailure(cmd=cmd)
class IPMIPower(base.PowerInterface):
    """Power interface for nodes managed over IPMI via the ipmitool CLI."""
    def __init__(self):
        # Probe the local ipmitool binary once at load time; the driver
        # cannot function without it.
        try:
            _check_option_support(['timing', 'single_bridge', 'dual_bridge'])
        except OSError:
            raise exception.DriverLoadError(
                driver=self.__class__.__name__,
                reason=_("Unable to locate usable ipmitool command in "
                         "the system path when checking ipmitool version"))
    def get_properties(self):
        """Return the driver_info properties used by this interface."""
        return COMMON_PROPERTIES
    def validate(self, task):
        """Validate driver_info for ipmitool driver.

        Check that node['driver_info'] contains IPMI credentials.

        :param task: a TaskManager instance containing the node to act on.
        :raises: InvalidParameterValue if required ipmi parameters are missing.
        :raises: MissingParameterValue if a required parameter is missing.
        """
        _parse_driver_info(task.node)
        # NOTE(deva): don't actually touch the BMC in validate because it is
        # called too often, and BMCs are too fragile.
        # This is a temporary measure to mitigate problems while
        # 1314954 and 1314961 are resolved.
    def get_power_state(self, task):
        """Get the current power state of the task's node.

        :param task: a TaskManager instance containing the node to act on.
        :returns: one of ironic.common.states POWER_OFF, POWER_ON or ERROR.
        :raises: InvalidParameterValue if required ipmi parameters are missing.
        :raises: MissingParameterValue if a required parameter is missing.
        :raises: IPMIFailure on an error from ipmitool (from _power_status
            call).
        """
        driver_info = _parse_driver_info(task.node)
        return _power_status(driver_info)
    @task_manager.require_exclusive_lock
    def set_power_state(self, task, pstate):
        """Turn the power on or off.

        :param task: a TaskManager instance containing the node to act on.
        :param pstate: The desired power state, one of ironic.common.states
            POWER_ON, POWER_OFF.
        :raises: InvalidParameterValue if an invalid power state was specified.
        :raises: MissingParameterValue if required ipmi parameters are missing
        :raises: PowerStateFailure if the power couldn't be set to pstate.
        """
        driver_info = _parse_driver_info(task.node)
        if pstate == states.POWER_ON:
            state = _power_on(driver_info)
        elif pstate == states.POWER_OFF:
            state = _power_off(driver_info)
        else:
            raise exception.InvalidParameterValue(_("set_power_state called "
                "with invalid power state %s.") % pstate)
        if state != pstate:
            raise exception.PowerStateFailure(pstate=pstate)
    @task_manager.require_exclusive_lock
    def reboot(self, task):
        """Cycles the power to the task's node.

        :param task: a TaskManager instance containing the node to act on.
        :raises: MissingParameterValue if required ipmi parameters are missing.
        :raises: InvalidParameterValue if an invalid power state was specified.
        :raises: PowerStateFailure if the final state of the node is not
            POWER_ON.
        """
        driver_info = _parse_driver_info(task.node)
        # Hard power cycle: force off first, then bring the node back up.
        _power_off(driver_info)
        state = _power_on(driver_info)
        if state != states.POWER_ON:
            raise exception.PowerStateFailure(pstate=states.POWER_ON)
class IPMIManagement(base.ManagementInterface):
    """Management interface (boot device, sensor data) backed by ipmitool."""
    def get_properties(self):
        """Return the driver_info properties used by this interface."""
        return COMMON_PROPERTIES
    def __init__(self):
        # Probe the local ipmitool binary once at load time; the driver
        # cannot function without it.
        try:
            _check_option_support(['timing', 'single_bridge', 'dual_bridge'])
        except OSError:
            raise exception.DriverLoadError(
                driver=self.__class__.__name__,
                reason=_("Unable to locate usable ipmitool command in "
                         "the system path when checking ipmitool version"))
    def validate(self, task):
        """Check that 'driver_info' contains IPMI credentials.

        Validates whether the 'driver_info' property of the supplied
        task's node contains the required credentials information.

        :param task: a task from TaskManager.
        :raises: InvalidParameterValue if required IPMI parameters
            are missing.
        :raises: MissingParameterValue if a required parameter is missing.
        """
        _parse_driver_info(task.node)
    def get_supported_boot_devices(self):
        """Get a list of the supported boot devices.

        :returns: A list with the supported boot devices defined
            in :mod:`ironic.common.boot_devices`.
        """
        return [boot_devices.PXE, boot_devices.DISK, boot_devices.CDROM,
                boot_devices.BIOS, boot_devices.SAFE]
    @task_manager.require_exclusive_lock
    def set_boot_device(self, task, device, persistent=False):
        """Set the boot device for the task's node.

        Set the boot device to use on next reboot of the node.

        :param task: a task from TaskManager.
        :param device: the boot device, one of
            :mod:`ironic.common.boot_devices`.
        :param persistent: Boolean value. True if the boot device will
            persist to all future boots, False if not.
            Default: False.
        :raises: InvalidParameterValue if an invalid boot device is specified
        :raises: MissingParameterValue if required ipmi parameters are missing.
        :raises: IPMIFailure on an error from ipmitool.
        """
        if device not in self.get_supported_boot_devices():
            raise exception.InvalidParameterValue(_(
                "Invalid boot device %s specified.") % device)
        # note(JayF): IPMI spec indicates unless you send these raw bytes the
        # boot device setting times out after 60s. Since it's possible it
        # could be >60s before a node is rebooted, we should always send them.
        # This mimics pyghmi's current behavior, and the "option=timeout"
        # setting on newer ipmitool binaries.
        timeout_disable = "0x00 0x08 0x03 0x08"
        send_raw(task, timeout_disable)
        cmd = "chassis bootdev %s" % device
        if persistent:
            cmd = cmd + " options=persistent"
        driver_info = _parse_driver_info(task.node)
        try:
            out, err = _exec_ipmitool(driver_info, cmd)
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError) as e:
            LOG.warning(_LW('IPMI set boot device failed for node %(node)s '
                            'when executing "ipmitool %(cmd)s". '
                            'Error: %(error)s'),
                        {'node': driver_info['uuid'], 'cmd': cmd, 'error': e})
            raise exception.IPMIFailure(cmd=cmd)
    def get_boot_device(self, task):
        """Get the current boot device for the task's node.

        Returns the current boot device of the node.

        :param task: a task from TaskManager.
        :raises: InvalidParameterValue if required IPMI parameters
            are missing.
        :raises: IPMIFailure on an error from ipmitool.
        :raises: MissingParameterValue if a required parameter is missing.
        :returns: a dictionary containing:
            :boot_device: the boot device, one of
                :mod:`ironic.common.boot_devices` or None if it is unknown.
            :persistent: Whether the boot device will persist to all
                future boots or not, None if it is unknown.
        """
        cmd = "chassis bootparam get 5"
        driver_info = _parse_driver_info(task.node)
        response = {'boot_device': None, 'persistent': None}
        try:
            out, err = _exec_ipmitool(driver_info, cmd)
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError) as e:
            LOG.warning(_LW('IPMI get boot device failed for node %(node)s '
                            'when executing "ipmitool %(cmd)s". '
                            'Error: %(error)s'),
                        {'node': driver_info['uuid'], 'cmd': cmd, 'error': e})
            raise exception.IPMIFailure(cmd=cmd)
        # Parse the human-readable bootparam output, e.g.
        # "Boot Device Selector : Force PXE".
        re_obj = re.search('Boot Device Selector : (.+)?\n', out)
        if re_obj:
            boot_selector = re_obj.groups('')[0]
            if 'PXE' in boot_selector:
                response['boot_device'] = boot_devices.PXE
            elif 'Hard-Drive' in boot_selector:
                if 'Safe-Mode' in boot_selector:
                    response['boot_device'] = boot_devices.SAFE
                else:
                    response['boot_device'] = boot_devices.DISK
            elif 'BIOS' in boot_selector:
                response['boot_device'] = boot_devices.BIOS
            elif 'CD/DVD' in boot_selector:
                response['boot_device'] = boot_devices.CDROM
        response['persistent'] = 'Options apply to all future boots' in out
        return response
    def get_sensors_data(self, task):
        """Get sensors data.

        :param task: a TaskManager instance.
        :raises: FailedToGetSensorData when getting the sensor data fails.
        :raises: FailedToParseSensorData when parsing sensor data fails.
        :raises: InvalidParameterValue if required ipmi parameters are missing
        :raises: MissingParameterValue if a required parameter is missing.
        :returns: returns a dict of sensor data group by sensor type.
        """
        driver_info = _parse_driver_info(task.node)
        # with '-v' option, we can get the entire sensor data including the
        # extended sensor informations
        cmd = "sdr -v"
        try:
            out, err = _exec_ipmitool(driver_info, cmd)
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError) as e:
            raise exception.FailedToGetSensorData(node=task.node.uuid,
                                                  error=e)
        return _parse_ipmi_sensors_data(task.node, out)
class VendorPassthru(base.VendorInterface):
    """Vendor passthru interface exposing raw IPMI commands and BMC reset."""
    def __init__(self):
        # Probe bridging support once at load time; the passthru methods
        # rely on _parse_driver_info, which consults these flags.
        try:
            _check_option_support(['single_bridge', 'dual_bridge'])
        except OSError:
            raise exception.DriverLoadError(
                driver=self.__class__.__name__,
                reason=_("Unable to locate usable ipmitool command in "
                         "the system path when checking ipmitool version"))
    @base.passthru(['POST'])
    @task_manager.require_exclusive_lock
    def send_raw(self, task, http_method, raw_bytes):
        """Send raw bytes to the BMC. Bytes should be a string of bytes.

        :param task: a TaskManager instance.
        :param http_method: the HTTP method used on the request.
        :param raw_bytes: a string of raw bytes to send, e.g. '0x00 0x01'
        :raises: IPMIFailure on an error from ipmitool.
        :raises: MissingParameterValue if a required parameter is missing.
        :raises: InvalidParameterValue when an invalid value is specified.
        """
        # Delegates to the module-level helper of the same name.
        send_raw(task, raw_bytes)
    @base.passthru(['POST'])
    @task_manager.require_exclusive_lock
    def bmc_reset(self, task, http_method, warm=True):
        """Reset BMC with IPMI command 'bmc reset (warm|cold)'.

        :param task: a TaskManager instance.
        :param http_method: the HTTP method used on the request.
        :param warm: boolean parameter to decide on warm or cold reset.
        :raises: IPMIFailure on an error from ipmitool.
        :raises: MissingParameterValue if a required parameter is missing.
        :raises: InvalidParameterValue when an invalid value is specified
        """
        node_uuid = task.node.uuid
        if warm:
            warm_param = 'warm'
        else:
            warm_param = 'cold'
        LOG.debug('Doing %(warm)s BMC reset on node %(node)s',
                  {'warm': warm_param, 'node': node_uuid})
        driver_info = _parse_driver_info(task.node)
        cmd = 'bmc reset %s' % warm_param
        try:
            out, err = _exec_ipmitool(driver_info, cmd)
            LOG.debug('bmc reset returned stdout: %(stdout)s, stderr:'
                      ' %(stderr)s', {'stdout': out, 'stderr': err})
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError) as e:
            LOG.exception(_LE('IPMI "bmc reset" failed for node %(node_id)s '
                              'with error: %(error)s.'),
                          {'node_id': node_uuid, 'error': e})
            raise exception.IPMIFailure(cmd=cmd)
    def get_properties(self):
        """Return the driver_info properties used by this interface."""
        return COMMON_PROPERTIES
    def validate(self, task, method, **kwargs):
        """Validate vendor-specific actions.

        If invalid, raises an exception; otherwise returns None.

        Valid methods:
        * send_raw
        * bmc_reset

        :param task: a task from TaskManager.
        :param method: method to be validated
        :param kwargs: info for action.
        :raises: InvalidParameterValue when an invalid parameter value is
            specified.
        :raises: MissingParameterValue if a required parameter is missing.
        """
        if method == 'send_raw':
            if not kwargs.get('raw_bytes'):
                raise exception.MissingParameterValue(_(
                    'Parameter raw_bytes (string of bytes) was not '
                    'specified.'))
        # Driver info is validated for every method.
        _parse_driver_info(task.node)
class IPMIShellinaboxConsole(base.ConsoleInterface):
    """A ConsoleInterface that uses ipmitool and shellinabox."""
    def __init__(self):
        # Probe the local ipmitool binary once at load time; the driver
        # cannot function without it.
        try:
            _check_option_support(['timing', 'single_bridge', 'dual_bridge'])
        except OSError:
            raise exception.DriverLoadError(
                driver=self.__class__.__name__,
                reason=_("Unable to locate usable ipmitool command in "
                         "the system path when checking ipmitool version"))
    def get_properties(self):
        """Return the common properties plus the console-specific ones."""
        d = COMMON_PROPERTIES.copy()
        d.update(CONSOLE_PROPERTIES)
        return d
    def validate(self, task):
        """Validate the Node console info.

        :param task: a task from TaskManager.
        :raises: InvalidParameterValue
        :raises: MissingParameterValue when a required parameter is missing
        """
        driver_info = _parse_driver_info(task.node)
        if not driver_info['port']:
            raise exception.MissingParameterValue(_(
                "Missing 'ipmi_terminal_port' parameter in node's"
                " driver_info."))
    def start_console(self, task):
        """Start a remote console for the node.

        :param task: a task from TaskManager
        :raises: InvalidParameterValue if required ipmi parameters are missing
        :raises: PasswordFileFailedToCreate if unable to create a file
            containing the password
        :raises: ConsoleError if the directory for the PID file cannot be
            created
        :raises: ConsoleSubprocessFailed when invoking the subprocess failed
        """
        driver_info = _parse_driver_info(task.node)
        path = _console_pwfile_path(driver_info['uuid'])
        # NOTE(review): unlike _exec_ipmitool there is no '\0' fallback here,
        # so an empty/None password is written as-is -- confirm console
        # access with empty passwords is expected to work.
        pw_file = console_utils.make_persistent_password_file(
            path, driver_info['password'])
        # shellinabox invocation spec: "/:<uid>:<gid>:HOME:<command>".
        ipmi_cmd = ("/:%(uid)s:%(gid)s:HOME:ipmitool -H %(address)s"
                    " -I lanplus -U %(user)s -f %(pwfile)s"
                    % {'uid': os.getuid(),
                       'gid': os.getgid(),
                       'address': driver_info['address'],
                       'user': driver_info['username'],
                       'pwfile': pw_file})
        for name, option in BRIDGING_OPTIONS:
            if driver_info[name] is not None:
                ipmi_cmd = " ".join([ipmi_cmd,
                                     option, driver_info[name]])
        if CONF.debug:
            ipmi_cmd += " -v"
        ipmi_cmd += " sol activate"
        try:
            console_utils.start_shellinabox_console(driver_info['uuid'],
                                                    driver_info['port'],
                                                    ipmi_cmd)
        except (exception.ConsoleError, exception.ConsoleSubprocessFailed):
            with excutils.save_and_reraise_exception():
                # Don't leave a stale password file behind on failure.
                utils.unlink_without_raise(path)
    def stop_console(self, task):
        """Stop the remote console session for the node.

        :param task: a task from TaskManager
        :raises: InvalidParameterValue if required ipmi parameters are missing
        :raises: ConsoleError if unable to stop the console
        """
        driver_info = _parse_driver_info(task.node)
        try:
            console_utils.stop_shellinabox_console(driver_info['uuid'])
        finally:
            # Always remove the password file, even if stopping failed.
            utils.unlink_without_raise(
                _console_pwfile_path(driver_info['uuid']))
    def get_console(self, task):
        """Get the type and connection information about the console."""
        driver_info = _parse_driver_info(task.node)
        url = console_utils.get_shellinabox_console_url(driver_info['port'])
        return {'type': 'shellinabox', 'url': url}
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Chris Behrens
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pyhole Plugin Library"""
import functools
import os
import sys
from pyhole import utils
def _reset_variables():
    """Re-initialize the module-global plugin state.

    Shared by both the initial plugin load and any reload: empties the
    instance list and gives every hook type a fresh, empty hook list.
    """
    global _plugin_instances
    global _plugin_hooks
    _plugin_instances = []
    _plugin_hooks = dict((name, []) for name in _hook_names)
# Decorator for adding a hook
def hook_add(hookname, arg):
    """Return a decorator that marks a function as a *hookname* hook.

    Generally not called directly by plugins: they use the dynamically
    generated ``hook_add_*`` partials created below with the setattrs.
    """
    def mark(func):
        # Tag the function so _init_plugins can discover it later.
        setattr(func, "_is_%s_hook" % hookname, True)
        func._hook_arg = arg
        return func
    return mark
def hook_get(hookname):
    """
    Return the list of registered hooks of a particular type.

    Generally this is not called directly; callers tend to use the
    dynamically generated 'hook_get_*' partials that are created below
    with the setattrs. Each entry is a (module, method, arg) tuple.
    """
    return _plugin_hooks[hookname]
def active_get(hookname):
    """Return the list of hook arguments registered for *hookname*.

    Generally called through the dynamically generated 'active_*'
    partials created below with the setattrs, not directly.
    """
    # Each registered hook is a (module, method, arg) tuple.
    return [hook[2] for hook in _plugin_hooks[hookname]]
# The supported hook types; each gets its own decorator and accessors.
_hook_names = ["keyword", "command", "msg_regex"]
_reset_variables()
_this_mod = sys.modules[__name__]
for x in _hook_names:
    # Dynamically create the decorators and functions for various hooks,
    # e.g. hook_add_keyword, hook_get_keywords and active_keywords.
    setattr(_this_mod, "hook_add_%s" % x, functools.partial(hook_add, x))
    setattr(_this_mod, "hook_get_%ss" % x, functools.partial(hook_get, x))
    setattr(_this_mod, "active_%ss" % x, functools.partial(active_get, x))
class PluginMetaClass(type):
    """
    Metaclass that makes the plugin registration magic work.

    Every class creation in the Plugin hierarchy passes through here:
    the Plugin base class itself creates the registry, and every real
    plugin subclass appends itself to it automatically.
    """
    def __init__(cls, name, bases, attrs):
        """Record the newly created class in the shared registry."""
        if hasattr(cls, "_plugin_classes"):
            # A subclass of Plugin, i.e. an actual plugin: register it.
            cls._plugin_classes.append(cls)
        else:
            # The Plugin base class itself: create the registry.
            cls._plugin_classes = []
        cls.__name__ = name
class Plugin(object):
    """
    Base class that every plugin class must inherit from.
    """
    # Set the metaclass (Python 2 style) so subclasses self-register.
    __metaclass__ = PluginMetaClass
    def __init__(self, irc, *args, **kwargs):
        """Store the IRC instance and remember our own class name."""
        self.irc = irc
        self.name = self.__class__.__name__
def _init_plugins(*args, **kwargs):
    """Instantiate every registered plugin class and cache its hooks.

    Each method carrying a ``_is_<type>_hook`` marker (set by the
    hook_add decorators) is recorded in ``_plugin_hooks`` as a
    (module, method, arg) tuple.
    """
    for plugin_cls in Plugin._plugin_classes:
        instance = plugin_cls(*args, **kwargs)
        _plugin_instances.append(instance)
        # Scan the instance for methods marked by the hook decorators.
        for attr_name in dir(instance):
            attr = getattr(instance, attr_name)
            for hook_key in _hook_names:
                if not getattr(attr, "_is_%s_hook" % hook_key, False):
                    continue
                hook_arg = getattr(attr, "_hook_arg", None)
                # Append (module, method, arg) tuple
                _plugin_hooks[hook_key].append(
                    (attr.__module__, attr, hook_arg))
def load_plugins(plugindir, *args, **kwargs):
    """Load the configured plugin modules from *plugindir*.

    Import failures are logged and skipped so a single broken plugin
    cannot prevent the rest from loading; afterwards all registered
    plugin classes are instantiated via _init_plugins().
    """
    config = utils.load_config("Pyhole", kwargs.get("conf_file"))
    plugin_names = config.get("plugins", type="list")
    for p in plugin_names:
        try:
            __import__(os.path.basename(plugindir), globals(), locals(), [p])
        except Exception as e:
            # Catch-all on purpose: a plugin can fail in arbitrary ways
            # and must not stop the others from loading.
            kwargs.get("irc").log.error(e)
    _init_plugins(*args, **kwargs)
def reload_plugins(plugins, *args, **kwargs):
    """Reload every already-imported plugin module, then load plugins.

    :param plugins: path to the plugin directory.
    """
    config = utils.load_config("Pyhole", kwargs.get("conf_file"))
    # When the modules are reloaded, the metaclass will append all of
    # the classes again, so the registry must be emptied first.
    Plugin._plugin_classes = []
    _reset_variables()
    # Collect the currently imported plugin modules that still exist on
    # disk and are listed in the configuration.
    plugins_to_reload = []
    plugindir = os.path.basename(plugins)
    for mod, val in sys.modules.items():
        if plugindir in mod and val and mod != plugindir:
            mod_file = val.__file__
            if not os.path.isfile(mod_file):
                continue
            for p in config.get("plugins", type="list"):
                if plugindir + "." + p == mod:
                    plugins_to_reload.append(mod)
    for p in plugins_to_reload:
        try:
            reload(sys.modules[p])
        except Exception as e:
            # Catch-all on purpose: a broken plugin must not abort the
            # reload of the others.
            kwargs.get("irc").log.error(e)
    # Load new plugins
    load_plugins(plugindir, *args, **kwargs)
def active_plugins():
    """Return the names of all loaded plugin classes."""
    return [cls.__name__ for cls in Plugin._plugin_classes]
def active_plugin_classes():
    """
    Get the loaded plugin classes.

    Returns the live registry list (not a copy); callers must not
    mutate it.
    """
    return Plugin._plugin_classes
| |
#!/usr/bin/env python
#
# $Id$
#
import os
import errno
import socket
import struct
import sys
import base64
import re
import _psutil_posix
import _psutil_linux
from psutil import _psposix
from psutil.error import AccessDenied, NoSuchProcess, TimeoutExpired
from psutil._common import *
__extra__all__ = [
"IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE",
"IOPRIO_CLASS_IDLE",
"phymem_buffers", "cached_phymem"]
def _get_boot_time():
"""Return system boot time (epoch in seconds)"""
f = open('/proc/stat', 'r')
try:
for line in f:
if line.startswith('btime'):
return float(line.strip().split()[1])
raise RuntimeError("line not found")
finally:
f.close()
def _get_num_cpus():
"""Return the number of CPUs on the system"""
num = 0
f = open('/proc/cpuinfo', 'r')
try:
lines = f.readlines()
finally:
f.close()
for line in lines:
if line.lower().startswith('processor'):
num += 1
# unknown format (e.g. amrel/sparc architectures), see:
# http://code.google.com/p/psutil/issues/detail?id=200
if num == 0:
f = open('/proc/stat', 'r')
try:
lines = f.readlines()
finally:
f.close()
search = re.compile('cpu\d')
for line in lines:
line = line.split(' ')[0]
if search.match(line):
num += 1
if num == 0:
raise RuntimeError("can't determine number of CPUs")
return num
# Number of clock ticks per second
_CLOCK_TICKS = os.sysconf(os.sysconf_names["SC_CLK_TCK"])
# tty number -> device path mapping, built once at import time
_TERMINAL_MAP = _psposix._get_terminal_map()
# cached at import time; assumed stable for the life of the process
BOOT_TIME = _get_boot_time()
NUM_CPUS = _get_num_cpus()
# ioprio_* constants http://linux.die.net/man/2/ioprio_get
IOPRIO_CLASS_NONE = 0
IOPRIO_CLASS_RT = 1
IOPRIO_CLASS_BE = 2
IOPRIO_CLASS_IDLE = 3
# maps the hex state field of /proc/net/tcp* to readable names
# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
_TCP_STATES_TABLE = {"01" : "ESTABLISHED",
                     "02" : "SYN_SENT",
                     "03" : "SYN_RECV",
                     "04" : "FIN_WAIT1",
                     "05" : "FIN_WAIT2",
                     "06" : "TIME_WAIT",
                     "07" : "CLOSE",
                     "08" : "CLOSE_WAIT",
                     "09" : "LAST_ACK",
                     "0A" : "LISTEN",
                     "0B" : "CLOSING"
                     }
# --- system memory functions
def cached_phymem():
    """Return the amount of cached memory on the system, in bytes.
    This reflects the "cached" column of free command line utility.
    """
    f = open('/proc/meminfo', 'r')
    try:
        content = f.readlines()
    finally:
        f.close()
    for entry in content:
        if entry.startswith('Cached:'):
            # /proc/meminfo reports kB
            return int(entry.split()[1]) * 1024
    raise RuntimeError("line not found")
def phymem_buffers():
    """Return the amount of physical memory buffers used by the
    kernel in bytes.
    This reflects the "buffers" column of free command line utility.
    """
    f = open('/proc/meminfo', 'r')
    try:
        content = f.readlines()
    finally:
        f.close()
    for entry in content:
        if entry.startswith('Buffers:'):
            # /proc/meminfo reports kB
            return int(entry.split()[1]) * 1024
    raise RuntimeError("line not found")
def phymem_usage():
    """Return physical memory stats as an ntuple_sysmeminfo of
    (total, used, free, percent), all sizes in bytes.
    """
    # total, used and free values are matched against free cmdline utility
    # the percentage matches top/htop and gnome-system-monitor
    f = open('/proc/meminfo', 'r')
    try:
        total = free = buffers = cached = None
        # NOTE(review): the early break assumes "Cached:" appears after
        # MemTotal/MemFree/Buffers in /proc/meminfo -- confirm; if any
        # line were missing, the arithmetic below would fail on None.
        for line in f:
            if line.startswith('MemTotal:'):
                total = int(line.split()[1]) * 1024
            elif line.startswith('MemFree:'):
                free = int(line.split()[1]) * 1024
            elif line.startswith('Buffers:'):
                buffers = int(line.split()[1]) * 1024
            elif line.startswith('Cached:'):
                cached = int(line.split()[1]) * 1024
                break
        used = total - free
        # percent counts buffers/cache as available memory
        percent = usage_percent(total - (free + buffers + cached), total,
                                _round=1)
        return ntuple_sysmeminfo(total, used, free, percent)
    finally:
        f.close()
def virtmem_usage():
    """Return swap usage as an ntuple_sysmeminfo of
    (total, used, free, percent), sizes in bytes.
    """
    f = open('/proc/meminfo', 'r')
    try:
        total = free = None
        for entry in f:
            if entry.startswith('SwapTotal:'):
                total = int(entry.split()[1]) * 1024
            elif entry.startswith('SwapFree:'):
                free = int(entry.split()[1]) * 1024
            # stop as soon as both values have been seen
            if total is not None and free is not None:
                break
        assert total is not None and free is not None
        used = total - free
        percent = usage_percent(used, total, _round=1)
        return ntuple_sysmeminfo(total, used, free, percent)
    finally:
        f.close()
# --- system CPU functions
def get_system_cpu_times():
    """Return a named tuple representing the following CPU times:
    user, nice, system, idle, iowait, irq, softirq.
    """
    f = open('/proc/stat', 'r')
    try:
        first_line = f.readline()
    finally:
        f.close()
    # skip the leading "cpu" label; keep the first seven counters
    fields = first_line.split()[1:8]
    seconds = [float(value) / _CLOCK_TICKS for value in fields]
    return ntuple_sys_cputimes(*seconds[:7])
def get_system_per_cpu_times():
    """Return a list of namedtuple representing the CPU times
    for every CPU available on the system.
    """
    results = []
    f = open('/proc/stat', 'r')
    try:
        # the first line aggregates system wide CPU stats; skip it
        f.readline()
        for entry in f.readlines():
            if entry.startswith('cpu'):
                fields = entry.split()[1:8]
                seconds = tuple([float(v) / _CLOCK_TICKS for v in fields])
                results.append(ntuple_sys_cputimes(*seconds[:7]))
        return results
    finally:
        f.close()
# --- system disk functions
def disk_partitions(all=False):
    """Return mounted disk partitions as a list of nameduples.

    :param all: when False (default) skip entries whose device is empty
        or whose filesystem type is not backed by a physical device.
    """
    # filesystem types backed by a real device; "nodev" entries in
    # /proc/filesystems are virtual (proc, tmpfs, sysfs, ...)
    phydevs = []
    f = open("/proc/filesystems", "r")
    try:
        for line in f:
            if not line.startswith("nodev"):
                phydevs.append(line.strip())
    finally:
        f.close()
    retlist = []
    partitions = _psutil_linux.get_disk_partitions()
    for partition in partitions:
        device, mountpoint, fstype = partition
        if device == 'none':
            device = ''
        if not all:
            if device == '' or fstype not in phydevs:
                continue
        ntuple = ntuple_partition(device, mountpoint, fstype)
        retlist.append(ntuple)
    return retlist
# disk usage is handled by the shared POSIX implementation
get_disk_usage = _psposix.get_disk_usage
# --- process functions
def get_pid_list():
    """Returns a list of PIDs currently running on the system."""
    # PID 0 (kernel process) has no /proc entry; prepend it explicitly
    pids = [0]
    pids.extend(int(entry) for entry in os.listdir('/proc')
                if entry.isdigit())
    return pids
def pid_exists(pid):
    """Check For the existence of a unix pid."""
    # delegate to the shared POSIX implementation
    return _psposix.pid_exists(pid)
def network_io_counters():
    """Return network I/O statistics for every network interface
    installed on the system as a dict of raw tuples.
    """
    f = open("/proc/net/dev", "r")
    try:
        content = f.readlines()
    finally:
        f.close()
    results = {}
    # the first two lines of /proc/net/dev are header rows
    for entry in content[2:]:
        fields = entry.split()
        # strip the trailing ':' off the interface name
        name = fields[0][:-1]
        bytes_recv = int(fields[1])
        packets_recv = int(fields[2])
        bytes_sent = int(fields[9])
        packets_sent = int(fields[10])
        results[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv)
    return results
def disk_io_counters():
    """Return disk I/O statistics for every disk installed on the
    system as a dict of raw tuples.
    """
    # man iostat states that sectors are equivalent with blocks and
    # have a size of 512 bytes since 2.4 kernels. This value is
    # needed to calculate the amount of disk I/O in bytes.
    SECTOR_SIZE = 512
    # determine partitions we want to look for
    partitions = []
    f = open("/proc/partitions", "r")
    try:
        lines = f.readlines()[2:]
    finally:
        f.close()
    for line in lines:
        _, _, _, name = line.split()
        # a name ending in a digit is a partition (e.g. "sda1") as
        # opposed to a whole disk (e.g. "sda")
        if name[-1].isdigit():
            partitions.append(name)
    # parse per-device counters out of /proc/diskstats
    retdict = {}
    f = open("/proc/diskstats", "r")
    try:
        lines = f.readlines()
    finally:
        f.close()
    for line in lines:
        _, _, name, reads, _, rbytes, rtime, writes, _, wbytes, wtime = \
            line.split()[:11]
        if name in partitions:
            # sector counts -> bytes
            rbytes = int(rbytes) * SECTOR_SIZE
            wbytes = int(wbytes) * SECTOR_SIZE
            reads = int(reads)
            writes = int(writes)
            # TODO: times are expressed in milliseconds while OSX/BSD has
            # these expressed in nanoseconds; figure this out.
            rtime = int(rtime)
            wtime = int(wtime)
            retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime)
    return retdict
# single-letter process state from /proc/<pid>/status mapped to the
# cross-platform STATUS_* constants; taken from /fs/proc/array.c
_status_map = {"R" : STATUS_RUNNING,
               "S" : STATUS_SLEEPING,
               "D" : STATUS_DISK_SLEEP,
               "T" : STATUS_STOPPED,
               "t" : STATUS_TRACING_STOP,
               "Z" : STATUS_ZOMBIE,
               "X" : STATUS_DEAD,
               "x" : STATUS_DEAD,
               "K" : STATUS_WAKE_KILL,
               "W" : STATUS_WAKING}
# --- decorators
def wrap_exceptions(callable):
    """Call callable into a try/except clause and translate ENOENT,
    ESRCH into NoSuchProcess and EPERM, EACCES into AccessDenied
    exceptions; anything else is re-raised unchanged.
    """
    def wrapper(self, *args, **kwargs):
        try:
            return callable(self, *args, **kwargs)
        # "except E as err" replaces the Python2-only "except E, err"
        # form and works on Python 2.6+ and 3.x alike
        except (OSError, IOError) as err:
            # ENOENT (no such file or directory) gets raised on open().
            # ESRCH (no such process) can get raised on read() if
            # process is gone in meantime.
            if err.errno in (errno.ENOENT, errno.ESRCH):
                raise NoSuchProcess(self.pid, self._process_name)
            if err.errno in (errno.EPERM, errno.EACCES):
                raise AccessDenied(self.pid, self._process_name)
            raise
    return wrapper
class Process(object):
    """Linux process implementation."""
    # keep instances lightweight: callers may create many of these
    __slots__ = ["pid", "_process_name"]
    def __init__(self, pid):
        # pid of the process whose /proc/<pid>/* files we will read
        self.pid = pid
        self._process_name = None
@wrap_exceptions
def get_process_name(self):
if self.pid == 0:
return 'sched' # special case for kernel process
f = open("/proc/%s/stat" % self.pid)
try:
name = f.read().split(' ')[1].replace('(', '').replace(')', '')
finally:
f.close()
# XXX - gets changed later and probably needs refactoring
return name
def get_process_exe(self):
if self.pid in (0, 2):
raise AccessDenied(self.pid, self._process_name)
try:
exe = os.readlink("/proc/%s/exe" % self.pid)
except (OSError, IOError), err:
if err.errno == errno.ENOENT:
# no such file error; might be raised also if the
# path actually exists for system processes with
# low pids (about 0-20)
if os.path.lexists("/proc/%s/exe" % self.pid):
return ""
else:
# ok, it is a process which has gone away
raise NoSuchProcess(self.pid, self._process_name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._process_name)
raise
# readlink() might return paths containing null bytes causing
# problems when used with other fs-related functions (os.*,
# open(), ...)
exe = exe.replace('\x00', '')
# It seems symlinks can point to a deleted/invalid location
# (this usually happens with "pulseaudio" process).
# However, if we had permissions to execute readlink() it's
# likely that we'll be able to figure out exe from argv[0]
# later on.
if exe.endswith(" (deleted)") and not os.path.isfile(exe):
return ""
return exe
@wrap_exceptions
def get_process_cmdline(self):
if self.pid == 0:
return [] # special case for kernel process
f = open("/proc/%s/cmdline" % self.pid)
try:
# return the args as a list
return [x for x in f.read().split('\x00') if x]
finally:
f.close()
@wrap_exceptions
def get_process_terminal(self):
if self.pid == 0:
return None # special case for kernel process
f = open("/proc/%s/stat" % self.pid)
try:
tty_nr = int(f.read().split(' ')[6])
finally:
f.close()
try:
return _TERMINAL_MAP[tty_nr]
except KeyError:
return None
    @wrap_exceptions
    def get_process_io_counters(self):
        """Return (read_count, write_count, read_bytes, write_bytes)
        parsed from /proc/<pid>/io as an ntuple_io.
        """
        # special case for 0 (kernel process) PID
        if self.pid == 0:
            return ntuple_io(0, 0, 0, 0)
        f = open("/proc/%s/io" % self.pid)
        try:
            # NOTE(review): assumes all four lines are always present in
            # /proc/<pid>/io; a missing line would leave its local
            # unbound and raise NameError below -- confirm
            for line in f:
                if line.startswith("rchar"):
                    read_count = int(line.split()[1])
                elif line.startswith("wchar"):
                    write_count = int(line.split()[1])
                elif line.startswith("read_bytes"):
                    read_bytes = int(line.split()[1])
                elif line.startswith("write_bytes"):
                    write_bytes = int(line.split()[1])
            return ntuple_io(read_count, write_count, read_bytes, write_bytes)
        finally:
            f.close()
@wrap_exceptions
def get_cpu_times(self):
# special case for 0 (kernel process) PID
if self.pid == 0:
return ntuple_cputimes(0.0, 0.0)
f = open("/proc/%s/stat" % self.pid)
try:
st = f.read().strip()
finally:
f.close()
# ignore the first two values ("pid (exe)")
st = st[st.find(')') + 2:]
values = st.split(' ')
utime = float(values[11]) / _CLOCK_TICKS
stime = float(values[12]) / _CLOCK_TICKS
return ntuple_cputimes(utime, stime)
    @wrap_exceptions
    def process_wait(self, timeout=None):
        """Wait for the process to terminate via the shared POSIX
        implementation.

        :raises: TimeoutExpired, re-raised with pid and name attached.
        """
        try:
            return _psposix.wait_pid(self.pid, timeout)
        except TimeoutExpired:
            raise TimeoutExpired(self.pid, self._process_name)
    @wrap_exceptions
    def get_process_create_time(self):
        """Return the process creation time, in seconds since the epoch."""
        # special case for 0 (kernel processes) PID; return system uptime
        if self.pid == 0:
            return BOOT_TIME
        f = open("/proc/%s/stat" % self.pid)
        try:
            st = f.read().strip()
        finally:
            f.close()
        # ignore the first two values ("pid (exe)")
        st = st[st.find(')') + 2:]
        values = st.split(' ')
        # According to documentation, starttime is in field 21 and the
        # unit is jiffies (clock ticks).
        # We first divide it for clock ticks and then add uptime returning
        # seconds since the epoch, in UTC.
        starttime = (float(values[19]) / _CLOCK_TICKS) + BOOT_TIME
        return starttime
    @wrap_exceptions
    def get_memory_info(self):
        """Return (resident, virtual) memory sizes in bytes, parsed
        from /proc/<pid>/status, as an ntuple_meminfo.
        """
        # special case for 0 (kernel processes) PID
        if self.pid == 0:
            return ntuple_meminfo(0, 0)
        f = open("/proc/%s/status" % self.pid)
        try:
            virtual_size = 0
            resident_size = 0
            # _flag makes sure only the first "VmSize:" match is used;
            # the loop stops once VmRSS has been seen
            _flag = False
            for line in f:
                if (not _flag) and line.startswith("VmSize:"):
                    virtual_size = int(line.split()[1]) * 1024
                    _flag = True
                elif line.startswith("VmRSS"):
                    resident_size = int(line.split()[1]) * 1024
                    break
            return ntuple_meminfo(resident_size, virtual_size)
        finally:
            f.close()
@wrap_exceptions
def get_process_cwd(self):
if self.pid == 0:
raise AccessDenied(self.pid, self._process_name)
# readlink() might return paths containing null bytes causing
# problems when used with other fs-related functions (os.*,
# open(), ...)
path = os.readlink("/proc/%s/cwd" % self.pid)
return path.replace('\x00', '')
@wrap_exceptions
def get_process_num_threads(self):
if self.pid == 0:
return 0
f = open("/proc/%s/status" % self.pid)
try:
for line in f:
if line.startswith("Threads:"):
return int(line.split()[1])
raise RuntimeError("line not found")
finally:
f.close()
@wrap_exceptions
def get_process_threads(self):
if self.pid == 0:
return []
thread_ids = os.listdir("/proc/%s/task" % self.pid)
thread_ids.sort()
retlist = []
for thread_id in thread_ids:
try:
f = open("/proc/%s/task/%s/stat" % (self.pid, thread_id))
except (OSError, IOError), err:
if err.errno == errno.ENOENT:
# no such file or directory; it means thread
# disappeared on us
continue
raise
try:
st = f.read().strip()
finally:
f.close()
# ignore the first two values ("pid (exe)")
st = st[st.find(')') + 2:]
values = st.split(' ')
utime = float(values[11]) / _CLOCK_TICKS
stime = float(values[12]) / _CLOCK_TICKS
ntuple = ntuple_thread(int(thread_id), utime, stime)
retlist.append(ntuple)
return retlist
    @wrap_exceptions
    def get_process_nice(self):
        """Return the process niceness (scheduling priority)."""
        # Use the C implementation rather than parsing field 19 of
        # /proc/<pid>/stat by hand.
        return _psutil_posix.getpriority(self.pid)
    @wrap_exceptions
    def set_process_nice(self, value):
        """Set the process niceness via the C implementation."""
        return _psutil_posix.setpriority(self.pid, value)
    # only starting from kernel 2.6.13
    if hasattr(_psutil_linux, "ioprio_get"):
        @wrap_exceptions
        def get_process_ionice(self):
            """Return the (ioclass, value) I/O priority pair."""
            ioclass, value = _psutil_linux.ioprio_get(self.pid)
            return ntuple_ionice(ioclass, value)
        @wrap_exceptions
        def set_process_ionice(self, ioclass, value):
            """Set I/O priority; normalize (ioclass, value) first.

            RT and BE classes accept a priority level; NONE and IDLE
            do not (value must be falsy and is forced to 0).
            """
            if ioclass in (IOPRIO_CLASS_NONE, None):
                if value:
                    raise ValueError("can't specify value with IOPRIO_CLASS_NONE")
                ioclass = IOPRIO_CLASS_NONE
                value = 0
            if ioclass in (IOPRIO_CLASS_RT, IOPRIO_CLASS_BE):
                if value is None:
                    # default priority level for RT/BE
                    value = 4
            elif ioclass == IOPRIO_CLASS_IDLE:
                if value:
                    raise ValueError("can't specify value with IOPRIO_CLASS_IDLE")
                value = 0
            else:
                value = 0
            # NOTE(review): the kernel defines 8 priority levels
            # (valid data values 0..7); confirm whether the upper
            # bound here should be 7 rather than 8
            if not 0 <= value <= 8:
                raise ValueError("value argument range expected is between 0 and 8")
            return _psutil_linux.ioprio_set(self.pid, ioclass, value)
    @wrap_exceptions
    def get_process_status(self):
        """Return the process status as one of the STATUS_* constants."""
        if self.pid == 0:
            return 0
        f = open("/proc/%s/status" % self.pid)
        try:
            for line in f:
                if line.startswith("State:"):
                    letter = line.split()[1]
                    if letter in _status_map:
                        return _status_map[letter]
            # unknown state letter, or no "State:" line at all
            return constant(-1, '?')
        finally:
            f.close()
@wrap_exceptions
def get_open_files(self):
if self.pid == 0:
return []
retlist = []
files = os.listdir("/proc/%s/fd" % self.pid)
for fd in files:
file = "/proc/%s/fd/%s" % (self.pid, fd)
if os.path.islink(file):
file = os.readlink(file)
if file.startswith("socket:["):
continue
if file.startswith("pipe:["):
continue
if file == "[]":
continue
if os.path.isfile(file) and not file in retlist:
ntuple = ntuple_openfile(file, int(fd))
retlist.append(ntuple)
return retlist
@wrap_exceptions
def get_connections(self):
if self.pid == 0:
return []
inodes = {}
# os.listdir() is gonna raise a lot of access denied
# exceptions in case of unprivileged user; that's fine:
# lsof does the same so it's unlikely that we can to better.
for fd in os.listdir("/proc/%s/fd" % self.pid):
try:
inode = os.readlink("/proc/%s/fd/%s" % (self.pid, fd))
except OSError:
continue
if inode.startswith('socket:['):
# the process is using a socket
inode = inode[8:][:-1]
inodes[inode] = fd
if not inodes:
# no connections for this process
return []
def process(file, family, _type):
retlist = []
f = open(file)
try:
f.readline() # skip the first line
for line in f:
_, laddr, raddr, status, _, _, _, _, _, inode = \
line.split()[:10]
if inode in inodes:
laddr = self._decode_address(laddr, family)
raddr = self._decode_address(raddr, family)
if _type == socket.SOCK_STREAM:
status = _TCP_STATES_TABLE[status]
else:
status = ""
fd = int(inodes[inode])
conn = ntuple_connection(fd, family, _type, laddr,
raddr, status)
retlist.append(conn)
return retlist
finally:
f.close()
tcp4 = process("/proc/net/tcp", socket.AF_INET, socket.SOCK_STREAM)
udp4 = process("/proc/net/udp", socket.AF_INET, socket.SOCK_DGRAM)
try:
tcp6 = process("/proc/net/tcp6", socket.AF_INET6, socket.SOCK_STREAM)
udp6 = process("/proc/net/udp6", socket.AF_INET6, socket.SOCK_DGRAM)
except IOError, err:
if err.errno == errno.ENOENT:
# IPv6 is not supported on this platform
tcp6 = udp6 = []
else:
raise
return tcp4 + tcp6 + udp4 + udp6
# --- lsof implementation
#
# def get_connections(self):
# lsof = _psposix.LsofParser(self.pid, self._process_name)
# return lsof.get_process_connections()
@wrap_exceptions
def get_process_ppid(self):
if self.pid == 0:
return 0
f = open("/proc/%s/status" % self.pid)
try:
for line in f:
if line.startswith("PPid:"):
# PPid: nnnn
return int(line.split()[1])
raise RuntimeError("line not found")
finally:
f.close()
@wrap_exceptions
def get_process_uids(self):
if self.pid == 0:
return ntuple_uids(0, 0, 0)
f = open("/proc/%s/status" % self.pid)
try:
for line in f:
if line.startswith('Uid:'):
_, real, effective, saved, fs = line.split()
return ntuple_uids(int(real), int(effective), int(saved))
raise RuntimeError("line not found")
finally:
f.close()
@wrap_exceptions
def get_process_gids(self):
if self.pid == 0:
return ntuple_uids(0, 0, 0)
f = open("/proc/%s/status" % self.pid)
try:
for line in f:
if line.startswith('Gid:'):
_, real, effective, saved, fs = line.split()
return ntuple_gids(int(real), int(effective), int(saved))
raise RuntimeError("line not found")
finally:
f.close()
    @staticmethod
    def _decode_address(addr, family):
        """Accept an "ip:port" address as displayed in /proc/net/*
        and convert it into a human readable form, like:
        "0500000A:0016" -> ("10.0.0.5", 22)
        "0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)
        The IP address portion is a little or big endian four-byte
        hexadecimal number; that is, the least significant byte is listed
        first, so we need to reverse the order of the bytes to convert it
        to an IP address.
        The port is represented as a two-byte hexadecimal number.
        Reference:
        http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
        """
        ip, port = addr.split(':')
        port = int(port, 16)
        # b16decode wants bytes on Python 3
        if sys.version_info >= (3,):
            ip = ip.encode('ascii')
        # this usually refers to a local socket in listen mode with
        # no end-points connected
        if not port:
            return ()
        if family == socket.AF_INET:
            # see: http://code.google.com/p/psutil/issues/detail?id=201
            if sys.byteorder == 'little':
                ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
            else:
                ip = socket.inet_ntop(family, base64.b16decode(ip))
        else:  # IPv6
            # old version - let's keep it, just in case...
            #ip = ip.decode('hex')
            #return socket.inet_ntop(socket.AF_INET6,
            #    ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4)))
            ip = base64.b16decode(ip)
            # see: http://code.google.com/p/psutil/issues/detail?id=201
            # NOTE(review): the big-endian branch packs '<4I' after
            # unpacking '<4I', i.e. a no-op round-trip -- confirm this
            # is the intended behavior on big-endian platforms
            if sys.byteorder == 'little':
                ip = socket.inet_ntop(socket.AF_INET6,
                                struct.pack('>4I', *struct.unpack('<4I', ip)))
            else:
                ip = socket.inet_ntop(socket.AF_INET6,
                                struct.pack('<4I', *struct.unpack('<4I', ip)))
        return (ip, port)
| |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import, print_function
import os
import unittest2 as unittest
from txaio.testutil import replace_loop
# The twisted test variants are defined only when USE_TWISTED is set
# in the environment; otherwise the asyncio variants below are used.
if os.environ.get('USE_TWISTED', False):
    from mock import patch
    from zope.interface import implementer
    from twisted.internet.interfaces import IReactorTime
    @implementer(IReactorTime)
    class FakeReactor(object):
        '''
        This just fakes out enough reactor methods so .run() can work.
        '''
        stop_called = False
        def __init__(self, to_raise):
            # to_raise: the exception .run() will raise, simulating a
            # failure inside the reactor loop
            self.stop_called = False
            self.to_raise = to_raise
            self.delayed = []
        def run(self, *args, **kw):
            raise self.to_raise
        def stop(self):
            # record that the runner asked the reactor to stop
            self.stop_called = True
        def callLater(self, delay, func, *args, **kwargs):
            # record delayed calls instead of actually scheduling them
            self.delayed.append((delay, func, args, kwargs))
        def connectTCP(self, *args, **kw):
            raise RuntimeError("ConnectTCP shouldn't get called")
    class TestWampTwistedRunner(unittest.TestCase):
        # XXX should figure out *why* but the test_protocol timeout
        # tests fail if we *don't* patch out this txaio stuff. So,
        # presumably it's messing up some global state that both tests
        # implicitly depend on ...
        @patch('txaio.use_twisted')
        @patch('txaio.start_logging')
        @patch('txaio.config')
        def test_connect_error(self, *args):
            '''
            Ensure the runner doesn't swallow errors and that it exits the
            reactor properly if there is one.
            '''
            try:
                from autobahn.twisted.wamp import ApplicationRunner
                from twisted.internet.error import ConnectionRefusedError
                # the 'reactor' member doesn't exist until we import it
                from twisted.internet import reactor  # noqa: F401
            except ImportError:
                raise unittest.SkipTest('No twisted')
            runner = ApplicationRunner(u'ws://localhost:1', u'realm')
            exception = ConnectionRefusedError("It's a trap!")
            # run() must propagate the connection error and still stop
            # the (fake) reactor
            with patch('twisted.internet.reactor', FakeReactor(exception)) as mockreactor:
                self.assertRaises(
                    ConnectionRefusedError,
                    # pass a no-op session-creation method
                    runner.run, lambda _: None, start_reactor=True
                )
                self.assertTrue(mockreactor.stop_called)
else:
    # Asyncio tests.
    try:
        import asyncio
        from unittest.mock import patch, Mock
    except ImportError:
        # Trollius >= 0.3 was renamed to asyncio
        # noinspection PyUnresolvedReferences
        import trollius as asyncio
        from mock import patch, Mock
    from autobahn.asyncio.wamp import ApplicationRunner
class TestApplicationRunner(unittest.TestCase):
'''
Test the autobahn.asyncio.wamp.ApplicationRunner class.
'''
def _assertRaisesRegex(self, exception, error, *args, **kw):
try:
self.assertRaisesRegex
except AttributeError:
f = self.assertRaisesRegexp
else:
f = self.assertRaisesRegex
f(exception, error, *args, **kw)
def test_explicit_SSLContext(self):
'''
Ensure that loop.create_connection is called with the exact SSL
context object that is passed (as ssl) to the __init__ method of
ApplicationRunner.
'''
with replace_loop(Mock()) as loop:
with patch.object(asyncio, 'get_event_loop', return_value=loop):
loop.run_until_complete = Mock(return_value=(Mock(), Mock()))
ssl = {}
runner = ApplicationRunner(u'ws://127.0.0.1:8080/ws', u'realm',
ssl=ssl)
runner.run('_unused_')
self.assertIs(ssl, loop.create_connection.call_args[1]['ssl'])
def test_omitted_SSLContext_insecure(self):
'''
Ensure that loop.create_connection is called with ssl=False
if no ssl argument is passed to the __init__ method of
ApplicationRunner and the websocket URL starts with "ws:".
'''
with replace_loop(Mock()) as loop:
with patch.object(asyncio, 'get_event_loop', return_value=loop):
loop.run_until_complete = Mock(return_value=(Mock(), Mock()))
runner = ApplicationRunner(u'ws://127.0.0.1:8080/ws', u'realm')
runner.run('_unused_')
self.assertIs(False, loop.create_connection.call_args[1]['ssl'])
def test_omitted_SSLContext_secure(self):
'''
Ensure that loop.create_connection is called with ssl=True
if no ssl argument is passed to the __init__ method of
ApplicationRunner and the websocket URL starts with "wss:".
'''
with replace_loop(Mock()) as loop:
with patch.object(asyncio, 'get_event_loop', return_value=loop):
loop.run_until_complete = Mock(return_value=(Mock(), Mock()))
runner = ApplicationRunner(u'wss://127.0.0.1:8080/wss', u'realm')
runner.run(self.fail)
self.assertIs(True, loop.create_connection.call_args[1]['ssl'])
def test_conflict_SSL_True_with_ws_url(self):
'''
ApplicationRunner must raise an exception if given an ssl value of True
but only a "ws:" URL.
'''
with replace_loop(Mock()) as loop:
loop.run_until_complete = Mock(return_value=(Mock(), Mock()))
runner = ApplicationRunner(u'ws://127.0.0.1:8080/wss', u'realm',
ssl=True)
error = ('^ssl argument value passed to ApplicationRunner '
'conflicts with the "ws:" prefix of the url '
'argument\. Did you mean to use "wss:"\?$')
self._assertRaisesRegex(Exception, error, runner.run, '_unused_')
def test_conflict_SSLContext_with_ws_url(self):
'''
ApplicationRunner must raise an exception if given an ssl value that is
an instance of SSLContext, but only a "ws:" URL.
'''
import ssl
try:
# Try to create an SSLContext, to be as rigorous as we can be
# by avoiding making assumptions about the ApplicationRunner
# implementation. If we happen to be on a Python that has no
# SSLContext, we pass ssl=True, which will simply cause this
# test to degenerate to the behavior of
# test_conflict_SSL_True_with_ws_url (above). In fact, at the
# moment (2015-05-10), none of this matters because the
# ApplicationRunner implementation does not check to require
# that its ssl argument is either a bool or an SSLContext. But
# that may change, so we should be careful.
ssl.create_default_context
except AttributeError:
context = True
else:
context = ssl.create_default_context()
with replace_loop(Mock()) as loop:
loop.run_until_complete = Mock(return_value=(Mock(), Mock()))
runner = ApplicationRunner(u'ws://127.0.0.1:8080/wss', u'realm',
ssl=context)
error = ('^ssl argument value passed to ApplicationRunner '
'conflicts with the "ws:" prefix of the url '
'argument\. Did you mean to use "wss:"\?$')
self._assertRaisesRegex(Exception, error, runner.run, '_unused_')
| |
#===============================================================================
# Copyright 2007 Matt Chaput
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
from array import array
from whoosh.fields import UnknownFieldError
from whoosh.store import LockError
from whoosh.writing import IndexWriter
from whoosh.filedb import postpool
from whoosh.support.filelock import try_for
from whoosh.filedb.fileindex import SegmentDeletionMixin, Segment, SegmentSet
from whoosh.filedb.filepostings import FilePostingWriter
from whoosh.filedb.filetables import (FileTableWriter, FileListWriter,
FileRecordWriter, encode_termkey,
encode_vectorkey, encode_terminfo,
enpickle, packint)
from whoosh.util import fib
from whoosh.util.collections2 import defaultdict
# Per-document field lengths are stored as unsigned 16-bit integers
# (array type code "H"), so individual lengths are capped at 65535.
DOCLENGTH_TYPE = "H"
DOCLENGTH_LIMIT = 2 ** 16 - 1
# Merge policies
# A merge policy is a callable that takes the Index object, the SegmentWriter
# object, and the current SegmentSet (not including the segment being written),
# and returns an updated SegmentSet (not including the segment being written).
def NO_MERGE(ix, writer, segments):
    """Merge policy that leaves every existing segment untouched.

    :param ix: the Index object (unused by this policy).
    :param writer: the SegmentWriter object (unused by this policy).
    :param segments: the current SegmentSet.
    :returns: ``segments``, unchanged.
    """
    return segments
def MERGE_SMALL(ix, writer, segments):
    """This policy merges small segments, where "small" is defined using a
    heuristic based on the fibonacci sequence.
    """
    from whoosh.filedb.filereading import SegmentReader
    kept = SegmentSet()
    # Visit segments from smallest to largest document count.
    by_size = sorted((s.doc_count_all(), s) for s in segments)
    running_total = 0
    for position, (doc_count, seg) in enumerate(by_size):
        # Empty segments are dropped entirely (neither merged nor kept).
        if doc_count > 0:
            running_total += doc_count
            if running_total < fib(position + 5):
                # Still "small": fold this segment into the new one.
                writer.add_reader(SegmentReader(ix.storage, seg, ix.schema))
            else:
                kept.append(seg)
    return kept
def OPTIMIZE(ix, writer, segments):
    """This policy merges all existing segments.
    """
    from whoosh.filedb.filereading import SegmentReader
    storage, schema = ix.storage, ix.schema
    for seg in segments:
        writer.add_reader(SegmentReader(storage, seg, schema))
    # Every old segment was folded in, so none remain.
    return SegmentSet()
# Convenience functions
def create_terms(storage, segment):
    """Open the segment's term file and wrap it in a table writer."""
    return FileTableWriter(storage.create_file(segment.term_filename),
                           keycoder=encode_termkey,
                           valuecoder=encode_terminfo)
def create_storedfields(storage, segment):
    """Open the segment's stored-fields file and wrap it in a list writer."""
    return FileListWriter(storage.create_file(segment.docs_filename),
                          valuecoder=enpickle)
def create_vectors(storage, segment):
    """Open the segment's vector file and wrap it in a table writer."""
    return FileTableWriter(storage.create_file(segment.vector_filename),
                           keycoder=encode_vectorkey,
                           valuecoder=packint)
def create_doclengths(storage, segment, fieldcount):
    """Open the segment's doc-length file as a fixed-size record writer,
    one unsigned 16-bit length per scorable field.
    """
    fmt = "!%s" % (DOCLENGTH_TYPE * fieldcount)
    return FileRecordWriter(storage.create_file(segment.doclen_filename), fmt)
# Writing classes
class FileIndexWriter(SegmentDeletionMixin, IndexWriter):
    # This class is mostly a shell for SegmentWriter. It exists to handle
    # multiple SegmentWriters during merging/optimizing.
    def __init__(self, ix, postlimit=32 * 1024 * 1024, blocklimit=128,
                 timeout=0.0, delay=0.1):
        """
        :param ix: the Index object you want to write to.
        :param postlimit: Essentially controls the maximum amount of memory the
            indexer uses at a time, in bytes (the actual amount of memory used
            by the Python process will be much larger because of other
            overhead). The default (32MB) is a bit small. You may want to
            increase this value for very large collections, e.g.
            ``postlimit=256*1024*1024``.
        :param blocklimit: maximum number of postings per posting block,
            passed through to the underlying SegmentWriter.
        :param timeout: how long (in seconds) to keep retrying to acquire
            the index write lock before giving up.
        :param delay: pause (in seconds) between lock acquisition attempts.
        :raises LockError: if the index is already locked for writing.
        """
        self.lock = ix.storage.lock(ix.indexname + "_LOCK")
        if not try_for(self.lock.acquire, timeout=timeout, delay=delay):
            # Bug fix: the original message contained a %s placeholder but
            # never interpolated the index name into it.
            raise LockError("Index %s is already locked for writing"
                            % ix.indexname)
        self.index = ix
        self.segments = ix.segments.copy()
        self.postlimit = postlimit
        self.blocklimit = blocklimit
        self._segment_writer = None
        self._searcher = ix.searcher()
    def _finish(self):
        # Release reader resources and the write lock; the writer is done.
        self._close_reader()
        self.lock.release()
        self._segment_writer = None
    def segment_writer(self):
        """Returns the underlying SegmentWriter object, creating it lazily
        on first use.
        """
        if not self._segment_writer:
            self._segment_writer = SegmentWriter(self.index, self.postlimit,
                                                 self.blocklimit)
        return self._segment_writer
    def add_document(self, **fields):
        """Adds a document, given as keyword arguments of field name -> value."""
        self.segment_writer().add_document(fields)
    def commit(self, mergetype=MERGE_SMALL):
        """Finishes writing and unlocks the index.
        :param mergetype: How to merge existing segments. One of
            :class:`whoosh.filedb.filewriting.NO_MERGE`,
            :class:`whoosh.filedb.filewriting.MERGE_SMALL`,
            or :class:`whoosh.filedb.filewriting.OPTIMIZE`.
        """
        self._close_reader()
        # Only merge if something was written, or the caller explicitly
        # asked for a full optimize.
        if self._segment_writer or mergetype is OPTIMIZE:
            self._merge_segments(mergetype)
        self.index.commit(self.segments)
        self._finish()
    def cancel(self):
        """Abandons any written data and unlocks the index."""
        if self._segment_writer:
            self._segment_writer._close_all()
        self._finish()
    def _merge_segments(self, mergetype):
        # Apply the merge policy, close the new segment's files, and record
        # the resulting segment set (policy result + the new segment).
        sw = self.segment_writer()
        new_segments = mergetype(self.index, sw, self.segments)
        sw.close()
        new_segments.append(sw.segment())
        self.segments = new_segments
class SegmentWriter(object):
    """Do not instantiate this object directly; it is created by the
    IndexWriter object.
    Handles the actual writing of new documents to the index: writes stored
    fields, handles the posting pool, and writes out the term index.
    """
    def __init__(self, ix, postlimit, blocklimit, name=None):
        """
        :param ix: the Index object in which to write the new segment.
        :param postlimit: the maximum size for a run in the posting pool.
        :param blocklimit: the maximum number of postings in a posting block.
        :param name: the name of the segment.
        """
        self.index = ix
        self.schema = ix.schema
        self.storage = storage = ix.storage
        self.name = name or ix._next_segment_name()
        # Number of documents written so far; doubles as the next doc number.
        self.max_doc = 0
        self.pool = postpool.PostingPool(postlimit)
        # Create mappings of field numbers to the position of that field in the
        # lists of scorable and stored fields. For example, consider a schema
        # with fields (A, B, C, D, E, F). If B, D, and E are scorable, then the
        # list of scorable fields is (B, D, E). The _scorable_to_pos dictionary
        # would then map B -> 0, D -> 1, and E -> 2.
        self._scorable_to_pos = dict((fnum, i)
                                     for i, fnum
                                     in enumerate(self.schema.scorable_fields()))
        self._stored_to_pos = dict((fnum, i)
                                   for i, fnum
                                   in enumerate(self.schema.stored_fields()))
        # Create a temporary segment object just so we can access its
        # *_filename attributes (so if we want to change the naming convention,
        # we only have to do it in one place).
        tempseg = Segment(self.name, 0, 0, None)
        self.termtable = create_terms(storage, tempseg)
        self.docslist = create_storedfields(storage, tempseg)
        self.doclengths = None
        if self.schema.scorable_fields():
            self.doclengths = create_doclengths(storage, tempseg, len(self._scorable_to_pos))
        postfile = storage.create_file(tempseg.posts_filename)
        self.postwriter = FilePostingWriter(postfile, blocklimit=blocklimit)
        self.vectortable = None
        if self.schema.has_vectored_fields():
            # Table associating document fields with (postoffset, postcount)
            self.vectortable = create_vectors(storage, tempseg)
            vpostfile = storage.create_file(tempseg.vectorposts_filename)
            self.vpostwriter = FilePostingWriter(vpostfile, stringids=True)
        # Keep track of the total number of tokens (across all docs)
        # in each field
        self.field_length_totals = defaultdict(int)
    def segment(self):
        """Returns an index.Segment object for the segment being written."""
        # NOTE(review): __init__'s tempseg passes a fourth positional arg
        # (None) while this call passes three; presumably Segment's trailing
        # parameters have defaults -- confirm against fileindex.Segment.
        return Segment(self.name, self.max_doc, dict(self.field_length_totals))
    def _close_all(self):
        # Close every file backing this segment: terms, postings, stored
        # fields, doc lengths (if scorable fields), vectors (if vectored).
        self.termtable.close()
        self.postwriter.close()
        self.docslist.close()
        if self.doclengths:
            self.doclengths.close()
        if self.vectortable:
            self.vectortable.close()
            self.vpostwriter.close()
    def close(self):
        """Finishes writing the segment (flushes the posting pool out to disk)
        and closes all open files.
        """
        self._flush_pool()
        self._close_all()
    def add_reader(self, reader):
        """Adds the contents of another segment to this one. This is used to
        merge existing segments into the new one before deleting them.
        :param ix: The index.Index object containing the segment to merge.
        :param segment: The index.Segment object to merge into this one.
        """
        start_doc = self.max_doc
        has_deletions = reader.has_deletions()
        if has_deletions:
            # Maps old doc numbers to their renumbered positions here
            # (gaps left by deleted documents are squeezed out).
            doc_map = {}
        schema = self.schema
        name2num = schema.name_to_number
        stored_to_pos = self._stored_to_pos
        def storedkeyhelper(item):
            # Sort key: position of the stored field in the stored-field list.
            return stored_to_pos[name2num(item[0])]
        # Merge document info
        docnum = 0
        vectored_fieldnums = schema.vectored_fields()
        for docnum in xrange(reader.doc_count_all()):
            if not reader.is_deleted(docnum):
                # Copy the stored fields and field lengths from the reader
                # into this segment
                storeditems = reader.stored_fields(docnum).items()
                storedvalues = [v for k, v
                                in sorted(storeditems, key=storedkeyhelper)]
                self._add_doc_data(storedvalues,
                                   reader.doc_field_lengths(docnum))
                if has_deletions:
                    doc_map[docnum] = self.max_doc
                # Copy term vectors
                for fieldnum in vectored_fieldnums:
                    if reader.has_vector(docnum, fieldnum):
                        self._add_vector(fieldnum,
                                         reader.vector(docnum, fieldnum).items())
                self.max_doc += 1
        # Add field length totals
        for fieldnum in schema.scorable_fields():
            self.field_length_totals[fieldnum] += reader.field_length(fieldnum)
        # Merge terms
        current_fieldnum = None
        decoder = None
        for fieldnum, text, _, _ in reader:
            if fieldnum != current_fieldnum:
                # Field changed: look up that field's frequency decoder once.
                current_fieldnum = fieldnum
                decoder = schema[fieldnum].format.decode_frequency
            postreader = reader.postings(fieldnum, text)
            for docnum, valuestring in postreader.all_items():
                if has_deletions:
                    newdoc = doc_map[docnum]
                else:
                    newdoc = start_doc + docnum
                # TODO: Is there a faster way to do this?
                freq = decoder(valuestring)
                self.pool.add_posting(fieldnum, text, newdoc, freq, valuestring)
    def add_document(self, fields):
        """Adds a document (a dict of field name -> value), indexing,
        vectoring, and storing each field according to the schema.
        """
        scorable_to_pos = self._scorable_to_pos
        stored_to_pos = self._stored_to_pos
        schema = self.schema
        # Sort the keys by their order in the schema
        fieldnames = [name for name in fields.keys()
                      if not name.startswith("_")]
        fieldnames.sort(key=schema.name_to_number)
        # Check if the caller gave us a bogus field
        for name in fieldnames:
            if name not in schema:
                raise UnknownFieldError("There is no field named %r" % name)
        # Create an array of counters to record the length of each field
        fieldlengths = array(DOCLENGTH_TYPE, [0] * len(scorable_to_pos))
        # Create a list (initially a list of Nones) in which we will put stored
        # field values as we get them. Why isn't this an empty list that we
        # append to? Because if the caller doesn't supply a value for a stored
        # field, we don't want to have a list in the wrong order/of the wrong
        # length.
        storedvalues = [None] * len(stored_to_pos)
        for name in fieldnames:
            value = fields.get(name)
            # NOTE(review): falsy values (0, "", None) are skipped entirely,
            # so they are neither indexed nor stored -- confirm intentional.
            if value:
                fieldnum = schema.name_to_number(name)
                field = schema.field_by_number(fieldnum)
                # If the field is indexed, add the words in the value to the
                # index
                if field.indexed:
                    # Count of all terms in the value
                    count = 0
                    # Count of UNIQUE terms in the value
                    unique = 0
                    # TODO: Method for adding progressive field values, ie
                    # setting start_pos/start_char?
                    for w, freq, valuestring in field.index(value):
                        #assert w != ""
                        self.pool.add_posting(fieldnum, w, self.max_doc, freq,
                                              valuestring)
                        count += freq
                        unique += 1
                    if field.scorable:
                        # Add the term count to the total for this field
                        self.field_length_totals[fieldnum] += count
                        # Set the term count to the per-document field length
                        pos = scorable_to_pos[fieldnum]
                        fieldlengths[pos] = min(count, DOCLENGTH_LIMIT)
                # If the field is vectored, add the words in the value to the
                # vector table
                vector = field.vector
                if vector:
                    # TODO: Method for adding progressive field values, ie
                    # setting start_pos/start_char?
                    vlist = sorted((w, valuestring) for w, freq, valuestring
                                   in vector.word_values(value, mode="index"))
                    self._add_vector(fieldnum, vlist)
                # If the field is stored, put the value in storedvalues
                if field.stored:
                    # Caller can override the stored value by including a key
                    # _stored_<fieldname>
                    storedname = "_stored_" + name
                    if storedname in fields:
                        stored_value = fields[storedname]
                    else :
                        stored_value = value
                    storedvalues[stored_to_pos[fieldnum]] = stored_value
        self._add_doc_data(storedvalues, fieldlengths)
        self.max_doc += 1
    def _add_terms(self):
        # Apparently an unused placeholder; nothing in this module calls it.
        pass
    def _add_doc_data(self, storedvalues, fieldlengths):
        # Append one document's stored values, plus its per-field lengths
        # when the schema has scorable fields.
        self.docslist.append(storedvalues)
        if self.doclengths:
            self.doclengths.append(fieldlengths)
    def _add_vector(self, fieldnum, vlist):
        # Write a term vector for (current doc, fieldnum); vlist is a sorted
        # list of (text, valuestring) pairs.
        vpostwriter = self.vpostwriter
        vformat = self.schema[fieldnum].vector
        offset = vpostwriter.start(vformat)
        for text, valuestring in vlist:
            assert isinstance(text, unicode), "%r is not unicode" % text
            vpostwriter.write(text, valuestring)
        vpostwriter.finish()
        self.vectortable.add((self.max_doc, fieldnum), offset)
    def _flush_pool(self):
        # This method pulls postings out of the posting pool (built up as
        # documents are added) and writes them to the posting file. Each time
        # it encounters a posting for a new term, it writes the previous term
        # to the term index (by waiting to write the term entry, we can easily
        # count the document frequency and sum the terms by looking at the
        # postings).
        termtable = self.termtable
        postwriter = self.postwriter
        schema = self.schema
        current_fieldnum = None # Field number of the current term
        current_text = None # Text of the current term
        first = True
        current_freq = 0
        offset = None
        # Loop through the postings in the pool. Postings always come out of
        # the pool in (field number, lexical) order.
        for fieldnum, text, docnum, freq, valuestring in self.pool:
            # Is this the first time through, or is this a new term?
            if first or fieldnum > current_fieldnum or text > current_text:
                if first:
                    first = False
                else:
                    # This is a new term, so finish the postings and add the
                    # term to the term table
                    postcount = postwriter.finish()
                    termtable.add((current_fieldnum, current_text),
                                  (current_freq, offset, postcount))
                # Reset the post writer and the term variables
                current_fieldnum = fieldnum
                current_text = text
                current_freq = 0
                offset = postwriter.start(schema[fieldnum].format)
            elif (fieldnum < current_fieldnum
                  or (fieldnum == current_fieldnum and text < current_text)):
                # This should never happen!
                raise Exception("Postings are out of order: %s:%s .. %s:%s" %
                                (current_fieldnum, current_text, fieldnum, text))
            # Write a posting for this occurrence of the current term
            current_freq += freq
            postwriter.write(docnum, valuestring)
        # If there are still "uncommitted" postings at the end, finish them off
        if not first:
            postcount = postwriter.finish()
            termtable.add((current_fieldnum, current_text),
                          (current_freq, offset, postcount))
| |
# Copyright 2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import re
import sys
import fixtures
from jsonschema import exceptions as jsonschema_exc
import six
from nova.api.openstack import api_version_request as api_version
from nova.api import validation
from nova.api.validation import parameter_types
from nova.api.validation import validators
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
# JSON-schema for query-string validation used by
# FakeQueryParametersController: 'foo' must be a single UUID-formatted
# value, 'foos' may repeat, and any key starting with '_' may repeat too.
query_schema = {
    'type': 'object',
    'properties': {
        'foo': parameter_types.single_param({'type': 'string',
                                             'format': 'uuid'}),
        'foos': parameter_types.multi_params({'type': 'string'})
    },
    'patternProperties': {
        "^_": parameter_types.multi_params({'type': 'string'})},
    'additionalProperties': True
}
class FakeQueryParametersController(object):
    """Stub controller whose ``get`` runs query-string validation and
    returns the surviving (deduplicated) query parameter names.
    """
    @validation.query_schema(query_schema, '2.3')
    def get(self, req):
        # Deduplicate the query keys; result order is unspecified.
        return list({key for key in req.GET.keys()})
class RegexFormatFakeController(object):
    # Stub controller whose body schema exercises the 'regex' format checker.
    schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'format': 'regex',
            },
        },
    }
    @validation.schema(request_body_schema=schema)
    def post(self, req, body):
        # Reached only when the body validated successfully.
        return 'Validation succeeded.'
class FakeRequest(object):
    # Minimal stand-in for a nova API request: carries only the attributes
    # the validation decorators read.
    api_version_request = api_version.APIVersionRequest("2.1")
    environ = {}
    legacy_v2 = False
    def is_legacy_v2(self):
        # Mirrors the real request API; tests flip ``legacy_v2`` directly.
        return self.legacy_v2
class ValidationRegex(test.NoDBTestCase):
    """Tests for the regex helpers in nova.api.validation.parameter_types."""
    def test_cell_names(self):
        # Cell names forbid '.', '@', '!' and leading whitespace.
        cellre = re.compile(parameter_types.valid_cell_name_regex.regex)
        self.assertTrue(cellre.search('foo'))
        self.assertFalse(cellre.search('foo.bar'))
        self.assertFalse(cellre.search('foo@bar'))
        self.assertFalse(cellre.search('foo!bar'))
        self.assertFalse(cellre.search(' foo!bar'))
        self.assertFalse(cellre.search('\nfoo!bar'))
    def test_build_regex_range(self):
        # this is much easier to think about if we only use the ascii
        # subset because it's a printable range we can think
        # about. The algorithm works for all ranges.
        def _get_all_chars():
            for i in range(0x7F):
                yield six.unichr(i)
        self.useFixture(fixtures.MonkeyPatch(
            'nova.api.validation.parameter_types._get_all_chars',
            _get_all_chars))
        # note that since we use only the ascii range in the tests
        # we have to clear the cache to recompute them.
        parameter_types._reset_cache()
        r = parameter_types._build_regex_range(ws=False)
        self.assertEqual(r, re.escape('!') + '-' + re.escape('~'))
        # if we allow whitespace the range starts earlier
        r = parameter_types._build_regex_range(ws=True)
        self.assertEqual(r, re.escape(' ') + '-' + re.escape('~'))
        # excluding a character will give us 2 ranges
        r = parameter_types._build_regex_range(ws=True, exclude=['A'])
        self.assertEqual(r,
                         re.escape(' ') + '-' + re.escape('@') +
                         'B' + '-' + re.escape('~'))
        # inverting which gives us all the initial unprintable characters.
        r = parameter_types._build_regex_range(ws=False, invert=True)
        self.assertEqual(r,
                         re.escape('\x00') + '-' + re.escape(' '))
        # excluding characters that create a singleton. Naively this would be:
        # ' -@B-BD-~' which seems to work, but ' -@BD-~' is more natural.
        r = parameter_types._build_regex_range(ws=True, exclude=['A', 'C'])
        self.assertEqual(r,
                         re.escape(' ') + '-' + re.escape('@') +
                         'B' + 'D' + '-' + re.escape('~'))
        # ws=True means the positive regex has printable whitespaces,
        # so the inverse will not. The inverse will include things we
        # exclude.
        r = parameter_types._build_regex_range(
            ws=True, exclude=['A', 'B', 'C', 'Z'], invert=True)
        self.assertEqual(r,
                         re.escape('\x00') + '-' + re.escape('\x1f') + 'A-CZ')
class APIValidationTestCase(test.NoDBTestCase):
    """Base class: wraps ``post_schema`` in a schema-validated ``post`` stub
    and provides a helper for asserting ValidationError details.
    """
    # Subclasses set this to the JSON-schema that should wrap self.post.
    post_schema = None
    def setUp(self):
        super(APIValidationTestCase, self).setUp()
        self.post = None
        if self.post_schema is not None:
            @validation.schema(request_body_schema=self.post_schema)
            def post(req, body):
                return 'Validation succeeded.'
            self.post = post
    def check_validation_error(self, method, body, expected_detail, req=None):
        """Call *method* with *body* and assert it raises ValidationError
        (HTTP 400) whose detail matches *expected_detail* — a literal
        string, a regex, or a list of acceptable literal strings.
        """
        if not req:
            req = FakeRequest()
        try:
            method(body=body, req=req)
        except exception.ValidationError as ex:
            self.assertEqual(400, ex.kwargs['code'])
            if isinstance(expected_detail, list):
                # jsonschema's error-message ordering varies between
                # versions; accept any of the listed alternatives.
                self.assertIn(ex.kwargs['detail'], expected_detail,
                              'Exception details did not match expected')
            elif not re.match(expected_detail, ex.kwargs['detail']):
                self.assertEqual(expected_detail, ex.kwargs['detail'],
                                 'Exception details did not match expected')
        except Exception as ex:
            self.fail('An unexpected exception happens: %s' % ex)
        else:
            self.fail('Any exception does not happen.')
class FormatCheckerTestCase(test.NoDBTestCase):
    """Tests that the custom 'format' checkers reject invalid name values
    with the expected InvalidName messages.
    """
    def _format_checker(self, format, value, error_message):
        # Run *value* through the named format checker and assert it fails
        # with exactly *error_message*.
        format_checker = validators.FormatChecker()
        exc = self.assertRaises(jsonschema_exc.FormatError,
                                format_checker.check, value, format)
        self.assertIsInstance(exc.cause, exception.InvalidName)
        self.assertEqual(error_message,
                         exc.cause.format_message())
    def test_format_checker_failed_with_non_string_name(self):
        error_message = ("An invalid 'name' value was provided. The name must "
                         "be: printable characters. "
                         "Can not start or end with whitespace.")
        self._format_checker("name", " ", error_message)
        self._format_checker("name", None, error_message)
    def test_format_checker_failed_with_non_string_cell_name(self):
        error_message = ("An invalid 'name' value was provided. "
                         "The name must be: printable characters except "
                         "!, ., @. Can not start or end with whitespace.")
        self._format_checker("cell_name", None, error_message)
    def test_format_checker_failed_name_with_leading_trailing_spaces(self):
        error_message = ("An invalid 'name' value was provided. "
                         "The name must be: printable characters with at "
                         "least one non space character")
        self._format_checker("name_with_leading_trailing_spaces",
                             None, error_message)
    def test_format_checker_failed_cell_name_with_leading_trailing_spaces(
            self):
        error_message = ("An invalid 'name' value was provided. "
                         "The name must be: printable characters except"
                         " !, ., @, with at least one non space character")
        self._format_checker("cell_name_with_leading_trailing_spaces",
                             None, error_message)
class MicroversionsSchemaTestCase(APIValidationTestCase):
    """Checks that stacked @validation.schema decorators select the right
    schema for the request's API microversion.
    """
    def setUp(self):
        super(MicroversionsSchemaTestCase, self).setUp()
        schema_v21_int = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': 'integer',
                }
            }
        }
        schema_v20_str = copy.deepcopy(schema_v21_int)
        schema_v20_str['properties']['foo'] = {'type': 'string'}
        # 2.0 requests validate 'foo' as a string; 2.1+ as an integer.
        @validation.schema(schema_v20_str, '2.0', '2.0')
        @validation.schema(schema_v21_int, '2.1')
        def post(req, body):
            return 'Validation succeeded.'
        self.post = post
    def test_validate_v2compatible_request(self):
        req = FakeRequest()
        req.legacy_v2 = True
        self.assertEqual(self.post(body={'foo': 'bar'}, req=req),
                         'Validation succeeded.')
        detail = ("Invalid input for field/attribute foo. Value: 1. "
                  "1 is not of type 'string'")
        self.check_validation_error(self.post, body={'foo': 1},
                                    expected_detail=detail, req=req)
    def test_validate_v21_request(self):
        req = FakeRequest()
        self.assertEqual(self.post(body={'foo': 1}, req=req),
                         'Validation succeeded.')
        detail = ("Invalid input for field/attribute foo. Value: bar. "
                  "'bar' is not of type 'integer'")
        self.check_validation_error(self.post, body={'foo': 'bar'},
                                    expected_detail=detail, req=req)
    def test_validate_v2compatible_request_with_none_min_version(self):
        # A schema with no version bounds applies to all versions,
        # including legacy v2 requests.
        schema_none = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': 'integer'
                }
            }
        }
        @validation.schema(schema_none)
        def post(req, body):
            return 'Validation succeeded.'
        req = FakeRequest()
        req.legacy_v2 = True
        self.assertEqual('Validation succeeded.',
                         post(body={'foo': 1}, req=req))
        detail = ("Invalid input for field/attribute foo. Value: bar. "
                  "'bar' is not of type 'integer'")
        self.check_validation_error(post, body={'foo': 'bar'},
                                    expected_detail=detail, req=req)
class QueryParamsSchemaTestCase(test.NoDBTestCase):
    """Tests for query-string validation via @validation.query_schema."""
    def setUp(self):
        super(QueryParamsSchemaTestCase, self).setUp()
        self.controller = FakeQueryParametersController()
    def test_validate_request(self):
        req = fakes.HTTPRequest.blank("/tests?foo=%s" % fakes.FAKE_UUID)
        req.api_version_request = api_version.APIVersionRequest("2.3")
        self.assertEqual(['foo'], self.controller.get(req))
    def test_validate_request_failed(self):
        # parameter 'foo' expect a UUID
        req = fakes.HTTPRequest.blank("/tests?foo=abc")
        req.api_version_request = api_version.APIVersionRequest("2.3")
        ex = self.assertRaises(exception.ValidationError, self.controller.get,
                               req)
        if six.PY3:
            self.assertEqual("Invalid input for query parameters foo. Value: "
                             "abc. 'abc' is not a 'uuid'", six.text_type(ex))
        else:
            # Python 2 repr()s the value with a u'' prefix.
            self.assertEqual("Invalid input for query parameters foo. Value: "
                             "abc. u'abc' is not a 'uuid'", six.text_type(ex))
    def test_validate_request_with_multiple_values(self):
        req = fakes.HTTPRequest.blank("/tests?foos=abc")
        req.api_version_request = api_version.APIVersionRequest("2.3")
        self.assertEqual(['foos'], self.controller.get(req))
        req = fakes.HTTPRequest.blank("/tests?foos=abc&foos=def")
        self.assertEqual(['foos'], self.controller.get(req))
    def test_validate_request_with_multiple_values_fails(self):
        # 'foo' is declared single_param, so repeating it must fail.
        req = fakes.HTTPRequest.blank(
            "/tests?foo=%s&foo=%s" % (fakes.FAKE_UUID, fakes.FAKE_UUID))
        req.api_version_request = api_version.APIVersionRequest("2.3")
        self.assertRaises(exception.ValidationError, self.controller.get, req)
    def test_validate_request_unicode_decode_failure(self):
        # %88 is not valid UTF-8.
        req = fakes.HTTPRequest.blank("/tests?foo=%88")
        req.api_version_request = api_version.APIVersionRequest("2.1")
        ex = self.assertRaises(
            exception.ValidationError, self.controller.get, req)
        self.assertIn("Query string is not UTF-8 encoded", six.text_type(ex))
    def test_strip_out_additional_properties(self):
        req = fakes.HTTPRequest.blank(
            "/tests?foos=abc&foo=%s&bar=123&-bar=456" % fakes.FAKE_UUID)
        req.api_version_request = api_version.APIVersionRequest("2.3")
        res = self.controller.get(req)
        res.sort()
        self.assertEqual(['foo', 'foos'], res)
    def test_no_strip_out_additional_properties_when_not_match_version(self):
        req = fakes.HTTPRequest.blank(
            "/tests?foos=abc&foo=%s&bar=123&bar=456" % fakes.FAKE_UUID)
        # The JSON-schema matches to the API version 2.3 and above. Request
        # with version 2.1 to ensure there is no strip-out of additional
        # parameters when the schema didn't match the request version.
        req.api_version_request = api_version.APIVersionRequest("2.1")
        res = self.controller.get(req)
        res.sort()
        self.assertEqual(['bar', 'foo', 'foos'], res)
    def test_strip_out_correct_pattern_retained(self):
        # Keys matching the '^_' patternProperties rule must survive.
        req = fakes.HTTPRequest.blank(
            "/tests?foos=abc&foo=%s&bar=123&_foo_=456" % fakes.FAKE_UUID)
        req.api_version_request = api_version.APIVersionRequest("2.3")
        res = self.controller.get(req)
        res.sort()
        self.assertEqual(['_foo_', 'foo', 'foos'], res)
class RequiredDisableTestCase(APIValidationTestCase):
    """With no 'required' list, any property (or none) should validate."""
    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'integer',
            },
        },
    }
    def test_validate_required_disable(self):
        self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()),
                         'Validation succeeded.')
        # 'foo' missing entirely is also fine without 'required'.
        self.assertEqual(self.post(body={'abc': 1}, req=FakeRequest()),
                         'Validation succeeded.')
class RequiredEnableTestCase(APIValidationTestCase):
    """With 'required': ['foo'], a body lacking 'foo' must be rejected."""
    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'integer',
            },
        },
        'required': ['foo']
    }
    def test_validate_required_enable(self):
        self.assertEqual(self.post(body={'foo': 1},
                                   req=FakeRequest()), 'Validation succeeded.')
    def test_validate_required_enable_fails(self):
        detail = "'foo' is a required property"
        self.check_validation_error(self.post, body={'abc': 1},
                                    expected_detail=detail)
class AdditionalPropertiesEnableTestCase(APIValidationTestCase):
    """additionalProperties defaults to allowed: extra keys must pass."""
    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'integer',
            },
        },
        'required': ['foo'],
    }
    def test_validate_additionalProperties_enable(self):
        self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()),
                         'Validation succeeded.')
        # Extra key 'ext' is accepted since additionalProperties is unset.
        self.assertEqual(self.post(body={'foo': 1, 'ext': 1},
                                   req=FakeRequest()),
                         'Validation succeeded.')
class AdditionalPropertiesDisableTestCase(APIValidationTestCase):
    """With additionalProperties: False, extra keys must be rejected."""
    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'integer',
            },
        },
        'required': ['foo'],
        'additionalProperties': False,
    }
    def test_validate_additionalProperties_disable(self):
        self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()),
                         'Validation succeeded.')
    def test_validate_additionalProperties_disable_fails(self):
        detail = "Additional properties are not allowed ('ext' was unexpected)"
        self.check_validation_error(self.post, body={'foo': 1, 'ext': 1},
                                    expected_detail=detail)
class PatternPropertiesTestCase(APIValidationTestCase):
    """Validation tests for schemas using patternProperties."""
    post_schema = {
        'patternProperties': {
            '^[a-zA-Z0-9]{1,10}$': {
                'type': 'string'
            },
        },
        'additionalProperties': False,
    }
    def test_validate_patternProperties(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'bar'}, req=FakeRequest()))
    def test_validate_patternProperties_fails(self):
        # jsonschema versions word this failure differently; accept either.
        details = [
            "Additional properties are not allowed ('__' was unexpected)",
            "'__' does not match any of the regexes: '^[a-zA-Z0-9]{1,10}$'"
        ]
        self.check_validation_error(self.post, body={'__': 'bar'},
                                    expected_detail=details)
        details = [
            "'' does not match any of the regexes: '^[a-zA-Z0-9]{1,10}$'",
            "Additional properties are not allowed ('' was unexpected)"
        ]
        self.check_validation_error(self.post, body={'': 'bar'},
                                    expected_detail=details)
        details = [
            ("'0123456789a' does not match any of the regexes: "
             "'^[a-zA-Z0-9]{1,10}$'"),
            ("Additional properties are not allowed ('0123456789a' was"
             " unexpected)")
        ]
        self.check_validation_error(self.post, body={'0123456789a': 'bar'},
                                    expected_detail=details)
        # Note(jrosenboom): This is referencing an internal python error
        # string, which is no stable interface. We need a patch in the
        # jsonschema library in order to fix this properly.
        # Bug fix: the original compared sys.version[:3] against a hard-coded
        # list ['3.5', '3.6', '3.7'], which misses 3.8/3.9 and breaks on
        # 3.10+ where sys.version[:3] is '3.1'. Compare version_info instead.
        if sys.version_info >= (3, 5):
            detail = "expected string or bytes-like object"
        else:
            detail = "expected string or buffer"
        self.check_validation_error(self.post, body={None: 'bar'},
                                    expected_detail=detail)
class StringTestCase(APIValidationTestCase):
    """Type-checking tests for a plain 'string' property."""
    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'string',
            },
        },
    }
    def test_validate_string(self):
        self.assertEqual(self.post(body={'foo': 'abc'}, req=FakeRequest()),
                         'Validation succeeded.')
        self.assertEqual(self.post(body={'foo': '0'}, req=FakeRequest()),
                         'Validation succeeded.')
        # The empty string is still a valid string.
        self.assertEqual(self.post(body={'foo': ''}, req=FakeRequest()),
                         'Validation succeeded.')
    def test_validate_string_fails(self):
        detail = ("Invalid input for field/attribute foo. Value: 1."
                  " 1 is not of type 'string'")
        self.check_validation_error(self.post, body={'foo': 1},
                                    expected_detail=detail)
        detail = ("Invalid input for field/attribute foo. Value: 1.5."
                  " 1.5 is not of type 'string'")
        self.check_validation_error(self.post, body={'foo': 1.5},
                                    expected_detail=detail)
        detail = ("Invalid input for field/attribute foo. Value: True."
                  " True is not of type 'string'")
        self.check_validation_error(self.post, body={'foo': True},
                                    expected_detail=detail)
class StringLengthTestCase(APIValidationTestCase):
    """Boundary tests for minLength/maxLength on a string property."""
    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'string',
                'minLength': 1,
                'maxLength': 10,
            },
        },
    }
    def test_validate_string_length(self):
        # Exactly at the lower and upper length bounds.
        self.assertEqual(self.post(body={'foo': '0'}, req=FakeRequest()),
                         'Validation succeeded.')
        self.assertEqual(self.post(body={'foo': '0123456789'},
                                   req=FakeRequest()),
                         'Validation succeeded.')
    def test_validate_string_length_fails(self):
        detail = ("Invalid input for field/attribute foo. Value: ."
                  " '' is too short")
        self.check_validation_error(self.post, body={'foo': ''},
                                    expected_detail=detail)
        detail = ("Invalid input for field/attribute foo. Value: 0123456789a."
                  " '0123456789a' is too long")
        self.check_validation_error(self.post, body={'foo': '0123456789a'},
                                    expected_detail=detail)
class IntegerTestCase(APIValidationTestCase):
    """Validation of an integer-or-digit-string property."""

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': ['integer', 'string'],
                'pattern': '^[0-9]+$',
            },
        },
    }

    def test_validate_integer(self):
        # Plain integers and digit-only strings are accepted.
        for value in (1, '1', '0123456789'):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value},
                                       req=FakeRequest()))

    def test_validate_integer_fails(self):
        cases = (
            # Non-digit string fails the pattern.
            ('abc', "Invalid input for field/attribute foo. Value: abc."
                    " 'abc' does not match '^[0-9]+$'"),
            # Booleans are not integers for JSON-Schema purposes.
            (True, "Invalid input for field/attribute foo. Value: True."
                   " True is not of type 'integer', 'string'"),
            # Hex strings fail the decimal-only pattern.
            ('0xffff', "Invalid input for field/attribute foo. Value: 0xffff."
                       " '0xffff' does not match '^[0-9]+$'"),
            # Floats are neither integers nor strings.
            (1.0, "Invalid input for field/attribute foo. Value: 1.0."
                  " 1.0 is not of type 'integer', 'string'"),
            # Float-looking strings fail the pattern.
            ('1.0', "Invalid input for field/attribute foo. Value: 1.0."
                    " '1.0' does not match '^[0-9]+$'"),
        )
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class IntegerRangeTestCase(APIValidationTestCase):
    """Validation of 'minimum'/'maximum' bounds on an integer property."""

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': ['integer', 'string'],
                'pattern': '^[0-9]+$',
                'minimum': 1,
                'maximum': 10,
            },
        },
    }

    def test_validate_integer_range(self):
        # Both boundary values and a numeric string are accepted.
        for value in (1, 10, '1'):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value},
                                       req=FakeRequest()))

    def test_validate_integer_range_fails(self):
        # Out-of-range values are rejected whether given as int or string.
        # The "(.0)?" in the expected detail tolerates a float rendering
        # of the value in the underlying error message.
        cases = (
            (0, "Invalid input for field/attribute foo. Value: 0."
                " 0(.0)? is less than the minimum of 1"),
            (11, "Invalid input for field/attribute foo. Value: 11."
                 " 11(.0)? is greater than the maximum of 10"),
            ('0', "Invalid input for field/attribute foo. Value: 0."
                  " 0(.0)? is less than the minimum of 1"),
            ('11', "Invalid input for field/attribute foo. Value: 11."
                   " 11(.0)? is greater than the maximum of 10"),
        )
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class BooleanTestCase(APIValidationTestCase):
    """Validation of the shared parameter_types.boolean definition."""

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': parameter_types.boolean,
        },
    }

    def test_validate_boolean(self):
        # Real booleans and common string spellings are all accepted.
        for value in (True, False, 'True', 'False', '1', '0'):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value},
                                       req=FakeRequest()))

    def test_validate_boolean_fails(self):
        enum_boolean = ("[True, 'True', 'TRUE', 'true', '1', 'ON', 'On',"
                        " 'on', 'YES', 'Yes', 'yes',"
                        " False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off',"
                        " 'off', 'NO', 'No', 'no']")
        # Anything outside the accepted enumeration is rejected.
        for value in ('bar', '2'):
            detail = ("Invalid input for field/attribute foo. Value: %s."
                      " '%s' is not one of %s") % (value, value, enum_boolean)
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class HostnameTestCase(APIValidationTestCase):
    """Validation of the shared parameter_types.hostname definition."""

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': parameter_types.hostname,
        },
    }

    def test_validate_hostname(self):
        # Plain names, dotted domains, hyphens and underscores all pass.
        for value in ('localhost', 'localhost.localdomain.com',
                      'my-host', 'my_host'):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value},
                                       req=FakeRequest()))

    def test_validate_hostname_fails(self):
        cases = (
            # Non-string values are rejected by type.
            (True, "Invalid input for field/attribute foo. Value: True."
                   " True is not of type 'string'"),
            (1, "Invalid input for field/attribute foo. Value: 1."
                " 1 is not of type 'string'"),
            # Characters outside the hostname alphabet fail the pattern.
            ('my$host',
             "Invalid input for field/attribute foo. Value: my$host."
             " 'my$host' does not match '^[a-zA-Z0-9-._]*$'"),
        )
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class HostnameIPaddressTestCase(APIValidationTestCase):
    """Validation of parameter_types.hostname_or_ip_address."""

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': parameter_types.hostname_or_ip_address,
        },
    }

    def test_validate_hostname_or_ip_address(self):
        # Hostnames, dotted domains, IPv4 and IPv6 literals all pass.
        for value in ('localhost', 'localhost.localdomain.com',
                      'my-host', 'my_host', '192.168.10.100',
                      '2001:db8::9abc'):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value},
                                       req=FakeRequest()))

    def test_validate_hostname_or_ip_address_fails(self):
        cases = (
            # Non-string values are rejected by type.
            (True, "Invalid input for field/attribute foo. Value: True."
                   " True is not of type 'string'"),
            (1, "Invalid input for field/attribute foo. Value: 1."
                " 1 is not of type 'string'"),
            # Characters outside the allowed alphabet fail the pattern.
            ('my$host',
             "Invalid input for field/attribute foo. Value: my$host."
             " 'my$host' does not match '^[a-zA-Z0-9-_.:]*$'"),
        )
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class CellNameTestCase(APIValidationTestCase):
    """Validation of the shared parameter_types.cell_name definition."""

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': parameter_types.cell_name,
        },
    }

    def test_validate_name(self):
        # Printable names, embedded spaces and non-ASCII text are accepted.
        for value in ('abc', 'my server', u'\u0434',
                      u'\u0434\u2006\ufffd'):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value},
                                       req=FakeRequest()))

    def test_validate_name_fails(self):
        expected = ("An invalid 'name' value was provided. The name must be: "
                    "printable characters except !, ., @. "
                    "Can not start or end with whitespace.")
        bad_values = (
            ' ',
            ' server',
            'server ',
            u'a\xa0',    # trailing unicode space
            u'\uffff',   # non-printable unicode
            'abc!def',
            'abc.def',
            'abc@def',
        )
        for value in bad_values:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=expected)
        # Four-byte unicode, if supported by this python build.
        try:
            self.check_validation_error(self.post,
                                        body={'foo': u'\U00010000'},
                                        expected_detail=expected)
        except ValueError:
            pass
class CellNameLeadingTrailingSpacesTestCase(APIValidationTestCase):
    """Validation of parameter_types.cell_name_leading_trailing_spaces."""

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': parameter_types.cell_name_leading_trailing_spaces,
        },
    }

    def test_validate_name(self):
        # Unlike the plain cell_name type, leading/trailing whitespace
        # around an otherwise valid name is allowed here.
        for value in ('abc', 'my server', u'\u0434',
                      u'\u0434\u2006\ufffd', ' my server', 'my server '):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value},
                                       req=FakeRequest()))

    def test_validate_name_fails(self):
        expected = ("An invalid 'name' value was provided. The name must be: "
                    "printable characters except !, ., @, "
                    "with at least one non space character")
        bad_values = (
            ' ',
            u'\uffff',   # non-printable unicode
            'abc!def',
            'abc.def',
            'abc@def',
        )
        for value in bad_values:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=expected)
        # Four-byte unicode, if supported by this python build.
        try:
            self.check_validation_error(self.post,
                                        body={'foo': u'\U00010000'},
                                        expected_detail=expected)
        except ValueError:
            pass
class NameTestCase(APIValidationTestCase):
    """Validation of the shared parameter_types.name definition."""

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': parameter_types.name,
        },
    }

    def test_validate_name(self):
        # Printable names, including non-ASCII text, are accepted.
        for value in ('m1.small', 'my server', 'a', u'\u0434',
                      u'\u0434\u2006\ufffd'):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value},
                                       req=FakeRequest()))

    def test_validate_name_fails(self):
        expected = ("An invalid 'name' value was provided. The name must be: "
                    "printable characters. "
                    "Can not start or end with whitespace.")
        bad_values = (
            ' ',
            ' server',
            'server ',
            u'a\xa0',    # trailing unicode space
            u'\uffff',   # non-printable unicode
        )
        for value in bad_values:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=expected)
        # Four-byte unicode, if supported by this python build.
        try:
            self.check_validation_error(self.post,
                                        body={'foo': u'\U00010000'},
                                        expected_detail=expected)
        except ValueError:
            pass
class NameWithLeadingTrailingSpacesTestCase(APIValidationTestCase):
    """Validation of parameter_types.name_with_leading_trailing_spaces."""

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': parameter_types.name_with_leading_trailing_spaces,
        },
    }

    def test_validate_name(self):
        # Surrounding whitespace (including a leading unicode space) is
        # tolerated as long as the name has a non-space character.
        for value in ('m1.small', 'my server', 'a', u'\u0434',
                      u'\u0434\u2006\ufffd', ' abc ', 'abc abc abc',
                      ' abc abc abc ', '\xa0abc'):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value},
                                       req=FakeRequest()))

    def test_validate_name_fails(self):
        expected = ("An invalid 'name' value was provided. The name must be: "
                    "printable characters with at least one non space "
                    "character")
        bad_values = (
            ' ',
            u'\xa0',     # unicode space
            u'\uffff',   # non-printable unicode
        )
        for value in bad_values:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=expected)
        # Four-byte unicode, if supported by this python build.
        try:
            self.check_validation_error(self.post,
                                        body={'foo': u'\U00010000'},
                                        expected_detail=expected)
        except ValueError:
            pass
class NoneTypeTestCase(APIValidationTestCase):
    """Validation of parameter_types.none ('None', null or {})."""

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': parameter_types.none
        }
    }

    def test_validate_none(self):
        # The literal string 'None', a JSON null and an empty object
        # are the only accepted spellings.
        for value in ('None', None, {}):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value},
                                       req=FakeRequest()))

    def test_validate_none_fails(self):
        cases = (
            ('', "Invalid input for field/attribute foo. Value: ."
                 " '' is not one of ['None', None, {}]"),
            ({'key': 'val'},
             "Invalid input for field/attribute foo. Value: "
             "{'key': 'val'}. {'key': 'val'} is not one of "
             "['None', None, {}]"),
        )
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class NameOrNoneTestCase(APIValidationTestCase):
    """Validation of parameter_types.name_or_none."""

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': parameter_types.name_or_none
        }
    }

    def test_valid(self):
        # Either a null or a non-empty name string is accepted.
        for value in (None, '1'):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value},
                                       req=FakeRequest()))

    def test_validate_fails(self):
        too_long_name = 256 * "k"
        cases = (
            # Integers satisfy neither subschema.
            (1234, "Invalid input for field/attribute foo. Value: 1234. 1234 "
                   "is not valid under any of the given schemas"),
            # The empty string is too short for the name subschema.
            ('', "Invalid input for field/attribute foo. Value: . '' "
                 "is not valid under any of the given schemas"),
            # 256 characters exceeds the name length limit.
            (too_long_name,
             ("Invalid input for field/attribute foo. Value: %s. "
              "'%s' is not valid under any of the "
              "given schemas") % (too_long_name, too_long_name)),
        )
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class TcpUdpPortTestCase(APIValidationTestCase):
    """Validation of parameter_types.tcp_udp_port."""

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': parameter_types.tcp_udp_port,
        },
    }

    def test_validate_tcp_udp_port(self):
        # Ports may be given as integers or numeric strings.
        for value in (1024, '1024'):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value},
                                       req=FakeRequest()))

    def test_validate_tcp_udp_port_fails(self):
        cases = (
            (True, "Invalid input for field/attribute foo. Value: True."
                   " True is not of type 'integer', 'string'"),
            # 65535 is the highest valid port number.
            (65536, "Invalid input for field/attribute foo. Value: 65536."
                    " 65536(.0)? is greater than the maximum of 65535"),
        )
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class CidrFormatTestCase(APIValidationTestCase):
    """Validation of the custom 'cidr' string format."""

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'string',
                'format': 'cidr',
            },
        },
    }

    def test_validate_cidr(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': '192.168.10.0/24'},
                                   req=FakeRequest()))

    def test_validate_cidr_fails(self):
        cases = (
            ('bar', "Invalid input for field/attribute foo."
                    " Value: bar."
                    " 'bar' is not a 'cidr'"),
            ('', "Invalid input for field/attribute foo."
                 " Value: . '' is not a 'cidr'"),
            # A bare address without a prefix length is not a CIDR.
            ('192.168.1.0',
             "Invalid input for field/attribute foo."
             " Value: 192.168.1.0. '192.168.1.0' is not a 'cidr'"),
            # Whitespace before the prefix length is rejected.
            ('192.168.1.0 /24',
             "Invalid input for field/attribute foo."
             " Value: 192.168.1.0 /24."
             " '192.168.1.0 /24' is not a 'cidr'"),
        )
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class DatetimeTestCase(APIValidationTestCase):
    """Validation of the 'date-time' string format."""

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'string',
                'format': 'date-time',
            },
        },
    }

    def test_validate_datetime(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': '2014-01-14T01:00:00Z'},
                                   req=FakeRequest()))

    def test_validate_datetime_fails(self):
        cases = (
            # Month 13 is out of range.
            ('2014-13-14T01:00:00Z',
             "Invalid input for field/attribute foo."
             " Value: 2014-13-14T01:00:00Z."
             " '2014-13-14T01:00:00Z' is not a 'date-time'"),
            ('bar', "Invalid input for field/attribute foo."
                    " Value: bar. 'bar' is not a 'date-time'"),
            ('1', "Invalid input for field/attribute foo. Value: 1."
                  " '1' is not a 'date-time'"),
        )
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class UuidTestCase(APIValidationTestCase):
    """Validation of the 'uuid' string format."""

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'string',
                'format': 'uuid',
            },
        },
    }

    def test_validate_uuid(self):
        self.assertEqual(
            'Validation succeeded.',
            self.post(body={'foo': '70a599e0-31e7-49b7-b260-868f441e862b'},
                      req=FakeRequest()))

    def test_validate_uuid_fails(self):
        cases = (
            # Missing the hyphen separators.
            ('70a599e031e749b7b260868f441e862',
             "Invalid input for field/attribute foo."
             " Value: 70a599e031e749b7b260868f441e862."
             " '70a599e031e749b7b260868f441e862' is not a 'uuid'"),
            ('1', "Invalid input for field/attribute foo. Value: 1."
                  " '1' is not a 'uuid'"),
            ('abc', "Invalid input for field/attribute foo. Value: abc."
                    " 'abc' is not a 'uuid'"),
        )
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class UriTestCase(APIValidationTestCase):
    """Validation of the 'uri' string format."""

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'string',
                'format': 'uri',
            },
        },
    }

    def test_validate_uri(self):
        # Both hostname and bracketed-IPv6 authorities are accepted.
        for value in ('http://localhost:8774/v2/servers',
                      'http://[::1]:8774/v2/servers'):
            self.assertEqual('Validation succeeded.',
                             self.post(body={'foo': value},
                                       req=FakeRequest()))

    def test_validate_uri_fails(self):
        base_detail = ("Invalid input for field/attribute foo. Value: {0}. "
                       "'{0}' is not a 'uri'")
        # A double fragment, a malformed IPv6 authority, and plain
        # non-URI strings are all rejected.
        for invalid_uri in ('http://localhost:8774/v2/servers##',
                            'http://[fdf8:01]:8774/v2/servers',
                            '1',
                            'abc'):
            self.check_validation_error(
                self.post, body={'foo': invalid_uri},
                expected_detail=base_detail.format(invalid_uri))
class Ipv4TestCase(APIValidationTestCase):
    """Validation of the 'ipv4' string format."""

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'string',
                'format': 'ipv4',
            },
        },
    }

    def test_validate_ipv4(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': '192.168.0.100'},
                                   req=FakeRequest()))

    def test_validate_ipv4_fails(self):
        cases = (
            ('abc', "Invalid input for field/attribute foo. Value: abc."
                    " 'abc' is not a 'ipv4'"),
            # Hostnames are not dotted-quad addresses.
            ('localhost',
             "Invalid input for field/attribute foo. Value: localhost."
             " 'localhost' is not a 'ipv4'"),
            # IPv6 literals are rejected by the ipv4 format.
            ('2001:db8::1234:0:0:9abc',
             "Invalid input for field/attribute foo."
             " Value: 2001:db8::1234:0:0:9abc."
             " '2001:db8::1234:0:0:9abc' is not a 'ipv4'"),
        )
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class Ipv6TestCase(APIValidationTestCase):
    """Validation of the 'ipv6' string format."""

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'string',
                'format': 'ipv6',
            },
        },
    }

    def test_validate_ipv6(self):
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': '2001:db8::1234:0:0:9abc'},
                                   req=FakeRequest()))

    def test_validate_ipv6_fails(self):
        cases = (
            ('abc', "Invalid input for field/attribute foo. Value: abc."
                    " 'abc' is not a 'ipv6'"),
            ('localhost',
             "Invalid input for field/attribute foo. Value: localhost."
             " 'localhost' is not a 'ipv6'"),
            # IPv4 literals are rejected by the ipv6 format.
            ('192.168.0.100',
             "Invalid input for field/attribute foo."
             " Value: 192.168.0.100. '192.168.0.100' is not a 'ipv6'"),
        )
        for value, detail in cases:
            self.check_validation_error(self.post, body={'foo': value},
                                        expected_detail=detail)
class Base64TestCase(APIValidationTestCase):
    """Validation of the 'base64' string format."""

    post_schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'string',
                'format': 'base64',
            },
        },
    }

    def test_validate_base64(self):
        # 'aGVsbG8gd29ybGQ=' is the base64 code of 'hello world'.
        self.assertEqual('Validation succeeded.',
                         self.post(body={'foo': 'aGVsbG8gd29ybGQ='},
                                   req=FakeRequest()))

    def test_validate_base64_fails(self):
        bad = 'A random string'
        self.check_validation_error(
            self.post, body={'foo': bad},
            expected_detail=("Invalid input for field/attribute foo. "
                             "Value: %s. '%s' is not a 'base64'") % (bad, bad))
class RegexFormatTestCase(APIValidationTestCase):
    """Validation of the 'regex' string format via a fake controller."""

    def setUp(self):
        super(RegexFormatTestCase, self).setUp()
        self.controller = RegexFormatFakeController()

    def test_validate_regex(self):
        request = fakes.HTTPRequest.blank("")
        self.assertEqual(
            'Validation succeeded.',
            self.controller.post(request, body={'foo': u'Myserver'}))

    def test_validate_regex_fails(self):
        # A non-string value cannot be compiled as a regex.
        bad_value = 1
        request = fakes.HTTPRequest.blank("")
        detail = ("Invalid input for field/attribute foo. "
                  "Value: %s. %s is not a 'regex'") % (bad_value, bad_value)
        self.check_validation_error(self.controller.post, req=request,
                                    body={'foo': bad_value},
                                    expected_detail=detail)
| |
"""
PySQL
An abstraction layer for databases, accessible via a MySQL connection.
"""
__author__ = "Jon Botelho"
import SocketServer
import struct
import os
from types import StringType
import pymongo
import re
class MySQLPacket (object):
    """Base class for MySQL wire-protocol packets.

    A packet on the wire is a 4-byte header (3-byte little-endian
    payload length + 1-byte sequence number) followed by the payload.
    """

    def __init__ (self, data=None, length=0, number=0):
        """For creating outgoing packets.

        :param data: payload; when given, ``length`` is derived from it
            and the ``length`` argument is ignored.
        :param length: explicit payload length (used only without data).
        :param number: packet sequence number (0-255).
        """
        # Auto-fill the length if data is provided
        if data:
            self.data = data
            self.length = len(data)
        else:
            self.length = length
        self.number = number

    @staticmethod
    def _recv_exact (sock, size):
        """Read exactly ``size`` bytes from ``sock``.

        ``sock.recv(n)`` may legally return fewer than ``n`` bytes, so
        keep reading until everything has arrived.  Raises ``EOFError``
        if the peer closes the connection first.
        """
        chunks = []
        remaining = size
        while remaining:
            chunk = sock.recv(remaining)
            if not chunk:
                raise EOFError("Connection closed while reading packet")
            chunks.append(chunk)
            remaining -= len(chunk)
        return b"".join(chunks)

    @classmethod
    def fromSocket (cls, sock, get_data=False):
        """For reading incoming packets.

        :param get_data: when True, also read ``length`` payload bytes
            into ``packet.data``.
        """
        # Make a new instance of this class
        packet = cls()
        # Header: 3-byte little-endian length + 1-byte sequence number.
        # struct has no 3-byte code, so read the low 16 bits and the
        # high byte separately and recombine.
        header = cls._recv_exact(sock, 4)
        packet.length, length_byte3, packet.number = \
            struct.unpack("< HB B", header)
        packet.length += length_byte3 << 16
        # Bug fix: the sequence number used to be stored only as
        # ``packet_number``, leaving ``number`` (used by __str__) stuck
        # at 0.  Keep the old attribute as an alias for compatibility.
        packet.packet_number = packet.number
        if get_data:
            # Read the packet's data, now that we have the length
            packet.data = cls._recv_exact(sock, packet.length)
        # We're done; return the complete packet
        return packet

    def __str__ (self):
        """ Encode the packet for sending. """
        self.length = len(self.data)
        return struct.pack(
            "< H B B",
            self.length & 0xFFFF,
            self.length >> 16,
            self.number
        ) + self.data
class GreetingPacket (MySQLPacket):
    """
    Sent to the client as soon as they connect.
    Also known as the Handshake Initialization Packet.
    """

    def __init__ (self, protocol_version=10, server_version="5.1.53 - log",
                  thread_id=11578506, salt=None, server_capabilities=0xF7FF,
                  charset=8, server_status=0x0002):
        """
        Make a new greeting packet.

        The defaults are:
            protocol_version/server_version: MySQL 5.1
            salt: Auto-generated, 20 bytes
            server_capabilities: Everything supported except SSL
            charset: latin1 COLLATE latin1_swedish_ci (8)
        """
        # Use the caller's salt only if it coerces to exactly 20 bytes;
        # otherwise generate a random one.  (Replaces a bare try/except
        # around an assert, which hid real errors and vanished under -O.)
        if salt is not None:
            try:
                salt = str(salt)
            except Exception:
                salt = None
        if salt is None or len(salt) != 20:
            salt = os.urandom(20)
        self.salt = salt
        # Fill in all the other values
        self.number = 0
        self.protocol_version = protocol_version
        self.server_version = server_version
        self.thread_id = thread_id
        self.server_capabilities = server_capabilities
        self.charset = charset
        self.server_status = server_status

    def __str__ (self):
        """ Encode the packet for sending. """
        # Payload layout: protocol version, NUL-terminated server
        # version, thread id, scramble part 1 (8 bytes + NUL),
        # capability flags, charset, server status, 13 filler bytes,
        # scramble part 2 (12 bytes + NUL).
        # Bug fix: the second scramble chunk used "8s", which silently
        # truncated salt[8:20] (12 bytes) down to 8 on the wire.
        self.data = struct.pack(
            "< B %ssB I 8sB H B H 13s 12sB" % len(self.server_version),
            self.protocol_version,
            self.server_version, 0,
            self.thread_id,
            self.salt[:8], 0,
            self.server_capabilities,
            self.charset,
            self.server_status,
            "\0" * 13,
            self.salt[8:20], 0
        )
        # Return the encoded packet
        return super(GreetingPacket, self).__str__()
class LoginRequestPacket (MySQLPacket):
    """
    Sent by the client for authentication.
    Tends to be the first packet sent by the client,
    right after the Greeting Packet.
    """

    @classmethod
    def fromSocket (cls, sock):
        """ For reading incoming packets. """
        # Get some basic information from our parent class,
        # and use the resulting object here-on
        packet = super(LoginRequestPacket, cls).fromSocket(sock)
        # Decode the fixed-size first part of the packet
        packet.client_capabilities, packet.extended_client_capabilities,\
            packet.max_packet_size, packet.charset \
            = struct.unpack("< H H I B", sock.recv(9))
        # The second part is tricky because it contains null-terminated strings
        rest = sock.recv(packet.length - 9)
        # Get the null-terminated username string.
        # Bug fix: the old code applied [0] to the split result, which
        # discarded the remainder and broke the two-name unpacking.
        packet.username, rest = rest.split("\0", 1)
        # The password is an SHA-1 hash (20-bytes/160-bits)
        packet.password = rest[:20]
        # The last thing is the schema name (null-terminated)
        packet.schema = rest[20:-1]
        # We're done; return the filled-in packet object
        return packet
class OKPacket (MySQLPacket):
    """
    Sent as a response to packets sent from the client.
    """

    def __init__ (self, number=1, field_count=0, affected_rows=0,
                  insert_id=0, server_status=0, warnings=0):
        # TODO: Change numeric fields to use length coded binary
        self.number = number
        self.field_count = field_count
        self.affected_rows = affected_rows
        self.insert_id = insert_id
        self.server_status = server_status
        self.warnings = warnings

    def __str__ (self):
        """ Encode the packet for sending. """
        # Payload: three single-byte counters followed by two
        # little-endian 16-bit status words.
        fields = (self.field_count, self.affected_rows, self.insert_id,
                  self.server_status, self.warnings)
        self.data = struct.pack("< B B B H H", *fields)
        # Let the base class prepend the length/sequence header.
        return super(OKPacket, self).__str__()
class CommandPacket(MySQLPacket):
    """
    A packet containing a command from the client.
    """

    # Known command codes; anything else is reported as "Unknown".
    commands = {
        1: "Quit",
        3: "Query"
    }

    @classmethod
    def fromSocket (cls, sock):
        """ For reading incoming packets. """
        # Read the generic header via the parent class, then keep
        # filling in the same object.
        packet = super(CommandPacket, cls).fromSocket(sock)
        # Decode the command type (a single byte)
        (packet.command,) = struct.unpack("< B", sock.recv(1))
        # Fill in a command description (for convenience)
        packet.description = cls.commands.get(packet.command, "Unknown")
        # Decode the actual command/statement (usually SQL)
        packet.statement = sock.recv(packet.length - 1)
        # We're done; return the completed command object
        return packet
class ResultSetHeaderPacket(MySQLPacket):
    """
    Describes a result set.
    """

    def __init__ (self, number=1, field_count=0):
        # TODO: Change field count to use coded binary
        self.number = number
        self.field_count = field_count

    def __str__ (self):
        """ Encode the packet for sending. """
        # Payload is just the single-byte column count.
        self.data = struct.pack("B", self.field_count)
        return super(ResultSetHeaderPacket, self).__str__()
class FieldPacket(MySQLPacket):
    """
    Describes a field/column in a result set.
    """

    def __init__ (self, number=1, catalog="def", database="",
                  table="", original_table="",
                  name="", original_name="",
                  charset=8, length=255, type=254,  # Type 254 is String/VARCHAR
                  flags=0, decimals=0, default=0):
        # TODO: Change numeric values to use length coded binary
        # TODO: Change the text values to be length coded strings
        self.number = number
        self.catalog = catalog
        self.database = database
        self.table = table
        # Fall back to the current table/name when the "original"
        # variants are not supplied.
        self.original_table = original_table or self.table
        self.name = name
        self.original_name = original_name or self.name
        self.charset = charset
        self.length = length
        self.type = type
        self.flags = flags
        self.decimals = decimals
        self.default = default

    def __str__ (self):
        """ Encode the packet for sending. """
        # String fields first, each prefixed with a one-byte length.
        # TODO: Do real length coding on these
        parts = []
        for attr in ("catalog", "database", "table", "original_table",
                     "name", "original_name"):
            text = getattr(self, attr)
            parts.append(struct.pack("B", len(text)))
            # Make sure there's no unicode
            parts.append(str(text))
        # Then the fixed-size numeric fields.
        parts.append(struct.pack(
            "< B H I B H B H B B",
            0,
            self.charset,
            self.length,
            self.type,
            self.flags,
            self.decimals,
            0,
            self.default,
            0
        ))
        self.data = "".join(parts)
        # Return the encoded packet
        return super(FieldPacket, self).__str__()
class EOFPacket(MySQLPacket):
    """
    Signals the end of a series of field packets.
    """

    def __init__ (self, number=1, warnings=0, server_status=0):
        self.number = number
        self.warnings = warnings
        self.server_status = server_status

    def __str__ (self):
        """ Encode the packet for sending. """
        # Payload: the 0xFE EOF marker byte followed by the warning
        # count and the server status flags (16 bits each).
        self.data = struct.pack("< B H H", 0xFE, self.warnings,
                                self.server_status)
        return super(EOFPacket, self).__str__()
class RowDataPacket(MySQLPacket):
    """
    Carries one row of result-set values back to the client.

    (The previous docstring was copy-pasted from EOFPacket.)
    """

    def __init__ (self, number=1, values=None):
        # Bug fix: avoid a mutable default argument -- a shared [] would
        # be visible across every RowDataPacket created without values.
        self.number = number
        self.values = [] if values is None else values

    def __str__ (self):
        """ Encode the packet for sending. """
        # Encode the packet's payload
        # TODO: Do length encoded strings for real
        # TODO: Figure out how to encode numbers, NULLs, etc.
        self.data = ""
        for value in self.values:
            self.data += struct.pack("B", len(value))
            self.data += value
        # Return the encoded packet
        return super(RowDataPacket, self).__str__()
class ResultSet(object):
    """ Used to send a set of results back to the client. """

    def __init__ (self, columns=None, rows=None, table="", database=""):
        # Bug fix: use None sentinels instead of mutable [] defaults so
        # separate ResultSet instances never share the same lists.
        self.columns = [] if columns is None else columns
        self.rows = [] if rows is None else rows
        self.table = table
        self.database = database

    def toPackets (self):
        """
        Gets a list of MySQL packets used to send this
        result set to the client.

        Packet order: header, one field packet per column, EOF, one
        row-data packet per row, EOF.  Sequence numbers start at 1 and
        increase monotonically.
        """
        # First make a header packet
        i = 1
        packets = [ResultSetHeaderPacket(i, len(self.columns))]
        # Then the field packets for each column
        for col in self.columns:
            i += 1
            packets.append(FieldPacket(i,
                                       database=self.database,
                                       table=self.table,
                                       name=col))
        # Then an EOF
        i += 1
        packets.append(EOFPacket(i))
        # Then the row data
        for row in self.rows:
            i += 1
            packets.append(RowDataPacket(i, row))
        # Then another EOF to finish it off
        i += 1
        packets.append(EOFPacket(i))
        return packets

    def __str__ (self):
        # Concatenate every packet's wire encoding.
        return "".join([str(p) for p in self.toPackets()])
class Query(object):
    """Base class for parsed SQL query types (see SelectQuery)."""
    pass
# Matches a backtick-quoted identifier or a bare word (letters, digits,
# "$" and "_"), e.g. `my table` or user_id.
SQL_IDENTIFIER_REGEX = "(?:`[^`]+`|[$\w]+)"
# TODO: Add support for quote escapes
# TODO: Add support for floats and hex numbers
# Matches an integer literal or a single-/double-quoted string literal.
SQL_VALUE_REGEX = "(?:[0-9]+|'[^']+'|\"[^\"]+\")"
# NOTE(review): placeholder -- WHERE conditions are not tokenised via
# this regex yet.
SQL_CONDITION_REGEX = ""
# Named fragments for str.format() substitution in statement regexes
# (used by SelectQuery.statement_regex).
SQL_REGEX_DICT = {
    "ident": SQL_IDENTIFIER_REGEX,
    "value": SQL_VALUE_REGEX
}
# Star import supplying SQL_ESCAPED_OPERATORS (used just below) plus
# SQL_KEYWORDS_REGEX / SQL_KEYWORDS / sql_parsers used further down.
# NOTE(review): a mid-file star import -- assumed intentional; confirm
# sql_constants exports nothing that shadows local names.
from sql_constants import *
# Alternation of all escaped operator patterns, for the token scanner.
SQL_OPERATORS_REGEX = "|".join(SQL_ESCAPED_OPERATORS)
class SQLStatement(object):
    """Wraps a raw SQL string and tokenises it on demand."""

    def __init__ (self, statement):
        self.statement = statement

    def scan (self):
        """
        Scans through the SQL statement, and splits it up
        into tokens like "FROM", "=", "(", etc.
        """
        # Each recognised lexeme is tagged with its token category;
        # whitespace is dropped (action None).  Rule order matters:
        # operators and keywords take precedence over identifiers.
        def tag (kind):
            return lambda scanner, token: (kind, token)

        scanner = re.Scanner([
            (SQL_OPERATORS_REGEX, tag("operator")),
            (SQL_KEYWORDS_REGEX, tag("keyword")),
            (SQL_VALUE_REGEX, tag("value")),
            (SQL_IDENTIFIER_REGEX, tag("identifier")),
            ("\s*", None)
        ])
        # Returns (token_list, unmatched_remainder).
        return scanner.scan(self.statement)
class SelectQuery(Query):
    """ Respresents an SQL SELECT query. """

    # Matches only "SELECT * FROM <table> [WHERE <ident> = <value>];".
    # Column lists, joins and compound conditions are not supported.
    statement_regex = re.compile((
        "^\s*SELECT\s+" +
        "\*\s+" +
        "(FROM)\s+" +
        "(?P<table>{ident})" +
        "(?:\s+(WHERE)" +
        "\s+({ident})\s*(=)\s*({value})" +
        ")?\s*;?\s*"
    ).format(**SQL_REGEX_DICT), re.I | re.S | re.M)

    def __init__ (self, statement):
        # Make sure we have a SELECT query
        # NOTE(review): the error message is missing "is not" -- it
        # should read "The given statement is not a SELECT query."
        if not statement.lower().startswith("select"):
            raise ValueError("The given statement a SELECT query.")
        self.statement = statement

    def execute (self):
        # Figure out what the query actually means.
        # re.Scanner.scan() returns (tokens, unmatched_remainder); a
        # non-empty remainder means the statement could not be fully
        # tokenised.
        tokens = SQLStatement(self.statement).scan()
        if tokens[1]:
            print "Could not parse statement. Error around: %s" % tokens[1]
            return
        tokens = tokens[0]
        print "Tokens: %s" % (tokens,)
        keyword = ""
        arguments = []
        parsed = []
        i = 0
        for token_type, token_value in tokens:
            # TODO: Rework this to loop through without the i += 1 nonsense
            # Should scan through, keep adding on to keyword args array
            # until another keyword is encountered. Then, parse the args.
            # Check if we have a keyword
            if token_value.upper() in SQL_KEYWORDS or token_value == ";":
                # The there was a previous keyword,
                # We need to parse its arguments now before moving on.
                if keyword:
                    # NOTE(review): sql_parsers is assumed to come from
                    # the sql_constants star import -- confirm.
                    if hasattr(sql_parsers, keyword):
                        result = getattr(sql_parsers, keyword)(arguments)
                        parsed.append((keyword, result))
                    else:
                        raise Exception("No parser found for %s" % keyword)
            else:
                # NOTE(review): += with a tuple extends the list with
                # two scalar items; arguments.append((token_type,
                # token_value)) was probably intended.
                arguments += (token_type, token_value)
            keyword = token_value.upper()
        # TODO: Get a table back out here
        # TODO: Get the conditions
        # NOTE(review): ``table`` and ``conditions`` are never assigned
        # in this method -- reaching this line raises NameError.  The
        # method is clearly unfinished (see TODOs above).
        return table, conditions
class MySQLServerSession(object):
    """Drives one client connection: performs the MySQL handshake, then
    loops translating simple SELECT statements into MongoDB finds and
    sending the results back as MySQL result-set packets."""

    def __init__ (self, sock, client):
        # sock: connected TCP socket; client: (host, port) address tuple.
        self.socket = sock
        # Protocol requires the server to greet first.
        self.socket.send(str(GreetingPacket()))
        # Auth isn't working for now
        #packet = LoginRequestPacket.fromSocket(self.socket)
        # Read (and discard) whatever login packet the client sends,
        # then unconditionally accept the login.
        packet = MySQLPacket.fromSocket(self.socket, get_data=True)
        self.socket.send(str(OKPacket(number=2)))
        print "Connected to %s" % (client[0])
        # NOTE(review): pymongo.Connection is long deprecated (MongoClient
        # replaced it); also `mongo_coll` is actually a *database* handle,
        # not a collection.
        mongo = pymongo.Connection()
        mongo_coll = mongo["pysql_test"]
        print "Brought up MongoDB Connection"
        # Now just sit here receiving commands all day
        while True:
            command = CommandPacket.fromSocket(self.socket)
            print "%s: %s" % (command.description, command.statement)
            if command.statement.lower().find("select") != -1:
                """cols = ["Name", "City"]
                rows = [["Jon", "NYC"], ["GMP", "Worc"]]
                rs = ResultSet(cols, rows, "test_table", "test")
                self.socket.send(str(rs))"""
                query_info = SelectQuery(command.statement).execute()
                if not query_info:
                    # Unparseable SELECT: acknowledge and end the session.
                    print "Unsupported SELECT query."
                    self.socket.send(str(OKPacket()))
                    return
                table, cond = query_info
                if len(cond):
                    # Only the first condition is honored.
                    cond = cond[0]
                    print "Running db.%s.find(%s)" % (table, cond)
                    results = mongo_coll[table].find(cond)
                else:
                    print "Running db.%s.find()" % table
                    results = mongo_coll[table].find()
                # If we got any results, send them back
                if results.count():
                    # Convert everything to MySQL format
                    # Start by getting a list of all the columns
                    cols = results[0].keys()
                    # Delete the _id key to avoid issues for now
                    cols = [x for x in cols if x != "_id"]
                    # Put together all the dictionaries into flat rows
                    rows = []
                    for res in results:
                        data = []
                        for c in cols:
                            data.append(res[c])
                        rows.append(data)
                    print "Rows: %s" % rows
                    print "Cols: %s" % cols
                    # Turn it into actual packets and send it out
                    rs = ResultSet(cols, rows, table, "pysql_test")
                    self.socket.send(str(rs))
                else:
                    self.socket.send(str(OKPacket()))
            else:
                # Non-SELECT commands are blindly acknowledged.
                self.socket.send(str(OKPacket()))
class MyTCPHandler(SocketServer.BaseRequestHandler):
    """Per-connection request handler.

    SocketServer instantiates one of these for every incoming
    connection; handle() hands the accepted socket off to
    MySQLServerSession, which runs the whole session.
    """

    def handle(self):
        MySQLServerSession(self.request, self.client_address)
        print("Done.")
if __name__ == "__main__":
    # Listen on all interfaces on the standard MySQL port.
    HOST, PORT = "0.0.0.0", 3306
    # Create the server, binding to 0.0.0.0 on port 3306
    server = SocketServer.TCPServer((HOST, PORT), MyTCPHandler)
    # Activate the server; this will keep running until you
    # interrupt the program with Ctrl-C
    server.serve_forever()
| |
from concurrent.futures import Future
import pytest
from ray.util.client import _ClientContext
from ray.util.client.common import ClientActorRef, ClientObjectRef
from ray.util.client.ray_client_helpers import ray_start_client_server
from ray.util.client.ray_client_helpers import (
ray_start_client_server_pair,
ray_start_cluster_client_server_pair,
)
from ray._private.test_utils import wait_for_condition, object_memory_usage
import ray as real_ray
from ray.core.generated.gcs_pb2 import ActorTableData
from ray._raylet import ActorID, ObjectRef
def test_client_object_ref_basics(ray_start_regular):
    """ClientObjectRef must look and behave like a server-side ObjectRef.

    FIX: removed two dead assignments that computed the full __dir__()
    member sets and were immediately overwritten by the filtered versions.
    """
    with ray_start_client_server_pair() as pair:
        ray, server = pair
        ref = ray.put("Hello World")
        # Make sure ClientObjectRef is a subclass of ObjectRef
        assert isinstance(ref, ClientObjectRef)
        assert isinstance(ref, ObjectRef)

        # Invalid ref format.
        with pytest.raises(Exception):
            ClientObjectRef(b"\0")

        # A ClientObjectRef can be built from raw id bytes or from a
        # Future that resolves to the id; both must behave identically.
        obj_id = b"\0" * 28
        fut = Future()
        fut.set_result(obj_id)
        server_ref = ObjectRef(obj_id)
        for client_ref in [ClientObjectRef(obj_id), ClientObjectRef(fut)]:
            # Compare public members only; the client ref may expose the
            # extra "id" member and nothing else may differ.
            client_members = {m for m in client_ref.__dir__() if not m.startswith("_")}
            server_members = {m for m in server_ref.__dir__() if not m.startswith("_")}
            assert client_members.difference(server_members) == {"id"}
            assert server_members.difference(client_members) == set()

            # Test __eq__()
            assert client_ref == ClientObjectRef(obj_id)
            assert client_ref != ref
            assert client_ref != server_ref

            # Test other methods
            assert client_ref.__repr__() == f"ClientObjectRef({obj_id.hex()})"
            assert client_ref.binary() == obj_id
            assert client_ref.hex() == obj_id.hex()
            assert not client_ref.is_nil()
            assert client_ref.task_id() == server_ref.task_id()
            assert client_ref.job_id() == server_ref.job_id()
def test_client_actor_ref_basics(ray_start_regular):
    """Check that ClientActorRef mirrors the server-side ActorID API."""
    with ray_start_client_server_pair() as (ray, server):

        @ray.remote
        class Counter:
            def __init__(self):
                self.acc = 0

            def inc(self):
                self.acc += 1

            def get(self):
                return self.acc

        counter = Counter.remote()
        handle = counter.actor_ref
        # ClientActorRef must be usable anywhere an ActorID is expected.
        assert isinstance(handle, ClientActorRef)
        assert isinstance(handle, ActorID)

        # A malformed id must be rejected.
        with pytest.raises(Exception):
            ClientActorRef(b"\0")

        # A ClientActorRef can be built from raw id bytes or from a
        # Future resolving to the id; both must behave identically.
        raw_id = b"\0" * 16
        id_future = Future()
        id_future.set_result(raw_id)
        server_side = ActorID(raw_id)
        for cref in (ClientActorRef(raw_id), ClientActorRef(id_future)):
            # Public members may differ only by the extra "id" attribute.
            visible = lambda obj: {m for m in obj.__dir__() if not m.startswith("_")}
            assert visible(cref).difference(visible(server_side)) == {"id"}
            assert visible(server_side).difference(visible(cref)) == set()

            # Equality semantics.
            assert cref == ClientActorRef(raw_id)
            assert cref != handle
            assert cref != server_side

            # Accessors mirror the server-side type.
            assert cref.__repr__() == f"ClientActorRef({raw_id.hex()})"
            assert cref.binary() == raw_id
            assert cref.hex() == raw_id.hex()
            assert not cref.is_nil()
def server_object_ref_count(server, n):
    """Build a predicate that is true when the proxy server tracks
    exactly *n* object refs for its (single) connected client."""
    assert server is not None

    def _predicate():
        refs_by_client = server.task_servicer.object_refs
        if not refs_by_client:
            # No open clients: only n == 0 can match.
            return n == 0
        first_client = next(iter(refs_by_client.keys()))
        return len(refs_by_client[first_client]) == n

    return _predicate
def server_actor_ref_count(server, n):
    """Build a predicate that is true when the proxy server tracks
    exactly *n* live actor refs."""
    assert server is not None

    def _predicate():
        actor_refs = server.task_servicer.actor_refs
        # With no running actors, only n == 0 matches.
        return n == 0 if not actor_refs else len(actor_refs) == n

    return _predicate
@pytest.mark.parametrize(
    "ray_start_cluster",
    [
        {
            "num_nodes": 1,
            "do_init": False,
        }
    ],
    indirect=True,
)
def test_delete_refs_on_disconnect(ray_start_cluster):
    """Object refs held by a client must be released server-side when the
    client disconnects, and cluster object memory must drain to zero."""
    cluster = ray_start_cluster
    with ray_start_cluster_client_server_pair(cluster.address) as pair:
        ray, server = pair

        @ray.remote
        def f(x):
            return x + 2

        thing1 = f.remote(6)  # noqa
        thing2 = ray.put("Hello World")  # noqa

        # One put, one function -- the function result thing1 is
        # in a different category, according to the raylet.
        # But we're maintaining the reference
        assert server_object_ref_count(server, 3)()

        # And can get the data
        assert ray.get(thing1) == 8

        # Close the client.
        ray.close()

        # Disconnect must drop all three tracked refs.
        wait_for_condition(server_object_ref_count(server, 0), timeout=5)

        # Connect to the real ray again, since we disconnected
        # upon num_clients = 0.
        real_ray.init(address=cluster.address, namespace="default_test_namespace")

        def test_cond():
            return object_memory_usage() == 0

        wait_for_condition(test_cond, timeout=5)
def test_delete_ref_on_object_deletion(ray_start_regular):
    """Dropping the last client-side reference to an object must delete the
    server-side tracking entry for it (and leave other refs alone)."""
    with ray_start_client_server_pair() as pair:
        ray, server = pair
        vals = {
            "ref": ray.put("Hello World"),
            "ref2": ray.put("This value stays"),
        }
        # Deleting one of the two refs should drop the server count to 1.
        del vals["ref"]

        wait_for_condition(server_object_ref_count(server, 1), timeout=5)
@pytest.mark.parametrize(
    "ray_start_cluster", [{"num_nodes": 1, "do_init": False}], indirect=True
)
def test_delete_actor_on_disconnect(ray_start_cluster):
    """Actors owned by a client must be torn down when the client
    disconnects, eventually showing as DEAD in the GCS actor table."""
    cluster = ray_start_cluster
    with ray_start_cluster_client_server_pair(cluster.address) as pair:
        ray, server = pair

        @ray.remote
        class Accumulator:
            def __init__(self):
                self.acc = 0

            def inc(self):
                self.acc += 1

            def get(self):
                return self.acc

        actor = Accumulator.remote()
        actor.inc.remote()

        assert server_actor_ref_count(server, 1)()

        assert ray.get(actor.get.remote()) == 1

        ray.close()

        # Disconnect must release the server-side actor ref.
        wait_for_condition(server_actor_ref_count(server, 0), timeout=5)

        def test_cond():
            # An actor counts as gone once its GCS state is DEAD.
            alive_actors = [
                v
                for v in real_ray.state.actors().values()
                if v["State"] != ActorTableData.DEAD
            ]
            return len(alive_actors) == 0

        # Connect to the real ray again, since we disconnected
        # upon num_clients = 0.
        real_ray.init(address=cluster.address, namespace="default_test_namespace")

        wait_for_condition(test_cond, timeout=10)
def test_delete_actor(ray_start_regular):
    """Deleting one of two actor handles must reduce the server-side
    actor ref count from 2 to 1."""
    with ray_start_client_server_pair() as pair:
        ray, server = pair

        @ray.remote
        class Accumulator:
            def __init__(self):
                self.acc = 0

            def inc(self):
                self.acc += 1

        actor = Accumulator.remote()
        actor.inc.remote()
        actor2 = Accumulator.remote()
        actor2.inc.remote()

        assert server_actor_ref_count(server, 2)()

        del actor

        wait_for_condition(server_actor_ref_count(server, 1), timeout=5)
def test_simple_multiple_references(ray_start_regular):
    """An object put inside an actor must stay alive as long as ANY
    returned reference to it exists, even after the actor is deleted."""
    with ray_start_client_server() as ray:

        @ray.remote
        class A:
            def __init__(self):
                self.x = ray.put("hi")

            def get(self):
                return [self.x]

        a = A.remote()
        ref1 = ray.get(a.get.remote())[0]
        ref2 = ray.get(a.get.remote())[0]
        # The deletion order below is the point of the test: the value
        # must remain retrievable while at least one ref survives.
        del a
        assert ray.get(ref1) == "hi"
        del ref1
        assert ray.get(ref2) == "hi"
        del ref2
def test_named_actor_refcount(ray_start_regular):
    """Each client that fetches a named detached actor is tracked as an
    owner; disconnecting owners must not destroy the detached actor."""
    with ray_start_client_server_pair() as (ray, server):

        @ray.remote
        class ActorTest:
            def __init__(self):
                self._counter = 0

            def bump(self):
                self._counter += 1

            def check(self):
                return self._counter

        # Detached + named: must survive its creator's disconnect.
        ActorTest.options(name="actor", lifetime="detached").remote()

        def connect_api():
            # Open an independent client connection and fetch the actor.
            api = _ClientContext()
            api.connect("localhost:50051", namespace="default_test_namespace")
            api.get_actor("actor")
            return api

        def check_owners(size):
            # Total owner entries across all connected clients.
            return size == sum(
                len(x) for x in server.task_servicer.actor_owners.values()
            )

        apis = [connect_api() for i in range(3)]
        assert check_owners(3)
        assert len(server.task_servicer.actor_refs) == 1
        assert len(server.task_servicer.named_actors) == 1

        # Owners drop one at a time as each client disconnects.
        apis.pop(0).disconnect()
        assert check_owners(2)
        assert len(server.task_servicer.actor_refs) == 1
        assert len(server.task_servicer.named_actors) == 1

        apis.pop(0).disconnect()
        assert check_owners(1)
        assert len(server.task_servicer.actor_refs) == 1
        assert len(server.task_servicer.named_actors) == 1

        apis.pop(0).disconnect()
        # no more owners should be seen
        assert check_owners(0)
        # actor refs shouldn't be removed
        assert len(server.task_servicer.actor_refs) == 1
        assert len(server.task_servicer.named_actors) == 1
if __name__ == "__main__":
    # Allow running this file directly; extra CLI args pass through to pytest.
    import sys
    import pytest

    sys.exit(pytest.main(["-v", __file__] + sys.argv[1:]))
| |
import sys
import os
from nlpReaders.annotated_text import AnnotatedText as A
from nlpReaders.parc_reader import AnnotatedText as B
from parc_reader.parc_reader.reader import ParcCorenlpReader as P
from nltk.tree import *
import csv
#fixes all the omnipresent unicode issues
# NOTE(review): reload(sys)/setdefaultencoding is a Python-2-only hack that
# changes process-wide string coercion; it does not exist in Python 3.
print sys.getdefaultencoding()
reload(sys)
sys.setdefaultencoding('utf-8')
#change this if you would only like to do a certain number of files, useful for testing
maxNumFiles = 1000
#base dir for all data files
data_dir = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '../..', 'data/'))
#store the WordNet generated lists of hyponyms
with open(data_dir + '/peopleHyponyms.csv', 'rb') as f:
    reader = csv.reader(f)
    peopleHyponyms = list(reader)
with open(data_dir + '/orgHyponyms.csv', 'rb') as f:
    reader = csv.reader(f)
    orgHyponyms = list(reader)
with open(data_dir + '/nounCues.csv', 'rb') as f:
    reader = csv.reader(f)
    nounCues = list(reader)
# Each CSV holds a single row; unwrap each to a flat list of words.
peopleHyponyms = peopleHyponyms[0]
orgHyponyms = orgHyponyms[0]
nounCues = nounCues[0]
def openDirectory(datapath):
    """Collect the files directly under *datapath*, plus everything inside
    its immediate subdirectories (descends exactly one level)."""
    collected = []
    for entry in os.listdir(datapath):
        full_path = os.path.join(datapath, entry)
        if os.path.isdir(full_path):
            # One level down: take every entry of the subdirectory.
            for nested in os.listdir(full_path):
                collected.append(os.path.join(full_path, nested))
        elif os.path.isfile(full_path):
            collected.append(full_path)
    return collected
def openVerbCues(verbCuesFile):
    """Load the tagged verb-cue CSV (relative to data_dir) and unpack each
    row's packed metadata column.

    Returns a list of [verb, sentenceID, tokenID, fileName, label] rows.
    """
    with open(os.path.join(data_dir, verbCuesFile), 'rb') as handle:
        raw_rows = list(csv.reader(handle))
    unpacked = []
    for entry in raw_rows:
        # Column 1 packs "sentID;tokID;fileName" into a single field.
        metadata = entry[1].split(';')
        unpacked.append([entry[0], metadata[0], metadata[1], metadata[2], entry[2]])
    return unpacked
#get the files, open them, extract verbs and features and create a large array of rows
def findFiles(listOfNLPFiles, listOfAnnotatedFiles, listOfRawFiles, output, verbCuesFile):
    """For every CoreNLP file, locate its PARC annotation and raw-text
    counterparts, extract per-token feature rows, and write them all to
    *output* (relative to data_dir).

    NOTE(review): when called with listOfAnnotatedFiles=None (the
    -unlabelled path in main), the list comprehension over it raises
    TypeError -- verify against callers. Also, flagNoLabels is set True
    when an annotated file IS found, which contradicts its name.
    """
    flagNoLabels = False
    myRows = []
    j = 0
    verbList = openVerbCues(verbCuesFile)
    #open each NLP File
    for myFile in listOfNLPFiles:
        #in case you want a minimum file number
        # NOTE(review): `j < -1` can never be true; this skip branch is
        # dead code unless the -1 is hand-edited for debugging runs.
        if j < -1:
            j = j + 1
            continue
        else:
            files = len(listOfNLPFiles)
            filename = myFile.split('/')[-1]
            fileNoXML = filename.split('.xml')[0]
            print filename
            myAnnotatedFile = None
            #extract the PARC filename that match the title of the NLP filename
            myAnnotatedFile = [s for s in listOfAnnotatedFiles if filename in s]
            myRawFile = [s for s in listOfRawFiles if fileNoXML in s][0]
            print myAnnotatedFile
            if len(myAnnotatedFile) == 1:
                myAnnotatedFile = myAnnotatedFile[0]
                flagNoLabels = True
            else:
                #didn't find a file (or found several ambiguous matches)
                print 'error opening Annotated File'
                continue
            print('opening file: ' + myFile + ' ' + str(j) + ' out of ' + str(files))
            #extract the verbs whose metadata filename matches this NLP file
            specificFileVerbs = []
            for verb in verbList:
                if (verb[3] == filename):
                    specificFileVerbs.append(verb)
            #open the file, extract the features and return all the rows
            fileRows = openFile(myFile, myAnnotatedFile, myRawFile, specificFileVerbs)
            myRows += fileRows
            numTokens = len(myRows)
            print 'number of total tokens: ' + str(numTokens)
            j = j + 1
            # Early stop for testing runs (see maxNumFiles above).
            if j == maxNumFiles:
                break
    # Truncate any previous output, then write all collected rows.
    open(os.path.join(data_dir, output), 'w').close()
    writeToTXT(myRows, os.path.join(data_dir, output), flagNoLabels)
def openFile(coreNLPFileName, annotatedFileName, raw_file, verbList):
rows = []
#open annotated if it exists
if annotatedFileName != None:
try:
parc_xml = open(annotatedFileName).read()
corenlp_xml = open(coreNLPFileName).read()
raw_text = open(raw_file).read()
annotated_text = A(corenlp_xml)
article = P(corenlp_xml, parc_xml, raw_text)
filename = coreNLPFileName.split('/')[-1]
rows = findFeatures(filename, article, annotated_text, verbList, annotatedFileName)
rows += rows
return rows
except:
print 'error opening file'
return rows
else:
print 'here'
parc_xml = None
corenlp_xml = open(coreNLPFileName).read()
raw_text = open(raw_file).read()
annotated_text = A(corenlp_xml)
article = P(corenlp_xml, parc_xml, raw_text)
filename = coreNLPFileName.split('/')[-1]
rows = findFeatures(filename, article, annotated_text, verbList, annotatedFileName)
print rows
rows += rows
return rows
def writeToTXT(rows, filename, flagNoLabels):
    """Write one tab-separated feature row per line to *filename*.

    When flagNoLabels is True, each row's trailing metadata column is
    stripped out and written (together with the row's word[0] feature)
    to a sidecar file named METADATA<basename>; that sidecar is used to
    reconstitute the spans once CRFsuite gets through them.

    FIX: removed the redundant explicit close() calls inside the `with`
    blocks (the context manager already closes the file).
    """
    #if the data is unlabelled, we create a second metadata file that stores the word
    #as well as the filename and sentence ID
    if flagNoLabels:
        newRows = []
        metadataRows = []
        token = ''
        for row in rows:
            columns = row.split('\t')
            # The metadata column is always the last field.
            metadata = columns[-1]
            for column in columns:
                # word[0]= is the current-token feature; exclude the pair
                # feature word[-1]|word[0]= which also contains that text.
                if 'word[0]=' in column and 'word[-1]|word[0]=' not in column:
                    token = column
            metadataRows.append(metadata + '\t' + token)
            del columns[-1]
            newRows.append('\t'.join(columns))
        rows = newRows
        #make a new filename with METADATA in it
        pathParts = filename.split('/')
        pathParts[-1] = 'METADATA' + pathParts[-1]
        metafile = '/'.join(pathParts)
        with open(metafile, 'w') as myfile:
            for row in metadataRows:
                myfile.write(row + '\n')
    #write all the tokens and their features to a txt
    with open(filename, 'w') as myfile:
        for row in rows:
            myfile.write(row + '\n')
    print('\nData written to ' + filename + '\n')
#gather all the features and create rows
def findFeatures(filename, article, corenlp_xml, verbsList, annotatedFileName):
    """Walk every sentence/token of *article* and build one tab-separated
    feature row per token: a BIO label column first (empty when there is
    no annotated file), then sentence- and token-level features, and a
    trailing filename/sentenceID metadata column (stripped by writeToTXT
    in unlabelled mode)."""
    print filename
    rows = []
    #find the constituency parse of the sentences
    listOfParse = []
    sent_tags = corenlp_xml.soup.find('sentences').find_all('sentence')
    for s in sent_tags:
        listOfParse.append(s.find('parse').text)
    i = 0
    print('extracting features ......')
    # Cross-sentence state: whether we are inside an open quotation, and
    # the previous token's BIO label.
    openQuotes = 'false'
    lastLabel = 'O'
    #begin extracting features
    for sentence in article.sentences:
        lengthOfSentence = len(sentence['tokens'])
        idOfSentence = sentence['id']
        # NOTE(review): currentSentPers is never used.
        currentSentPers = 'false'
        beginningQuote = 'false'
        parseTree = listOfParse[i]
        i = i + 1
        #these are the features that will stay the same across each token in the sentence
        #e.g. containsOrganization, containsNamedEntity etc.
        #returns the array for this sentence of tokens, pos, lemma which will be used to
        #constitute the token based features
        rowSentFeats = ''
        tokenArray, posArray, lemmaArray, peopleHyponym, orgHyponym, rowSentFeats = \
            getSentenceFeatures(sentence, rowSentFeats)
        #get verb cue, sentence wide features, e.g. containsVerbCue, verbCueNearTheEnd?
        rowSentFeats = getVerbListSentenceFeatures(sentence, verbsList, rowSentFeats)
        #rowSentFeats now contains a string that looks like
        #'containsOrg='True'\tcontainsVerbCue='True'\t.....'
        prevSyntactic = ''
        for token in sentence['tokens']:
            row = rowSentFeats
            word = str(token['word'])
            pos = str(token['pos'])
            lemma = str(token['lemma'])
            idOfToken = token['id']
            #gather parc token or continue from this token if too many problems
            #pretty hacky solution but doing my best to deal with the cases where the
            #tokens are misaligned
            #remember that this moves one token at a time, in some cases we'll have to skip
            #one token, but the next one will be fine
            # NOTE(review): skipIncrement is never used.
            skipIncrement = False
            if annotatedFileName != None:
                #assign BIO labels over attribution "content" spans
                label = token['attribution']
                role = token['role']
                if label == None:
                    row = 'O\t' + row
                    lastLabel = 'O'
                elif role == 'content' and lastLabel == 'O':
                    row = 'B\t' + row
                    lastLabel = 'B'
                elif role == 'content' and lastLabel == 'B':
                    row = 'I\t' + row
                    lastLabel = 'I'
                elif role == 'content':
                    row = 'I\t' + row
                    lastLabel = 'I'
                else:
                    row = 'O\t' + row
                    lastLabel = 'O'
            else:
                # Unlabelled data: emit an empty label column.
                row = '\t' + row
            # A closing '' ends an open quotation span.
            if "''" in str(token['word']):
                openQuotes = 'false'
            #append the features
            row += 'id=' + str(idOfSentence) + '\t'
            row += 'sentenceLength=' + str(lengthOfSentence) + '\t'
            row = getTokenFeatures(idOfToken, tokenArray, row, 'word')
            row = getTokenFeatures(idOfToken, posArray, row, 'pos')
            row = getTokenFeatures(idOfToken, lemmaArray, row, 'lemma')
            row = getTokenPairs(idOfToken, tokenArray, row, 'word')
            row = getTokenPairs(idOfToken, posArray, row, 'pos')
            row = getTokenPairs(idOfToken, lemmaArray, row, 'lemma')
            row = findRelations(token, row)
            # Relative offsets from this token to any person/organization
            # hyponyms found in the sentence.
            for (idHypo, hyponym) in peopleHyponym:
                row += 'personHyponym[' + str(idHypo - idOfToken) + ']=' + hyponym + '\t'
            for (idHypo, hyponym) in orgHyponym:
                row += 'organizationHyponym[' + str(idHypo - idOfToken) + ']=' + hyponym + '\t'
            #the inside quote labels
            if openQuotes == 'true' and beginningQuote == 'true':
                row += "insidequotes='true'\t"
            # An opening `` starts a quotation span.
            if "``" in str(token['word']):
                openQuotes = 'true'
                beginningQuote = 'true'
            row = getConstituentLabels(parseTree, token, sentence, row)
            prevSyntactic, row = getSyntactic(parseTree, token, sentence, row, verbsList, prevSyntactic)
            # Trailing metadata column (consumed later by writeToTXT).
            row += 'filename=' + filename + '=sentenceID=' + str(idOfSentence)
            rows.append(row)
    return rows
def getVerbListSentenceFeatures(sentence, verbs, rowSentFeats):
    """Append sentence-level verb-cue features to *rowSentFeats*.

    For every tagged ('Y') verb cue located in this sentence, adds
    containsVerbCue, plus verbCueNearEnd when the cue sits within the
    last min(4, len/2) tokens of the sentence.
    """
    sentence_id = sentence['id']
    token_count = len(sentence['tokens'])
    near_end_start = token_count - min(4, token_count / 2)
    for verb in verbs:
        if str(verb[1]) != str(sentence_id):
            continue
        # Confirm the cue's recorded token really is this word.
        if sentence['tokens'][int(verb[2])]['word'] != verb[0]:
            continue
        if verb[4] == 'Y':
            rowSentFeats += '''containsVerbCue='true'\t'''
            if int(verb[2]) >= near_end_start:
                rowSentFeats += '''verbCueNearEnd='true'\t'''
    return rowSentFeats
#identifies which relations the token has with its parents and children
#appends separate features for the relation as well as the relation with the word itself
def findRelations(token, row):
    """Append dependency-relation features for the token's parents and
    children to *row* and return it.

    Relation names outside the known list are truncated at the first
    ':' (e.g. 'nmod:against' falls back to 'nmod').

    FIX: replaced the Python-2-only dict.has_key() with the `in`
    operator (identical behavior on dicts, and Python-3 compatible);
    merged the duplicated known/unknown branches.
    """
    listRelations = ['acl', 'acl:relcl', 'advcl', 'advmod', 'amod', 'appos', 'aux', 'auxpass', 'case', 'cc', 'cc:preconj',
                     'ccomp', 'compound', 'compound:prt', 'conj', 'cop', 'csubj', 'csubjpass', 'dep', 'det', 'det:predet',
                     'discourse', 'dislocated', 'dobj', 'expl', 'foreign', 'goeswith', 'iobj', 'list', 'mark', 'mwe', 'name',
                     'neg', 'nmod', 'nmod:npmod', 'nmod:poss', 'nmod:tmod', 'nsubj', 'nsubjpass', 'nummod', 'parataxis',
                     'punct', 'remnant', 'reparandum', 'root', 'vocative', 'xcomp']
    if 'parents' in token:
        for relation, parent in token['parents']:
            if relation not in listRelations:
                # Fall back to the bare relation name before any subtype.
                relation = relation.split(':')[0]
            row += 'p-Relation=' + relation + '\t'
            row += 'p-Relation|p-token=' + relation + '|' + parent['word'] + '\t'
    if 'children' in token:
        for relation, child in token['children']:
            if relation not in listRelations:
                relation = relation.split(':')[0]
            row += 'c-Relation=' + relation + '\t'
            row += 'c-Relation|c-token=' + relation + '|' + child['word'] + '\t'
    return row
def getTokenFeatures(idOfToken, array, row, name):
    """Append windowed features name[k]=value for offsets k in [-5, 5],
    clipped to the sentence bounds, and return the extended row.

    Works for any per-token array (word, POS tag, lemma).
    """
    low = -5 if idOfToken >= 5 else -idOfToken
    high = min(len(array) - idOfToken, 6)
    for offset in range(low, high):
        row += name + '[' + str(offset) + ']=' + array[idOfToken + offset] + '\t'
    return row
def getTokenPairs(idOfToken, array, row, name):
    """Append adjacent-pair features name[k]|name[k+1]=v1|v2 for offsets
    k in [-5, 4], clipped to the sentence bounds, and return the row.

    Works for any per-token array (word, POS tag, lemma).
    """
    low = -5 if idOfToken >= 5 else -idOfToken
    high = min(len(array) - idOfToken, 6)
    for k in range(low, high - 1):
        left = array[idOfToken + k]
        right = array[idOfToken + k + 1]
        row += (name + '[' + str(k) + ']|' + name + '[' + str(k + 1) +
                ']=' + left + '|' + right + '\t')
    return row
def getSentenceFeatures(sentence, row):
    """Scan one sentence and return its token/POS/lemma arrays, the
    person/organization hyponym hits as (token id, word) pairs, and *row*
    extended with sentence-level contains* flags (each flag is emitted at
    most once, at the first token that triggers it)."""
    tokens = []
    tags = []
    lemmas = []
    personHits = []
    orgHits = []
    seenPerson = seenOrg = seenPronoun = seenQuotes = False
    accordingPending = False
    for token in sentence['tokens']:
        ner = str(token['ner'])
        if ner == 'PERSON' and not seenPerson:
            row += '''containsPerson='true'\t'''
            seenPerson = True
        elif ner == 'ORGANIZATION' and not seenOrg:
            row += '''containsOrganization='true'\t'''
            seenOrg = True
        tag = str(token['pos'])
        if 'PRP' in tag and not seenPronoun:
            row += '''containsPronoun='true'\t'''
            seenPronoun = True
        word = str(token['word'])
        if "''" in word and not seenQuotes:
            row += '''containsQuotes='true'\t'''
            seenQuotes = True
        tokens.append(word)
        tags.append(tag)
        lemmas.append(str(token['lemma']))
        lowered = word.lower()
        if lowered in peopleHyponyms:
            personHits.append((token['id'], word))
        if lowered in orgHyponyms:
            orgHits.append((token['id'], word))
        # "according ... to" must be adjacent: any other word resets it.
        if lowered != 'to' and accordingPending:
            accordingPending = False
        if lowered == 'according':
            accordingPending = True
        if lowered == 'to' and accordingPending:
            row += '''containsAccordingTo='true'\t'''
            accordingPending = False
        if lowered in nounCues and '''containsNounCue='true''' not in row:
            row += '''containsNounCue='true'\t'''
    return tokens, tags, lemmas, personHits, orgHits, row
def getConstituentLabels(parseTree, token, sentence, row):
    """Append one const=(label,depth) feature per constituent on the path
    above *token*, as computed by getPaths()."""
    constituents = getPaths(sentence['parse'], [], token, sentence)
    if constituents is not None:
        for (label, depth) in constituents:
            row += 'const=(' + label + ',' + str(depth) + ')\t'
    return row
#use a stack, go through each word
def getPaths(treeDict, listOfWords, token, sentence):
    """Depth-first search of the constituency-parse dict for *token*'s
    leaf; collects the (label, depth) of each ancestor constituent and
    returns listOfWords (or None implicitly if the token is not found).

    NOTE(review): `Stack` is not defined or imported anywhere visible in
    this module -- confirm where it comes from.
    """
    targetWord = str(token['word'])
    word = treeDict['word']
    s = Stack()
    s.push(treeDict)
    # NOTE(review): appears to assume leaves pop in right-to-left order so
    # counting down from the sentence length recovers token ids -- confirm.
    currIdentity = len(sentence['tokens'])
    while not (s.isEmpty()):
        currTreeDict = s.pop()
        thisWord = currTreeDict['word']
        if thisWord != None:
            currIdentity = currIdentity - 1
            #if we found the token's word
            if thisWord == targetWord and currIdentity == token['id']:
                OGdepth = currTreeDict['depth']
                parent = currTreeDict['parent']
                #finding each parent and their constituents
                # NOTE(review): has_key() is Python-2-only.
                if parent.has_key('depth') and parent['depth'] != 0:
                    while (parent['depth'] != 1):
                        code = str(parent['code'])
                        depth = parent['depth'] - 1
                        constTuple = (code, depth)
                        parent = parent['parent']
                        listOfWords.append(constTuple)
                        # Stop once the full ancestor chain is collected.
                        if len(listOfWords) == OGdepth - 2:
                            myList = listOfWords
                            return myList
                else:
                    return listOfWords
        #push all the children onto the stack
        else:
            for child in currTreeDict['children']:
                s.push(child)
#use the parse tree to find all the syntactic info
def getSyntactic(parseTree, token, sentence, row, verbsList, prevSyntactic):
    """Append constituent-span features for *token* (label, span length,
    depth, verb-cue-in-span flags) and for its parent constituent.

    NOTE(review): self-admittedly hacky token/leaf alignment; several
    paths below can leave variables unset (see inline notes). Returns
    (prevSyntactic, row); prevSyntactic is passed through unchanged.
    """
    targetWord = str(token['word'])
    idOfWord = token['id']
    # Normalize raw parentheses in the parse string to PTB -LRB-/-RRB-.
    for item in sentence['tokens']:
        pos = item['pos']
        if '(' in item['word']:
            parseTree = parseTree.replace('(' + pos + ' ()', '(' + pos + ' -LRB-)')
        elif ')' in item['word']:
            parseTree = parseTree.replace('(' + pos + ' ))', '(' + pos + ' -RRB-)')
    tree = ParentedTree.fromstring(parseTree)
    #reformat the text to match properly
    if targetWord == '(':
        targetWord = '-LRB-'
    if targetWord == ')':
        targetWord = '-RRB-'
    #get the indices for all the leaves that match this token
    indices = [i for i, x in enumerate(tree.leaves()) if x == targetWord]
    occurence = 0
    scopingId = 0
    # When this token is not the first occurrence of its word, track which
    # occurrence it is so the right subtree gets selected below.
    if idOfWord != tree.leaves().index(targetWord):
        occurence = indices.index(idOfWord)
        scopingId = occurence
    #if there aren't multiple occurences of the same token
    if occurence == 0:
        gen = tree.subtrees(lambda tree2: str(tree2.leaves()[0]) == targetWord)
        try:
            # NOTE(review): .next() is Python-2-only; Python 3 spells it
            # next(gen).
            subtree = gen.next()
        except:
            # Bare except: any failure silently skips this token's features.
            print sys.exc_info()[0]
            print 'error collecting subtree'
            return prevSyntactic, row
    #find correct token within the sentence
    else:
        next = 'false'
        for mytree in tree.subtrees(lambda tree2: str(tree2.leaves()[0]) == targetWord):
            if next == 'true' and occurence == 0:
                subtree = mytree
                break
            else:
                next = 'false'
            # A height-2 subtree is a preterminal: one occurrence consumed.
            if mytree.height() == 2:
                next = 'true'
                occurence = occurence - 1
        # NOTE(review): if this loop never breaks, `subtree` is unbound and
        # the .flatten() below raises UnboundLocalError.
    #get subtree's label, length of span and depth
    flattened = subtree.flatten()
    label = flattened.label()
    lengthSpan = len(flattened.leaves())
    depth = len(subtree.treeposition())
    #find the ID in the original sentence so that we can find out whether any of the words are verb cues
    tokenArray = []
    # NOTE(review): `token` and `idOfWord` are rebound here, shadowing the
    # function parameters for the rest of the body.
    idOfWord = None
    for token in sentence['tokens']:
        word = token['word']
        if word == '(':
            word = '-LRB-'
        if word == ')':
            word = '-RRB-'
        if targetWord == word and scopingId == 0:
            idOfWord = token['id']
            break
        elif targetWord == word:
            scopingId = scopingId - 1
        else:
            continue
    tokenArray = tree.leaves()
    #check if any tokens in the span are verb cues and append if found
    constHasSpan = False
    for verb in verbsList:
        verbWord = verb[0]
        verbSent = int(verb[1])
        verbTok = int(verb[2])
        verbLabel = verb[4]
        if verbSent != sentence['id']:
            continue
        else:
            for i in range(len(flattened.leaves())):
                # NOTE(review): if idOfWord stayed None above, this
                # addition raises TypeError.
                if i + idOfWord == verbTok and tokenArray[i + idOfWord] == verbWord and verbLabel == 'Y':
                    row += '''constSpanVC='true'\t'''
                    constHasSpan = True
    row += 'constLabel=' + label + '\t' + 'constSpanLength=' + str(lengthSpan) + '\t' + 'depthSpan=' + str(depth) + '\t'
    #get the subtree's parent tree
    parentTreePosList = list(subtree.treeposition())
    #no parent, return the row as it is
    if len(parentTreePosList) == 0:
        return prevSyntactic, row
    # NOTE(review): 'RRB' here is almost certainly meant to be '-RRB-';
    # as written the right-bracket guard never matches.
    elif targetWord == '-LRB-' or targetWord == 'RRB':
        return prevSyntactic, row
    parentTreePosList.pop()
    parentTreeHead = tuple(parentTreePosList)
    parentTree = tree[parentTreePosList]
    parentFlat = parentTree.flatten()
    parentLabel = parentFlat.label()
    lengthSpanParent = len(parentFlat.leaves())
    parentDepth = len(parentTree.treeposition())
    #find correct word id that begins the span
    begIndex = None
    for indx, word in enumerate(tokenArray):
        i = 0
        if parentFlat.leaves()[0] == word:
            # Match the whole parent span against the sentence leaves
            # starting at indx.
            for item in parentFlat.leaves():
                if i + indx == len(tokenArray):
                    continue
                if item == tokenArray[i + indx] and i == len(parentFlat.leaves()) - 1:
                    begIndex = indx
                if item == tokenArray[i + indx]:
                    i = i + 1
    #find out if any of these are verbs
    for verb in verbsList:
        verbWord = verb[0]
        verbSent = int(verb[1])
        verbTok = int(verb[2])
        verbLabel = verb[4]
        if verbSent != sentence['id']:
            continue
        else:
            for i in range(len(parentFlat.leaves())):
                # NOTE(review): begIndex can still be None here, which
                # raises TypeError on the addition.
                if i + begIndex == verbTok and tokenArray[i + begIndex] == verbWord and verbLabel == 'Y':
                    row += '''parentConstSpanVC='true'\t'''
                    #no need to add it twice
                    constHasSpan = False
    #if the child tree had a verb cue, then we know the parent does
    if constHasSpan == True:
        row += '''parentConstSpanVC='true'\t'''
    row += 'parentConstLabel=' + parentLabel + '\t' + 'parentConstSpanLength=' + \
        str(lengthSpanParent) + '\t' + 'parentDepthSpan=' + str(parentDepth) + '\t'
    return prevSyntactic, row
#parse command line arguments
def main():
usageMessage = '\nCorrect usage of the Content Span Extractor command is as follows: \n' + \
'\n\n WHEN AN ANNOTATED FILESET EXISTS TO GET LABELS FROM:\n' + \
'To extract tokens and their features: \n python source/intermediaries/contentSpanExtractor.py -labelled /pathToCoreNLPDirectory /pathToAnnotatedFilesDirectory /pathToRawDirectory nameCorrespondingTaggedVerbCuesFile nameOfOutputFile.txt \n' + \
'\nTo use the default path names for the PARC training data, and filename PARCtrainContentSpans.txt please use the command with the label -default, as follows: \n' + \
'\t python source/intermediaries/contentSpanExtractor.py -labelled -default' + \
'\n\n WHEN THE LABELS ARE UNKNOWN:\n' + \
'To extract tokens and their features: \n python source/intermediaries/contentSpanExtractor.py -unlabelled /pathToCoreNLPDirectory /pathToRawDirectory nameCorrespondingTaggedVerbCuesFile nameOfOutputFile.txt \n' + \
'\nFor reference, the path to the CoreNLP file is: /home/ndg/dataset/ptb2-corenlp/CoreNLP/ + train, test or dev depending on your needs. \n' + \
'The path to the Parc3 files is /home/ndg/dataset/parc3/ + train, test or dev depending on your needs.\n'
args = sys.argv
if len(args) == 7:
flag = args[1]
pathToCORENLP = args[2]
pathToAnnotatedFiles = args[3]
pathToRaw = argsg[4]
verbCuesFile = args[5]
nameTxtOutput = args[6]
if flag != '-labelled':
print usageMessage
return
if os.path.isdir(pathToCORENLP):
print 'valid path to a directory'
else:
print 'ERROR: The path to this coreNLP directory does not exist.'
print usageMessage
return
if os.path.isdir(pathToAnnotatedFiles):
print 'valid path to a directory'
else:
print 'ERROR: The path to this annotated file directory does not exist.'
print usageMessage
return
if os.path.isfile(data_dir + nameTxtOutput):
print "That file already exists, you probably don't want to overwrite it"
var = raw_input("Are you sure you want to overwrite this file? Please answer Y or N\n")
if var == 'Y' or var == 'y':
coreNLPFiles = openDirectory(pathToCORENLP)
annotatedFiles = openDirectory(pathToAnnotatedFiles)
rawFiles = openDirectory(pathToRaw)
findFiles(coreNLPFiles, annotatedFiles, rawFiles, nameTxtOutput, verbCuesFile)
return
else:
return
else:
print 'valid filename'
coreNLPFiles = openDirectory(pathToCORENLP)
annotatedFiles = openDirectory(pathToAnnotatedFiles)
rawFiles = openDirectory(pathToRaw)
findFiles(coreNLPFiles, annotatedFiles, rawFiles, nameTxtOutput, verbCuesFile)
elif len(args) == 6:
pathToCORENLP = args[2]
pathToRaw = args[3]
verbCuesFile = args[4]
nameTxtOutput = args[5]
if args[1] != '-unlabelled':
print usageMessage
return
if os.path.isdir(pathToCORENLP):
print 'valid path to a directory'
else:
print 'ERROR: The path to this coreNLP directory does not exist.'
print usageMessage
return
if os.path.isfile(data_dir + nameTxtOutput):
print "That file already exists, you probably don't want to overwrite it"
var = raw_input("Are you sure you want to overwrite this file? Please answer Y or N\n")
if var == 'Y' or var == 'y':
coreNLPFiles = openDirectory(pathToCORENLP)
rawFiles = openDirectory(pathToRaw)
findFiles(coreNLPFiles, None, rawFiles, nameTxtOutput, verbCuesFile)
return
else:
return
coreNLPFiles = openDirectory(pathToCORENLP)
rawFiles = openDirectory(pathToRaw)
findFiles(coreNLPFiles, None, rawFiles, nameTxtOutput, verbCuesFile)
elif len(args) == 3:
if args[1] == '-labelled' and args[2] == '-default':
pathToCORENLP = '/home/ndg/dataset/ptb2-corenlp/CoreNLP_tokenized/train/'
pathToAnnotatedFiles = '/home/ndg/dataset/parc3/train/'
pathToRaw = '/home/ndg/dataset/ptb2-corenlp/masked_raw/train/'
verbCuesFile = 'train/PARCTrainVerbFeatsFOR_SPAN_EXTRACTOR.csv'
nameTxtOutput = 'PARCTrainContentSpans.txt'
coreNLPFiles = openDirectory(pathToCORENLP)
annotatedFiles = openDirectory(pathToAnnotatedFiles)
rawFiles = openDirectory(pathToRaw)
findFiles(coreNLPFiles, annotatedFiles, rawFiles, nameTxtOutput, verbCuesFile)
else:
print usageMessage
else:
print usageMessage
class Stack:
    """A minimal LIFO stack backed by a Python list."""

    def __init__(self):
        # The top of the stack is the end of the list.
        self.items = []

    def isEmpty(self):
        """Return True when the stack holds no items."""
        return len(self.items) == 0

    def push(self, item):
        """Place item on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item."""
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it."""
        return self.items[-1]

    def size(self):
        """Return the number of stacked items."""
        return len(self.items)
# Script entry point: run the extractor CLI only when executed directly.
if __name__ == '__main__':
    main()
| |
import operator
import os
import pprint
import random
import signal
import time
import uuid
import logging
import pytest
import psutil
from collections import defaultdict, namedtuple
from multiprocessing import Process, Queue
from queue import Empty, Full
from cassandra import ConsistencyLevel, WriteTimeout
from cassandra.query import SimpleStatement
from dtest import RUN_STATIC_UPGRADE_MATRIX, Tester
from tools.misc import generate_ssl_stores, new_node
from .upgrade_base import switch_jdks
from .upgrade_manifest import (build_upgrade_pairs,
current_2_1_x, current_2_2_x, current_3_0_x,
indev_3_11_x,
current_3_11_x, indev_trunk)
logger = logging.getLogger(__name__)
def data_writer(tester, to_verify_queue, verification_done_queue, rewrite_probability=0):
    """
    Process for writing/rewriting data continuously.

    Pushes (key, value) pairs to to_verify_queue, to be consumed by data_checker.
    Pulls from verification_done_queue (keys already verified by data_checker)
    that it may safely overwrite.
    Intended to be run using multiprocessing.

    rewrite_probability: integer percentage (0-100); the chance that an
    iteration overwrites an already-verified key rather than a fresh uuid.
    """
    # 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
    session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
    prepared = session.prepare("UPDATE cf SET v=? WHERE k=?")
    prepared.consistency_level = ConsistencyLevel.QUORUM

    def handle_sigterm(signum, frame):
        # need to close queue gracefully if possible, or the data_checker process
        # can't seem to empty the queue and test failures result.
        to_verify_queue.close()
        exit(0)

    signal.signal(signal.SIGTERM, handle_sigterm)

    while True:
        try:
            key = None
            if (rewrite_probability > 0) and (random.randint(0, 100) <= rewrite_probability):
                try:
                    key = verification_done_queue.get_nowait()
                except Empty:
                    # we wanted a re-write but the re-writable queue was empty. oh well.
                    pass

            # Fall back to a brand-new key when no rewrite candidate was drawn.
            key = key or uuid.uuid4()

            val = uuid.uuid4()

            session.execute(prepared, (val, key))

            to_verify_queue.put((key, val,))
        except Exception:
            logger.debug("Error in data writer process!")
            to_verify_queue.close()
            raise
def data_checker(tester, to_verify_queue, verification_done_queue):
    """
    Process for checking data continuously.

    Pulls (key, expected_value) pairs from to_verify_queue (written by
    data_writer) and reads them back at QUORUM to verify.
    Pushes verified keys to verification_done_queue so data_writer can use
    them as re-write candidates.
    Intended to be run using multiprocessing.
    """
    # 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
    session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
    prepared = session.prepare("SELECT v FROM cf WHERE k=?")
    prepared.consistency_level = ConsistencyLevel.QUORUM

    def handle_sigterm(signum, frame):
        # need to close queue gracefully if possible, or the data_checker process
        # can't seem to empty the queue and test failures result.
        verification_done_queue.close()
        exit(0)

    signal.signal(signal.SIGTERM, handle_sigterm)

    while True:
        try:
            # here we could block, but if the writer process terminates early with an empty queue
            # we would end up blocking indefinitely
            (key, expected_val) = to_verify_queue.get_nowait()

            actual_val = session.execute(prepared, (key,))[0][0]
        except Empty:
            time.sleep(0.1)  # let's not eat CPU if the queue is empty
            continue
        except Exception:
            logger.debug("Error in data verifier process!")
            verification_done_queue.close()
            raise
        else:
            try:
                verification_done_queue.put_nowait(key)
            except Full:
                # the rewritable queue is full, not a big deal. drop this one.
                # we keep the rewritable queue held to a modest max size
                # and allow dropping some rewritables because we don't want to
                # rewrite rows in the same sequence as originally written
                pass

        assert expected_val == actual_val, "Data did not match expected value!"
def counter_incrementer(tester, to_verify_queue, verification_done_queue, rewrite_probability=0):
    """
    Process for incrementing counters continuously.

    Pushes (key, expected_count) pairs to to_verify_queue, to be consumed by
    counter_checker.
    Pulls (key, count) pairs from verification_done_queue (rows already
    verified by counter_checker) that it can increment again.
    Intended to be run using multiprocessing.

    rewrite_probability: integer percentage (0-100); the chance that an
    iteration re-increments an already-verified key rather than a fresh uuid.
    """
    # 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
    session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
    prepared = session.prepare("UPDATE countertable SET c = c + 1 WHERE k1=?")
    prepared.consistency_level = ConsistencyLevel.QUORUM

    def handle_sigterm(signum, frame):
        # need to close queue gracefully if possible, or the counter_checker process
        # can't seem to empty the queue and test failures result.
        to_verify_queue.close()
        exit(0)

    signal.signal(signal.SIGTERM, handle_sigterm)

    while True:
        try:
            key = None
            count = 0  # this will get set to actual last known count if we do a re-write
            if (rewrite_probability > 0) and (random.randint(0, 100) <= rewrite_probability):
                try:
                    key, count = verification_done_queue.get_nowait()
                except Empty:
                    # we wanted a re-write but the re-writable queue was empty. oh well.
                    pass

            key = key or uuid.uuid4()

            # BUG FIX: was 'session.execute(prepared, (key))' -- '(key)' is not
            # a tuple, so the single bound parameter was never passed correctly.
            session.execute(prepared, (key,))

            to_verify_queue.put_nowait((key, count + 1,))
        except Exception:
            logger.debug("Error in counter incrementer process!")
            to_verify_queue.close()
            raise
def counter_checker(tester, to_verify_queue, verification_done_queue):
    """
    Process for checking counters continuously.

    Pulls (key, expected_count) pairs from to_verify_queue (written by
    counter_incrementer) and reads them back at QUORUM to verify.
    Pushes (key, actual_count) pairs to verification_done_queue so
    counter_incrementer can pick them as re-increment candidates.
    Intended to be run using multiprocessing.
    """
    # 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
    session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
    prepared = session.prepare("SELECT c FROM countertable WHERE k1=?")
    prepared.consistency_level = ConsistencyLevel.QUORUM

    def handle_sigterm(signum, frame):
        # need to close queue gracefully if possible, or the counter_checker process
        # can't seem to empty the queue and test failures result.
        verification_done_queue.close()
        exit(0)

    signal.signal(signal.SIGTERM, handle_sigterm)

    while True:
        try:
            # here we could block, but if the writer process terminates early with an empty queue
            # we would end up blocking indefinitely
            (key, expected_count) = to_verify_queue.get_nowait()

            actual_count = session.execute(prepared, (key,))[0][0]
        except Empty:
            time.sleep(0.1)  # let's not eat CPU if the queue is empty
            continue
        except Exception:
            logger.debug("Error in counter verifier process!")
            verification_done_queue.close()
            raise
        else:
            # CONSISTENCY FIX: was 'tester.assertEqual(...)'. 'tester' is a
            # cloned pytest-style Tester (not a unittest.TestCase), and the
            # sibling data_checker already uses a plain assert.
            assert expected_count == actual_count, "Data did not match expected value!"

            try:
                verification_done_queue.put_nowait((key, actual_count))
            except Full:
                # the rewritable queue is full, not a big deal. drop this one.
                # we keep the rewritable queue held to a modest max size
                # and allow dropping some rewritables because we don't want to
                # rewrite rows in the same sequence as originally written
                pass
@pytest.mark.upgrade_test
@pytest.mark.resource_intensive
@pytest.mark.skip("Fake skip so that this isn't run outside of a generated class that removes this annotation")
class TestUpgrade(Tester):
    """
    Upgrades a 3-node Murmur3Partitioner cluster through versions specified in test_version_metas.
    """
    test_version_metas = None  # set on init to know which versions to use
    subprocs = None  # holds any subprocesses, for status checking and cleanup
    extra_config = None  # holds a non-mutable structure that can be cast as dict()

    @pytest.fixture(autouse=True)
    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
        fixture_dtest_setup.ignore_log_patterns = (
            # This one occurs if we do a non-rolling upgrade, the node
            # it's trying to send the migration to hasn't started yet,
            # and when it does, it gets replayed and everything is fine.
            r'Can\'t send migration request: node.*is down',
            r'RejectedExecutionException.*ThreadPoolExecutor has shut down',
            # Occurs due to test/ccm writing topo on down nodes
            r'Cannot update data center or rack from.*for live host',
            # Normal occurance. See CASSANDRA-12026. Likely won't be needed after C* 4.0.
            r'Unknown column cdc during deserialization',
        )

    def prepare(self):
        """Point the cluster install dir and JDK at the first version under test."""
        logger.debug("Upgrade test beginning, setting CASSANDRA_VERSION to {}, and jdk to {}. (Prior values will be restored after test)."
                     .format(self.test_version_metas[0].version, self.test_version_metas[0].java_version))
        cluster = self.cluster
        cluster.set_install_dir(version=self.test_version_metas[0].version)
        switch_jdks(self.test_version_metas[0].java_version)
        self.fixture_dtest_setup.reinitialize_cluster_for_different_version()
        logger.debug("Versions to test (%s): %s" % (type(self), str([v.version for v in self.test_version_metas])))

    def init_config(self):
        """Apply any class-level extra_config on top of the base Tester config."""
        Tester.init_config(self)

        if self.extra_config is not None:
            logger.debug("Setting extra configuration options:\n{}".format(
                pprint.pformat(dict(self.extra_config), indent=4))
            )
            self.cluster.set_configuration_options(
                values=dict(self.extra_config)
            )

    def test_parallel_upgrade(self):
        """
        Test upgrading cluster all at once (requires cluster downtime).
        """
        self.upgrade_scenario()

    @pytest.mark.timeout(3000)
    def test_rolling_upgrade(self):
        """
        Test rolling upgrade of the cluster, so we have mixed versions part way through.
        """
        self.upgrade_scenario(rolling=True)

    def test_parallel_upgrade_with_internode_ssl(self):
        """
        Test upgrading cluster all at once (requires cluster downtime), with internode ssl.
        """
        self.upgrade_scenario(internode_ssl=True)

    @pytest.mark.timeout(3000)
    def test_rolling_upgrade_with_internode_ssl(self):
        """
        Rolling upgrade test using internode ssl.
        """
        self.upgrade_scenario(rolling=True, internode_ssl=True)

    def upgrade_scenario(self, populate=True, create_schema=True, rolling=False, after_upgrade_call=(), internode_ssl=False):
        """Drive a full upgrade: build/populate the cluster, then walk the version list.

        rolling=True upgrades one node at a time with continuous writes/verification;
        otherwise all nodes are upgraded in parallel between write/check passes.
        after_upgrade_call: iterable of zero-arg callables run after each parallel upgrade step.
        """
        # Record the rows we write as we go:
        if populate:
            self.prepare()
        self.row_values = set()
        cluster = self.cluster
        if cluster.version() >= '3.0':
            cluster.set_configuration_options({'enable_user_defined_functions': 'true',
                                               'enable_scripted_user_defined_functions': 'true'})
        elif cluster.version() >= '2.2':
            cluster.set_configuration_options({'enable_user_defined_functions': 'true'})

        if internode_ssl:
            logger.debug("***using internode ssl***")
            generate_ssl_stores(self.fixture_dtest_setup.test_path)
            self.cluster.enable_internode_ssl(self.fixture_dtest_setup.test_path)

        if populate:
            # Start with 3 node cluster
            logger.debug('Creating cluster (%s)' % self.test_version_metas[0].version)
            cluster.populate(3)
            [node.start(use_jna=True, wait_for_binary_proto=True) for node in cluster.nodelist()]
        else:
            logger.debug("Skipping cluster creation (should already be built)")

        # add nodes to self for convenience
        for i, node in enumerate(cluster.nodelist(), 1):
            node_name = 'node' + str(i)
            setattr(self, node_name, node)

        if create_schema:
            if rolling:
                self._create_schema_for_rolling()
            else:
                self._create_schema()
        else:
            logger.debug("Skipping schema creation (should already be built)")
        time.sleep(5)  # sigh...

        self._log_current_ver(self.test_version_metas[0])

        if rolling:
            # start up processes to write and verify data
            write_proc, verify_proc, verification_queue = self._start_continuous_write_and_verify(wait_for_rowcount=5000)

            # upgrade through versions
            for version_meta in self.test_version_metas[1:]:
                for num, node in enumerate(self.cluster.nodelist()):
                    # sleep (sigh) because driver needs extra time to keep up with topo and make quorum possible
                    # this is ok, because a real world upgrade would proceed much slower than this programmatic one
                    # additionally this should provide more time for timeouts and other issues to crop up as well, which we could
                    # possibly "speed past" in an overly fast upgrade test
                    time.sleep(60)

                    self.upgrade_to_version(version_meta, partial=True, nodes=(node,), internode_ssl=internode_ssl)

                    self._check_on_subprocs(self.fixture_dtest_setup.subprocs)
                    logger.debug('Successfully upgraded %d of %d nodes to %s' %
                                 (num + 1, len(self.cluster.nodelist()), version_meta.version))

                self.cluster.set_install_dir(version=version_meta.version)
                self.fixture_dtest_setup.reinitialize_cluster_for_different_version()

            # Stop write processes
            write_proc.terminate()
            # wait for the verification queue's to empty (and check all rows) before continuing
            self._wait_until_queue_condition('writes pending verification', verification_queue, operator.le, 0, max_wait_s=1200)
            self._check_on_subprocs([verify_proc])  # make sure the verification processes are running still
            self._terminate_subprocs()
        # not a rolling upgrade, do everything in parallel:
        else:
            # upgrade through versions
            for version_meta in self.test_version_metas[1:]:
                self._write_values()
                self._increment_counters()

                self.upgrade_to_version(version_meta, internode_ssl=internode_ssl)
                self.cluster.set_install_dir(version=version_meta.version)
                self.fixture_dtest_setup.reinitialize_cluster_for_different_version()

                self._check_values()
                self._check_counters()
                self._check_select_count()

            # run custom post-upgrade callables
            for call in after_upgrade_call:
                call()

            logger.debug('All nodes successfully upgraded to %s' % version_meta.version)
            self._log_current_ver(version_meta)

        cluster.stop()

    def tearDown(self):
        # just to be super sure we get cleaned up
        self._terminate_subprocs()

        super(TestUpgrade, self).tearDown()

    def _check_on_subprocs(self, subprocs):
        """
        Check on given subprocesses.

        If any are not alive, we'll go ahead and terminate any remaining alive subprocesses since this test is going to fail.
        """
        subproc_statuses = [s.is_alive() for s in subprocs]
        if not all(subproc_statuses):
            message = "A subprocess has terminated early. Subprocess statuses: "
            for s in subprocs:
                message += "{name} (is_alive: {aliveness}), ".format(name=s.name, aliveness=s.is_alive())
            message += "attempting to terminate remaining subprocesses now."
            self._terminate_subprocs()
            raise RuntimeError(message)

    def _terminate_subprocs(self):
        """Kill any still-alive tracked subprocesses, best-effort."""
        for s in self.fixture_dtest_setup.subprocs:
            if s.is_alive():
                try:
                    psutil.Process(s.pid).kill()  # with fire damnit
                except Exception:
                    logger.debug("Error terminating subprocess. There could be a lingering process.")
                    pass

    def upgrade_to_version(self, version_meta, partial=False, nodes=None, internode_ssl=False):
        """
        Upgrade Nodes - if *partial* is True, only upgrade those nodes
        that are specified by *nodes*, otherwise ignore *nodes* specified
        and upgrade all nodes.
        """
        logger.debug('Upgrading {nodes} to {version}'.format(nodes=[n.name for n in nodes] if nodes is not None else 'all nodes', version=version_meta.version))
        switch_jdks(version_meta.java_version)
        logger.debug("JAVA_HOME: " + os.environ.get('JAVA_HOME'))
        if not partial:
            nodes = self.cluster.nodelist()

        for node in nodes:
            logger.debug('Shutting down node: ' + node.name)
            node.drain()
            node.watch_log_for("DRAINED")
            node.stop(wait_other_notice=False)

        for node in nodes:
            node.set_install_dir(version=version_meta.version)
            logger.debug("Set new cassandra dir for %s: %s" % (node.name, node.get_install_dir()))
            if internode_ssl and (version_meta.family == 'trunk' or version_meta.family >= '4.0'):
                node.set_configuration_options({'server_encryption_options': {'enabled': True, 'enable_legacy_ssl_storage_port': True}})

        # hacky? yes. We could probably extend ccm to allow this publicly.
        # the topology file needs to be written before any nodes are started
        # otherwise they won't be grouped into dc's properly for multi-dc tests
        self.cluster._Cluster__update_topology_files()

        # Restart nodes on new version
        for node in nodes:
            logger.debug('Starting %s on new version (%s)' % (node.name, version_meta.version))
            # Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
            node.set_log_level("INFO")
            node.start(wait_other_notice=240, wait_for_binary_proto=True)
            node.nodetool('upgradesstables -a')

    def _log_current_ver(self, current_version_meta):
        """
        Logs where we currently are in the upgrade path, surrounding the current branch/tag, like ***sometag***
        """
        vers = [m.version for m in self.test_version_metas]
        curr_index = vers.index(current_version_meta.version)
        logger.debug(
            "Current upgrade path: {}".format(
                vers[:curr_index] + ['***' + current_version_meta.version + '***'] + vers[curr_index + 1:]))

    def _create_schema_for_rolling(self):
        """
        Slightly different schema variant for testing rolling upgrades with quorum reads/writes.
        """
        session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)

        session.execute("CREATE KEYSPACE upgrade WITH replication = {'class':'SimpleStrategy', 'replication_factor':3};")

        session.execute('use upgrade')
        session.execute('CREATE TABLE cf ( k uuid PRIMARY KEY, v uuid )')
        session.execute('CREATE INDEX vals ON cf (v)')

        session.execute("""
            CREATE TABLE countertable (
                k1 uuid,
                c counter,
                PRIMARY KEY (k1)
                );""")

    def _create_schema(self):
        """Default schema (int/text cf, text/int counter table) at RF=2."""
        session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)

        session.execute("CREATE KEYSPACE upgrade WITH replication = {'class':'SimpleStrategy', 'replication_factor':2};")

        session.execute('use upgrade')
        session.execute('CREATE TABLE cf ( k int PRIMARY KEY, v text )')
        session.execute('CREATE INDEX vals ON cf (v)')

        session.execute("""
            CREATE TABLE countertable (
                k1 text,
                k2 int,
                c counter,
                PRIMARY KEY (k1, k2)
                );""")

    def _write_values(self, num=100):
        """Write num sequential rows to cf, tracking the keys in self.row_values."""
        session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
        session.execute("use upgrade")
        for i in range(num):
            x = len(self.row_values) + 1
            session.execute("UPDATE cf SET v='%d' WHERE k=%d" % (x, x))
            self.row_values.add(x)

    def _check_values(self, consistency_level=ConsistencyLevel.ALL):
        """Read back every tracked row from every node and assert k/v match."""
        for node in self.cluster.nodelist():
            session = self.patient_cql_connection(node, protocol_version=self.protocol_version)
            session.execute("use upgrade")
            for x in self.row_values:
                query = SimpleStatement("SELECT k,v FROM cf WHERE k=%d" % x, consistency_level=consistency_level)
                result = session.execute(query)
                k, v = result[0]
                assert x == k
                assert str(x) == v

    def _wait_until_queue_condition(self, label, queue, opfunc, required_len, max_wait_s=600):
        """
        Waits up to max_wait_s for queue size to return True when evaluated against a condition function from the operator module.

        Label is just a string identifier for easier debugging.

        On Mac OS X may not be able to check queue size, in which case it will not block.

        If time runs out, raises RuntimeError.
        """
        wait_end_time = time.time() + max_wait_s

        while time.time() < wait_end_time:
            try:
                qsize = queue.qsize()
            except NotImplementedError:
                logger.debug("Queue size may not be checkable on Mac OS X. Test will continue without waiting.")
                break
            if opfunc(qsize, required_len):
                logger.debug("{} queue size ({}) is '{}' to {}. Continuing.".format(label, qsize, opfunc.__name__, required_len))
                break

            if divmod(round(time.time()), 30)[1] == 0:
                logger.debug("{} queue size is at {}, target is to reach '{}' {}".format(label, qsize, opfunc.__name__, required_len))

            time.sleep(0.1)
            continue
        else:
            raise RuntimeError("Ran out of time waiting for queue size ({}) to be '{}' to {}. Aborting.".format(qsize, opfunc.__name__, required_len))

    def _start_continuous_write_and_verify(self, wait_for_rowcount=0, max_wait_s=600):
        """
        Starts a writer process, a verifier process, a queue to track writes,
        and a queue to track successful verifications (which are rewrite candidates).

        wait_for_rowcount provides a number of rows to write before unblocking and continuing.

        Returns the writer process, verifier process, and the to_verify_queue.
        """
        # queue of writes to be verified
        to_verify_queue = Queue(10000)
        # queue of verified writes, which are update candidates
        verification_done_queue = Queue(maxsize=500)

        writer = Process(target=data_writer, args=(self, to_verify_queue, verification_done_queue, 25))
        # daemon subprocesses are killed automagically when the parent process exits
        writer.daemon = True
        self.fixture_dtest_setup.subprocs.append(writer)
        writer.start()

        if wait_for_rowcount > 0:
            self._wait_until_queue_condition('rows written (but not verified)', to_verify_queue, operator.ge, wait_for_rowcount, max_wait_s=max_wait_s)

        verifier = Process(target=data_checker, args=(self, to_verify_queue, verification_done_queue))
        # daemon subprocesses are killed automagically when the parent process exits
        verifier.daemon = True
        self.fixture_dtest_setup.subprocs.append(verifier)
        verifier.start()

        return writer, verifier, to_verify_queue

    def _start_continuous_counter_increment_and_verify(self, wait_for_rowcount=0, max_wait_s=600):
        """
        Starts a counter incrementer process, a verifier process, a queue to track writes,
        and a queue to track successful verifications (which are re-increment candidates).

        Returns the writer process, verifier process, and the to_verify_queue.
        """
        # queue of writes to be verified
        to_verify_queue = Queue()
        # queue of verified writes, which are update candidates
        verification_done_queue = Queue(maxsize=500)

        # BUG FIX: this previously spawned data_writer/data_checker (the plain
        # cf writers), so counters were never actually incremented or checked.
        incrementer = Process(target=counter_incrementer, args=(self, to_verify_queue, verification_done_queue, 25))
        # daemon subprocesses are killed automagically when the parent process exits
        incrementer.daemon = True
        self.fixture_dtest_setup.subprocs.append(incrementer)
        incrementer.start()

        if wait_for_rowcount > 0:
            self._wait_until_queue_condition('counters incremented (but not verified)', to_verify_queue, operator.ge, wait_for_rowcount, max_wait_s=max_wait_s)

        count_verifier = Process(target=counter_checker, args=(self, to_verify_queue, verification_done_queue))
        # daemon subprocesses are killed automagically when the parent process exits
        count_verifier.daemon = True
        self.fixture_dtest_setup.subprocs.append(count_verifier)
        count_verifier.start()

        return incrementer, count_verifier, to_verify_queue

    def _increment_counters(self, opcount=25000):
        """Perform opcount random counter increments, tracking expected counts."""
        logger.debug("performing {opcount} counter increments".format(opcount=opcount))
        session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
        session.execute("use upgrade;")

        update_counter_query = ("UPDATE countertable SET c = c + 1 WHERE k1='{key1}' and k2={key2}")

        self.expected_counts = {}
        for i in range(10):
            self.expected_counts[uuid.uuid4()] = defaultdict(int)

        fail_count = 0

        for i in range(opcount):
            key1 = random.choice(list(self.expected_counts.keys()))
            key2 = random.randint(1, 10)
            try:
                query = SimpleStatement(update_counter_query.format(key1=key1, key2=key2), consistency_level=ConsistencyLevel.ALL)
                session.execute(query)
            except WriteTimeout:
                fail_count += 1
            else:
                self.expected_counts[key1][key2] += 1
            if fail_count > 100:
                break

        assert fail_count < 100, "Too many counter increment failures"

    def _check_counters(self):
        """Read back every tracked counter at CL.ONE and assert it matches."""
        logger.debug("Checking counter values...")
        session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
        session.execute("use upgrade;")

        for key1 in list(self.expected_counts.keys()):
            for key2 in list(self.expected_counts[key1].keys()):
                expected_value = self.expected_counts[key1][key2]

                query = SimpleStatement("SELECT c from countertable where k1='{key1}' and k2={key2};".format(key1=key1, key2=key2),
                                        consistency_level=ConsistencyLevel.ONE)
                results = session.execute(query)

                if results is not None:
                    actual_value = results[0][0]
                else:
                    # counter wasn't found
                    actual_value = None

                assert actual_value == expected_value

    def _check_select_count(self, consistency_level=ConsistencyLevel.ALL):
        """Assert SELECT COUNT(*) over cf matches the number of tracked rows."""
        logger.debug("Checking SELECT COUNT(*)")
        session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
        session.execute("use upgrade;")

        expected_num_rows = len(self.row_values)

        countquery = SimpleStatement("SELECT COUNT(*) FROM cf;", consistency_level=consistency_level)
        result = session.execute(countquery)

        if result is not None:
            actual_num_rows = result[0][0]
            assert actual_num_rows == expected_num_rows, "SELECT COUNT(*) returned %s when expecting %s" % (actual_num_rows, expected_num_rows)
        else:
            # FIX: was 'self.fail(...)' -- the pytest-style Tester base has no
            # unittest fail(); use pytest.fail directly.
            pytest.fail("Count query did not return")
class BootstrapMixin(object):
    """
    Can be mixed into UpgradeTester or a subclass thereof to add bootstrap tests.

    Using this class is not currently feasible on lengthy upgrade paths, as each
    version bump adds a node and this will eventually exhaust resources.
    """

    def _bootstrap_and_verify(self, data_center=None):
        # Shared helper: bootstrap one new node (optionally into a specific DC),
        # then write/increment and verify both regular and counter data.
        logger.debug("Adding a node to the cluster")
        if data_center is None:
            nnode = new_node(self.cluster, remote_debug_port=str(2000 + len(self.cluster.nodes)))
        else:
            nnode = new_node(self.cluster, remote_debug_port=str(2000 + len(self.cluster.nodes)), data_center=data_center)
        nnode.start(use_jna=True, wait_other_notice=240, wait_for_binary_proto=True)
        self._write_values()
        self._increment_counters()
        self._check_values()
        self._check_counters()

    def _bootstrap_new_node(self):
        # Check we can bootstrap a new node on the upgraded cluster:
        self._bootstrap_and_verify()

    def _bootstrap_new_node_multidc(self):
        # Check we can bootstrap a new node into dc2 on the upgraded cluster:
        self._bootstrap_and_verify(data_center='dc2')

    def test_bootstrap(self):
        # try and add a new node
        self.upgrade_scenario(after_upgrade_call=(self._bootstrap_new_node,))

    def test_bootstrap_multidc(self):
        # try and add a new node
        # multi dc, 2 nodes in each dc
        self.prepare()
        cluster = self.cluster

        if cluster.version() >= '3.0':
            cluster.set_configuration_options({'enable_user_defined_functions': 'true',
                                               'enable_scripted_user_defined_functions': 'true'})
        elif cluster.version() >= '2.2':
            cluster.set_configuration_options({'enable_user_defined_functions': 'true'})

        cluster.populate([2, 2])
        [node.start(use_jna=True, wait_for_binary_proto=True) for node in self.cluster.nodelist()]
        self._multidc_schema_create()
        self.upgrade_scenario(populate=False, create_schema=False, after_upgrade_call=(self._bootstrap_new_node_multidc,))

    def _multidc_schema_create(self):
        """Create the upgrade keyspace/tables with a NetworkTopologyStrategy layout."""
        session = self.patient_cql_connection(self.cluster.nodelist()[0], protocol_version=self.protocol_version)

        if self.cluster.version() >= '1.2':
            # DDL for C* 1.2+
            session.execute("CREATE KEYSPACE upgrade WITH replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':2};")
        else:
            # DDL for C* 1.1
            session.execute("""CREATE KEYSPACE upgrade WITH strategy_class = 'NetworkTopologyStrategy'
            AND strategy_options:'dc1':1
            AND strategy_options:'dc2':2;
            """)

        session.execute('use upgrade')
        session.execute('CREATE TABLE cf ( k int PRIMARY KEY , v text )')
        session.execute('CREATE INDEX vals ON cf (v)')

        session.execute("""
            CREATE TABLE countertable (
                k1 text,
                k2 int,
                c counter,
                PRIMARY KEY (k1, k2)
                );""")
def create_upgrade_class(clsname, version_metas, protocol_version,
                         bootstrap_test=False, extra_config=None):
    """
    Dynamically creates a test subclass for testing the given versions.

    'clsname' is the name of the new class.
    'version_metas' is a list of version metas ccm will recognize, to be upgraded in order.
    'protocol_version' is an int.
    'bootstrap_test' is a boolean, if True bootstrap testing will be included. Default False.
    'extra_config' is a tuple of config options that can (eventually) be cast as a dict,
    e.g. (('partitioner', 'org.apache.cassandra.dht.Murmur3Partitioner'),)
    Defaults to a Murmur3Partitioner entry when None.

    The new class is registered in this module's globals() so test collection finds it.
    """
    if extra_config is None:
        extra_config = (('partitioner', 'org.apache.cassandra.dht.Murmur3Partitioner'),)

    if bootstrap_test:
        parent_classes = (TestUpgrade, BootstrapMixin)
    else:
        parent_classes = (TestUpgrade,)

    # short names for debug output
    parent_class_names = [cls.__name__ for cls in parent_classes]

    print("Creating test class {} ".format(clsname))
    print("  for C* versions:\n{} ".format(pprint.pformat(version_metas)))
    print("  using protocol: v{}, and parent classes: {}".format(protocol_version, parent_class_names))
    print("  to run these tests alone, use `nosetests {}.py:{}`".format(__name__, clsname))

    # Only run the generated class when the final target version matches the
    # current environment, unless the full static matrix was requested.
    upgrade_applies_to_env = RUN_STATIC_UPGRADE_MATRIX or version_metas[-1].matches_current_env_version_family
    newcls = type(
        clsname,
        parent_classes,
        {'test_version_metas': version_metas, '__test__': True, 'protocol_version': protocol_version, 'extra_config': extra_config}
    )

    # Remove the skip annotation in the superclass we just derived from, we will add it back if we actually intend
    # to skip with a better message
    newcls.pytestmark = [mark for mark in newcls.pytestmark if not mark.name == "skip"]
    if not upgrade_applies_to_env:
        newcls.pytestmark.append(pytest.mark.skip("test not applicable to env"))

    if clsname in globals():
        raise RuntimeError("Class by name already exists!")

    globals()[clsname] = newcls
    return newcls
# Descriptor for one statically-defined multi-version upgrade path:
# (class name, version metas in upgrade order, native protocol version, extra config).
MultiUpgrade = namedtuple('MultiUpgrade', ('name', 'version_metas', 'protocol_version', 'extra_config'))

MULTI_UPGRADES = (
    # Proto v3 upgrades (v3 is supported on 2.1, 2.2, 3.0, 3.11)
    MultiUpgrade(name='TestProtoV3Upgrade_AllVersions_EndsAt_3_11_X',
                 version_metas=[current_2_1_x, current_2_2_x, current_3_0_x, indev_3_11_x], protocol_version=3, extra_config=None),
    MultiUpgrade(name='TestProtoV3Upgrade_AllVersions_RandomPartitioner_EndsAt_3_11_X_HEAD',
                 version_metas=[current_2_1_x, current_2_2_x, current_3_0_x, indev_3_11_x], protocol_version=3,
                 extra_config=(
                     ('partitioner', 'org.apache.cassandra.dht.RandomPartitioner'),
                 )),

    # Proto v4 upgrades (v4 is supported on 2.2, 3.0, 3.1, trunk)
    MultiUpgrade(name='TestProtoV4Upgrade_AllVersions_EndsAt_Trunk_HEAD',
                 version_metas=[current_2_2_x, current_3_0_x, current_3_11_x, indev_trunk], protocol_version=4, extra_config=None),
    MultiUpgrade(name='TestProtoV4Upgrade_AllVersions_RandomPartitioner_EndsAt_Trunk_HEAD',
                 version_metas=[current_2_2_x, current_3_0_x, current_3_11_x, indev_trunk], protocol_version=4,
                 extra_config=(
                     ('partitioner', 'org.apache.cassandra.dht.RandomPartitioner'),
                 )),

    # Beta versions don't work with this test since it doesn't specify use beta in the client
    # It's fine I guess for now? Can update on release
    # Proto v5 upgrades (v5 is supported on 3.0, 3.11, trunk)
    # MultiUpgrade(name='TestProtoV5Upgrade_AllVersions_EndsAt_Trunk_HEAD',
    #             version_metas=[current_3_0_x, current_3_x, indev_trunk], protocol_version=5, extra_config=None),
    # MultiUpgrade(name='TestProtoV5Upgrade_AllVersions_RandomPartitioner_EndsAt_Trunk_HEAD',
    #             version_metas=[current_3_0_x, current_3_x, indev_trunk], protocol_version=5,
    #             extra_config=(
    #                 ('partitioner', 'org.apache.cassandra.dht.RandomPartitioner'),
    #             )),
)

# Generate one multi-step upgrade class per entry above.
for upgrade in MULTI_UPGRADES:
    # if any version_metas are None, this means they are versions not to be tested currently
    if all(upgrade.version_metas):
        metas = upgrade.version_metas
        if not RUN_STATIC_UPGRADE_MATRIX:
            if metas[-1].matches_current_env_version_family:
                # looks like this test should actually run in the current env, so let's set the final version to match the env exactly
                oldmeta = metas[-1]
                newmeta = oldmeta.clone_with_local_env_version()
                logger.debug("{} appears applicable to current env. Overriding final test version from {} to {}".format(upgrade.name, oldmeta.version, newmeta.version))
                metas[-1] = newmeta

        create_upgrade_class(upgrade.name, [m for m in metas], protocol_version=upgrade.protocol_version, extra_config=upgrade.extra_config)

# Also generate one single-step (pair) upgrade class, with bootstrap testing,
# for every adjacent version pair produced by build_upgrade_pairs().
for pair in build_upgrade_pairs():
    create_upgrade_class(
        'Test' + pair.name,
        [pair.starting_meta, pair.upgrade_meta],
        protocol_version=pair.starting_meta.max_proto_v,
        bootstrap_test=True
    )
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
These routines perform some hierrachical agglomerative clustering
of some input data. The following alternatives are proposed:
- Distance based average-link
- Similarity-based average-link
- Distance based maximum-link
- Ward's algorithm under graph constraints
- Ward's algorithm without graph constraints
In this latest version, the results are returned in a 'WeightedForest'
structure, which gives access to the clustering hierarchy, facilitates
the plot of the result etc.
For back-compatibility, *_segment versions of the algorithms have been
appended, with the old API (except the qmax parameter, which now
represents the number of wanted clusters)
Author : Bertrand Thirion,Pamela Guevara, 2006-2009
"""
#---------------------------------------------------------------------------
# ------ Routines for Agglomerative Hierarchical Clustering ----------------
# --------------------------------------------------------------------------
import numpy as np
from warnings import warn
from ..graph.graph import WeightedGraph
from ..graph.forest import Forest
class WeightedForest(Forest):
    """Weighted Forest structure, i.e. a set of trees.

    - each node has one parent and children (hierarchical structure)
    - some of the nodes can be viewed as leaves, others as roots
    - the edges within a tree are associated with a weight:
        +1 from child to parent
        -1 from parent to child
    - additionally, the nodes have a value, which is called 'height',
      especially useful for dendrograms

    Members
    -------
    V : (int, > 0) the number of vertices
    E : (int) the number of edges
    parents : array of shape (self.V), the parent array
    edges : array of shape (self.E, 2), representing pairwise neighbors
    weights : array of shape (self.E), +1/-1 for ascending/descending links
    children : list of arrays that represents the children of any node
    height : array of shape (self.V)
    """

    def __init__(self, V, parents=None, height=None):
        """
        Parameters
        ----------
        V: the number of edges of the graph
        parents=None: array of shape (V)
            the parents of the graph
            by default, the parents are set to range(V), i.e. each
            node is its own parent, and each node is a tree
        height=None: array of shape(V)
            the height of the nodes
        """
        V = int(V)
        if V < 1:
            raise ValueError('cannot create graphs with no vertex')
        self.V = int(V)
        # define the parents
        # NB: 'is None' (not '== None') -- parents may be a numpy array,
        # for which '== None' is an elementwise comparison
        if parents is None:
            self.parents = np.arange(self.V)
        else:
            if np.size(parents) != V:
                raise ValueError('Incorrect size for parents')
            if parents.max() > self.V:
                raise ValueError('Incorrect value for parents')
            self.parents = np.reshape(parents, self.V)
        self.define_graph_attributes()
        if self.check() == 0:
            raise ValueError('The proposed structure is not a forest')
        self.children = []
        # define the height
        if height is None:
            height = np.zeros(self.V)
        else:
            if np.size(height) != V:
                raise ValueError('Incorrect size for height')
        self.height = np.reshape(height, self.V)

    def set_height(self, height=None):
        """Set the height array
        """
        if height is None:
            height = np.zeros(self.V)
        if np.size(height) != self.V:
            raise ValueError('Incorrect size for height')
        self.height = np.reshape(height, self.V)

    def get_height(self):
        """Get the height array
        """
        return self.height

    def check_compatible_height(self):
        """Check that height[parents[i]] >= height[i] for all nodes
        """
        OK = True
        for i in range(self.V):
            if self.height[self.parents[i]] < self.height[i]:
                OK = False
        return OK

    def plot(self, ax=None):
        """Plot the dendrogram associated with self;
        the rank of the data in the dendrogram is returned

        Parameters
        ----------
        ax: axis handle, optional

        Returns
        -------
        ax, the axis handle
        """
        import matplotlib.pylab as mp
        if not self.check_compatible_height():
            raise ValueError('cannot plot myself in my current state')
        n = np.sum(self.isleaf())
        # 1. find a permutation of the leaves that makes it nice
        aux = _label(self.parents)
        # int dtype: temp is used as an index array below
        temp = np.zeros(self.V, int)
        rank = np.arange(self.V)
        temp[:n] = np.argsort(aux[:n])
        for i in range(n):
            rank[temp[i]] = i
        # 2. derive the abscissa in the dendrogram
        idx = np.zeros(self.V)
        temp = np.argsort(rank[:n])
        for i in range(n):
            idx[temp[i]] = i
        # internal nodes sit at the mean abscissa of their children
        for i in range(n, self.V):
            j = np.nonzero(self.parents == i)[0]
            idx[i] = np.mean(idx[j])
        # 3. plot
        if ax is None:
            mp.figure()
            ax = mp.subplot(1, 1, 1)
        # vertical segment from each node up to its parent
        for i in range(self.V):
            h1 = self.height[i]
            h2 = self.height[self.parents[i]]
            mp.plot([idx[i], idx[i]], [h1, h2], 'k')
        ch = self.get_children()
        # horizontal segment joining the children of each node
        for i in range(self.V):
            if np.size(ch[i]) > 0:
                lidx = idx[ch[i]]
                m = lidx.min()
                M = lidx.max()
                h = self.height[i]
                mp.plot([m, M], [h, h], 'k')
        cM = 1.05 * self.height.max() - 0.05 * self.height.min()
        cm = 1.05 * self.height.min() - 0.05 * self.height.max()
        mp.axis([-1, idx.max() + 1, cm, cM])
        return ax

    def partition(self, threshold):
        """ Partition the tree according to a cut criterion:
        cut every link whose height exceeds the threshold and return
        the connected-component label of each leaf.
        """
        valid = self.height < threshold
        f = self.subforest(valid)
        u = f.cc()
        return u[f.isleaf()]

    def split(self, k):
        """
        idem as partition, but a number of components are supplied instead
        """
        k = int(k)
        if k > self.V:
            k = self.V
        nbcc = self.cc().max() + 1
        if k <= nbcc:
            u = self.cc()
            return u[self.isleaf()]
        # choose the cut height that yields exactly k components
        sh = np.sort(self.height)
        th = sh[nbcc - k]
        u = self.partition(th)
        return u

    def plot_height(self):
        """Plot the height of the non-leaves nodes
        """
        import matplotlib.pylab as mp
        mp.figure()
        sh = np.sort(self.height[self.isleaf() == False])
        n = np.sum(self.isleaf() == False)
        mp.bar(np.arange(n), sh)

    def list_of_subtrees(self):
        """
        returns the list of all non-trivial subtrees in the graph

        Caveat: this function assumes that the vertices are sorted in a
        way such that parent[i] > i for all i.
        Only the leaves are listed, not the subtrees themselves.
        """
        lst = []
        n = np.sum(self.isleaf())
        for i in range(self.V):
            # np.int was removed from recent numpy; builtin int is equivalent
            lst.append(np.array([], int))
        for i in range(n):
            lst[i] = np.array([i], int)
        # propagate the leaves upwards (valid thanks to parent[i] > i)
        for i in range(self.V - 1):
            j = self.parents[i]
            lst[j] = np.hstack((lst[i], lst[j]))
        return lst[n:self.V]
#--------------------------------------------------------------------------
#------------- Average link clustering on a graph -------------------------
# -------------------------------------------------------------------------
def fusion(K, pop, i, j, k):
    """Merge nodes i and j of the similarity graph K into node k, in place.

    Edge weights incident to i (resp. j) are rescaled by the relative
    population pop[i] / pop[k] (resp. pop[j] / pop[k]), the edges are
    redirected to k, and duplicate edges created by the merge are summed;
    the redundant copy is disabled (weight -inf, endpoints -1).

    This is used in average_link_slow (deprecated).
    """
    weight_i = float(pop[i]) / (pop[k])
    weight_j = 1.0 - weight_i
    # redirect every edge touching i, then j, to k, rescaling its weight;
    # side 0 handles outgoing edges, side 1 incoming ones
    for node, scale in ((i, weight_i), (j, weight_j)):
        for side in (0, 1):
            hits = np.nonzero(K.edges[:, side] == node)
            K.weights[hits] = K.weights[hits] * scale
            K.edges[hits, side] = k
    # collapse duplicate edges now attached to k, one side at a time
    for side, other in ((0, 1), (1, 0)):
        attached = np.nonzero(K.edges[:, side] == k)[0]
        neighbors = K.edges[attached, other]
        ranked = np.sort(neighbors)
        order = np.argsort(neighbors)
        for a in range(np.size(ranked) - 1):
            if ranked[a] == ranked[a + 1]:
                keep = attached[order[a]]
                drop = attached[order[a + 1]]
                K.weights[keep] = K.weights[keep] + K.weights[drop]
                K.weights[drop] = - np.inf
                K.edges[drop] = -1
def average_link_graph(G):
    """
    Agglomerative function based on a (hopefully sparse) similarity graph

    Parameters
    ----------
    G the input graph

    Returns
    -------
    t a WeightedForest structure that represents the dendrogram of the data

    CAVEAT
    ------
    In that case, the homogeneity is associated with high similarity
    (as opposed to low cost as in most clustering procedures,
    e.g. distance-based procedures). Thus the tree is created with
    negated affinity values, in order to respect the traditional
    ordering of cluster potentials. Individual points have the
    potential (-np.inf).
    This problem is handled transparently in the associated segment
    function.
    """
    warn('Function average_link_graph deprecated, will be removed',
         FutureWarning,
         stacklevel=2)
    # prepare a graph with twice the number of vertices
    n = G.V
    nbcc = G.cc().max() + 1
    K = WeightedGraph(2 * G.V)
    K.E = G.E
    K.edges = G.edges.copy()
    K.weights = G.weights.copy()
    # np.int was removed from recent numpy; the builtin int is equivalent
    parent = np.arange(2 * n - nbcc, dtype=int)
    pop = np.ones(2 * n - nbcc, int)
    height = np.inf * np.ones(2 * n - nbcc)
    # iteratively merge clusters
    for q in range(n - nbcc):
        # 1. find the heaviest edge
        m = (K.weights).argmax()
        cost = K.weights[m]
        k = q + n
        height[k] = cost
        i = K.edges[m, 0]
        j = K.edges[m, 1]
        # 2. remove the current edge (and its symmetric counterpart)
        K.edges[m] = -1
        K.weights[m] = - np.inf
        m = np.nonzero((K.edges[:, 0] == j) * (K.edges[:, 1] == i))[0]
        K.edges[m] = - 1
        K.weights[m] = - np.inf
        # 3. merge the edges with third part edges
        parent[i] = k
        parent[j] = k
        pop[k] = pop[i] + pop[j]
        fusion(K, pop, i, j, k)
    # clip and give the never-merged roots a finite height
    height[height < 0] = 0
    height[np.isinf(height)] = height[n] + 1
    # negate the heights so that 'lower = more homogeneous' holds
    t = WeightedForest(2 * n - nbcc, parent, - height)
    return t
def average_link_graph_segment(G, stop=0, qmax=1, verbose=False):
    """Agglomerative function based on a (hopefully sparse) similarity graph

    Parameters
    ----------
    G the input graph
    stop: float
        the stopping criterion
    qmax: int, optional
        the number of desired clusters (in the limit of the stopping
        criterion)
    verbose : bool, optional
        If True, print diagnostic information

    Returns
    -------
    u: array of shape (G.V)
        a labelling of the graph vertices according to the criterion
    cost: array of shape (G.V (?))
        the cost of each merge step during the clustering procedure
    """
    warn('Function average_link_graph_segment deprecated, will be removed',
         FutureWarning,
         stacklevel=2)
    # prepare a graph with twice the number of vertices
    n = G.V
    if qmax == - 1:
        qmax = n
    qmax = int(np.minimum(qmax, n))
    t = average_link_graph(G)
    if verbose:
        t.plot()
    # np.int was removed from recent numpy; the builtin int is equivalent
    u1 = np.zeros(n, int)
    u2 = np.zeros(n, int)
    if stop >= 0:
        # heights are negated similarities, hence the sign flip
        u1 = t.partition( - stop)
    if qmax > 0:
        u2 = t.split(qmax)
    # keep whichever criterion produced the finer clustering
    if u1.max() < u2.max():
        u = u2
    else:
        u = u1
    # costs are reported as (positive) similarities again
    cost = - t.get_height()
    cost = cost[t.isleaf() == False]
    return u, cost
#--------------------------------------------------------------------------
#------------- Ward's algorithm with graph constraints --------------------
# -------------------------------------------------------------------------
def _inertia_(i, j, Features):
"""
Compute the variance of the set which is
the concatenation of Feature[i] and Features[j]
"""
if np.size(np.shape(Features[i])) < 2:
print i, np.shape(Features[i]), Features[i]
if np.size(np.shape(Features[i])) < 2:
print j, np.shape(Features[j]), Features[j]
if np.shape(Features[i])[1] != np.shape(Features[j])[1]:
print i, j, np.shape(Features[i]), np.shape(Features[j])
localset = np.vstack((Features[i], Features[j]))
return np.var(localset, 0).sum()
def _inertia(i, j, Features):
    """Variance (error sum of squares) of the union of clusters i and j.

    Features caches three per-cluster moments: [0] the size, [1] the
    per-dimension sum and [2] the per-dimension sum of squares, so the
    merged within-cluster sum of squares is q - s ** 2 / n.
    """
    size = Features[0][i] + Features[0][j]
    total = Features[1][i] + Features[1][j]
    square = Features[2][i] + Features[2][j]
    return np.sum(square - (total ** 2 / size))
def _initial_inertia(K, Features, seeds=None):
    """ Compute the variance associated with each
    edge-related pair of vertices.
    The result is written in K.weights.
    If seeds is provided (seeds is not None),
    this is done only for edges adjacent to the seeds;
    the other edges get an infinite weight so they are never merged first.
    """
    # BUG FIX: 'seeds == None' compares elementwise when seeds is an
    # array (and truth-testing the result raises); use identity instead
    if seeds is None:
        for e in range(K.E):
            i = K.edges[e, 0]
            j = K.edges[e, 1]
            ESS = _inertia(i, j, Features)
            K.weights[e] = ESS
    else:
        # boolean mask of the seed vertices
        aux = np.zeros(K.V).astype('bool')
        aux[seeds] = 1
        for e in range(K.E):
            i = K.edges[e, 0]
            j = K.edges[e, 1]
            if (aux[i] or aux[j]):
                K.weights[e] = _inertia(i, j, Features)
            else:
                K.weights[e] = np.inf
def _auxiliary_graph(G, Features):
    """Build the working graph used along the merges.

    The returned graph has 2 * G.V - 1 vertices (one slot for each
    future merged cluster), keeps a single direction per edge of G,
    and has its weights initialized to the pairwise merge inertia.
    """
    aux = WeightedGraph(2 * G.V - 1)
    aux.E = G.E
    aux.edges = G.edges.copy()
    aux.weights = np.ones(aux.E)
    aux.symmeterize()
    if aux.E > 0:
        # retain only one of the two symmetric copies of each edge
        one_way = aux.edges[:, 0] < aux.edges[:, 1]
        aux.remove_edges(one_way)
    aux.remove_trivial_edges()
    _initial_inertia(aux, Features)
    return aux
def _remap(K, i, j, k, Features, linc, rinc):
    """Modifies the graph K to merge nodes i and j into node k;
    the graph weights are modified accordingly

    Parameters
    ----------
    K graph instance:
      the existing graphical model
    i,j,k: int
      indexes of the nodes to be merged and of the parent respectively
    Features: list of node-per-node features
    linc: array of shape(K.V)
      left incidence matrix
    rinc: array of shape(K.V)
      right incidence matrix
    """
    # -------
    # replace i by k
    # --------
    # edges leaving i: re-evaluate their inertia against the merged
    # cluster k, then redirect them to k
    # (np.int was removed from recent numpy; builtin int is equivalent)
    idxi = np.array(linc[i]).astype(int)
    if np.size(idxi) > 1:
        for l in idxi:
            K.weights[l] = _inertia(k, K.edges[l, 1], Features)
    elif np.size(idxi) == 1:
        K.weights[idxi] = _inertia(k, K.edges[idxi, 1], Features)
    if np.size(idxi) > 0:
        K.edges[idxi, 0] = k
    # edges entering i
    idxi = np.array(rinc[i]).astype(int)
    if np.size(idxi) > 1:
        for l in idxi:
            K.weights[l] = _inertia(K.edges[l, 0], k, Features)
    elif np.size(idxi) == 1:
        K.weights[idxi] = _inertia(K.edges[idxi, 0], k, Features)
    if np.size(idxi) > 0:
        K.edges[idxi, 1] = k
    #------
    # replace j by k
    #-------
    idxj = np.array(linc[j]).astype(int)
    if np.size(idxj) > 1:
        for l in idxj:
            K.weights[l] = _inertia(k, K.edges[l, 1], Features)
    elif np.size(idxj) == 1:
        K.weights[idxj] = _inertia(k, K.edges[idxj, 1], Features)
    if np.size(idxj) > 0:
        K.edges[idxj, 0] = k
    idxj = np.array(rinc[j]).astype(int)
    if np.size(idxj) > 1:
        for l in idxj:
            # argument order differs from the i-branch above, but
            # _inertia is symmetric in its first two arguments
            K.weights[l] = _inertia(k, K.edges[l, 0], Features)
    elif np.size(idxj) == 1:
        K.weights[idxj] = _inertia(k, K.edges[idxj, 0], Features)
    if np.size(idxj) > 0:
        K.edges[idxj, 1] = k
    #------
    # update linc, rinc
    #------
    # BUG FIX: the previous code removed dead edges from the list while
    # iterating over it, which skips the element that follows each
    # removal; filtering into a fresh list inspects every candidate.
    # The astype(int) also guards against np.concatenate promoting an
    # empty operand to float (float indices are rejected by numpy).
    lidxk = [l for l in np.concatenate((linc[j], linc[i])).astype(int)
             if K.edges[l, 1] != - 1]
    linc[k] = lidxk
    linc[i] = []
    linc[j] = []
    ridxk = [l for l in np.concatenate((rinc[j], rinc[i])).astype(int)
             if K.edges[l, 0] != - 1]
    rinc[k] = ridxk
    rinc[i] = []
    rinc[j] = []
    #------
    # remove double edges
    #------
    #left side
    idxk = np.array(linc[k]).astype(int)
    if np.size(idxk) > 0:
        corr = K.edges[idxk, 1]
        scorr = np.sort(corr)
        acorr = np.argsort(corr)
        for a in range(np.size(scorr) - 1):
            if scorr[a] == scorr[a + 1]:
                # drop the second copy of the duplicated edge
                i2 = idxk[acorr[a + 1]]
                K.weights[i2] = np.inf
                rinc[K.edges[i2, 1]].remove(i2)
                K.edges[i2] = - 1
                linc[k].remove(i2)
    #right side
    idxk = np.array(rinc[k]).astype(int)
    if np.size(idxk) > 0:
        corr = K.edges[idxk, 0]
        scorr = np.sort(corr)
        acorr = np.argsort(corr)
        for a in range(np.size(scorr) - 1):
            if scorr[a] == scorr[a + 1]:
                i2 = idxk[acorr[a + 1]]
                K.weights[i2] = np.inf
                linc[K.edges[i2, 0]].remove(i2)
                K.edges[i2] = - 1
                rinc[k].remove(i2)
    return linc, rinc
def ward_quick(G, feature, verbose=False):
""" Agglomerative function based on a topology-defining graph
and a feature matrix.
Parameters
----------
G : graph instance
topology-defining graph
feature: array of shape (G.V,dim_feature)
some vectorial information related to the graph vertices
verbose : bool, optional
If True, print diagnostic information
Returns
-------
t: weightForest instance,
that represents the dendrogram of the data
Notes
----
Hopefully a quicker version
A euclidean distance is used in the feature space
Caveat : only approximate
"""
warn('Function ward_quick from ' +
'nipy.algorithms.clustering.hierrachical_clustering ' +
'deprecated, will be removed',
FutureWarning,
stacklevel=2)
# basic check
if feature.ndim == 1:
feature = np.reshape(feature, (-1, 1))
if feature.shape[0] != G.V:
raise ValueError(
"Incompatible dimension for the feature matrix and the graph")
Features = [np.ones(2 * G.V), np.zeros((2 * G.V, feature.shape[1])),
np.zeros((2 * G.V, feature.shape[1]))]
Features[1][:G.V] = feature
Features[2][:G.V] = feature ** 2
n = G.V
nbcc = G.cc().max() + 1
# prepare a graph with twice the number of vertices
K = _auxiliary_graph(G, Features)
parent = np.arange(2 * n - nbcc).astype(np.int)
height = np.zeros(2 * n - nbcc)
linc = K.left_incidence()
rinc = K.right_incidence()
# iteratively merge clusters
q = 0
while (q < n - nbcc):
# 1. find the lightest edges
aux = np.zeros(2 * n)
ape = np.nonzero(K.weights < np.inf)
ape = np.reshape(ape, np.size(ape))
idx = np.argsort(K.weights[ape])
for e in range(n - nbcc - q):
i, j = K.edges[ape[idx[e]], 0], K.edges[ape[idx[e]], 1]
if (aux[i] == 1) or (aux[j] == 1):
break
aux[i] = 1
aux[j] = 1
emax = np.maximum(e, 1)
for e in range(emax):
m = ape[idx[e]]
cost = K.weights[m]
k = q + n
i = K.edges[m, 0]
j = K.edges[m, 1]
height[k] = cost
if verbose:
print q, i, j, m, cost
# 2. remove the current edge
K.edges[m] = -1
K.weights[m] = np.inf
linc[i].remove(m)
rinc[j].remove(m)
ml = linc[j]
if np.sum(K.edges[ml, 1] == i) > 0:
m = ml[np.flatnonzero(K.edges[ml, 1] == i)]
K.edges[m] = -1
K.weights[m] = np.inf
linc[j].remove(m)
rinc[i].remove(m)
# 3. merge the edges with third part edges
parent[i] = k
parent[j] = k
for p in range(3):
Features[p][k] = Features[p][i] + Features[p][j]
linc, rinc = _remap(K, i, j, k, Features, linc, rinc)
q += 1
# build a tree to encode the results
t = WeightedForest(2 * n - nbcc, parent, height)
return t
def ward_field_segment(F, stop=-1, qmax=-1, verbose=False):
    """Agglomerative function based on a field structure.

    Thin wrapper that delegates to ward_quick_segment, using the
    field's topology and its feature array.

    Parameters
    ----------
    F the input field (graph+feature)
    stop: float, optional
        the stopping criterion. if stop==-1, then no stopping criterion
        is used
    qmax: int, optional
        the maximum number of desired clusters (in the limit of the
        stopping criterion)
    verbose : bool, optional
        If True, print diagnostic information

    Returns
    -------
    u: array of shape (F.V)
        labelling of the graph vertices according to the criterion
    cost array of shape (F.V - 1)
        the cost of each merge step during the clustering procedure

    Notes
    -----
    See ward_quick_segment for more information
    Caveat : only approximate
    """
    return ward_quick_segment(F, F.field, stop, qmax, verbose)
def ward_quick_segment(G, feature, stop=-1, qmax=1, verbose=False):
    """
    Agglomerative function based on a topology-defining graph
    and a feature matrix.

    Parameters
    ----------
    G: labs.graph.WeightedGraph instance
        the input graph (a topological graph essentially)
    feature array of shape (G.V,dim_feature)
        vectorial information related to the graph vertices
    stop : int or float, optional
        the stopping criterion; if stop==-1, then no stopping criterion
        is used
    qmax : int, optional
        the maximum number of desired clusters (in the limit of the
        stopping criterion)
    verbose : bool, optional
        If True, print diagnostic information

    Returns
    -------
    u: array of shape (G.V)
        labelling of the graph vertices according to the criterion
    cost: array of shape (G.V - 1)
        the cost of each merge step during the clustering procedure

    Notes
    -----
    Hopefully a quicker version
    A euclidean distance is used in the feature space
    Caveat : only approximate
    """
    # basic check
    if feature.ndim == 1:
        feature = np.reshape(feature, (-1, 1))
    if feature.shape[0] != G.V:
        raise ValueError(
            "Incompatible dimension for the feature matrix and the graph")
    n = G.V
    if stop == - 1:
        stop = np.inf
    qmax = int(np.minimum(qmax, n - 1))
    t = ward_quick(G, feature, verbose)
    if verbose:
        t.plot()
    # np.int was removed from recent numpy; the builtin int is equivalent
    u1 = np.zeros(n, int)
    u2 = np.zeros(n, int)
    if stop >= 0:
        u1 = t.partition(stop)
    if qmax > 0:
        u2 = t.split(qmax)
    # keep whichever criterion produced the finer clustering
    if u1.max() < u2.max():
        u = u2
    else:
        u = u1
    cost = t.get_height()
    cost = cost[t.isleaf() == False]
    return u, cost
def ward_segment(G, feature, stop=-1, qmax=1, verbose=False):
    """
    Agglomerative function based on a topology-defining graph
    and a feature matrix.

    Parameters
    ----------
    G : graph object
        the input graph (a topological graph essentially)
    feature : array of shape (G.V,dim_feature)
        some vectorial information related to the graph vertices
    stop : int or float, optional
        the stopping criterion. if stop==-1, then no stopping criterion
        is used
    qmax : int, optional
        the maximum number of desired clusters (in the limit of the
        stopping criterion)
    verbose : bool, optional
        If True, print diagnostic information

    Returns
    -------
    u: array of shape (G.V):
        a labelling of the graph vertices according to the criterion
    cost: array of shape (G.V - 1)
        the cost of each merge step during the clustering procedure

    Notes
    -----
    A euclidean distance is used in the feature space
    Caveat : when the number of cc in G (nbcc) is greater than qmax, u
    contains nbcc values, not qmax !
    """
    # basic check
    if feature.ndim == 1:
        feature = np.reshape(feature, (-1, 1))
    if feature.shape[0] != G.V:
        raise ValueError(
            "Incompatible dimension for the feature matrix and the graph")
    # prepare a graph with twice the number of vertices
    n = G.V
    if qmax == -1:
        qmax = n - 1
    if stop == -1:
        stop = np.inf
    qmax = int(np.minimum(qmax, n - 1))
    t = ward(G, feature, verbose)
    # np.int was removed from recent numpy; the builtin int is equivalent
    u1 = np.zeros(n, int)
    u2 = np.zeros(n, int)
    if stop >= 0:
        u1 = t.partition(stop)
    if qmax > 0:
        u2 = t.split(qmax)
    # keep whichever criterion produced the finer clustering
    if u1.max() < u2.max():
        u = u2
    else:
        u = u1
    cost = t.get_height()
    cost = cost[t.isleaf() == False]
    return u, cost
def ward(G, feature, verbose=False):
"""
Agglomerative function based on a topology-defining graph
and a feature matrix.
Parameters
----------
G : graph
the input graph (a topological graph essentially)
feature : array of shape (G.V,dim_feature)
vectorial information related to the graph vertices
verbose : bool, optional
If True, print diagnostic information
Returns
--------
t : ``WeightedForest`` instance
structure that represents the dendrogram
Notes
-----
When G has more than 1 connected component, t is no longer a tree. This
case is handled cleanly now
"""
warn('Function ward from ' +
'nipy.algorithms.clustering.hierrachical_clustering ' +
'deprecated, will be removed',
FutureWarning,
stacklevel=2)
# basic check
if feature.ndim == 1:
feature = np.reshape(feature, (-1, 1))
if feature.shape[0] != G.V:
raise ValueError(
"Incompatible dimension for the feature matrix and the graph")
Features = [np.ones(2 * G.V), np.zeros((2 * G.V, feature.shape[1])),
np.zeros((2 * G.V, feature.shape[1]))]
Features[1][:G.V] = feature
Features[2][:G.V] = feature ** 2
# prepare a graph with twice the number of vertices
# this graph will contain the connectivity information
# along the merges.
n = G.V
nbcc = G.cc().max() + 1
K = _auxiliary_graph(G, Features)
# prepare some variables that are useful tp speed up the algorithm
parent = np.arange(2 * n - nbcc).astype(np.int)
height = np.zeros(2 * n - nbcc)
linc = K.left_incidence()
rinc = K.right_incidence()
# iteratively merge clusters
for q in range(n - nbcc):
# 1. find the lightest edge
m = (K.weights).argmin()
cost = K.weights[m]
k = q + n
i = K.edges[m, 0]
j = K.edges[m, 1]
height[k] = cost
if verbose:
print q, i, j, m, cost
# 2. remove the current edge
K.edges[m] = - 1
K.weights[m] = np.inf
linc[i].remove(m)
rinc[j].remove(m)
ml = linc[j]
if np.sum(K.edges[ml, 1] == i) > 0:
m = ml[np.flatnonzero(K.edges[ml, 1] == i)]
K.edges[m] = -1
K.weights[m] = np.inf
linc[j].remove(m)
rinc[i].remove(m)
# 3. merge the edges with third part edges
parent[i] = k
parent[j] = k
for p in range(3):
Features[p][k] = Features[p][i] + Features[p][j]
linc, rinc = _remap(K, i, j, k, Features, linc, rinc)
# build a tree to encode the results
t = WeightedForest(2 * n - nbcc, parent, height)
return t
#--------------------------------------------------------------------------
#----------------------- Visualization ------------------------------------
# -------------------------------------------------------------------------
def _label_(f, parent, left, labelled):
    """In-order traversal labelling of the subtree(s) rooted at f.

    f may be a scalar node index or a (possibly empty) array of node
    indices.  The 'left' children (left == 1) are labelled first, then
    f itself, then the remaining children, so that leaves end up
    numbered left-to-right.  labelled is updated in place (entries that
    have never been visited keep the value -1) and returned.
    """
    # children of f (empty when f is a leaf, which stops the recursion)
    temp = np.nonzero(parent == f)
    if np.size(temp) > 0:
        # split the children into the 'left' ones and the others
        i = temp[0][np.nonzero(left[temp[0]] == 1)]
        j = temp[0][np.nonzero(left[temp[0]] == 0)]
        labelled = _label_(i, parent, left, labelled)
        labelled[f] = labelled.max() + 1
        labelled = _label_(j, parent, left, labelled)
    if labelled[f] < 0:
        # unvisited node (a leaf): give it the next available label
        labelled[f] = labelled.max() + 1
    return labelled
def _label(parent):
    """Compute a left-to-right plotting label for every node of the
    forest described by the parent array (used by WeightedForest.plot).
    """
    nv = np.size(parent)
    # roots are their own parent
    root = np.nonzero(parent == np.arange(nv))[0]
    # mark the first child of each node as its 'left' child
    left = np.zeros(nv)
    for node in range(nv):
        kids = np.nonzero(parent == node)
        if np.size(kids) > 0:
            left[kids[0][0]] = 1
    left[root] = .5
    # label every tree of the forest by in-order traversal
    labelled = - np.ones(nv)
    for r in range(np.size(root)):
        labelled = _label_(root[r], parent, left, labelled)
    return labelled
| |
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
from reveal_popularity_prediction.features import comment_tree
from reveal_popularity_prediction.features import user_graph
from reveal_popularity_prediction.features import temporal
from reveal_popularity_prediction.features import author
def wrapper_comment_count(graph_snapshot_input):
    """Number of comments in the snapshot's comment tree."""
    return comment_tree.calculate_comment_count(graph_snapshot_input["comment_tree"])


def wrapper_max_depth(graph_snapshot_input):
    """Maximum depth of the snapshot's comment tree."""
    return comment_tree.calculate_max_depth(graph_snapshot_input["comment_tree"])


def wrapper_avg_depth(graph_snapshot_input):
    """Average depth of the snapshot's comment tree."""
    return comment_tree.calculate_avg_depth(graph_snapshot_input["comment_tree"])


def wrapper_max_width(graph_snapshot_input):
    """Maximum width of the snapshot's comment tree."""
    return comment_tree.calculate_max_width(graph_snapshot_input["comment_tree"])


def wrapper_avg_width(graph_snapshot_input):
    """Average width of the snapshot's comment tree."""
    return comment_tree.calculate_avg_width(graph_snapshot_input["comment_tree"])


def wrapper_max_depth_over_max_width(graph_snapshot_input):
    """Max-depth over max-width ratio of the comment tree."""
    return comment_tree.calculate_max_depth_over_max_width(graph_snapshot_input["comment_tree"])


def wrapper_avg_depth_over_width(graph_snapshot_input):
    """Average depth-over-width ratio of the comment tree."""
    return comment_tree.calculate_avg_depth_over_width(graph_snapshot_input["comment_tree"])


def wrapper_comment_tree_hirsch(graph_snapshot_input):
    """Hirsch index of the snapshot's comment tree."""
    return comment_tree.calculate_comment_tree_hirsch(graph_snapshot_input["comment_tree"])


def wrapper_comment_tree_wiener(graph_snapshot_input):
    """Wiener index of the snapshot's comment tree."""
    return comment_tree.calculate_comment_tree_wiener(graph_snapshot_input["comment_tree"])


def wrapper_comment_tree_randic(graph_snapshot_input):
    """Randic index of the snapshot's comment tree."""
    return comment_tree.calculate_comment_tree_randic(graph_snapshot_input["comment_tree"])
def wrapper_user_count(graph_snapshot_input):
    """Number of users in the snapshot's user graph."""
    return user_graph.calculate_user_count(graph_snapshot_input["user_graph"])


def wrapper_user_graph_hirsch(graph_snapshot_input):
    """Hirsch index of the snapshot's user graph."""
    return user_graph.calculate_user_graph_hirsch(graph_snapshot_input["user_graph"])


def wrapper_user_graph_randic(graph_snapshot_input):
    """Randic index of the snapshot's user graph."""
    return user_graph.calculate_user_graph_randic(graph_snapshot_input["user_graph"])


def wrapper_norm_outdegree_entropy(graph_snapshot_input):
    """Normalized out-degree entropy of the user graph."""
    return user_graph.calculate_norm_outdegree_entropy(graph_snapshot_input["user_graph"])


def wrapper_outdegree_entropy(graph_snapshot_input):
    """Out-degree entropy of the user graph."""
    return user_graph.calculate_outdegree_entropy(graph_snapshot_input["user_graph"])


def wrapper_indegree_entropy(graph_snapshot_input):
    """In-degree entropy of the user graph."""
    return user_graph.calculate_indegree_entropy(graph_snapshot_input["user_graph"])


def wrapper_norm_indegree_entropy(graph_snapshot_input):
    """Normalized in-degree entropy of the user graph."""
    return user_graph.calculate_norm_indegree_entropy(graph_snapshot_input["user_graph"])
def wrapper_avg_time_differences_1st_half(graph_snapshot_input):
    """Average inter-comment time difference in the first half of the timestamps."""
    return temporal.calculate_avg_time_differences_1st_half(graph_snapshot_input["timestamp_list"])


def wrapper_avg_time_differences_2nd_half(graph_snapshot_input):
    """Average inter-comment time difference in the second half of the timestamps."""
    return temporal.calculate_avg_time_differences_2nd_half(graph_snapshot_input["timestamp_list"])


def wrapper_time_differences_std(graph_snapshot_input):
    """Standard deviation of the inter-comment time differences."""
    return temporal.calculate_time_differences_std(graph_snapshot_input["timestamp_list"])


def wrapper_last_comment_lifetime(graph_snapshot_input):
    """Lifetime of the last comment relative to the tweet timestamp."""
    return temporal.calculate_last_comment_lifetime(graph_snapshot_input["timestamp_list"],
                                                    graph_snapshot_input["tweet_timestamp"])
def wrapper_author_privacy_status_youtube(social_context_input):
author_privacy_status_youtube = author.calculate_author_privacy_status_youtube(social_context_input["author"])
return author_privacy_status_youtube
def wrapper_author_is_linked_youtube(social_context_input):
author_is_linked_youtube = author.calculate_author_is_linked_youtube(social_context_input["author"])
return author_is_linked_youtube
def wrapper_author_long_uploads_status_youtube(social_context_input):
author_long_uploads_status_youtube = author.calculate_author_long_uploads_status_youtube(social_context_input["author"])
return author_long_uploads_status_youtube
def wrapper_author_comment_count_youtube(social_context_input):
author_comment_count_youtube = author.calculate_author_comment_count_youtube(social_context_input["author"])
return author_comment_count_youtube
def wrapper_author_comment_rate_youtube(social_context_input):
author_comment_rate_youtube = author.calculate_author_comment_rate_youtube(social_context_input["author"], social_context_input["initial_post"])
return author_comment_rate_youtube
def wrapper_author_view_count_youtube(social_context_input):
author_view_count_youtube = author.calculate_author_view_count_youtube(social_context_input["author"], social_context_input["initial_post"])
return author_view_count_youtube
def wrapper_author_view_rate_youtube(social_context_input):
author_view_rate_youtube = author.calculate_author_view_rate_youtube(social_context_input["author"], social_context_input["initial_post"])
return author_view_rate_youtube
def wrapper_author_video_upload_count_youtube(social_context_input):
author_video_upload_count_youtube = author.calculate_author_video_upload_count_youtube(social_context_input["author"])
return author_video_upload_count_youtube
def wrapper_author_video_upload_rate_youtube(social_context_input):
author_video_upload_rate_youtube = author.calculate_author_video_upload_rate_youtube(social_context_input["author"], social_context_input["initial_post"])
return author_video_upload_rate_youtube
def wrapper_author_subscriber_count_youtube(social_context_input):
author_subscriber_count_youtube = author.calculate_author_subscriber_count_youtube(social_context_input["author"])
return author_subscriber_count_youtube
def wrapper_author_subscriber_rate_youtube(social_context_input):
    """Wrapper: compute the author_subscriber_rate_youtube feature from author and initial post."""
    return author.calculate_author_subscriber_rate_youtube(
        social_context_input["author"], social_context_input["initial_post"])
def wrapper_author_hidden_subscriber_count_youtube(social_context_input):
    """Wrapper: compute the author_hidden_subscriber_count_youtube feature from the author record."""
    return author.calculate_author_hidden_subscriber_count_youtube(social_context_input["author"])
def wrapper_author_channel_lifetime_youtube(social_context_input):
    """Wrapper: compute the author_channel_lifetime_youtube feature from author and initial post."""
    return author.calculate_author_channel_lifetime_youtube(
        social_context_input["author"], social_context_input["initial_post"])
def wrapper_author_has_verified_mail_reddit(social_context_input):
    """Wrapper: compute the author_has_verified_mail_reddit feature from the author record."""
    return author.calculate_author_has_verified_mail_reddit(social_context_input["author"])
def wrapper_author_account_lifetime_reddit(social_context_input):
    """Wrapper: compute the author_account_lifetime_reddit feature from author and initial post."""
    return author.calculate_author_account_lifetime_reddit(
        social_context_input["author"], social_context_input["initial_post"])
def wrapper_author_hide_from_robots_reddit(social_context_input):
    """Wrapper: compute the author_hide_from_robots_reddit feature from the author record."""
    return author.calculate_author_hide_from_robots_reddit(social_context_input["author"])
def wrapper_author_is_mod_reddit(social_context_input):
    """Wrapper: compute the author_is_mod_reddit feature from the author record."""
    return author.calculate_author_is_mod_reddit(social_context_input["author"])
def wrapper_author_link_karma_reddit(social_context_input):
    """Wrapper: compute the author_link_karma_reddit feature from the author record."""
    return author.calculate_author_link_karma_reddit(social_context_input["author"])
def wrapper_author_link_karma_rate_reddit(social_context_input):
    """Wrapper: compute the author_link_karma_rate_reddit feature from author and initial post."""
    return author.calculate_author_link_karma_rate_reddit(
        social_context_input["author"], social_context_input["initial_post"])
def wrapper_author_comment_karma_reddit(social_context_input):
    """Wrapper: compute the author_comment_karma_reddit feature from the author record."""
    return author.calculate_author_comment_karma_reddit(social_context_input["author"])
def wrapper_author_comment_karma_rate_reddit(social_context_input):
    """Wrapper: compute the author_comment_karma_rate_reddit feature from author and initial post."""
    return author.calculate_author_comment_karma_rate_reddit(
        social_context_input["author"], social_context_input["initial_post"])
def wrapper_author_is_gold_reddit(social_context_input):
    """Wrapper: compute the author_is_gold_reddit feature from the author record."""
    return author.calculate_author_is_gold_reddit(social_context_input["author"])
| |
"""SCons.Tool.tex
Tool-specific initialization for TeX.
Generates .dvi files from .tex files
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/tex.py 2009/09/04 16:33:07 david"
import os.path
import re
import string
import shutil
import SCons.Action
import SCons.Node
import SCons.Node.FS
import SCons.Util
import SCons.Scanner.LaTeX
# Module-level debug flag: when True the tool prints its search/decision trace.
Verbose = False
# Module-global rerun flag shared between InternalLaTeXAuxAction and check_MD5.
must_rerun_latex = True
# these are files that just need to be checked for changes and then rerun latex
check_suffixes = ['.toc', '.lof', '.lot', '.out', '.nav', '.snm']
# these are files that require bibtex or makeindex to be run when they change
all_suffixes = check_suffixes + ['.bbl', '.idx', '.nlo', '.glo', '.acn']
#
# regular expressions used to search for Latex features
# or outputs that require rerunning latex
#
# search for all .aux files opened by latex (recorded in the .fls file)
openout_aux_re = re.compile(r"INPUT *(.*\.aux)")
#printindex_re = re.compile(r"^[^%]*\\printindex", re.MULTILINE)
#printnomenclature_re = re.compile(r"^[^%]*\\printnomenclature", re.MULTILINE)
#printglossary_re = re.compile(r"^[^%]*\\printglossary", re.MULTILINE)
# search to find rerun warnings
warning_rerun_str = '(^LaTeX Warning:.*Rerun)|(^Package \w+ Warning:.*Rerun)'
warning_rerun_re = re.compile(warning_rerun_str, re.MULTILINE)
# search to find citation rerun warnings
rerun_citations_str = "^LaTeX Warning:.*\n.*Rerun to get citations correct"
rerun_citations_re = re.compile(rerun_citations_str, re.MULTILINE)
# search to find undefined references or citations warnings
undefined_references_str = '(^LaTeX Warning:.*undefined references)|(^Package \w+ Warning:.*undefined citations)'
undefined_references_re = re.compile(undefined_references_str, re.MULTILINE)
# used by the emitter
# NOTE(review): this pattern matches any non-empty source, so the emitter
# always treats the .aux file as produced -- presumably intentional; confirm.
auxfile_re = re.compile(r".", re.MULTILINE)
tableofcontents_re = re.compile(r"^[^%\n]*\\tableofcontents", re.MULTILINE)
makeindex_re = re.compile(r"^[^%\n]*\\makeindex", re.MULTILINE)
bibliography_re = re.compile(r"^[^%\n]*\\bibliography", re.MULTILINE)
listoffigures_re = re.compile(r"^[^%\n]*\\listoffigures", re.MULTILINE)
listoftables_re = re.compile(r"^[^%\n]*\\listoftables", re.MULTILINE)
hyperref_re = re.compile(r"^[^%\n]*\\usepackage.*\{hyperref\}", re.MULTILINE)
makenomenclature_re = re.compile(r"^[^%\n]*\\makenomenclature", re.MULTILINE)
makeglossary_re = re.compile(r"^[^%\n]*\\makeglossary", re.MULTILINE)
makeglossaries_re = re.compile(r"^[^%\n]*\\makeglossaries", re.MULTILINE)
# NOTE(review): acronyms reuse the \makeglossaries pattern (the glossaries
# package drives both) -- verify this is intended rather than a copy/paste slip.
makeacronyms_re = re.compile(r"^[^%\n]*\\makeglossaries", re.MULTILINE)
beamer_re = re.compile(r"^[^%\n]*\\documentclass\{beamer\}", re.MULTILINE)
# search to find all files included by Latex
include_re = re.compile(r'^[^%\n]*\\(?:include|input){([^}]*)}', re.MULTILINE)
# search to find all graphics files included by Latex
includegraphics_re = re.compile(r'^[^%\n]*\\(?:includegraphics(?:\[[^\]]+\])?){([^}]*)}', re.MULTILINE)
# search to find all files opened by Latex (recorded in .log file)
openout_re = re.compile(r"OUTPUT *(.*)")
# list of graphics file extensions for TeX and LaTeX
TexGraphics = SCons.Scanner.LaTeX.TexGraphics
LatexGraphics = SCons.Scanner.LaTeX.LatexGraphics
# An Action sufficient to build any generic tex file.
TeXAction = None
# An action to build a latex file. This action might be needed more
# than once if we are dealing with labels and bibtex.
LaTeXAction = None
# An action to run BibTeX on a file.
BibTeXAction = None
# An action to run MakeIndex on a file.
MakeIndexAction = None
# An action to run MakeIndex (for nomencl) on a file.
MakeNclAction = None
# An action to run MakeIndex (for glossary) on a file.
MakeGlossaryAction = None
# An action to run MakeIndex (for acronyms) on a file.
MakeAcronymsAction = None
# Used as a return value of modify_env_var if the variable is not set.
_null = SCons.Scanner.LaTeX._null
# Helper that prepends a directory to an env['ENV'] path variable,
# returning the previous value (or _null when unset) for later restore.
modify_env_var = SCons.Scanner.LaTeX.modify_env_var
def FindFile(name,suffixes,paths,env,requireExt=False):
    """Search 'paths' for 'name', returning a File node or None.

    When the name has no extension of its own, each extension in
    'suffixes' is tried in turn per directory.  With requireExt=True a
    bare name (no extension) is searched as-is only after splitting.
    """
    if requireExt:
        name,ext = SCons.Util.splitext(name)
        # if the user gave an extension use it.
        if ext:
            name = name + ext
    if Verbose:
        print " searching for '%s' with extensions: " % name,suffixes
    # First directory that yields a hit wins; search order follows 'paths'.
    for path in paths:
        testName = os.path.join(path,name)
        if Verbose:
            print " look for '%s'" % testName
        if os.path.exists(testName):
            if Verbose:
                print " found '%s'" % testName
            return env.fs.File(testName)
        else:
            name_ext = SCons.Util.splitext(testName)[1]
            if name_ext:
                # name already carries an extension but the file is absent:
                # do not try alternate suffixes, just move to the next path.
                continue
            # if no suffix try adding those passed in
            for suffix in suffixes:
                testNameExt = testName + suffix
                if Verbose:
                    print " look for '%s'" % testNameExt
                if os.path.exists(testNameExt):
                    if Verbose:
                        print " found '%s'" % testNameExt
                    return env.fs.File(testNameExt)
    if Verbose:
        print " did not find '%s'" % name
    return None
def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None):
    """A builder for LaTeX files that checks the output in the aux file
    and decides how many times to use LaTeXAction, and BibTeXAction.

    Runs XXXLaTeXAction up to $LATEXRETRIES times, re-running whenever
    auxiliary files (.toc, .bbl, .idx, ...) change between passes or the
    log asks for a rerun.  Also triggers bibtex/makeindex/nomenclature/
    glossary/acronym processing as needed.  Returns 0 on success or the
    first non-zero action result.
    """
    global must_rerun_latex
    # This routine is called with two actions. In this file for DVI builds
    # with LaTeXAction and from the pdflatex.py with PDFLaTeXAction
    # set this up now for the case where the user requests a different extension
    # for the target filename
    if (XXXLaTeXAction == LaTeXAction):
        callerSuffix = ".dvi"
    else:
        callerSuffix = env['PDFSUFFIX']
    basename = SCons.Util.splitext(str(source[0]))[0]
    basedir = os.path.split(str(source[0]))[0]
    basefile = os.path.split(str(basename))[1]
    abspath = os.path.abspath(basedir)
    # NOTE(review): targetext is computed but never used below.
    targetext = os.path.splitext(str(target[0]))[1]
    targetdir = os.path.split(str(target[0]))[0]
    # Point TEXINPUTS-style variables at the source dir, remembering the
    # previous values so they can be restored at the end.
    saved_env = {}
    for var in SCons.Scanner.LaTeX.LaTeX.env_variables:
        saved_env[var] = modify_env_var(env, var, abspath)
    # Create base file names with the target directory since the auxiliary files
    # will be made there. That's because the *COM variables have the cd
    # command in the prolog. We check
    # for the existence of files before opening them--even ones like the
    # aux file that TeX always creates--to make it possible to write tests
    # with stubs that don't necessarily generate all of the same files.
    targetbase = os.path.join(targetdir, basefile)
    # if there is a \makeindex there will be a .idx and thus
    # we have to run makeindex at least once to keep the build
    # happy even if there is no index.
    # Same for glossaries and nomenclature
    src_content = source[0].get_text_contents()
    run_makeindex = makeindex_re.search(src_content) and not os.path.exists(targetbase + '.idx')
    run_nomenclature = makenomenclature_re.search(src_content) and not os.path.exists(targetbase + '.nlo')
    run_glossary = makeglossary_re.search(src_content) and not os.path.exists(targetbase + '.glo')
    run_glossaries = makeglossaries_re.search(src_content) and not os.path.exists(targetbase + '.glo')
    run_acronyms = makeacronyms_re.search(src_content) and not os.path.exists(targetbase + '.acn')
    # Snapshot the content signatures of every auxiliary file so changes
    # between passes can be detected.
    saved_hashes = {}
    suffix_nodes = {}
    for suffix in all_suffixes:
        theNode = env.fs.File(targetbase + suffix)
        suffix_nodes[suffix] = theNode
        saved_hashes[suffix] = theNode.get_csig()
    if Verbose:
        print "hashes: ",saved_hashes
    must_rerun_latex = True
    #
    # routine to update MD5 hash and compare
    #
    # TODO(1.5): nested scopes
    def check_MD5(filenode, suffix, saved_hashes=saved_hashes, targetbase=targetbase):
        # Returns True (and sets the global rerun flag) when the file's
        # signature differs from the last recorded one.
        global must_rerun_latex
        # two calls to clear old csig
        filenode.clear_memoized_values()
        filenode.ninfo = filenode.new_ninfo()
        new_md5 = filenode.get_csig()
        if saved_hashes[suffix] == new_md5:
            if Verbose:
                print "file %s not changed" % (targetbase+suffix)
            return False # unchanged
        saved_hashes[suffix] = new_md5
        must_rerun_latex = True
        if Verbose:
            print "file %s changed, rerunning Latex, new hash = " % (targetbase+suffix), new_md5
        return True # changed
    # generate the file name that latex will generate
    resultfilename = targetbase + callerSuffix
    count = 0
    while (must_rerun_latex and count < int(env.subst('$LATEXRETRIES'))) :
        result = XXXLaTeXAction(target, source, env)
        if result != 0:
            return result
        count = count + 1
        must_rerun_latex = False
        # Decide if various things need to be run, or run again.
        # Read the log file to find warnings/errors
        logfilename = targetbase + '.log'
        logContent = ''
        if os.path.exists(logfilename):
            logContent = open(logfilename, "rb").read()
        # Read the fls file to find all .aux files
        flsfilename = targetbase + '.fls'
        flsContent = ''
        auxfiles = []
        if os.path.exists(flsfilename):
            flsContent = open(flsfilename, "rb").read()
            auxfiles = openout_aux_re.findall(flsContent)
        if Verbose:
            print "auxfiles ",auxfiles
        # Now decide if bibtex will need to be run.
        # The information that bibtex reads from the .aux file is
        # pass-independent. If we find (below) that the .bbl file is unchanged,
        # then the last latex saw a correct bibliography.
        # Therefore only do this on the first pass
        if count == 1:
            for auxfilename in auxfiles:
                target_aux = os.path.join(targetdir, auxfilename)
                if os.path.exists(target_aux):
                    content = open(target_aux, "rb").read()
                    if string.find(content, "bibdata") != -1:
                        if Verbose:
                            print "Need to run bibtex"
                        bibfile = env.fs.File(targetbase)
                        result = BibTeXAction(bibfile, bibfile, env)
                        if result != 0:
                            print env['BIBTEX']," returned an error, check the blg file"
                            return result
                        must_rerun_latex = check_MD5(suffix_nodes['.bbl'],'.bbl')
                        break
        # Now decide if latex will need to be run again due to index.
        if check_MD5(suffix_nodes['.idx'],'.idx') or (count == 1 and run_makeindex):
            # We must run makeindex
            if Verbose:
                print "Need to run makeindex"
            idxfile = suffix_nodes['.idx']
            result = MakeIndexAction(idxfile, idxfile, env)
            if result != 0:
                print env['MAKEINDEX']," returned an error, check the ilg file"
                return result
        # TO-DO: need to add a way for the user to extend this list for whatever
        # auxiliary files they create in other (or their own) packages
        # Harder is case is where an action needs to be called -- that should be rare (I hope?)
        for index in check_suffixes:
            check_MD5(suffix_nodes[index],index)
        # Now decide if latex will need to be run again due to nomenclature.
        if check_MD5(suffix_nodes['.nlo'],'.nlo') or (count == 1 and run_nomenclature):
            # We must run makeindex
            if Verbose:
                print "Need to run makeindex for nomenclature"
            nclfile = suffix_nodes['.nlo']
            result = MakeNclAction(nclfile, nclfile, env)
            if result != 0:
                print env['MAKENCL']," (nomenclature) returned an error, check the nlg file"
                return result
        # Now decide if latex will need to be run again due to glossary.
        if check_MD5(suffix_nodes['.glo'],'.glo') or (count == 1 and run_glossaries) or (count == 1 and run_glossary):
            # We must run makeindex
            if Verbose:
                print "Need to run makeindex for glossary"
            glofile = suffix_nodes['.glo']
            result = MakeGlossaryAction(glofile, glofile, env)
            if result != 0:
                print env['MAKEGLOSSARY']," (glossary) returned an error, check the glg file"
                return result
        # Now decide if latex will need to be run again due to acronyms.
        if check_MD5(suffix_nodes['.acn'],'.acn') or (count == 1 and run_acronyms):
            # We must run makeindex
            if Verbose:
                print "Need to run makeindex for acronyms"
            acrfile = suffix_nodes['.acn']
            result = MakeAcronymsAction(acrfile, acrfile, env)
            if result != 0:
                print env['MAKEACRONYMS']," (acronymns) returned an error, check the alg file"
                return result
        # Now decide if latex needs to be run yet again to resolve warnings.
        if warning_rerun_re.search(logContent):
            must_rerun_latex = True
            if Verbose:
                print "rerun Latex due to latex or package rerun warning"
        if rerun_citations_re.search(logContent):
            must_rerun_latex = True
            if Verbose:
                print "rerun Latex due to 'Rerun to get citations correct' warning"
        if undefined_references_re.search(logContent):
            must_rerun_latex = True
            if Verbose:
                print "rerun Latex due to undefined references or citations"
        if (count >= int(env.subst('$LATEXRETRIES')) and must_rerun_latex):
            print "reached max number of retries on Latex ,",int(env.subst('$LATEXRETRIES'))
    # end of while loop
    # rename Latex's output to what the target name is
    if not (str(target[0]) == resultfilename and os.path.exists(resultfilename)):
        if os.path.exists(resultfilename):
            print "move %s to %s" % (resultfilename, str(target[0]), )
            shutil.move(resultfilename,str(target[0]))
    # Original comment (when TEXPICTS was not restored):
    # The TEXPICTS enviroment variable is needed by a dvi -> pdf step
    # later on Mac OSX so leave it
    #
    # It is also used when searching for pictures (implicit dependencies).
    # Why not set the variable again in the respective builder instead
    # of leaving local modifications in the environment? What if multiple
    # latex builds in different directories need different TEXPICTS?
    for var in SCons.Scanner.LaTeX.LaTeX.env_variables:
        if var == 'TEXPICTS':
            continue
        if saved_env[var] is _null:
            try:
                del env['ENV'][var]
            except KeyError:
                pass # was never set
        else:
            env['ENV'][var] = saved_env[var]
    return result
def LaTeXAuxAction(target = None, source= None, env=None):
    """Run the DVI-producing LaTeX action with aux-file rerun bookkeeping."""
    return InternalLaTeXAuxAction( LaTeXAction, target, source, env )
# Detects \documentstyle / \documentclass, the LaTeX markers.
LaTeX_re = re.compile("\\\\document(style|class)")
def is_LaTeX(flist):
    """Return 1 when any node in flist looks LaTeX-flavored, else 0."""
    for node in flist:
        if LaTeX_re.search(node.get_text_contents()):
            return 1
    return 0
def TeXLaTeXFunction(target = None, source= None, env=None):
    """A builder for TeX and LaTeX that scans the source file to
    decide the "flavor" of the source and then executes the appropriate
    program.

    Returns the exit status of the chosen action; a non-zero status is
    reported on stdout before being returned.
    """
    if is_LaTeX(source):
        result = LaTeXAuxAction(target,source,env)
        if result != 0:
            print env['LATEX']," returned an error, check the log file"
    else:
        result = TeXAction(target,source,env)
        if result != 0:
            print env['TEX']," returned an error, check the log file"
    return result
def TeXLaTeXStrFunction(target = None, source= None, env=None):
    """A strfunction for TeX and LaTeX that scans the source file to
    decide the "flavor" of the source and then returns the appropriate
    command string."""
    # Only produce a display string in no-exec (dry-run) mode.
    if not env.GetOption("no_exec"):
        return ''
    if is_LaTeX(source):
        return env.subst('$LATEXCOM',0,target,source)+" ..."
    return env.subst("$TEXCOM",0,target,source)+" ..."
def tex_eps_emitter(target, source, env):
    """An emitter for TeX and LaTeX sources when
    executing tex or latex. It will accept .ps and .eps
    graphics files
    """
    return tex_emitter_core(target, source, env, TexGraphics)
def tex_pdf_emitter(target, source, env):
    """An emitter for TeX and LaTeX sources when
    executing pdftex or pdflatex. It will accept graphics
    files of types .pdf, .jpg, .png, .gif, and .tif
    """
    return tex_emitter_core(target, source, env, LatexGraphics)
def ScanFiles(theFile, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir):
# for theFile (a Node) update any file_tests and search for graphics files
# then find all included files and call ScanFiles for each of them
content = theFile.get_text_contents()
if Verbose:
print " scanning ",str(theFile)
for i in range(len(file_tests_search)):
if file_tests[i][0] is None:
file_tests[i][0] = file_tests_search[i].search(content)
# recursively call this on each of the included files
inc_files = [ ]
inc_files.extend( include_re.findall(content) )
if Verbose:
print "files included by '%s': "%str(theFile),inc_files
# inc_files is list of file names as given. need to find them
# using TEXINPUTS paths.
for src in inc_files:
srcNode = srcNode = FindFile(src,['.tex','.ltx','.latex'],paths,env,requireExt=False)
if srcNode is not None:
file_test = ScanFiles(srcNode, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir)
if Verbose:
print " done scanning ",str(theFile)
return file_tests
def tex_emitter_core(target, source, env, graphics_extensions):
    """An emitter for TeX and LaTeX sources.
    For LaTeX sources we try and find the common created files that
    are needed on subsequent runs of latex to finish tables of contents,
    bibliographies, indices, lists of figures, and hyperlink references.

    Registers every such auxiliary file as a SideEffect of (and Clean
    target for) target[0], then returns the unchanged (target, source).
    """
    basename = SCons.Util.splitext(str(source[0]))[0]
    basefile = os.path.split(str(basename))[1]
    targetdir = os.path.split(str(target[0]))[0]
    targetbase = os.path.join(targetdir, basefile)
    basedir = os.path.split(str(source[0]))[0]
    abspath = os.path.abspath(basedir)
    # remember the source dir on the target node for later scanners
    target[0].attributes.path = abspath
    #
    # file names we will make use of in searching the sources and log file
    #
    emit_suffixes = ['.aux', '.log', '.ilg', '.blg', '.nls', '.nlg', '.gls', '.glg', '.alg'] + all_suffixes
    auxfilename = targetbase + '.aux'
    logfilename = targetbase + '.log'
    flsfilename = targetbase + '.fls'
    # .aux/.log/.fls are always produced, regardless of document features
    env.SideEffect(auxfilename,target[0])
    env.SideEffect(logfilename,target[0])
    env.SideEffect(flsfilename,target[0])
    if Verbose:
        print "side effect :",auxfilename,logfilename,flsfilename
    env.Clean(target[0],auxfilename)
    env.Clean(target[0],logfilename)
    env.Clean(target[0],flsfilename)
    content = source[0].get_text_contents()
    # NOTE(review): these four *_exists flags are computed but not used below.
    idx_exists = os.path.exists(targetbase + '.idx')
    nlo_exists = os.path.exists(targetbase + '.nlo')
    glo_exists = os.path.exists(targetbase + '.glo')
    acr_exists = os.path.exists(targetbase + '.acn')
    # set up list with the regular expressions
    # we use to find features used
    file_tests_search = [auxfile_re,
                         makeindex_re,
                         bibliography_re,
                         tableofcontents_re,
                         listoffigures_re,
                         listoftables_re,
                         hyperref_re,
                         makenomenclature_re,
                         makeglossary_re,
                         makeglossaries_re,
                         makeacronyms_re,
                         beamer_re ]
    # set up list with the file suffixes that need emitting
    # when a feature is found (parallel to file_tests_search above)
    file_tests_suff = [['.aux'],
                       ['.idx', '.ind', '.ilg'],
                       ['.bbl', '.blg'],
                       ['.toc'],
                       ['.lof'],
                       ['.lot'],
                       ['.out'],
                       ['.nlo', '.nls', '.nlg'],
                       ['.glo', '.gls', '.glg'],
                       ['.glo', '.gls', '.glg'],
                       ['.acn', '.acr', '.alg'],
                       ['.nav', '.snm', '.out', '.toc'] ]
    # build the list of lists
    file_tests = []
    for i in range(len(file_tests_search)):
        file_tests.append( [None, file_tests_suff[i]] )
    # TO-DO: need to add a way for the user to extend this list for whatever
    # auxiliary files they create in other (or their own) packages
    # get path list from both env['TEXINPUTS'] and env['ENV']['TEXINPUTS']
    savedpath = modify_env_var(env, 'TEXINPUTS', abspath)
    paths = env['ENV']['TEXINPUTS']
    if SCons.Util.is_List(paths):
        pass
    else:
        # Split at os.pathsep to convert into absolute path
        # TODO(1.5)
        #paths = paths.split(os.pathsep)
        paths = string.split(paths, os.pathsep)
    # now that we have the path list restore the env
    if savedpath is _null:
        try:
            del env['ENV']['TEXINPUTS']
        except KeyError:
            pass # was never set
    else:
        env['ENV']['TEXINPUTS'] = savedpath
    if Verbose:
        print "search path ",paths
    # scan the source (and everything it includes) for feature usage
    file_tests = ScanFiles(source[0], target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir)
    for (theSearch,suffix_list) in file_tests:
        if theSearch:
            for suffix in suffix_list:
                env.SideEffect(targetbase + suffix,target[0])
                if Verbose:
                    print "side effect :",targetbase + suffix
                env.Clean(target[0],targetbase + suffix)
    # read fls file to get all other files that latex creates and will read on the next pass
    # remove files from list that we explicitly dealt with above
    if os.path.exists(flsfilename):
        content = open(flsfilename, "rb").read()
        out_files = openout_re.findall(content)
        myfiles = [auxfilename, logfilename, flsfilename, targetbase+'.dvi',targetbase+'.pdf']
        for filename in out_files[:]:
            if filename in myfiles:
                out_files.remove(filename)
        env.SideEffect(out_files,target[0])
        if Verbose:
            print "side effect :",out_files
        env.Clean(target[0],out_files)
    return (target, source)
TeXLaTeXAction = None
def generate(env):
    """Add Builders and construction variables for TeX to an Environment.

    Creates the shared TeX/LaTeX dispatch Action once, installs the
    common construction variables, and wires the .tex suffix of the DVI
    builder to that Action and to the eps-accepting emitter.
    """
    global TeXLaTeXAction
    if TeXLaTeXAction is None:
        TeXLaTeXAction = SCons.Action.Action(TeXLaTeXFunction,
                              strfunction=TeXLaTeXStrFunction)
    env.AppendUnique(LATEXSUFFIXES=SCons.Tool.LaTeXSuffixes)
    generate_common(env)
    # import here (not at module top) to avoid a circular tool import
    import dvi
    dvi.generate(env)
    bld = env['BUILDERS']['DVI']
    bld.add_action('.tex', TeXLaTeXAction)
    bld.add_emitter('.tex', tex_eps_emitter)
def generate_common(env):
    """Add internal Builders and construction variables for LaTeX to an Environment.

    Shared between the DVI (tex/latex) and PDF (pdftex/pdflatex) tools:
    lazily creates the module-level Actions, then sets the command-line
    construction variables for tex, latex, bibtex and the various
    makeindex flavors.
    """
    # A generic tex file Action, sufficient for all tex files.
    global TeXAction
    if TeXAction is None:
        TeXAction = SCons.Action.Action("$TEXCOM", "$TEXCOMSTR")
    # An Action to build a latex file. This might be needed more
    # than once if we are dealing with labels and bibtex.
    global LaTeXAction
    if LaTeXAction is None:
        LaTeXAction = SCons.Action.Action("$LATEXCOM", "$LATEXCOMSTR")
    # Define an action to run BibTeX on a file.
    global BibTeXAction
    if BibTeXAction is None:
        BibTeXAction = SCons.Action.Action("$BIBTEXCOM", "$BIBTEXCOMSTR")
    # Define an action to run MakeIndex on a file.
    global MakeIndexAction
    if MakeIndexAction is None:
        MakeIndexAction = SCons.Action.Action("$MAKEINDEXCOM", "$MAKEINDEXCOMSTR")
    # Define an action to run MakeIndex on a file for nomenclatures.
    global MakeNclAction
    if MakeNclAction is None:
        MakeNclAction = SCons.Action.Action("$MAKENCLCOM", "$MAKENCLCOMSTR")
    # Define an action to run MakeIndex on a file for glossaries.
    global MakeGlossaryAction
    if MakeGlossaryAction is None:
        MakeGlossaryAction = SCons.Action.Action("$MAKEGLOSSARYCOM", "$MAKEGLOSSARYCOMSTR")
    # Define an action to run MakeIndex on a file for acronyms.
    global MakeAcronymsAction
    if MakeAcronymsAction is None:
        MakeAcronymsAction = SCons.Action.Action("$MAKEACRONYMSCOM", "$MAKEACRONYMSCOMSTR")
    # Plain TeX / pdfTeX command lines; -recorder makes TeX write the .fls
    # file that the aux-action relies on.
    env['TEX'] = 'tex'
    env['TEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
    env['TEXCOM'] = 'cd ${TARGET.dir} && $TEX $TEXFLAGS ${SOURCE.file}'
    env['PDFTEX'] = 'pdftex'
    env['PDFTEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
    env['PDFTEXCOM'] = 'cd ${TARGET.dir} && $PDFTEX $PDFTEXFLAGS ${SOURCE.file}'
    # LaTeX / pdfLaTeX command lines and the rerun limit.
    env['LATEX'] = 'latex'
    env['LATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
    env['LATEXCOM'] = 'cd ${TARGET.dir} && $LATEX $LATEXFLAGS ${SOURCE.file}'
    env['LATEXRETRIES'] = 3
    env['PDFLATEX'] = 'pdflatex'
    env['PDFLATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
    env['PDFLATEXCOM'] = 'cd ${TARGET.dir} && $PDFLATEX $PDFLATEXFLAGS ${SOURCE.file}'
    # BibTeX and the makeindex variants (index, glossary, acronyms,
    # nomenclature) all run from the target directory.
    env['BIBTEX'] = 'bibtex'
    env['BIBTEXFLAGS'] = SCons.Util.CLVar('')
    env['BIBTEXCOM'] = 'cd ${TARGET.dir} && $BIBTEX $BIBTEXFLAGS ${SOURCE.filebase}'
    env['MAKEINDEX'] = 'makeindex'
    env['MAKEINDEXFLAGS'] = SCons.Util.CLVar('')
    env['MAKEINDEXCOM'] = 'cd ${TARGET.dir} && $MAKEINDEX $MAKEINDEXFLAGS ${SOURCE.file}'
    env['MAKEGLOSSARY'] = 'makeindex'
    env['MAKEGLOSSARYSTYLE'] = '${SOURCE.filebase}.ist'
    env['MAKEGLOSSARYFLAGS'] = SCons.Util.CLVar('-s ${MAKEGLOSSARYSTYLE} -t ${SOURCE.filebase}.glg')
    env['MAKEGLOSSARYCOM'] = 'cd ${TARGET.dir} && $MAKEGLOSSARY ${SOURCE.filebase}.glo $MAKEGLOSSARYFLAGS -o ${SOURCE.filebase}.gls'
    env['MAKEACRONYMS'] = 'makeindex'
    env['MAKEACRONYMSSTYLE'] = '${SOURCE.filebase}.ist'
    env['MAKEACRONYMSFLAGS'] = SCons.Util.CLVar('-s ${MAKEACRONYMSSTYLE} -t ${SOURCE.filebase}.alg')
    env['MAKEACRONYMSCOM'] = 'cd ${TARGET.dir} && $MAKEACRONYMS ${SOURCE.filebase}.acn $MAKEACRONYMSFLAGS -o ${SOURCE.filebase}.acr'
    env['MAKENCL'] = 'makeindex'
    env['MAKENCLSTYLE'] = 'nomencl.ist'
    env['MAKENCLFLAGS'] = '-s ${MAKENCLSTYLE} -t ${SOURCE.filebase}.nlg'
    env['MAKENCLCOM'] = 'cd ${TARGET.dir} && $MAKENCL ${SOURCE.filebase}.nlo $MAKENCLFLAGS -o ${SOURCE.filebase}.nls'
def exists(env):
    """Return a true value when the 'tex' executable can be detected."""
    return env.Detect('tex')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| |
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import interface
from neutron.agent.linux import iptables_manager
from neutron.common import constants as constants
from neutron.common import log
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.services.metering.drivers import abstract_driver
LOG = logging.getLogger(__name__)
# l3 router namespaces are named qrouter-<router_id>
NS_PREFIX = 'qrouter-'
# common wrapper prefix for all metering iptables chains
WRAP_NAME = 'neutron-meter'
# gateway (external) interfaces are named qg-<port_id>
EXTERNAL_DEV_PREFIX = 'qg-'
# chain from which every per-label rules chain is jumped to
TOP_CHAIN = WRAP_NAME + "-FORWARD"
# infixes used when deriving per-label chain names
RULE = '-r-'
LABEL = '-l-'
IptablesDriverOpts = [
    cfg.StrOpt('interface_driver',
               help=_("The driver used to manage the virtual "
                      "interface.")),
    cfg.BoolOpt('use_namespaces', default=True,
                help=_("Allow overlapping IP."))
]
# register helper/driver options on the global config at import time
config.register_root_helper(cfg.CONF)
cfg.CONF.register_opts(interface.OPTS)
cfg.CONF.register_opts(IptablesDriverOpts)
class IptablesManagerTransaction(object):
    """Reference-counted transaction around an IptablesManager.

    Nested transactions on the same manager are counted; only when the
    outermost one exits is manager.apply() invoked, so a batch of chain
    and rule edits is flushed to iptables exactly once.
    """

    # shared map: IptablesManager -> open-transaction depth
    __transactions = {}

    def __init__(self, im):
        self.im = im
        self.__transactions[im] = self.__transactions.get(im, 0) + 1

    def __enter__(self):
        return self.im

    def __exit__(self, type, value, traceback):
        depth = self.__transactions.get(self.im)
        if depth == 1:
            # outermost transaction: flush and forget the counter
            self.im.apply()
            del self.__transactions[self.im]
        else:
            self.__transactions[self.im] = depth - 1
class RouterWithMetering(object):
    """Bundle a router dict with its per-namespace iptables state."""

    def __init__(self, conf, router):
        self.conf = conf
        self.id = router['id']
        self.router = router
        self.root_helper = config.get_root_helper(self.conf)
        # manager operating inside the router's namespace (if any),
        # with all chains wrapped under the metering prefix
        self.iptables_manager = iptables_manager.IptablesManager(
            root_helper=self.root_helper,
            namespace=self.ns_name(),
            binary_name=WRAP_NAME)
        # label id -> label dict for labels currently installed
        self.metering_labels = {}

    def ns_name(self):
        """Return the router namespace name, or None when namespaces are off."""
        if not self.conf.use_namespaces:
            return None
        return NS_PREFIX + self.router['id']
class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver):
    def __init__(self, plugin, conf):
        # Keep references to the metering plugin and configuration, then
        # load the configured interface driver; a missing driver setting
        # is fatal at startup.
        self.plugin = plugin
        self.conf = conf or cfg.CONF
        self.routers = {}  # router id -> RouterWithMetering
        if not self.conf.interface_driver:
            raise SystemExit(_('An interface driver must be specified'))
        LOG.info(_("Loading interface driver %s"), self.conf.interface_driver)
        self.driver = importutils.import_object(self.conf.interface_driver,
                                                self.conf)
def _update_router(self, router):
r = self.routers.get(router['id'],
RouterWithMetering(self.conf, router))
r.router = router
self.routers[r.id] = r
return r
@log.log
def update_routers(self, context, routers):
# disassociate removed routers
router_ids = [router['id'] for router in routers]
for router_id in self.routers:
if router_id not in router_ids:
self._process_disassociate_metering_label(router)
for router in routers:
old_gw_port_id = None
old_rm = self.routers.get(router['id'])
if old_rm:
old_gw_port_id = old_rm.router['gw_port_id']
gw_port_id = router['gw_port_id']
if gw_port_id != old_gw_port_id:
if old_rm:
with IptablesManagerTransaction(old_rm.iptables_manager):
self._process_disassociate_metering_label(router)
if gw_port_id:
self._process_associate_metering_label(router)
elif gw_port_id:
self._process_associate_metering_label(router)
@log.log
def remove_router(self, context, router_id):
if router_id in self.routers:
del self.routers[router_id]
def get_external_device_name(self, port_id):
return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def _process_metering_label_rules(self, rm, rules, label_chain,
rules_chain):
im = rm.iptables_manager
ext_dev = self.get_external_device_name(rm.router['gw_port_id'])
if not ext_dev:
return
for rule in rules:
remote_ip = rule['remote_ip_prefix']
dir = '-i ' + ext_dev
if rule['direction'] == 'egress':
dir = '-o ' + ext_dev
if rule['excluded']:
ipt_rule = dir + ' -d ' + remote_ip + ' -j RETURN'
im.ipv4['filter'].add_rule(rules_chain, ipt_rule, wrap=False,
top=True)
else:
ipt_rule = dir + ' -d ' + remote_ip + ' -j ' + label_chain
im.ipv4['filter'].add_rule(rules_chain, ipt_rule,
wrap=False, top=False)
def _process_associate_metering_label(self, router):
self._update_router(router)
rm = self.routers.get(router['id'])
with IptablesManagerTransaction(rm.iptables_manager):
labels = router.get(constants.METERING_LABEL_KEY, [])
for label in labels:
label_id = label['id']
label_chain = iptables_manager.get_chain_name(WRAP_NAME +
LABEL + label_id,
wrap=False)
rm.iptables_manager.ipv4['filter'].add_chain(label_chain,
wrap=False)
rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
RULE + label_id,
wrap=False)
rm.iptables_manager.ipv4['filter'].add_chain(rules_chain,
wrap=False)
rm.iptables_manager.ipv4['filter'].add_rule(TOP_CHAIN, '-j ' +
rules_chain,
wrap=False)
rm.iptables_manager.ipv4['filter'].add_rule(label_chain,
'',
wrap=False)
rules = label.get('rules')
if rules:
self._process_metering_label_rules(rm, rules,
label_chain,
rules_chain)
rm.metering_labels[label_id] = label
def _process_disassociate_metering_label(self, router):
rm = self.routers.get(router['id'])
if not rm:
return
with IptablesManagerTransaction(rm.iptables_manager):
labels = router.get(constants.METERING_LABEL_KEY, [])
for label in labels:
label_id = label['id']
if label_id not in rm.metering_labels:
continue
label_chain = iptables_manager.get_chain_name(WRAP_NAME +
LABEL + label_id,
wrap=False)
rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
RULE + label_id,
wrap=False)
rm.iptables_manager.ipv4['filter'].remove_chain(label_chain,
wrap=False)
rm.iptables_manager.ipv4['filter'].remove_chain(rules_chain,
wrap=False)
del rm.metering_labels[label_id]
@log.log
def add_metering_label(self, context, routers):
for router in routers:
self._process_associate_metering_label(router)
@log.log
def update_metering_label_rules(self, context, routers):
for router in routers:
self._update_metering_label_rules(router)
def _update_metering_label_rules(self, router):
rm = self.routers.get(router['id'])
if not rm:
return
with IptablesManagerTransaction(rm.iptables_manager):
labels = router.get(constants.METERING_LABEL_KEY, [])
for label in labels:
label_id = label['id']
label_chain = iptables_manager.get_chain_name(WRAP_NAME +
LABEL + label_id,
wrap=False)
rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
RULE + label_id,
wrap=False)
rm.iptables_manager.ipv4['filter'].empty_chain(rules_chain,
wrap=False)
rules = label.get('rules')
if rules:
self._process_metering_label_rules(rm, rules,
label_chain,
rules_chain)
@log.log
def remove_metering_label(self, context, routers):
for router in routers:
self._process_disassociate_metering_label(router)
@log.log
def get_traffic_counters(self, context, routers):
accs = {}
for router in routers:
rm = self.routers.get(router['id'])
if not rm:
continue
for label_id, label in rm.metering_labels.items():
chain = iptables_manager.get_chain_name(WRAP_NAME + LABEL +
label_id, wrap=False)
chain_acc = rm.iptables_manager.get_traffic_counters(
chain, wrap=False, zero=True)
if not chain_acc:
continue
acc = accs.get(label_id, {'pkts': 0, 'bytes': 0})
acc['pkts'] += chain_acc['pkts']
acc['bytes'] += chain_acc['bytes']
accs[label_id] = acc
return accs
| |
import json
import tornado.ioloop
import tornado.web
import tornado.httpserver
from tornado.platform.asyncio import AsyncIOMainLoop
from graph.filter_base import FilterBase, FilterState, FilterType
from graph.input_pin import InputPin
from graph.output_pin import OutputPin
class MainHandler(tornado.web.RequestHandler):
    """Per-request handler bridging HTTP verbs onto the filter's output pins.

    Each verb sends the request body (for GET, the query arguments encoded
    as JSON) downstream with a metadata snapshot of the request. The
    response is written back asynchronously via write_response() when the
    graph answers through TornadoSource.recv().
    """

    def initialize(self, filter, opin_get, opin_post, opin_put, opin_delete):
        # Tornado calls initialize() with the kwargs from the URL spec.
        self._filter = filter
        self._output_pin_get = opin_get
        self._output_pin_post = opin_post
        self._output_pin_put = opin_put
        self._output_pin_delete = opin_delete
        # Register under id(self) so recv() can route the answer back here.
        self._filter.register_active_handler(id(self), self)
        # Snapshot of request metadata that travels with the payload.
        self._meta_dict = {}
        self._meta_dict[TornadoSource.METADATA_KEY_HANDLER_ID] = id(self)
        self._meta_dict[TornadoSource.METADATA_KEY_REQUEST_URI] = self.request.uri
        self._meta_dict[TornadoSource.METADATA_KEY_REQUEST_PATH] = self.request.path
        self._meta_dict[TornadoSource.METADATA_KEY_REQUEST_METHOD] = self.request.method
        self._meta_dict[TornadoSource.METADATA_KEY_REQUEST_PROTOCOL] = self.request.protocol
        self._meta_dict[TornadoSource.METADATA_KEY_REQUEST_REMOTE_IP] = self.request.remote_ip
        self._meta_dict[TornadoSource.METADATA_KEY_REQUEST_HOST] = self.request.host
        # Copy headers into a plain dict (HTTPHeaders is not a plain dict).
        headers_copy = {}
        for key in self.request.headers:
            headers_copy[key] = self.request.headers[key]
        self._meta_dict[TornadoSource.METADATA_KEY_REQUEST_HEADERS] = headers_copy
        # Default to an empty content type when the header is absent.
        self._content_type = self.request.headers.get('Content-Type')
        if self._content_type is None:
            self._content_type = ''

    def compute_etag(self):
        # Disable ETag generation; response bodies come from the graph.
        return None

    @tornado.web.asynchronous
    def get(self):
        # Since GET shouldn't contain a body, we parse the arguments into json format and pass that instead
        js = json.dumps({k: self.get_argument(k) for k in self.request.arguments})
        try:
            self._output_pin_get.send(TornadoSource.CONTENT_TYPE_APPLICATION_JSON, js, self._meta_dict)
        except ValueError:
            # Pin rejected the payload -> 405 Method Not Allowed.
            raise tornado.web.HTTPError(405)

    @tornado.web.asynchronous
    def post(self):
        # Forward the raw body and its declared content type downstream.
        try:
            self._output_pin_post.send(self._content_type, self.request.body, self._meta_dict)
        except ValueError:
            raise tornado.web.HTTPError(405)

    @tornado.web.asynchronous
    def put(self):
        try:
            self._output_pin_put.send(self._content_type, self.request.body, self._meta_dict)
        except ValueError:
            raise tornado.web.HTTPError(405)

    @tornado.web.asynchronous
    def delete(self):
        try:
            self._output_pin_delete.send(self._content_type, self.request.body, self._meta_dict)
        except ValueError:
            raise tornado.web.HTTPError(405)

    def on_finish(self):
        # Drop our registration so the filter no longer routes to us.
        self._filter.unregister_active_handler(id(self))
        super().on_finish()

    def write_response(self, mime_type, payload, metadata_dict):
        """Write the graph's answer back to the HTTP client and finish."""
        resp_code = metadata_dict.get(TornadoSource.METADATA_KEY_RESPONSE_STATUS)
        if resp_code is not None:
            if resp_code >= 400:
                # Error statuses go through Tornado's error machinery.
                raise tornado.web.HTTPError(resp_code)
            else:
                self.set_status(resp_code)
        self.set_header("Content-Type", mime_type)
        self.set_header("Server", TornadoSource.SERVER_HEADER_FULL)
        self.write(payload)
        self.finish()
class TornadoSource(FilterBase):
    """
    A Tornado instance represented as a source filter
    Input Pins:
        input1 - inputn - Accepts any mime type; payloads received here are
            written back as the HTTP response of the matching handler.
    Output Pins:
        output1_get - outputn_get - issues a GET downstream
        output1_post - outputn_post - issues a POST downstream
        output1_put - outputn_put - issues a PUT downstream
        output1_delete - outputn_delete - issues a DELETE downstream
    """
    filter_pad_templates = {}
    filter_meta = {}

    # Config key: list of URI path patterns, one pin set per path.
    CONFIG_KEY_URI_PATHS = 'uri_paths'
    # Metadata keys attached to every request sent downstream.
    METADATA_KEY_HANDLER_ID = 'web_handler_id'
    METADATA_KEY_REQUEST_URI = 'web_request_uri'
    METADATA_KEY_REQUEST_PATH = 'web_request_path'
    METADATA_KEY_REQUEST_METHOD = 'web_request_method'
    METADATA_KEY_REQUEST_PROTOCOL = 'web_request_protocol'
    METADATA_KEY_REQUEST_REMOTE_IP = 'web_request_remote_ip'
    METADATA_KEY_REQUEST_HOST = 'web_request_host'
    METADATA_KEY_REQUEST_HEADERS = 'web_request_headers'
    METADATA_KEY_RESPONSE_STATUS = 'web_response_status'
    METADATA_KEY_RESPONSE_CHARSET = 'web_response_charset'
    METADATA_KEY_MIME_TYPE = 'mime-type'
    METADATA_KEY_DB_TABLE_NAME = 'table_name_literal'
    #
    CONTENT_TYPE_APPLICATION_JSON = 'application/json'
    # Server header advertised on every response.
    SERVER_HEADER_APPENDED_COMPONENT = 'koolspin/rosetta'
    SERVER_HEADER_FULL = 'Tornado/{0} {1}'.format(tornado.version, SERVER_HEADER_APPENDED_COMPONENT)

    def __init__(self, name, config_dict, graph_manager):
        super().__init__(name, config_dict, graph_manager, FilterType.source)
        self._is_continuous = True
        #
        self._application = None
        self._server = None
        # Maps id(handler) -> live MainHandler awaiting a response.
        self._active_handlers = {}
        self._uri_paths = self._config_dict.get(TornadoSource.CONFIG_KEY_URI_PATHS)
        # One input pin plus four verb-specific output pins per URI path.
        for i in range(len(self._uri_paths)):
            mime_type_map = {}
            mime_type_map['*'] = self.recv
            input_pin_name = 'input{0}'.format(i+1)
            ipin = InputPin(input_pin_name, mime_type_map, self)
            self._add_input_pin(ipin)
            #
            output_pin_name = 'output{0}_get'.format(i+1)
            opin = OutputPin(output_pin_name, True)
            self._add_output_pin(opin)
            output_pin_name = 'output{0}_post'.format(i+1)
            opin = OutputPin(output_pin_name, True)
            self._add_output_pin(opin)
            output_pin_name = 'output{0}_put'.format(i+1)
            opin = OutputPin(output_pin_name, True)
            self._add_output_pin(opin)
            output_pin_name = 'output{0}_delete'.format(i+1)
            opin = OutputPin(output_pin_name, True)
            self._add_output_pin(opin)

    def run(self):
        """Build the Tornado application; listening starts in graph_is_running."""
        super().run()
        # Route Tornado's IOLoop onto the asyncio event loop.
        AsyncIOMainLoop().install()
        uri_list = []
        for i in range(len(self._uri_paths)):
            output_pin_name = 'output{0}_get'.format(i+1)
            opin_get = self.get_output_pin(output_pin_name)
            output_pin_name = 'output{0}_post'.format(i+1)
            opin_post = self.get_output_pin(output_pin_name)
            output_pin_name = 'output{0}_put'.format(i+1)
            opin_put = self.get_output_pin(output_pin_name)
            output_pin_name = 'output{0}_delete'.format(i+1)
            opin_delete = self.get_output_pin(output_pin_name)
            t = (self._uri_paths[i], MainHandler, dict(filter=self, opin_get=opin_get, opin_post=opin_post, opin_put=opin_put, opin_delete=opin_delete))
            uri_list.append(t)
        self._application = tornado.web.Application(uri_list)
        self._server = tornado.httpserver.HTTPServer(self._application)
        self._set_filter_state(FilterState.running)

    def graph_is_running(self):
        super().graph_is_running()
        # NOTE(review): listen port is hard-coded to 8888 here; presumably it
        # should come from config_dict -- confirm.
        self._server.listen(8888)

    def stop(self):
        super().stop()
        if self._server is not None:
            self._server.stop()
            self._server = None
        self._set_filter_state(FilterState.stopped)

    def recv(self, mime_type, payload, metadata_dict):
        """Input-pin callback: route a payload back to its waiting handler."""
        handler_id = metadata_dict.get(TornadoSource.METADATA_KEY_HANDLER_ID)
        if handler_id is not None:
            handler = self._get_active_handler(handler_id)
            if handler is not None:
                handler.write_response(mime_type, payload, metadata_dict)

    def register_active_handler(self, handler_id, handler):
        self._active_handlers[handler_id] = handler

    def unregister_active_handler(self, handler_id):
        del self._active_handlers[handler_id]

    def _get_active_handler(self, handler_id):
        # Returns None when the handler already finished.
        return self._active_handlers.get(handler_id)

    @staticmethod
    def get_filter_metadata():
        # NOTE(review): returns the FilterBase class attribute, not
        # TornadoSource.filter_meta defined above -- looks unintended; confirm.
        return FilterBase.filter_meta

    @staticmethod
    def get_filter_pad_templates():
        # NOTE(review): same as above -- TornadoSource.filter_pad_templates is
        # shadowed but never returned; confirm.
        return FilterBase.filter_pad_templates
| |
# -*- coding: utf-8 -*-
import json
import mimetypes
import os
from datetime import datetime
from zipfile import ZipFile
from django import forms
from django.conf import settings
from django.core.validators import URLValidator
from django.forms import widgets
from django.forms.extras.widgets import SelectDateWidget
from django.forms.models import modelformset_factory
from django.template.defaultfilters import filesizeformat
from django.utils import six
from django.utils.functional import lazy
from django.utils.safestring import mark_safe
from django.utils.translation import trans_real as translation
import commonware
import happyforms
import waffle
from jinja2 import escape as jinja2_escape
from jinja2.filters import do_dictsort
from mpconstants import regions as mpconstants_regions
from quieter_formset.formset import BaseModelFormSet
from tower import ugettext as _, ugettext_lazy as _lazy, ungettext as ngettext
import lib.iarc
import mkt
from lib.video import tasks as vtasks
from mkt import get_user
from mkt.access import acl
from mkt.api.models import Access
from mkt.constants import (CATEGORY_CHOICES, MAX_PACKAGED_APP_SIZE,
ratingsbodies)
from mkt.developers.utils import prioritize_app
from mkt.files.models import FileUpload
from mkt.files.utils import WebAppParser
from mkt.regions import REGIONS_CHOICES_SORTED_BY_NAME
from mkt.regions.utils import parse_region
from mkt.reviewers.models import RereviewQueue
from mkt.site.fields import SeparatedValuesField
from mkt.site.forms import WebappChoiceField
from mkt.site.utils import remove_icons, slug_validator, slugify
from mkt.tags.models import Tag
from mkt.tags.utils import can_edit_restricted_tags, clean_tags
from mkt.translations.fields import TransField
from mkt.translations.forms import TranslationFormMixin
from mkt.translations.models import Translation
from mkt.translations.widgets import TranslationTextarea, TransTextarea
from mkt.versions.models import Version
from mkt.webapps.models import (WebappUser, BlockedSlug, IARCInfo, Preview,
Webapp)
from mkt.webapps.tasks import (index_webapps, set_storefront_data,
update_manifests)
from . import tasks
# Module-level logger shared by all the developer form helpers below.
log = commonware.log.getLogger('mkt.developers')
def region_error(region):
    """Build a ValidationError naming the region that cannot be selected."""
    region_name = unicode(parse_region(region).name)
    return forms.ValidationError(
        _('You cannot select {region}.').format(region=region_name))
def toggle_app_for_special_regions(request, app, enabled_regions=None):
    """Toggle for special regions (e.g., China).

    When `enabled_regions` (iterable of region ids) is given, first sync
    the app's per-region status to the developer's choices; then, based on
    the resulting status, include or exclude the app from each special
    region. No-op unless the 'special-regions' waffle flag is active.
    """
    if not waffle.flag_is_active(request, 'special-regions'):
        return
    for region in mkt.regions.SPECIAL_REGIONS:
        status = app.geodata.get_status(region)
        if enabled_regions is not None:
            if region.id in enabled_regions:
                # If it's not already enabled, mark as pending.
                if status != mkt.STATUS_PUBLIC:
                    # Developer requested for it to be in China.
                    status = mkt.STATUS_PENDING
                    value, changed = app.geodata.set_status(region, status)
                    if changed:
                        log.info(u'[Webapp:%s] App marked as pending '
                                 u'special region (%s).' % (app, region.slug))
                        value, changed = app.geodata.set_nominated_date(
                            region, save=True)
                        log.info(u'[Webapp:%s] Setting nomination date to '
                                 u'now for region (%s).' % (app, region.slug))
            else:
                # Developer cancelled request for approval.
                status = mkt.STATUS_NULL
                value, changed = app.geodata.set_status(
                    region, status, save=True)
                if changed:
                    log.info(u'[Webapp:%s] App marked as null special '
                             u'region (%s).' % (app, region.slug))
        # Inclusion/exclusion is driven by the (possibly updated) status.
        if status == mkt.STATUS_PUBLIC:
            # Reviewer approved for it to be in China.
            aer = app.webappexcludedregion.filter(region=region.id)
            if aer.exists():
                aer.delete()
                log.info(u'[Webapp:%s] App included in new special '
                         u'region (%s).' % (app, region.slug))
        else:
            # Developer requested for it to be in China.
            aer, created = app.webappexcludedregion.get_or_create(
                region=region.id)
            if created:
                log.info(u'[Webapp:%s] App excluded from new special '
                         u'region (%s).' % (app, region.slug))
class AuthorForm(happyforms.ModelForm):
    """One row of the team-members formset."""

    def clean_user(self):
        """Reject members who have not accepted the developer agreement."""
        member = self.cleaned_data['user']
        if member.read_dev_agreement:
            return member
        raise forms.ValidationError(
            _('All team members must have read and agreed to the '
              'developer agreement.'))

    class Meta:
        model = WebappUser
        exclude = ('webapp',)
class BaseModelFormSet(BaseModelFormSet):
    """
    Override the parent's is_valid to prevent deleting all forms.
    """

    def is_valid(self):
        # clean() never runs inside is_valid() when every row is flagged
        # for deletion, and deleting everything is not allowed.
        if not super(BaseModelFormSet, self).is_valid():
            return False
        return not any(self.errors) and not bool(self.non_form_errors())
class BaseAuthorFormSet(BaseModelFormSet):
    """Cross-row validation for the team-members formset."""

    def clean(self):
        if any(self.errors):
            return
        # Drop rows flagged for deletion, then empty rows (the extra form
        # has empty/None cleaned_data).
        data = [form.cleaned_data for form in self.forms
                if not form.cleaned_data.get('DELETE', False)]
        data = [row for row in data if row]

        if not any(row['role'] == mkt.AUTHOR_ROLE_OWNER for row in data):
            raise forms.ValidationError(_('Must have at least one owner.'))
        if not any(row['listed'] for row in data):
            raise forms.ValidationError(
                _('At least one team member must be listed.'))
        users = [row['user'] for row in data]
        if len(users) != len(set(users)):
            raise forms.ValidationError(
                _('A team member can only be listed once.'))
# Formset for managing an app's team members; rows are deletable and no
# blank extra row is rendered.
AuthorFormSet = modelformset_factory(WebappUser, formset=BaseAuthorFormSet,
                                     form=AuthorForm, can_delete=True, extra=0)
class DeleteForm(happyforms.Form):
    """Confirmation form shown when a developer deletes an app."""
    reason = forms.CharField(required=False)

    def __init__(self, request):
        # Bound straight to the POST data; no other constructor args.
        super(DeleteForm, self).__init__(request.POST)
def trap_duplicate(request, manifest_url):
    """Return a localized HTML warning if this user already submitted an
    app with the same manifest URL; None otherwise.

    The messages are kept as one full `_()` literal per status so gettext
    string extraction keeps working.
    """
    # See if this user has any other apps with the same manifest.
    owned = (request.user.webappuser_set
             .filter(webapp__manifest_url=manifest_url))
    if not owned:
        return
    try:
        app = owned[0].webapp
    except Webapp.DoesNotExist:
        return
    error_url = app.get_dev_url()
    msg = None
    if app.status == mkt.STATUS_PUBLIC:
        msg = _(u'Oops, looks like you already submitted that manifest '
                'for %s, which is currently public. '
                '<a href="%s">Edit app</a>')
    elif app.status == mkt.STATUS_PENDING:
        msg = _(u'Oops, looks like you already submitted that manifest '
                'for %s, which is currently pending. '
                '<a href="%s">Edit app</a>')
    elif app.status == mkt.STATUS_NULL:
        msg = _(u'Oops, looks like you already submitted that manifest '
                'for %s, which is currently incomplete. '
                '<a href="%s">Resume app</a>')
    elif app.status == mkt.STATUS_REJECTED:
        msg = _(u'Oops, looks like you already submitted that manifest '
                'for %s, which is currently rejected. '
                '<a href="%s">Edit app</a>')
    elif app.status == mkt.STATUS_DISABLED:
        msg = _(u'Oops, looks like you already submitted that manifest '
                'for %s, which is currently banned on Marketplace. '
                '<a href="%s">Edit app</a>')
    elif app.disabled_by_user:
        msg = _(u'Oops, looks like you already submitted that manifest '
                'for %s, which is currently disabled. '
                '<a href="%s">Edit app</a>')
    if msg:
        # App name is user-supplied: escape it before building HTML.
        return msg % (jinja2_escape(app.name), error_url)
def verify_app_domain(manifest_url, exclude=None, packaged=False):
    """Raise ValidationError when another app already lives on this domain.

    Only enforced for packaged apps or when the 'webapps-unique-by-domain'
    switch is on. `exclude` skips the app being edited.
    """
    if not (packaged or waffle.switch_is_active('webapps-unique-by-domain')):
        return
    domain = Webapp.domain_from_url(manifest_url)
    existing = Webapp.objects.filter(app_domain=domain)
    if exclude:
        existing = existing.exclude(pk=exclude.pk)
    if existing.exists():
        raise forms.ValidationError(
            _('An app already exists on this domain; '
              'only one app per domain is allowed.'))
class PreviewForm(happyforms.ModelForm):
    """Form for one screenshot/video preview attached to an app."""
    file_upload = forms.FileField(required=False)
    upload_hash = forms.CharField(required=False)
    # This lets us POST the data URIs of the unsaved previews so we can still
    # show them if there were form errors.
    unsaved_image_data = forms.CharField(required=False,
                                         widget=forms.HiddenInput)
    unsaved_image_type = forms.CharField(required=False,
                                         widget=forms.HiddenInput)

    def save(self, webapp, commit=True):
        """Save the preview and kick off async resize of any new upload."""
        # Empty rows have no cleaned_data; nothing to do then.
        if self.cleaned_data:
            self.instance.webapp = webapp
            if self.cleaned_data.get('DELETE'):
                # Existing preview.
                if self.instance.id:
                    self.instance.delete()
                # User has no desire to save this preview.
                return
            super(PreviewForm, self).save(commit=commit)
            if self.cleaned_data['upload_hash']:
                upload_hash = self.cleaned_data['upload_hash']
                upload_path = os.path.join(settings.TMP_PATH, 'preview',
                                           upload_hash)
                # The mime type is encoded in the hash's file extension,
                # with '-' standing in for '/' (e.g. 'video-webm').
                filetype = (os.path.splitext(upload_hash)[1][1:]
                            .replace('-', '/'))
                if filetype in mkt.VIDEO_TYPES:
                    self.instance.update(filetype=filetype)
                    vtasks.resize_video.delay(upload_path, self.instance.pk,
                                              user_pk=mkt.get_user().pk)
                else:
                    self.instance.update(filetype='image/png')
                    tasks.resize_preview.delay(upload_path, self.instance.pk,
                                               set_modified_on=[self.instance])

    class Meta:
        model = Preview
        fields = ('file_upload', 'upload_hash', 'id', 'position')
class JSONField(forms.Field):
    """Form field that decodes JSON strings, passing other values through."""

    def to_python(self, value):
        if value == '':
            return None
        # isinstance cannot raise ValueError, so only the decode is guarded.
        if isinstance(value, basestring):
            try:
                return json.loads(value)
            except ValueError:
                # Not valid JSON: hand back the raw string unchanged.
                pass
        return value
class JSONMultipleChoiceField(forms.MultipleChoiceField, JSONField):
    # Rendered as checkboxes; JSONField.to_python decodes JSON-encoded POSTs.
    widget = forms.CheckboxSelectMultiple
class AdminSettingsForm(PreviewForm):
    """Admin-only app settings; reuses PreviewForm to manage the promo
    `Preview` image alongside the admin flags."""
    DELETE = forms.BooleanField(required=False)
    mozilla_contact = SeparatedValuesField(forms.EmailField, separator=',',
                                           required=False)
    vip_app = forms.BooleanField(required=False)
    priority_review = forms.BooleanField(required=False)
    banner_regions = JSONMultipleChoiceField(
        required=False, choices=mkt.regions.REGIONS_CHOICES_NAME)
    banner_message = TransField(required=False)

    class Meta:
        model = Preview
        fields = ('file_upload', 'upload_hash', 'position')

    def __init__(self, *args, **kw):
        # Note that this form is not inheriting from WebappFormBase, so we have
        # to get rid of 'version' ourselves instead of letting the parent class
        # do it.
        kw.pop('version', None)
        # Get the object for the app's promo `Preview` and pass it to the form.
        # NOTE(review): `webapp` is only bound when an 'instance' kwarg is
        # passed; the lines below assume it always is -- confirm callers.
        if kw.get('instance'):
            webapp = kw.pop('instance')
            self.instance = webapp
            self.promo = webapp.get_promo()
        self.request = kw.pop('request', None)
        # Note: After calling `super`, `self.instance` becomes the `Preview`
        # object.
        super(AdminSettingsForm, self).__init__(*args, **kw)
        self.initial['vip_app'] = webapp.vip_app
        self.initial['priority_review'] = webapp.priority_review
        if self.instance:
            self.initial['mozilla_contact'] = webapp.mozilla_contact
        self.initial['banner_regions'] = webapp.geodata.banner_regions or []
        self.initial['banner_message'] = webapp.geodata.banner_message_id

    @property
    def regions_by_id(self):
        # Used by templates to resolve region ids to region objects.
        return mkt.regions.REGIONS_CHOICES_ID_DICT

    def clean_position(self):
        # The promo shot always sits at position -1.
        return -1

    def clean_banner_regions(self):
        try:
            regions = map(int, self.cleaned_data.get('banner_regions'))
        except (TypeError, ValueError):
            # input data is not a list or data contains non-integers.
            raise forms.ValidationError(_('Invalid region(s) selected.'))
        return list(regions)

    def clean_mozilla_contact(self):
        contact = self.cleaned_data.get('mozilla_contact')
        # Coerce a cleared contact field to an empty string, not None.
        if self.cleaned_data.get('mozilla_contact') is None:
            return u''
        return contact

    def save(self, webapp, commit=True):
        """Apply admin flags, promo image changes and banner settings."""
        # Promo handling: delete on request, replace on new upload.
        if (self.cleaned_data.get('DELETE') and
                'upload_hash' not in self.changed_data and self.promo.id):
            self.promo.delete()
        elif self.promo and 'upload_hash' in self.changed_data:
            self.promo.delete()
        elif self.cleaned_data.get('upload_hash'):
            super(AdminSettingsForm, self).save(webapp, True)
        updates = {
            'vip_app': self.cleaned_data.get('vip_app'),
        }
        contact = self.cleaned_data.get('mozilla_contact')
        if contact is not None:
            updates['mozilla_contact'] = contact
        if (self.cleaned_data.get('priority_review') and
                not webapp.priority_review):
            # webapp.priority_review gets updated within prioritize_app().
            prioritize_app(webapp, self.request.user)
        else:
            updates['priority_review'] = self.cleaned_data.get(
                'priority_review')
        webapp.update(**updates)
        geodata = webapp.geodata
        geodata.banner_regions = self.cleaned_data.get('banner_regions')
        geodata.banner_message = self.cleaned_data.get('banner_message')
        geodata.save()
        # NOTE(review): no 'flash' field is declared on this form, so
        # `uses_flash` looks like it is always None here -- confirm intent.
        uses_flash = self.cleaned_data.get('flash')
        af = webapp.get_latest_file()
        if af is not None:
            af.update(uses_flash=bool(uses_flash))
        index_webapps.delay([webapp.id])
        return webapp
class BasePreviewFormSet(BaseModelFormSet):
    """Require at least one surviving preview across the whole formset."""

    def clean(self):
        if any(self.errors):
            return
        has_preview = any(
            not form.cleaned_data.get('DELETE') and
            form.cleaned_data.get('upload_hash') is not None
            for form in self.forms)
        if not has_preview:
            raise forms.ValidationError(
                _('You must upload at least one screenshot or video.'))
# Formset for an app's screenshots/videos; one blank extra row is rendered.
PreviewFormSet = modelformset_factory(Preview, formset=BasePreviewFormSet,
                                      form=PreviewForm, can_delete=True,
                                      extra=1)
class NewManifestForm(happyforms.Form):
    """Submission form for a new hosted-app manifest URL."""
    manifest = forms.URLField()

    def __init__(self, *args, **kwargs):
        self.is_standalone = kwargs.pop('is_standalone', False)
        super(NewManifestForm, self).__init__(*args, **kwargs)

    def clean_manifest(self):
        manifest = self.cleaned_data['manifest']
        if self.is_standalone:
            # The standalone validator skips the domain-uniqueness check.
            return manifest
        verify_app_domain(manifest)
        return manifest
class NewPackagedAppForm(happyforms.Form):
    """Upload form for a new packaged-app zip.

    On success (and on validation failure) `self.file_upload` holds the
    FileUpload row recording the outcome.
    """
    upload = forms.FileField()

    def __init__(self, *args, **kwargs):
        self.max_size = kwargs.pop('max_size', MAX_PACKAGED_APP_SIZE)
        self.user = kwargs.pop('user', get_user())
        self.webapp = kwargs.pop('webapp', None)
        self.file_upload = None
        super(NewPackagedAppForm, self).__init__(*args, **kwargs)

    def clean_upload(self):
        """Validate size, zip readability, manifest and domain uniqueness.

        Raises a ValidationError (built by persist_errors) describing all
        collected problems.
        """
        upload = self.cleaned_data['upload']
        errors = []

        if upload.size > self.max_size:
            errors.append({
                'type': 'error',
                'message': _('Packaged app too large for submission. Packages '
                             'must be smaller than %s.' % filesizeformat(
                                 self.max_size)),
                'tier': 1,
            })
            # Immediately raise an error, do not process the rest of the view,
            # which would read the file.
            raise self.persist_errors(errors, upload)

        manifest = None
        try:
            # Be careful to keep this as in-memory zip reading.
            manifest = ZipFile(upload, 'r').read('manifest.webapp')
        except Exception:
            # Any zip error maps to the same user-facing message (the
            # previously unused `as e` binding was removed).
            errors.append({
                'type': 'error',
                'message': _('Error extracting manifest from zip file.'),
                'tier': 1,
            })

        origin = None
        if manifest:
            try:
                origin = WebAppParser.decode_manifest(manifest).get('origin')
            except forms.ValidationError as e:
                errors.append({
                    'type': 'error',
                    'message': ''.join(e.messages),
                    'tier': 1,
                })

        if origin:
            try:
                verify_app_domain(origin, packaged=True, exclude=self.webapp)
            # Modernized from the Python-2-only `except X, e` spelling for
            # consistency with the handler above.
            except forms.ValidationError as e:
                errors.append({
                    'type': 'error',
                    'message': ''.join(e.messages),
                    'tier': 1,
                })

        if errors:
            raise self.persist_errors(errors, upload)

        # Everything passed validation.
        self.file_upload = FileUpload.from_post(
            upload, upload.name, upload.size, user=self.user)

    def persist_errors(self, errors, upload):
        """
        Persist the error with this into FileUpload (but do not persist
        the file contents, which are too large) and return a ValidationError.
        """
        validation = {
            'errors': len(errors),
            'success': False,
            'messages': errors,
        }
        self.file_upload = FileUpload.objects.create(
            user=self.user, name=getattr(upload, 'name', ''),
            validation=json.dumps(validation))
        # Return a ValidationError to be raised by the view.
        return forms.ValidationError(' '.join(e['message'] for e in errors))
class WebappFormBase(TranslationFormMixin, happyforms.ModelForm):
    """Common base for the app-editing ModelForms below.

    Pops the required `request` kwarg and the optional `version` kwarg so
    subclasses can use them during validation and save.
    """

    def __init__(self, *args, **kw):
        self.request = kw.pop('request')
        self.version = kw.pop('version', None)
        super(WebappFormBase, self).__init__(*args, **kw)

    class Meta:
        # Fixed: this previously read `models = Webapp`, which Django
        # ignores -- the ModelForm Meta option is `model`.
        model = Webapp
        fields = ('name', 'slug')
class AppFormBasic(WebappFormBase):
    """Form to edit basic app info."""
    # Presented as 'slug' to keep the JS working, but persisted as
    # app_slug -- see __init__ and _post_clean.
    slug = forms.CharField(max_length=30, widget=forms.TextInput)
    manifest_url = forms.URLField()
    hosted_url = forms.CharField(
        label=_lazy(u'Hosted URL:'), required=False,
        help_text=_lazy(
            u'A URL to where your app is hosted on the web, if it exists. This'
            u' allows users to try out your app before installing it.'))
    description = TransField(
        required=True,
        label=_lazy(u'Provide a detailed description of your app'),
        help_text=_lazy(u'This description will appear on the details page.'),
        widget=TransTextarea)
    tags = forms.CharField(
        label=_lazy(u'Search Keywords:'), required=False,
        widget=forms.Textarea(attrs={'rows': 3}),
        help_text=_lazy(
            u'The search keywords are used to return search results in the '
            u'Firefox Marketplace. Be sure to include a keywords that '
            u'accurately reflect your app.'))

    class Meta:
        model = Webapp
        fields = ('slug', 'manifest_url', 'hosted_url', 'description', 'tags')

    def __init__(self, *args, **kw):
        # Force the form to use app_slug. We want to keep
        # this under "slug" so all the js continues to work.
        kw.setdefault('initial', {})['slug'] = kw['instance'].app_slug
        super(AppFormBasic, self).__init__(*args, **kw)
        # Remember the pre-edit URL so save() can log/flag the change.
        self.old_manifest_url = self.instance.manifest_url
        if self.instance.is_packaged:
            # Manifest URL cannot be changed for packaged apps.
            del self.fields['manifest_url']
        self.initial['tags'] = ', '.join(self.get_tags(self.instance))

    def clean_tags(self):
        # Delegates permission-aware parsing/limits to clean_tags().
        return clean_tags(self.request, self.cleaned_data['tags'])

    def get_tags(self, webapp):
        """Return the tag texts visible to the current user."""
        if can_edit_restricted_tags(self.request):
            return list(webapp.tags.values_list('tag_text', flat=True))
        else:
            return list(webapp.tags.filter(restricted=False)
                        .values_list('tag_text', flat=True))

    def _post_clean(self):
        # Switch slug to app_slug in cleaned_data and self._meta.fields so
        # we can update the app_slug field for webapps.
        try:
            self._meta.fields = list(self._meta.fields)
            slug_idx = self._meta.fields.index('slug')
            data = self.cleaned_data
            if 'slug' in data:
                data['app_slug'] = data.pop('slug')
            self._meta.fields[slug_idx] = 'app_slug'
            super(AppFormBasic, self)._post_clean()
        finally:
            # Always restore 'slug' so the form can be re-rendered.
            self._meta.fields[slug_idx] = 'slug'

    def clean_slug(self):
        """Validate format, uniqueness and blocklist; returns lowercase."""
        slug = self.cleaned_data['slug']
        slug_validator(slug, lower=False)
        if slug != self.instance.app_slug:
            if Webapp.objects.filter(app_slug=slug).exists():
                raise forms.ValidationError(
                    _('This slug is already in use. Please choose another.'))
            if BlockedSlug.blocked(slug):
                raise forms.ValidationError(_('The slug cannot be "%s". '
                                              'Please choose another.' % slug))
        return slug.lower()

    def clean_manifest_url(self):
        manifest_url = self.cleaned_data['manifest_url']
        # Only verify if manifest changed.
        if 'manifest_url' in self.changed_data:
            verify_app_domain(manifest_url, exclude=self.instance)
        return manifest_url

    def save(self, webapp, commit=False):
        """Persist the form, flag manifest-URL changes, and sync tags."""
        # We ignore `commit`, since we need it to be `False` so we can save
        # the ManyToMany fields on our own.
        webappform = super(AppFormBasic, self).save(commit=False)
        webappform.save()
        if 'manifest_url' in self.changed_data:
            before_url = self.old_manifest_url
            after_url = self.cleaned_data['manifest_url']
            # If a non-admin edited the manifest URL, add to Re-review Queue.
            if not acl.action_allowed(self.request, 'Admin', '%'):
                log.info(u'[Webapp:%s] (Re-review) Manifest URL changed '
                         u'from %s to %s'
                         % (self.instance, before_url, after_url))
                msg = (_(u'Manifest URL changed from {before_url} to '
                         u'{after_url}')
                       .format(before_url=before_url, after_url=after_url))
                RereviewQueue.flag(self.instance,
                                   mkt.LOG.REREVIEW_MANIFEST_URL_CHANGE, msg)
            # Refetch the new manifest.
            log.info('Manifest %s refreshed for %s'
                     % (webapp.manifest_url, webapp))
            update_manifests.delay([self.instance.id])
        tags_new = self.cleaned_data['tags']
        tags_old = [slugify(t, spaces=True) for t in self.get_tags(webapp)]
        add_tags = set(tags_new) - set(tags_old)
        del_tags = set(tags_old) - set(tags_new)
        # Add new tags.
        for t in add_tags:
            Tag(tag_text=t).save_tag(webapp)
        # Remove old tags.
        for t in del_tags:
            Tag(tag_text=t).remove_tag(webapp)
        return webappform
class AppFormDetails(WebappFormBase):
    """Form to edit locale, homepage and privacy-policy details."""
    # Locale codes in dashed form ('pt-BR'), sorted by display name.
    LOCALES = [(translation.to_locale(k).replace('_', '-'), v)
               for k, v in do_dictsort(settings.LANGUAGES)]
    default_locale = forms.TypedChoiceField(required=False, choices=LOCALES)
    homepage = TransField.adapt(forms.URLField)(required=False)
    privacy_policy = TransField(
        widget=TransTextarea(), required=True,
        label=_lazy(u"Please specify your app's Privacy Policy"))

    class Meta:
        model = Webapp
        fields = ('default_locale', 'homepage', 'privacy_policy')

    def clean(self):
        # Make sure we have the required translations in the new locale.
        required = ['name', 'description']
        data = self.cleaned_data
        if not self.errors and 'default_locale' in self.changed_data:
            # Map field name -> translation id stored on the instance.
            fields = dict((k, getattr(self.instance, k + '_id'))
                          for k in required)
            locale = data['default_locale']
            ids = filter(None, fields.values())
            qs = (Translation.objects.filter(locale=locale, id__in=ids,
                                             localized_string__isnull=False)
                  .values_list('id', flat=True))
            missing = [k for k, v in fields.items() if v not in qs]
            if missing:
                raise forms.ValidationError(
                    _('Before changing your default locale you must have a '
                      'name and description in that locale. '
                      'You are missing %s.') % ', '.join(map(repr, missing)))
        return data
class AppFormMedia(WebappFormBase):
    """Form to replace the app icon."""
    icon_upload_hash = forms.CharField(required=False)
    unsaved_icon_data = forms.CharField(required=False,
                                        widget=forms.HiddenInput)

    class Meta:
        model = Webapp
        fields = ('icon_upload_hash', 'icon_type')

    def save(self, webapp, commit=True):
        """Queue async icon resizing when a new icon was uploaded."""
        upload_hash = self.cleaned_data['icon_upload_hash']
        if upload_hash:
            source = os.path.join(settings.TMP_PATH, 'icon', upload_hash)
            destination = os.path.join(webapp.get_icon_dir(),
                                       '%s' % webapp.id)
            # Clear old renditions before the new ones are generated.
            remove_icons(destination)
            tasks.resize_icon.delay(source, destination,
                                    mkt.CONTENT_ICON_SIZES,
                                    set_modified_on=[webapp])
        return super(AppFormMedia, self).save(commit)
class AppSupportFormMixin(object):
    """Mixin requiring at least one support contact (email or URL)."""

    def get_default_translation_for(self, field_name):
        """Return cleaned_data for `field_name` in its default locale."""
        locale = self.fields[field_name].default_locale
        return self.cleaned_data.get(field_name, {}).get(locale, '')

    def clean_support_fields(self):
        """Require a support email, a support URL, or both."""
        if 'support_email' in self._errors or 'support_url' in self._errors:
            # A field was filled but invalid: let its own error stand so
            # the user just fixes it.
            return
        email = self.get_default_translation_for('support_email')
        url = self.get_default_translation_for('support_url')
        if email or url:
            return
        # Flag both fields, plus a synthetic 'support' error the template
        # renders once instead of repeating the message per field.
        self._errors['support'] = self.error_class(
            [_('You must provide either a website, an email, or both.')])
        self._errors['support_email'] = self.error_class([''])
        self._errors['support_url'] = self.error_class([''])

    def clean(self):
        cleaned_data = super(AppSupportFormMixin, self).clean()
        self.clean_support_fields()
        return cleaned_data
class AppFormSupport(AppSupportFormMixin, WebappFormBase):
    # Optional translated support-contact fields; AppSupportFormMixin.clean()
    # enforces that at least one of the two ends up filled in.
    support_url = TransField.adapt(forms.URLField)(required=False)
    support_email = TransField.adapt(forms.EmailField)(required=False)

    class Meta:
        model = Webapp
        fields = ('support_email', 'support_url')
class AppAppealForm(happyforms.Form):
    """Lets a developer whose app was rejected add comments and request
    another review."""
    notes = forms.CharField(
        label=_lazy(u'Your comments'),
        required=False, widget=forms.Textarea(attrs={'rows': 2}))

    def __init__(self, *args, **kwargs):
        self.product = kwargs.pop('product', None)
        super(AppAppealForm, self).__init__(*args, **kwargs)

    def save(self):
        """Log the resubmission and push the app back into the review queue.

        Returns the latest version, which is the one re-submitted.
        """
        latest_version = self.product.versions.latest()
        comments = self.cleaned_data['notes']
        if comments:
            mkt.log(mkt.LOG.WEBAPP_RESUBMIT, self.product, latest_version,
                    details={'comments': comments})
        else:
            mkt.log(mkt.LOG.WEBAPP_RESUBMIT, self.product, latest_version)
        # Mark app and file as pending again.
        self.product.update(status=mkt.WEBAPPS_UNREVIEWED_STATUS)
        latest_version.all_files[0].update(
            status=mkt.WEBAPPS_UNREVIEWED_STATUS)
        return latest_version
class PublishForm(happyforms.Form):
    """Controls the visibility (publish status) of an app that has already
    gone through submission."""
    # Publish choice wording is slightly different here than with the
    # submission flow because the app may have already been published.
    mark_safe_lazy = lazy(mark_safe, six.text_type)
    PUBLISH_CHOICES = (
        (mkt.PUBLISH_IMMEDIATE,
         mark_safe_lazy(_lazy(
             u'<b>Published</b>: Visible to everyone in the Marketplace and '
             u'included in search results and listing pages.'))),
        (mkt.PUBLISH_HIDDEN,
         mark_safe_lazy(_lazy(
             u'<b>Unlisted</b>: Visible to only people with the URL and '
             u'does not appear in search results and listing pages.'))),
    )

    # Webapp status -> initial publish choice, used to seed the form.
    PUBLISH_MAPPING = {
        mkt.STATUS_PUBLIC: mkt.PUBLISH_IMMEDIATE,
        mkt.STATUS_UNLISTED: mkt.PUBLISH_HIDDEN,
        mkt.STATUS_APPROVED: mkt.PUBLISH_PRIVATE,
    }
    # Inverse mapping: publish choice -> status, used when saving.
    STATUS_MAPPING = dict((v, k) for k, v in PUBLISH_MAPPING.items())

    publish_type = forms.TypedChoiceField(
        required=False, choices=PUBLISH_CHOICES, widget=forms.RadioSelect(),
        initial=0, coerce=int, label=_lazy('App Visibility:'))
    limited = forms.BooleanField(
        required=False, label=_lazy(
            u'<b>Limit to my team</b>: Visible to only Team Members.'))

    def __init__(self, *args, **kwargs):
        self.webapp = kwargs.pop('webapp')
        super(PublishForm, self).__init__(*args, **kwargs)

        if self.webapp.status == mkt.STATUS_APPROVED:
            # A currently-private app renders as "unlisted + limited".
            initial_publish = mkt.PUBLISH_HIDDEN
            initial_limited = True
        else:
            initial_publish = self.PUBLISH_MAPPING.get(self.webapp.status,
                                                       mkt.PUBLISH_IMMEDIATE)
            initial_limited = False

        self.fields['publish_type'].initial = initial_publish
        self.fields['limited'].initial = initial_limited
        # Make the limited label safe so we can display the HTML.
        self.fields['limited'].label = mark_safe(self.fields['limited'].label)

    def save(self):
        """Apply the chosen visibility and refresh derived app data."""
        choice = self.cleaned_data['publish_type']
        if choice == mkt.PUBLISH_HIDDEN and self.cleaned_data['limited']:
            # "Unlisted" plus "limit to my team" collapses into private.
            choice = mkt.PUBLISH_PRIVATE

        self.webapp.update(status=self.STATUS_MAPPING[choice])
        mkt.log(mkt.LOG.CHANGE_STATUS, self.webapp.get_status_display(),
                self.webapp)
        # Call update_version, so various other bits of data update.
        self.webapp.update_version()
        # Call to update names and locales if changed.
        self.webapp.update_name_from_package_manifest()
        self.webapp.update_supported_locales()
        set_storefront_data.delay(self.webapp.pk)
class RegionForm(forms.Form):
    """Developer-facing form controlling which regions an app is listed in.

    An app is either unrestricted (available broadly, exclusions cleared and
    ``enable_new_regions`` forced on) or restricted to an explicit region
    list tracked through per-region exclusion rows on the webapp.
    """
    regions = forms.MultipleChoiceField(
        required=False, choices=[], widget=forms.CheckboxSelectMultiple,
        label=_lazy(u'Choose the regions your app will be listed in:'),
        error_messages={'required':
                        _lazy(u'You must select at least one region.')})
    special_regions = forms.MultipleChoiceField(
        required=False, widget=forms.CheckboxSelectMultiple,
        choices=[(x.id, x.name) for x in mkt.regions.SPECIAL_REGIONS])
    enable_new_regions = forms.BooleanField(
        required=False, label=_lazy(u'Enable new regions'))
    restricted = forms.TypedChoiceField(
        required=False, initial=0, coerce=int,
        choices=[(0, _lazy('Make my app available in most regions')),
                 (1, _lazy('Choose where my app is made available'))],
        widget=forms.RadioSelect(attrs={'class': 'choices'}))

    def __init__(self, *args, **kw):
        self.product = kw.pop('product', None)
        self.request = kw.pop('request', None)
        super(RegionForm, self).__init__(*args, **kw)
        self.fields['regions'].choices = REGIONS_CHOICES_SORTED_BY_NAME()

        # This is the list of the user's exclusions as we don't
        # want the user's choices to be altered by external
        # exclusions e.g. payments availability.
        user_exclusions = list(
            self.product.webappexcludedregion.values_list('region', flat=True)
        )

        # If we have excluded regions, uncheck those.
        # Otherwise, default to everything checked.
        self.regions_before = self.product.get_region_ids(
            restofworld=True,
            excluded=user_exclusions
        )

        self.initial = {
            'regions': sorted(self.regions_before),
            'restricted': int(self.product.geodata.restricted),
            'enable_new_regions': self.product.enable_new_regions,
        }

        # The checkboxes for special regions are
        #
        # - checked ... if an app has not been requested for approval in
        #   China or the app has been rejected in China.
        #
        # - unchecked ... if an app has been requested for approval in
        #   China or the app has been approved in China.
        unchecked_statuses = (mkt.STATUS_NULL, mkt.STATUS_REJECTED)

        for region in self.special_region_objs:
            if self.product.geodata.get_status(region) in unchecked_statuses:
                # If it's rejected in this region, uncheck its checkbox.
                if region.id in self.initial['regions']:
                    self.initial['regions'].remove(region.id)
            elif region.id not in self.initial['regions']:
                # If it's pending/public, check its checkbox.
                self.initial['regions'].append(region.id)

    @property
    def regions_by_id(self):
        # Mapping of region id -> region object for all known regions.
        return mkt.regions.REGIONS_CHOICES_ID_DICT

    @property
    def special_region_objs(self):
        # Region objects needing special handling (e.g. China).
        return mkt.regions.SPECIAL_REGIONS

    @property
    def special_region_ids(self):
        return mkt.regions.SPECIAL_REGION_IDS

    @property
    def low_memory_regions(self):
        # True if any known region is flagged as low-memory.
        return any(region.low_memory for region in self.regions_by_id.values())

    @property
    def special_region_statuses(self):
        """Returns the null/pending/public status for each region."""
        statuses = {}
        for region in self.special_region_objs:
            statuses[region.id] = self.product.geodata.get_status_slug(region)
        return statuses

    @property
    def special_region_messages(self):
        """Returns the L10n messages for each region's status."""
        return self.product.geodata.get_status_messages()

    def is_toggling(self):
        """Return 'free'/'paid' if this POST toggles the payment model,
        otherwise False."""
        if not self.request or not hasattr(self.request, 'POST'):
            return False
        value = self.request.POST.get('toggle-paid')
        return value if value in ('free', 'paid') else False

    def _product_is_paid(self):
        # True when the app is premium or free-with-in-app-payments.
        return (self.product.premium_type in mkt.WEBAPP_PREMIUMS or
                self.product.premium_type == mkt.WEBAPP_FREE_INAPP)

    def clean_regions(self):
        """Require at least one region, unless this POST is a paid toggle."""
        regions = self.cleaned_data['regions']
        if not self.is_toggling():
            if not regions:
                raise forms.ValidationError(
                    _('You must select at least one region.'))
        return regions

    def save(self):
        """Persist region availability via exclusion rows and geodata."""
        # Don't save regions if we are toggling.
        if self.is_toggling():
            return

        regions = [int(x) for x in self.cleaned_data['regions']]
        special_regions = [
            int(x) for x in self.cleaned_data['special_regions']
        ]
        restricted = int(self.cleaned_data['restricted'] or 0)

        if restricted:
            # `before` is what was available; anything the developer
            # deselected becomes an exclusion row, anything re-selected
            # has its exclusion row removed.
            before = set(self.regions_before)
            after = set(regions)

            log.info(u'[Webapp:%s] App marked as restricted.' % self.product)

            # Add new region exclusions.
            to_add = before - after
            for region in to_add:
                aer, created = self.product.webappexcludedregion.get_or_create(
                    region=region)
                if created:
                    log.info(u'[Webapp:%s] Excluded from new region (%s).'
                             % (self.product, region))

            # Remove old region exclusions.
            to_remove = after - before
            for region in to_remove:
                self.product.webappexcludedregion.filter(
                    region=region).delete()
                log.info(u'[Webapp:%s] No longer excluded from region (%s).'
                         % (self.product, region))

            # If restricted, check how we should handle new regions.
            if self.cleaned_data['enable_new_regions']:
                self.product.update(enable_new_regions=True)
                log.info(u'[Webapp:%s] will be added to future regions.'
                         % self.product)
            else:
                self.product.update(enable_new_regions=False)
                log.info(u'[Webapp:%s] will not be added to future regions.'
                         % self.product)
        else:
            # If not restricted, set `enable_new_regions` to True and remove
            # currently excluded regions.
            self.product.update(enable_new_regions=True)
            self.product.webappexcludedregion.all().delete()
            log.info(u'[Webapp:%s] App marked as unrestricted.' % self.product)

        self.product.geodata.update(restricted=restricted)

        # Toggle region exclusions/statuses for special regions (e.g., China).
        toggle_app_for_special_regions(self.request, self.product,
                                       special_regions)
class CategoryForm(happyforms.Form):
    """Edit form for an app's categories, capped at mkt.MAX_CATEGORIES."""
    categories = forms.MultipleChoiceField(label=_lazy(u'Categories'),
                                           choices=CATEGORY_CHOICES,
                                           widget=forms.CheckboxSelectMultiple)

    def __init__(self, *args, **kwargs):
        self.request = kwargs.pop('request', None)
        self.product = kwargs.pop('product', None)
        super(CategoryForm, self).__init__(*args, **kwargs)

        # Remember the categories at form creation time.
        self.cats_before = list(self.product.categories or [])
        self.initial['categories'] = self.cats_before

    def max_categories(self):
        return mkt.MAX_CATEGORIES

    def clean_categories(self):
        """Reject selections that exceed the category limit."""
        categories = self.cleaned_data['categories']
        limit = mkt.MAX_CATEGORIES
        if len(set(categories)) > limit:
            # L10n: {0} is the number of categories.
            raise forms.ValidationError(ngettext(
                'You can have only {0} category.',
                'You can have only {0} categories.',
                limit).format(limit))
        return categories

    def save(self):
        """Persist the new categories and re-evaluate special regions."""
        chosen = list(self.cleaned_data['categories'])
        self.product.update(categories=chosen)
        toggle_app_for_special_regions(self.request, self.product)
class DevAgreementForm(happyforms.Form):
    """Records acceptance of the developer agreement on a user profile."""
    read_dev_agreement = forms.BooleanField(label=_lazy(u'Agree'),
                                            widget=forms.HiddenInput)

    def __init__(self, *args, **kwargs):
        self.instance = kwargs.pop('instance')
        super(DevAgreementForm, self).__init__(*args, **kwargs)

    def save(self):
        # Store *when* the agreement was accepted, not just a boolean.
        self.instance.read_dev_agreement = datetime.now()
        self.instance.save()
class DevNewsletterForm(happyforms.Form):
    """Devhub newsletter subscription form."""
    email = forms.EmailField(
        error_messages={'required':
                        _lazy(u'Please enter a valid email address.')},
        widget=forms.TextInput(attrs={'required': '',
                                      'placeholder':
                                      _lazy(u'Your email address')}))
    email_format = forms.ChoiceField(
        widget=forms.RadioSelect(),
        choices=(('H', 'HTML'), ('T', _lazy(u'Text'))),
        initial='H')
    privacy = forms.BooleanField(
        error_messages={'required':
                        _lazy(u'You must agree to the Privacy Policy.')})
    country = forms.ChoiceField(label=_lazy(u'Country'))

    def __init__(self, locale, *args, **kw):
        """Populate the country choices from the region list for `locale`.

        :param locale: locale code used to look up localized region names.
        """
        regions = mpconstants_regions.get_region(locale).REGIONS
        # Use .items() instead of the Python-2-only .iteritems() so this
        # also runs under Python 3; sort by localized region name.
        regions = sorted(regions.items(), key=lambda x: x[1])

        super(DevNewsletterForm, self).__init__(*args, **kw)

        self.fields['country'].choices = regions
        self.fields['country'].initial = 'us'
class AppFormTechnical(WebappFormBase):
    """Technical-details form. `flash` is stored on the version's first
    file rather than on the Webapp row itself."""
    flash = forms.BooleanField(required=False)
    is_offline = forms.BooleanField(required=False)

    class Meta:
        model = Webapp
        fields = ('is_offline', 'public_stats',)

    def __init__(self, *args, **kwargs):
        super(AppFormTechnical, self).__init__(*args, **kwargs)
        files = self.version.all_files
        if files:
            # Seed the checkbox from the file's current flag.
            self.initial['flash'] = files[0].uses_flash

    def save(self, webapp, commit=False):
        wants_flash = bool(self.cleaned_data.get('flash'))
        self.instance = super(AppFormTechnical, self).save(commit=True)
        files = self.version.all_files
        if files:
            # Persist the flash flag on the file, where it lives.
            files[0].update(uses_flash=wants_flash)
        return self.instance
class TransactionFilterForm(happyforms.Form):
    # Filter controls for the developer transaction listing.
    app = WebappChoiceField(queryset=None, required=False, label=_lazy(u'App'))
    transaction_type = forms.ChoiceField(
        required=False, label=_lazy(u'Transaction Type'),
        choices=[(None, '')] + mkt.MKT_TRANSACTION_CONTRIB_TYPES.items())
    transaction_id = forms.CharField(
        required=False, label=_lazy(u'Transaction ID'))

    # Year choices run from the current year back to 2013, descending.
    # NOTE(review): evaluated once at import time, so `years` can go stale
    # on a long-lived process that crosses a year boundary — confirm whether
    # that matters for this deployment.
    current_year = datetime.today().year
    years = [current_year - x for x in range(current_year - 2012)]
    date_from = forms.DateTimeField(
        required=False, widget=SelectDateWidget(years=years),
        label=_lazy(u'From'))
    date_to = forms.DateTimeField(
        required=False, widget=SelectDateWidget(years=years),
        label=_lazy(u'To'))

    def __init__(self, *args, **kwargs):
        # `apps` restricts the app dropdown, presumably to the current
        # developer's webapps — verify against the caller.
        self.apps = kwargs.pop('apps', [])
        super(TransactionFilterForm, self).__init__(*args, **kwargs)
        self.fields['app'].queryset = self.apps
class APIConsumerForm(happyforms.ModelForm):
    """Create/edit an OAuth API consumer (an Access row)."""
    app_name = forms.CharField(required=False)
    oauth_leg = forms.ChoiceField(choices=(
        ('website', _lazy('Web site')),
        ('command', _lazy('Command line')))
    )
    redirect_uri = forms.CharField(validators=[URLValidator()], required=False)

    class Meta:
        model = Access
        fields = ('app_name', 'redirect_uri')

    def __init__(self, *args, **kwargs):
        super(APIConsumerForm, self).__init__(*args, **kwargs)
        # Website consumers must supply a name and a redirect URI;
        # command-line consumers need neither.
        if self.data.get('oauth_leg') == 'website':
            self.fields['app_name'].required = True
            self.fields['redirect_uri'].required = True
class AppVersionForm(happyforms.ModelForm):
    """Edit a version's release/approval notes and whether the app should
    become active automatically once this version passes review."""
    releasenotes = TransField(widget=TransTextarea(), required=False)
    approvalnotes = forms.CharField(
        widget=TranslationTextarea(attrs={'rows': 4}), required=False)
    publish_immediately = forms.BooleanField(
        required=False,
        label=_lazy(u'Make this the Active version of my app as soon as it '
                    u'has been reviewed and approved.'))

    class Meta:
        model = Version
        fields = ('releasenotes', 'approvalnotes')

    def __init__(self, *args, **kwargs):
        super(AppVersionForm, self).__init__(*args, **kwargs)
        current_type = self.instance.webapp.publish_type
        self.fields['publish_immediately'].initial = (
            current_type == mkt.PUBLISH_IMMEDIATE)

    def save(self, *args, **kwargs):
        saved = super(AppVersionForm, self).save(*args, **kwargs)
        if self.instance.all_files[0].status == mkt.STATUS_PENDING:
            # Publish type may only change while the version is pending.
            if self.cleaned_data.get('publish_immediately'):
                new_type = mkt.PUBLISH_IMMEDIATE
            else:
                new_type = mkt.PUBLISH_PRIVATE
            self.instance.webapp.update(publish_type=new_type)
        return saved
class PreloadTestPlanForm(happyforms.Form):
    """Submission form for a preload candidate's test-plan document."""
    agree = forms.BooleanField(
        widget=forms.CheckboxInput,
        label=_lazy(
            u'Please consider my app as a candidate to be pre-loaded on a '
            u'Firefox OS device. I agree to the terms and conditions outlined '
            u'above. I understand that this document is not a commitment to '
            u'pre-load my app.'
        ))
    test_plan = forms.FileField(
        label=_lazy(u'Upload Your Test Plan (.pdf, .xls under 2.5MB)'),
        widget=forms.FileInput(attrs={'class': 'button'}))

    def clean(self):
        """Validate the test_plan file's MIME type and size."""
        # PDF and Excel variants are the only accepted document types.
        allowed_types = [
            'application/pdf',
            'application/vnd.pdf',
            'application/ms-excel',
            'application/vnd.ms-excel',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.'
            'sheet'
        ]
        size_limit = 2621440  # 2.5MB

        if 'test_plan' not in self.files:
            raise forms.ValidationError(_('Test plan required.'))

        upload = self.files['test_plan']
        # Type is guessed from the filename, not the content.
        guessed_type = mimetypes.guess_type(upload.name)[0]

        if guessed_type not in allowed_types:
            msg = (_('Invalid file type {0}. Only {1} files are supported.')
                   .format(guessed_type, ', '.join(allowed_types)))
            self._errors['test_plan'] = self.error_class([msg])
            raise forms.ValidationError(msg)

        if upload._size > size_limit:
            msg = _('File too large. Keep size under %s. Current size %s.')
            msg = msg % (filesizeformat(size_limit),
                         filesizeformat(upload._size))
            self._errors['test_plan'] = self.error_class([msg])
            raise forms.ValidationError(msg)

        return self.cleaned_data
class IARCGetAppInfoForm(happyforms.Form):
    """Attach an existing IARC rating certificate to an app by looking it
    up from the IARC Get_App_Info web service."""
    submission_id = forms.CharField()
    security_code = forms.CharField(max_length=10)

    def __init__(self, app, *args, **kwargs):
        # `app` is the webapp the certificate will be attached to.
        self.app = app
        super(IARCGetAppInfoForm, self).__init__(*args, **kwargs)

    def clean_submission_id(self):
        """Normalize the submission id to an int; accept 'subm-1234' form."""
        submission_id = (
            # Also allow "subm-1234" since that's what IARC tool displays.
            self.cleaned_data['submission_id'].lower().replace('subm-', ''))
        if submission_id.isdigit():
            return int(submission_id)
        raise forms.ValidationError(_('Please enter a valid submission ID.'))

    def clean(self):
        """Reject certificates already attached to a different app, unless
        reuse is explicitly allowed by settings."""
        cleaned_data = super(IARCGetAppInfoForm, self).clean()
        app = self.app
        iarc_id = cleaned_data.get('submission_id')
        if not app or not iarc_id:
            return cleaned_data
        if (not settings.IARC_ALLOW_CERT_REUSE and
            IARCInfo.objects.filter(submission_id=iarc_id)
                    .exclude(webapp=app).exists()):
            del cleaned_data['submission_id']
            raise forms.ValidationError(
                _('This IARC certificate is already being used for another '
                  'app. Please create a new IARC Ratings Certificate.'))
        return cleaned_data

    def save(self, *args, **kwargs):
        """Fetch rating info from IARC and store it on the app.

        Stores the certificate info, content descriptors, interactive
        elements and ratings. Raises forms.ValidationError (and records a
        field error on submission_id) when IARC returns no usable row.
        """
        app = self.app
        iarc_id = self.cleaned_data['submission_id']
        iarc_code = self.cleaned_data['security_code']
        if settings.DEBUG and iarc_id == 0:
            # A local developer is being lazy. Skip the hard work.
            app.set_iarc_info(iarc_id, iarc_code)
            app.set_descriptors([])
            app.set_interactives([])
            app.set_content_ratings({ratingsbodies.ESRB: ratingsbodies.ESRB_E})
            return
        # Generate XML.
        xml = lib.iarc.utils.render_xml(
            'get_app_info.xml',
            {'submission_id': iarc_id, 'security_code': iarc_code})
        # Process that shizzle.
        client = lib.iarc.client.get_iarc_client('services')
        resp = client.Get_App_Info(XMLString=xml)
        # Handle response.
        data = lib.iarc.utils.IARC_XML_Parser().parse_string(resp)
        if data.get('rows'):
            row = data['rows'][0]
            if 'submission_id' not in row:
                # [{'ActionStatus': 'No records found. Please try another
                # 'criteria.', 'rowId: 1}].
                msg = _('Invalid submission ID or security code.')
                self._errors['submission_id'] = self.error_class([msg])
                log.info('[IARC] Bad GetAppInfo: %s' % row)
                raise forms.ValidationError(msg)
            # We found a rating, so store the id and code for future use.
            app.set_iarc_info(iarc_id, iarc_code)
            app.set_descriptors(row.get('descriptors', []))
            app.set_interactives(row.get('interactives', []))
            app.set_content_ratings(row.get('ratings', {}))
        else:
            msg = _('Invalid submission ID or security code.')
            self._errors['submission_id'] = self.error_class([msg])
            log.info('[IARC] Bad GetAppInfo. No rows: %s' % data)
            raise forms.ValidationError(msg)
class ContentRatingForm(happyforms.Form):
    # Cut-off datetime: callers presumably want only content ratings
    # modified since this moment — verify against the consuming view.
    since = forms.DateTimeField()
class MOTDForm(happyforms.Form):
    # Free-form "message of the day" text.
    motd = forms.CharField(widget=widgets.Textarea())
| |
# (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
from __future__ import absolute_import
import urllib3
import unittest
import requests
import tests.apps.flask_app
from ..helpers import testenv
from instana.singletons import agent, tracer
class TestUrllib3(unittest.TestCase):
def setUp(self):
""" Clear all spans before a test run """
self.http = urllib3.PoolManager()
self.recorder = tracer.recorder
self.recorder.clear_spans()
def tearDown(self):
""" Do nothing for now """
return None
def test_vanilla_requests(self):
r = self.http.request('GET', testenv["wsgi_server"] + '/')
self.assertEqual(r.status, 200)
spans = self.recorder.queued_spans()
self.assertEqual(1, len(spans))
def test_get_request(self):
with tracer.start_active_span('test'):
r = self.http.request('GET', testenv["wsgi_server"] + '/')
spans = self.recorder.queued_spans()
self.assertEqual(3, len(spans))
wsgi_span = spans[0]
urllib3_span = spans[1]
test_span = spans[2]
assert(r)
self.assertEqual(200, r.status)
self.assertIsNone(tracer.active_span)
# Same traceId
self.assertEqual(test_span.t, urllib3_span.t)
self.assertEqual(urllib3_span.t, wsgi_span.t)
# Parent relationships
self.assertEqual(urllib3_span.p, test_span.s)
self.assertEqual(wsgi_span.p, urllib3_span.s)
# Error logging
self.assertIsNone(test_span.ec)
self.assertIsNone(urllib3_span.ec)
self.assertIsNone(wsgi_span.ec)
# wsgi
self.assertEqual("wsgi", wsgi_span.n)
self.assertEqual('127.0.0.1:' + str(testenv["wsgi_port"]), wsgi_span.data["http"]["host"])
self.assertEqual('/', wsgi_span.data["http"]["url"])
self.assertEqual('GET', wsgi_span.data["http"]["method"])
self.assertEqual(200, wsgi_span.data["http"]["status"])
self.assertIsNone(wsgi_span.data["http"]["error"])
self.assertIsNone(wsgi_span.stack)
# urllib3
self.assertEqual("test", test_span.data["sdk"]["name"])
self.assertEqual("urllib3", urllib3_span.n)
self.assertEqual(200, urllib3_span.data["http"]["status"])
self.assertEqual(testenv["wsgi_server"] + "/", urllib3_span.data["http"]["url"])
self.assertEqual("GET", urllib3_span.data["http"]["method"])
self.assertIsNotNone(urllib3_span.stack)
self.assertTrue(type(urllib3_span.stack) is list)
self.assertTrue(len(urllib3_span.stack) > 1)
def test_get_request_with_query(self):
with tracer.start_active_span('test'):
r = self.http.request('GET', testenv["wsgi_server"] + '/?one=1&two=2')
spans = self.recorder.queued_spans()
self.assertEqual(3, len(spans))
wsgi_span = spans[0]
urllib3_span = spans[1]
test_span = spans[2]
assert(r)
self.assertEqual(200, r.status)
self.assertIsNone(tracer.active_span)
# Same traceId
self.assertEqual(test_span.t, urllib3_span.t)
self.assertEqual(urllib3_span.t, wsgi_span.t)
# Parent relationships
self.assertEqual(urllib3_span.p, test_span.s)
self.assertEqual(wsgi_span.p, urllib3_span.s)
# Error logging
self.assertIsNone(test_span.ec)
self.assertIsNone(urllib3_span.ec)
self.assertIsNone(wsgi_span.ec)
# wsgi
self.assertEqual("wsgi", wsgi_span.n)
self.assertEqual('127.0.0.1:' + str(testenv["wsgi_port"]), wsgi_span.data["http"]["host"])
self.assertEqual('/', wsgi_span.data["http"]["url"])
self.assertEqual('GET', wsgi_span.data["http"]["method"])
self.assertEqual(200, wsgi_span.data["http"]["status"])
self.assertIsNone(wsgi_span.data["http"]["error"])
self.assertIsNone(wsgi_span.stack)
# urllib3
self.assertEqual("test", test_span.data["sdk"]["name"])
self.assertEqual("urllib3", urllib3_span.n)
self.assertEqual(200, urllib3_span.data["http"]["status"])
self.assertEqual(testenv["wsgi_server"] + "/", urllib3_span.data["http"]["url"])
self.assertTrue(urllib3_span.data["http"]["params"] in ["one=1&two=2", "two=2&one=1"] )
self.assertEqual("GET", urllib3_span.data["http"]["method"])
self.assertIsNotNone(urllib3_span.stack)
self.assertTrue(type(urllib3_span.stack) is list)
self.assertTrue(len(urllib3_span.stack) > 1)
def test_get_request_with_alt_query(self):
with tracer.start_active_span('test'):
r = self.http.request('GET', testenv["wsgi_server"] + '/', fields={'one': '1', 'two': 2})
spans = self.recorder.queued_spans()
self.assertEqual(3, len(spans))
wsgi_span = spans[0]
urllib3_span = spans[1]
test_span = spans[2]
assert(r)
self.assertEqual(200, r.status)
self.assertIsNone(tracer.active_span)
# Same traceId
self.assertEqual(test_span.t, urllib3_span.t)
self.assertEqual(urllib3_span.t, wsgi_span.t)
# Parent relationships
self.assertEqual(urllib3_span.p, test_span.s)
self.assertEqual(wsgi_span.p, urllib3_span.s)
# Error logging
self.assertIsNone(test_span.ec)
self.assertIsNone(urllib3_span.ec)
self.assertIsNone(wsgi_span.ec)
# wsgi
self.assertEqual("wsgi", wsgi_span.n)
self.assertEqual('127.0.0.1:' + str(testenv["wsgi_port"]), wsgi_span.data["http"]["host"])
self.assertEqual('/', wsgi_span.data["http"]["url"])
self.assertEqual('GET', wsgi_span.data["http"]["method"])
self.assertEqual(200, wsgi_span.data["http"]["status"])
self.assertIsNone(wsgi_span.data["http"]["error"])
self.assertIsNone(wsgi_span.stack)
# urllib3
self.assertEqual("test", test_span.data["sdk"]["name"])
self.assertEqual("urllib3", urllib3_span.n)
self.assertEqual(200, urllib3_span.data["http"]["status"])
self.assertEqual(testenv["wsgi_server"] + "/", urllib3_span.data["http"]["url"])
self.assertTrue(urllib3_span.data["http"]["params"] in ["one=1&two=2", "two=2&one=1"] )
self.assertEqual("GET", urllib3_span.data["http"]["method"])
self.assertIsNotNone(urllib3_span.stack)
self.assertTrue(type(urllib3_span.stack) is list)
self.assertTrue(len(urllib3_span.stack) > 1)
def test_put_request(self):
with tracer.start_active_span('test'):
r = self.http.request('PUT', testenv["wsgi_server"] + '/notfound')
spans = self.recorder.queued_spans()
self.assertEqual(3, len(spans))
wsgi_span = spans[0]
urllib3_span = spans[1]
test_span = spans[2]
assert(r)
self.assertEqual(404, r.status)
self.assertIsNone(tracer.active_span)
# Same traceId
self.assertEqual(test_span.t, urllib3_span.t)
self.assertEqual(urllib3_span.t, wsgi_span.t)
# Parent relationships
self.assertEqual(urllib3_span.p, test_span.s)
self.assertEqual(wsgi_span.p, urllib3_span.s)
# Error logging
self.assertIsNone(test_span.ec)
self.assertIsNone(urllib3_span.ec)
self.assertIsNone(wsgi_span.ec)
# wsgi
self.assertEqual("wsgi", wsgi_span.n)
self.assertEqual('127.0.0.1:' + str(testenv["wsgi_port"]), wsgi_span.data["http"]["host"])
self.assertEqual('/notfound', wsgi_span.data["http"]["url"])
self.assertEqual('PUT', wsgi_span.data["http"]["method"])
self.assertEqual(404, wsgi_span.data["http"]["status"])
self.assertIsNone(wsgi_span.data["http"]["error"])
self.assertIsNone(wsgi_span.stack)
# urllib3
self.assertEqual("test", test_span.data["sdk"]["name"])
self.assertEqual("urllib3", urllib3_span.n)
self.assertEqual(404, urllib3_span.data["http"]["status"])
self.assertEqual(testenv["wsgi_server"] + "/notfound", urllib3_span.data["http"]["url"])
self.assertEqual("PUT", urllib3_span.data["http"]["method"])
self.assertIsNotNone(urllib3_span.stack)
self.assertTrue(type(urllib3_span.stack) is list)
self.assertTrue(len(urllib3_span.stack) > 1)
def test_301_redirect(self):
with tracer.start_active_span('test'):
r = self.http.request('GET', testenv["wsgi_server"] + '/301')
spans = self.recorder.queued_spans()
self.assertEqual(5, len(spans))
wsgi_span2 = spans[0]
urllib3_span2 = spans[1]
wsgi_span1 = spans[2]
urllib3_span1 = spans[3]
test_span = spans[4]
assert(r)
self.assertEqual(200, r.status)
self.assertIsNone(tracer.active_span)
# Same traceId
traceId = test_span.t
self.assertEqual(traceId, urllib3_span1.t)
self.assertEqual(traceId, wsgi_span1.t)
self.assertEqual(traceId, urllib3_span2.t)
self.assertEqual(traceId, wsgi_span2.t)
# Parent relationships
self.assertEqual(urllib3_span1.p, test_span.s)
self.assertEqual(wsgi_span1.p, urllib3_span1.s)
self.assertEqual(urllib3_span2.p, test_span.s)
self.assertEqual(wsgi_span2.p, urllib3_span2.s)
# Error logging
self.assertIsNone(test_span.ec)
self.assertIsNone(urllib3_span1.ec)
self.assertIsNone(wsgi_span1.ec)
self.assertIsNone(urllib3_span2.ec)
self.assertIsNone(wsgi_span2.ec)
# wsgi
self.assertEqual("wsgi", wsgi_span1.n)
self.assertEqual('127.0.0.1:' + str(testenv["wsgi_port"]), wsgi_span1.data["http"]["host"])
self.assertEqual('/', wsgi_span1.data["http"]["url"])
self.assertEqual('GET', wsgi_span1.data["http"]["method"])
self.assertEqual(200, wsgi_span1.data["http"]["status"])
self.assertIsNone(wsgi_span1.data["http"]["error"])
self.assertIsNone(wsgi_span1.stack)
self.assertEqual("wsgi", wsgi_span2.n)
self.assertEqual('127.0.0.1:' + str(testenv["wsgi_port"]), wsgi_span2.data["http"]["host"])
self.assertEqual('/301', wsgi_span2.data["http"]["url"])
self.assertEqual('GET', wsgi_span2.data["http"]["method"])
self.assertEqual(301, wsgi_span2.data["http"]["status"])
self.assertIsNone(wsgi_span2.data["http"]["error"])
self.assertIsNone(wsgi_span2.stack)
# urllib3
self.assertEqual("test", test_span.data["sdk"]["name"])
self.assertEqual("urllib3", urllib3_span1.n)
self.assertEqual(200, urllib3_span1.data["http"]["status"])
self.assertEqual(testenv["wsgi_server"] + "/", urllib3_span1.data["http"]["url"])
self.assertEqual("GET", urllib3_span1.data["http"]["method"])
self.assertIsNotNone(urllib3_span1.stack)
self.assertTrue(type(urllib3_span1.stack) is list)
self.assertTrue(len(urllib3_span1.stack) > 1)
self.assertEqual("urllib3", urllib3_span2.n)
self.assertEqual(301, urllib3_span2.data["http"]["status"])
self.assertEqual(testenv["wsgi_server"] + "/301", urllib3_span2.data["http"]["url"])
self.assertEqual("GET", urllib3_span2.data["http"]["method"])
self.assertIsNotNone(urllib3_span2.stack)
self.assertTrue(type(urllib3_span2.stack) is list)
self.assertTrue(len(urllib3_span2.stack) > 1)
def test_302_redirect(self):
with tracer.start_active_span('test'):
r = self.http.request('GET', testenv["wsgi_server"] + '/302')
spans = self.recorder.queued_spans()
self.assertEqual(5, len(spans))
wsgi_span2 = spans[0]
urllib3_span2 = spans[1]
wsgi_span1 = spans[2]
urllib3_span1 = spans[3]
test_span = spans[4]
assert(r)
self.assertEqual(200, r.status)
self.assertIsNone(tracer.active_span)
# Same traceId
traceId = test_span.t
self.assertEqual(traceId, urllib3_span1.t)
self.assertEqual(traceId, wsgi_span1.t)
self.assertEqual(traceId, urllib3_span2.t)
self.assertEqual(traceId, wsgi_span2.t)
# Parent relationships
self.assertEqual(urllib3_span1.p, test_span.s)
self.assertEqual(wsgi_span1.p, urllib3_span1.s)
self.assertEqual(urllib3_span2.p, test_span.s)
self.assertEqual(wsgi_span2.p, urllib3_span2.s)
# Error logging
self.assertIsNone(test_span.ec)
self.assertIsNone(urllib3_span1.ec)
self.assertIsNone(wsgi_span1.ec)
self.assertIsNone(urllib3_span2.ec)
self.assertIsNone(wsgi_span2.ec)
# wsgi
self.assertEqual("wsgi", wsgi_span1.n)
self.assertEqual('127.0.0.1:' + str(testenv["wsgi_port"]), wsgi_span1.data["http"]["host"])
self.assertEqual('/', wsgi_span1.data["http"]["url"])
self.assertEqual('GET', wsgi_span1.data["http"]["method"])
self.assertEqual(200, wsgi_span1.data["http"]["status"])
self.assertIsNone(wsgi_span1.data["http"]["error"])
self.assertIsNone(wsgi_span1.stack)
self.assertEqual("wsgi", wsgi_span2.n)
self.assertEqual('127.0.0.1:' + str(testenv["wsgi_port"]), wsgi_span2.data["http"]["host"])
self.assertEqual('/302', wsgi_span2.data["http"]["url"])
self.assertEqual('GET', wsgi_span2.data["http"]["method"])
self.assertEqual(302, wsgi_span2.data["http"]["status"])
self.assertIsNone(wsgi_span2.data["http"]["error"])
self.assertIsNone(wsgi_span2.stack)
# urllib3
self.assertEqual("test", test_span.data["sdk"]["name"])
self.assertEqual("urllib3", urllib3_span1.n)
self.assertEqual(200, urllib3_span1.data["http"]["status"])
self.assertEqual(testenv["wsgi_server"] + "/", urllib3_span1.data["http"]["url"])
self.assertEqual("GET", urllib3_span1.data["http"]["method"])
self.assertIsNotNone(urllib3_span1.stack)
self.assertTrue(type(urllib3_span1.stack) is list)
self.assertTrue(len(urllib3_span1.stack) > 1)
self.assertEqual("urllib3", urllib3_span2.n)
self.assertEqual(302, urllib3_span2.data["http"]["status"])
self.assertEqual(testenv["wsgi_server"] + "/302", urllib3_span2.data["http"]["url"])
self.assertEqual("GET", urllib3_span2.data["http"]["method"])
self.assertIsNotNone(urllib3_span2.stack)
self.assertTrue(type(urllib3_span2.stack) is list)
self.assertTrue(len(urllib3_span2.stack) > 1)
def test_5xx_request(self):
with tracer.start_active_span('test'):
r = self.http.request('GET', testenv["wsgi_server"] + '/504')
spans = self.recorder.queued_spans()
self.assertEqual(3, len(spans))
wsgi_span = spans[0]
urllib3_span = spans[1]
test_span = spans[2]
assert(r)
self.assertEqual(504, r.status)
self.assertIsNone(tracer.active_span)
# Same traceId
traceId = test_span.t
self.assertEqual(traceId, urllib3_span.t)
self.assertEqual(traceId, wsgi_span.t)
# Parent relationships
self.assertEqual(urllib3_span.p, test_span.s)
self.assertEqual(wsgi_span.p, urllib3_span.s)
# Error logging
self.assertIsNone(test_span.ec)
self.assertEqual(1, urllib3_span.ec)
self.assertEqual(1, wsgi_span.ec)
# wsgi
self.assertEqual("wsgi", wsgi_span.n)
self.assertEqual('127.0.0.1:' + str(testenv["wsgi_port"]), wsgi_span.data["http"]["host"])
self.assertEqual('/504', wsgi_span.data["http"]["url"])
self.assertEqual('GET', wsgi_span.data["http"]["method"])
self.assertEqual(504, wsgi_span.data["http"]["status"])
self.assertIsNone(wsgi_span.data["http"]["error"])
self.assertIsNone(wsgi_span.stack)
# urllib3
self.assertEqual("test", test_span.data["sdk"]["name"])
self.assertEqual("urllib3", urllib3_span.n)
self.assertEqual(504, urllib3_span.data["http"]["status"])
self.assertEqual(testenv["wsgi_server"] + "/504", urllib3_span.data["http"]["url"])
self.assertEqual("GET", urllib3_span.data["http"]["method"])
self.assertIsNotNone(urllib3_span.stack)
self.assertTrue(type(urllib3_span.stack) is list)
self.assertTrue(len(urllib3_span.stack) > 1)
def test_exception_logging(self):
    """A request to an endpoint that raises server-side must yield a 500
    and error-flag the urllib3 exit span and the wsgi entry span.

    Expects 4 queued spans; the first (index 0) is the server-side error
    log span, which this test does not inspect.
    """
    # Initialize r up front (as test_client_error does) so that a raised
    # exception in the request does not turn `assert(r)` below into a
    # NameError instead of a meaningful assertion failure.
    r = None
    with tracer.start_active_span('test'):
        try:
            r = self.http.request('GET', testenv["wsgi_server"] + '/exception')
        except Exception:
            # urllib3 normally returns the 500 response rather than raising;
            # this guard only keeps the test deterministic if it does raise.
            pass

    spans = self.recorder.queued_spans()
    self.assertEqual(4, len(spans))
    wsgi_span = spans[1]
    urllib3_span = spans[2]
    test_span = spans[3]

    assert(r)
    self.assertEqual(500, r.status)
    self.assertIsNone(tracer.active_span)

    # Same traceId across all spans.
    traceId = test_span.t
    self.assertEqual(traceId, urllib3_span.t)
    self.assertEqual(traceId, wsgi_span.t)

    # Parent relationships: test -> urllib3 -> wsgi.
    self.assertEqual(urllib3_span.p, test_span.s)
    self.assertEqual(wsgi_span.p, urllib3_span.s)

    # Error logging: only the HTTP spans carry an error count.
    self.assertIsNone(test_span.ec)
    self.assertEqual(1, urllib3_span.ec)
    self.assertEqual(1, wsgi_span.ec)

    # wsgi entry span.
    self.assertEqual("wsgi", wsgi_span.n)
    self.assertEqual('127.0.0.1:' + str(testenv["wsgi_port"]), wsgi_span.data["http"]["host"])
    self.assertEqual('/exception', wsgi_span.data["http"]["url"])
    self.assertEqual('GET', wsgi_span.data["http"]["method"])
    self.assertEqual(500, wsgi_span.data["http"]["status"])
    self.assertIsNone(wsgi_span.data["http"]["error"])
    self.assertIsNone(wsgi_span.stack)

    # urllib3 exit span.
    self.assertEqual("test", test_span.data["sdk"]["name"])
    self.assertEqual("urllib3", urllib3_span.n)
    self.assertEqual(500, urllib3_span.data["http"]["status"])
    self.assertEqual(testenv["wsgi_server"] + "/exception", urllib3_span.data["http"]["url"])
    self.assertEqual("GET", urllib3_span.data["http"]["method"])
    self.assertIsNotNone(urllib3_span.stack)
    self.assertTrue(type(urllib3_span.stack) is list)
    self.assertTrue(len(urllib3_span.stack) > 1)
def test_client_error(self):
    """A connection failure (unresolvable host) must produce an urllib3
    exit span with no HTTP status and an error count of 1."""
    r = None
    with tracer.start_active_span('test'):
        try:
            r = self.http.request('GET', 'http://doesnotexist.asdf:5000/504',
                                  retries=False,
                                  timeout=urllib3.Timeout(connect=0.5, read=0.5))
        except Exception:
            # The request is expected to fail; r stays None.
            pass

    spans = self.recorder.queued_spans()
    self.assertEqual(2, len(spans))
    urllib3_span, test_span = spans

    self.assertIsNone(r)

    # Same trace, and urllib3 is a child of the test span.
    self.assertEqual(test_span.t, urllib3_span.t)
    self.assertEqual(urllib3_span.p, test_span.s)

    self.assertEqual("test", test_span.data["sdk"]["name"])
    self.assertEqual("urllib3", urllib3_span.n)

    # No response was ever received, so no status is recorded.
    self.assertIsNone(urllib3_span.data["http"]["status"])
    self.assertEqual("http://doesnotexist.asdf:5000/504", urllib3_span.data["http"]["url"])
    self.assertEqual("GET", urllib3_span.data["http"]["method"])
    self.assertIsNotNone(urllib3_span.stack)
    self.assertTrue(type(urllib3_span.stack) is list)
    self.assertTrue(len(urllib3_span.stack) > 1)

    # Only the exit span is error-flagged.
    self.assertIsNone(test_span.ec)
    self.assertEqual(1, urllib3_span.ec)
def test_requestspkg_get(self):
    """A requests.get() call must be traced through the underlying urllib3
    instrumentation: wsgi entry + urllib3 exit + SDK test span."""
    self.recorder.clear_spans()
    with tracer.start_active_span('test'):
        r = requests.get(testenv["wsgi_server"] + '/', timeout=2)

    spans = self.recorder.queued_spans()
    self.assertEqual(3, len(spans))
    wsgi_span, urllib3_span, test_span = spans

    assert(r)
    self.assertEqual(200, r.status_code)
    self.assertIsNone(tracer.active_span)

    # One trace end to end.
    self.assertEqual(test_span.t, urllib3_span.t)
    self.assertEqual(urllib3_span.t, wsgi_span.t)

    # Parent chain: test -> urllib3 -> wsgi.
    self.assertEqual(urllib3_span.p, test_span.s)
    self.assertEqual(wsgi_span.p, urllib3_span.s)

    # Successful request: no errors anywhere.
    self.assertIsNone(test_span.ec)
    self.assertIsNone(urllib3_span.ec)
    self.assertIsNone(wsgi_span.ec)

    # wsgi entry span details.
    self.assertEqual("wsgi", wsgi_span.n)
    self.assertEqual('127.0.0.1:' + str(testenv["wsgi_port"]), wsgi_span.data["http"]["host"])
    self.assertEqual('/', wsgi_span.data["http"]["url"])
    self.assertEqual('GET', wsgi_span.data["http"]["method"])
    self.assertEqual(200, wsgi_span.data["http"]["status"])
    self.assertIsNone(wsgi_span.data["http"]["error"])
    self.assertIsNone(wsgi_span.stack)

    # urllib3 exit span details.
    self.assertEqual("test", test_span.data["sdk"]["name"])
    self.assertEqual("urllib3", urllib3_span.n)
    self.assertEqual(200, urllib3_span.data["http"]["status"])
    self.assertEqual(testenv["wsgi_server"] + "/", urllib3_span.data["http"]["url"])
    self.assertEqual("GET", urllib3_span.data["http"]["method"])
    self.assertIsNotNone(urllib3_span.stack)
    self.assertTrue(type(urllib3_span.stack) is list)
    self.assertTrue(len(urllib3_span.stack) > 1)
def test_requestspkg_get_with_custom_headers(self):
    """Passing user-supplied headers through requests.get() must not break
    the urllib3 instrumentation or trace propagation."""
    my_custom_headers = {'X-PGL-1': '1'}

    with tracer.start_active_span('test'):
        r = requests.get(testenv["wsgi_server"] + '/', timeout=2, headers=my_custom_headers)

    spans = self.recorder.queued_spans()
    self.assertEqual(3, len(spans))
    wsgi_span, urllib3_span, test_span = spans

    assert(r)
    self.assertEqual(200, r.status_code)
    self.assertIsNone(tracer.active_span)

    # All spans belong to one trace.
    self.assertEqual(test_span.t, urllib3_span.t)
    self.assertEqual(urllib3_span.t, wsgi_span.t)

    # Parent chain: test -> urllib3 -> wsgi.
    self.assertEqual(urllib3_span.p, test_span.s)
    self.assertEqual(wsgi_span.p, urllib3_span.s)

    # No errors on a successful request.
    self.assertIsNone(test_span.ec)
    self.assertIsNone(urllib3_span.ec)
    self.assertIsNone(wsgi_span.ec)

    # wsgi entry span.
    self.assertEqual("wsgi", wsgi_span.n)
    self.assertEqual('127.0.0.1:' + str(testenv["wsgi_port"]), wsgi_span.data["http"]["host"])
    self.assertEqual('/', wsgi_span.data["http"]["url"])
    self.assertEqual('GET', wsgi_span.data["http"]["method"])
    self.assertEqual(200, wsgi_span.data["http"]["status"])
    self.assertIsNone(wsgi_span.data["http"]["error"])
    self.assertIsNone(wsgi_span.stack)

    # urllib3 exit span.
    self.assertEqual("test", test_span.data["sdk"]["name"])
    self.assertEqual("urllib3", urllib3_span.n)
    self.assertEqual(200, urllib3_span.data["http"]["status"])
    self.assertEqual(testenv["wsgi_server"] + "/", urllib3_span.data["http"]["url"])
    self.assertEqual("GET", urllib3_span.data["http"]["method"])
    self.assertIsNotNone(urllib3_span.stack)
    self.assertTrue(type(urllib3_span.stack) is list)
    self.assertTrue(len(urllib3_span.stack) > 1)
def test_requestspkg_put(self):
    """A PUT via requests that gets a 404 back is traced but NOT treated
    as an error (4xx is a client outcome, not an instrumentation error)."""
    with tracer.start_active_span('test'):
        r = requests.put(testenv["wsgi_server"] + '/notfound')

    spans = self.recorder.queued_spans()
    self.assertEqual(3, len(spans))
    wsgi_span, urllib3_span, test_span = spans

    self.assertEqual(404, r.status_code)
    self.assertIsNone(tracer.active_span)

    # One trace end to end.
    self.assertEqual(test_span.t, urllib3_span.t)
    self.assertEqual(urllib3_span.t, wsgi_span.t)

    # Parent chain: test -> urllib3 -> wsgi.
    self.assertEqual(urllib3_span.p, test_span.s)
    self.assertEqual(wsgi_span.p, urllib3_span.s)

    # A 404 does not increment any error counters.
    self.assertIsNone(test_span.ec)
    self.assertIsNone(urllib3_span.ec)
    self.assertIsNone(wsgi_span.ec)

    # wsgi entry span.
    self.assertEqual("wsgi", wsgi_span.n)
    self.assertEqual('127.0.0.1:' + str(testenv["wsgi_port"]), wsgi_span.data["http"]["host"])
    self.assertEqual('/notfound', wsgi_span.data["http"]["url"])
    self.assertEqual('PUT', wsgi_span.data["http"]["method"])
    self.assertEqual(404, wsgi_span.data["http"]["status"])
    self.assertIsNone(wsgi_span.data["http"]["error"])
    self.assertIsNone(wsgi_span.stack)

    # urllib3 exit span.
    self.assertEqual("test", test_span.data["sdk"]["name"])
    self.assertEqual("urllib3", urllib3_span.n)
    self.assertEqual(404, urllib3_span.data["http"]["status"])
    self.assertEqual(testenv["wsgi_server"] + "/notfound", urllib3_span.data["http"]["url"])
    self.assertEqual("PUT", urllib3_span.data["http"]["method"])
    self.assertIsNotNone(urllib3_span.stack)
    self.assertTrue(type(urllib3_span.stack) is list)
    self.assertTrue(len(urllib3_span.stack) > 1)
def test_response_header_capture(self):
    """Headers listed in agent.options.extra_http_headers must be captured
    from the response onto the urllib3 exit span.

    The agent option is restored in a finally block so that an assertion
    failure cannot leak the modified global setting into other tests
    (the original restored it only on the success path).
    """
    original_extra_http_headers = agent.options.extra_http_headers
    agent.options.extra_http_headers = ['X-Capture-This']
    try:
        with tracer.start_active_span('test'):
            r = self.http.request('GET', testenv["wsgi_server"] + '/response_headers')

        spans = self.recorder.queued_spans()
        self.assertEqual(3, len(spans))
        wsgi_span = spans[0]
        urllib3_span = spans[1]
        test_span = spans[2]

        assert(r)
        self.assertEqual(200, r.status)
        self.assertIsNone(tracer.active_span)

        # Same traceId across all spans.
        self.assertEqual(test_span.t, urllib3_span.t)
        self.assertEqual(urllib3_span.t, wsgi_span.t)

        # Parent relationships: test -> urllib3 -> wsgi.
        self.assertEqual(urllib3_span.p, test_span.s)
        self.assertEqual(wsgi_span.p, urllib3_span.s)

        # No errors on a successful request.
        self.assertIsNone(test_span.ec)
        self.assertIsNone(urllib3_span.ec)
        self.assertIsNone(wsgi_span.ec)

        # wsgi entry span.
        self.assertEqual("wsgi", wsgi_span.n)
        self.assertEqual('127.0.0.1:' + str(testenv["wsgi_port"]), wsgi_span.data["http"]["host"])
        self.assertEqual('/response_headers', wsgi_span.data["http"]["url"])
        self.assertEqual('GET', wsgi_span.data["http"]["method"])
        self.assertEqual(200, wsgi_span.data["http"]["status"])
        self.assertIsNone(wsgi_span.data["http"]["error"])
        self.assertIsNone(wsgi_span.stack)

        # urllib3 exit span.
        self.assertEqual("test", test_span.data["sdk"]["name"])
        self.assertEqual("urllib3", urllib3_span.n)
        self.assertEqual(200, urllib3_span.data["http"]["status"])
        self.assertEqual(testenv["wsgi_server"] + "/response_headers", urllib3_span.data["http"]["url"])
        self.assertEqual("GET", urllib3_span.data["http"]["method"])
        self.assertIsNotNone(urllib3_span.stack)
        self.assertTrue(type(urllib3_span.stack) is list)
        self.assertTrue(len(urllib3_span.stack) > 1)

        # The configured header must have been captured on the exit span.
        assert "X-Capture-This" in urllib3_span.data["http"]["header"]
        self.assertEqual("Ok", urllib3_span.data["http"]["header"]["X-Capture-This"])
    finally:
        # Always restore the global agent configuration.
        agent.options.extra_http_headers = original_extra_http_headers
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.