gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
from numpy.testing import assert_, assert_allclose, assert_equal
from pytest import raises as assert_raises
import numpy as np
from scipy.optimize._lsq.common import (
step_size_to_bound, find_active_constraints, make_strictly_feasible,
CL_scaling_vector, intersect_trust_region, build_quadratic_1d,
minimize_quadratic_1d, evaluate_quadratic, reflective_transformation,
left_multiplied_operator, right_multiplied_operator)
class TestBounds(object):
    """Tests for the bound-handling helpers of scipy.optimize._lsq.common."""
    def test_step_size_to_bounds(self):
        # step_size_to_bound(x, s, lb, ub) returns the largest step along
        # direction s before some bound is reached, plus per-component hit
        # indicators: 1 = upper bound hit, -1 = lower bound hit, 0 = none.
        lb = np.array([-1.0, 2.5, 10.0])
        ub = np.array([1.0, 5.0, 100.0])
        x = np.array([0.0, 2.5, 12.0])
        s = np.array([0.1, 0.0, 0.0])
        step, hits = step_size_to_bound(x, s, lb, ub)
        assert_equal(step, 10)
        assert_equal(hits, [1, 0, 0])
        s = np.array([0.01, 0.05, -1.0])
        step, hits = step_size_to_bound(x, s, lb, ub)
        assert_equal(step, 2)
        assert_equal(hits, [0, 0, -1])
        # x[1] sits exactly on its lower bound, so a negative direction
        # there yields a zero-length step.
        s = np.array([10.0, -0.0001, 100.0])
        step, hits = step_size_to_bound(x, s, lb, ub)
        assert_equal(step, np.array(-0))
        assert_equal(hits, [0, -1, 0])
        # Two components reach their bounds at the same step length.
        s = np.array([1.0, 0.5, -2.0])
        step, hits = step_size_to_bound(x, s, lb, ub)
        assert_equal(step, 1.0)
        assert_equal(hits, [1, 0, -1])
        # A zero direction never reaches a bound: infinite step, no hits.
        s = np.zeros(3)
        step, hits = step_size_to_bound(x, s, lb, ub)
        assert_equal(step, np.inf)
        assert_equal(hits, [0, 0, 0])
    def test_find_active_constraints(self):
        # find_active_constraints flags, per component, whether x lies on
        # (or beyond) a bound: -1 lower, 1 upper, 0 inactive. rtol controls
        # how close to a bound counts as active.
        lb = np.array([0.0, -10.0, 1.0])
        ub = np.array([1.0, 0.0, 100.0])
        x = np.array([0.5, -5.0, 2.0])
        active = find_active_constraints(x, lb, ub)
        assert_equal(active, [0, 0, 0])
        x = np.array([0.0, 0.0, 10.0])
        active = find_active_constraints(x, lb, ub)
        assert_equal(active, [-1, 1, 0])
        active = find_active_constraints(x, lb, ub, rtol=0)
        assert_equal(active, [-1, 1, 0])
        # Components within the rtol tolerance of a bound become active.
        x = np.array([1e-9, -1e-8, 100 - 1e-9])
        active = find_active_constraints(x, lb, ub)
        assert_equal(active, [0, 0, 1])
        active = find_active_constraints(x, lb, ub, rtol=1.5e-9)
        assert_equal(active, [-1, 0, 1])
        # Infinite bounds can never be active.
        lb = np.array([1.0, -np.inf, -np.inf])
        ub = np.array([np.inf, 10.0, np.inf])
        x = np.ones(3)
        active = find_active_constraints(x, lb, ub)
        assert_equal(active, [-1, 0, 0])
        # Handles out-of-bound cases.
        x = np.array([0.0, 11.0, 0.0])
        active = find_active_constraints(x, lb, ub)
        assert_equal(active, [-1, 1, 0])
        active = find_active_constraints(x, lb, ub, rtol=0)
        assert_equal(active, [-1, 1, 0])
    def test_make_strictly_feasible(self):
        # make_strictly_feasible moves components lying on (or outside) the
        # bounds into the strict interior; rstep scales the shift.
        lb = np.array([-0.5, -0.8, 2.0])
        ub = np.array([0.8, 1.0, 3.0])
        x = np.array([-0.5, 0.0, 2 + 1e-10])
        # With rstep=0 only the component exactly on a bound is moved;
        # x[1:] are already strictly inside and must be untouched.
        x_new = make_strictly_feasible(x, lb, ub, rstep=0)
        assert_(x_new[0] > -0.5)
        assert_equal(x_new[1:], x[1:])
        # A positive rstep shifts by an amount relative to the bound value
        # (absolute 1e-4 off lb=-0.5, relative 1e-4 for the bound at 2).
        x_new = make_strictly_feasible(x, lb, ub, rstep=1e-4)
        assert_equal(x_new, [-0.5 + 1e-4, 0.0, 2 * (1 + 1e-4)])
        # Out-of-bound components are pulled back inside the box.
        x = np.array([-0.5, -1, 3.1])
        x_new = make_strictly_feasible(x, lb, ub)
        assert_(np.all((x_new >= lb) & (x_new <= ub)))
        x_new = make_strictly_feasible(x, lb, ub, rstep=0)
        assert_(np.all((x_new >= lb) & (x_new <= ub)))
        # When the interval is narrower than the requested step the point
        # lands at the interval midpoint.
        lb = np.array([-1, 100.0])
        ub = np.array([1, 100.0 + 1e-10])
        x = np.array([0, 100.0])
        x_new = make_strictly_feasible(x, lb, ub, rstep=1e-8)
        assert_equal(x_new, [0, 100.0 + 0.5e-10])
    def test_scaling_vector(self):
        # CL_scaling_vector (presumably the Coleman-Li scaling -- confirm
        # against _lsq.common) returns v = distance to the bound selected
        # by the gradient sign (lower bound for g > 0, upper for g < 0,
        # 1 when that bound is infinite) and dv, its derivative w.r.t. x.
        lb = np.array([-np.inf, -5.0, 1.0, -np.inf])
        ub = np.array([1.0, np.inf, 10.0, np.inf])
        x = np.array([0.5, 2.0, 5.0, 0.0])
        g = np.array([1.0, 0.1, -10.0, 0.0])
        v, dv = CL_scaling_vector(x, g, lb, ub)
        assert_equal(v, [1.0, 7.0, 5.0, 1.0])
        assert_equal(dv, [0.0, 1.0, -1.0, 0.0])
class TestQuadraticFunction(object):
    """Tests for the quadratic-model helpers of _lsq.common.

    The asserted constants correspond to the model
    f(s) = 0.5 * s.T (J.T J + diag(diag)) s + g.T s,
    optionally restricted to the line s0 + t*s.
    """
    def setup_method(self):
        # Small fixed problem shared by every test in this class.
        self.J = np.array([
            [0.1, 0.2],
            [-1.0, 1.0],
            [0.5, 0.2]])
        self.g = np.array([0.8, -2.0])
        self.diag = np.array([1.0, 2.0])
    def test_build_quadratic_1d(self):
        # build_quadratic_1d returns (a, b) -- and c when s0 is given --
        # for the 1-D restriction f(t) = a*t**2 + b*t (+ c) along s.
        s = np.zeros(2)
        a, b = build_quadratic_1d(self.J, self.g, s)
        assert_equal(a, 0)
        assert_equal(b, 0)
        a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag)
        assert_equal(a, 0)
        assert_equal(b, 0)
        # a = 0.5 * ||J s||**2 = 2.05, b = g.T s = 2.8 for this fixture.
        s = np.array([1.0, -1.0])
        a, b = build_quadratic_1d(self.J, self.g, s)
        assert_equal(a, 2.05)
        assert_equal(b, 2.8)
        # diag adds 0.5 * s.T diag(diag) s = 1.5 to the quadratic term.
        a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag)
        assert_equal(a, 3.55)
        assert_equal(b, 2.8)
        # With an offset s0 a constant term c is also returned.
        s0 = np.array([0.5, 0.5])
        a, b, c = build_quadratic_1d(self.J, self.g, s, diag=self.diag, s0=s0)
        assert_equal(a, 3.55)
        assert_allclose(b, 2.39)
        assert_allclose(c, -0.1525)
    def test_minimize_quadratic_1d(self):
        # minimize_quadratic_1d(a, b, lb, ub[, c]) returns the minimizer t
        # of a*t**2 + b*t (+ c) over [lb, ub] and the attained value y.
        a = 5
        b = -1
        # Unconstrained minimum at t = -b/(2a) = 0.1; clipped to interval.
        t, y = minimize_quadratic_1d(a, b, 1, 2)
        assert_equal(t, 1)
        assert_allclose(y, a * t**2 + b * t, rtol=1e-15)
        t, y = minimize_quadratic_1d(a, b, -2, -1)
        assert_equal(t, -1)
        assert_allclose(y, a * t**2 + b * t, rtol=1e-15)
        t, y = minimize_quadratic_1d(a, b, -1, 1)
        assert_equal(t, 0.1)
        assert_allclose(y, a * t**2 + b * t, rtol=1e-15)
        # The constant term c shifts y but not the minimizer t.
        c = 10
        t, y = minimize_quadratic_1d(a, b, -1, 1, c=c)
        assert_equal(t, 0.1)
        assert_allclose(y, a * t**2 + b * t + c, rtol=1e-15)
        # Infinite and semi-infinite intervals are supported.
        t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf, c=c)
        assert_equal(t, 0.1)
        assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15)
        t, y = minimize_quadratic_1d(a, b, 0, np.inf, c=c)
        assert_equal(t, 0.1)
        assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15)
        t, y = minimize_quadratic_1d(a, b, -np.inf, 0, c=c)
        assert_equal(t, 0)
        assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15)
        # Concave case (a < 0): infimum over an unbounded interval is -inf,
        # attained in the limit t -> +/-inf.
        a = -1
        b = 0.2
        t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf)
        assert_equal(y, -np.inf)
        t, y = minimize_quadratic_1d(a, b, 0, np.inf)
        assert_equal(t, np.inf)
        assert_equal(y, -np.inf)
        t, y = minimize_quadratic_1d(a, b, -np.inf, 0)
        assert_equal(t, -np.inf)
        assert_equal(y, -np.inf)
    def test_evaluate_quadratic(self):
        # evaluate_quadratic returns a scalar for a single step s and a
        # vector of values when s is a 2-D array of row vectors.
        s = np.array([1.0, -1.0])
        value = evaluate_quadratic(self.J, self.g, s)
        assert_equal(value, 4.85)
        value = evaluate_quadratic(self.J, self.g, s, diag=self.diag)
        assert_equal(value, 6.35)
        s = np.array([[1.0, -1.0],
                      [1.0, 1.0],
                      [0.0, 0.0]])
        values = evaluate_quadratic(self.J, self.g, s)
        assert_allclose(values, [4.85, -0.91, 0.0])
        values = evaluate_quadratic(self.J, self.g, s, diag=self.diag)
        assert_allclose(values, [6.35, 0.59, 0.0])
class TestTrustRegion(object):
    """Tests for intersect_trust_region (line / sphere intersection)."""
    def test_intersect(self):
        # intersect_trust_region(x, s, Delta) returns the two values of t
        # for which ||x + t*s|| == Delta, with t_neg <= 0 <= t_pos.
        Delta = 1.0
        x = np.zeros(3)
        s = np.array([1.0, 0.0, 0.0])
        t_neg, t_pos = intersect_trust_region(x, s, Delta)
        assert_equal(t_neg, -1)
        assert_equal(t_pos, 1)
        # Direction is not required to be normalized: ||s|| = sqrt(3).
        s = np.array([-1.0, 1.0, -1.0])
        t_neg, t_pos = intersect_trust_region(x, s, Delta)
        assert_allclose(t_neg, -3**-0.5)
        assert_allclose(t_pos, 3**-0.5)
        # Non-zero starting point strictly inside the ball.
        x = np.array([0.5, -0.5, 0])
        s = np.array([0, 0, 1.0])
        t_neg, t_pos = intersect_trust_region(x, s, Delta)
        assert_allclose(t_neg, -2**-0.5)
        assert_allclose(t_pos, 2**-0.5)
        # x outside the trust region is rejected.
        x = np.ones(3)
        assert_raises(ValueError, intersect_trust_region, x, s, Delta)
        # A zero direction is rejected as well.
        x = np.zeros(3)
        s = np.zeros(3)
        assert_raises(ValueError, intersect_trust_region, x, s, Delta)
def test_reflective_transformation():
    """Check reflective_transformation on interior points, points that get
    reflected off a bound, and boxes with infinite bounds.
    """
    def check(y, lower, upper, expected_x, expected_g):
        # Apply the transformation and compare both outputs.
        x, g = reflective_transformation(y, lower, upper)
        assert_equal(x, expected_x)
        assert_equal(g, expected_g)
    lower = np.array([-1, -2], dtype=float)
    upper = np.array([5, 3], dtype=float)
    no_lower = np.array([-np.inf, -np.inf])
    no_upper = np.array([np.inf, np.inf])
    # An interior point maps to itself with unit scaling g.
    interior = np.array([0, 0])
    check(interior, lower, upper, interior, np.ones(2))
    # Out-of-bound components are reflected back inside the box; the
    # corresponding entry of g flips sign.
    outside = np.array([-4, 4], dtype=float)
    check(outside, lower, no_upper, [2, 4], [-1, 1])
    check(outside, no_lower, upper, [-4, 2], [1, -1])
    check(outside, lower, upper, [2, 2], [-1, -1])
    # Mixed finite / infinite bounds.
    mixed_lb = np.array([-np.inf, -2])
    mixed_ub = np.array([5, np.inf])
    check(np.array([10, 10], dtype=float), mixed_lb, mixed_ub,
          [0, 10], [-1, 1])
def test_linear_operators():
    """Check that the left/right diagonal-multiplied linear operators agree
    with the explicit dense products diag(d) @ A and A @ diag(d).
    """
    A = np.arange(6).reshape((3, 2))
    scale_left = np.array([-1, 2, 5])
    scale_right = np.array([5, 10])
    # Dense reference matrices built with explicit diagonal matrices.
    dense_left = np.diag(scale_left).dot(A)
    dense_right = A.dot(np.diag(scale_right))
    op_left = left_multiplied_operator(A, scale_left)
    op_right = right_multiplied_operator(A, scale_right)
    # Probe vectors/matrices for matvec, matmat and rmatvec.
    vec = np.array([-2, 3])
    mat = -2 * np.arange(2, 8).reshape((2, 3))
    vec_t = np.array([0, -2, 15])
    for dense, op in ((dense_left, op_left), (dense_right, op_right)):
        assert_allclose(dense.dot(vec), op.matvec(vec))
        assert_allclose(dense.dot(mat), op.matmat(mat))
        assert_allclose(dense.T.dot(vec_t), op.rmatvec(vec_t))
|
|
#!/usr/bin/env python
"""A TestCase that initializes the library with standard API methods."""
import unittest
import ee
class ApiTestCase(unittest.TestCase):
  """Base TestCase that replaces the ee network layer with canned responses."""

  def setUp(self):
    self.InitializeApi()

  def InitializeApi(self):
    """Initializes the library with standard API methods.

    This is normally invoked during setUp(), but subclasses may invoke
    it manually instead if they prefer.
    """
    # Arguments of the most recent /download, /thumb and /table calls,
    # captured for inspection by subclasses' tests.
    self.last_download_call = None
    self.last_thumb_call = None
    self.last_table_call = None

    def MockSend(path, params, unused_method=None, unused_raw=None):
      # Stand-in for ee.data.send_: serves canned responses for the
      # endpoints the tests exercise and records call arguments.
      if path == '/algorithms':
        return BUILTIN_FUNCTIONS
      elif path == '/value':
        return {'value': 'fakeValue'}
      elif path == '/mapid':
        return {'mapid': 'fakeMapId'}
      elif path == '/download':
        # Hang on to the call arguments.
        self.last_download_call = {'url': path, 'data': params}
        return {'docid': '1', 'token': '2'}
      elif path == '/thumb':
        # Hang on to the call arguments.
        self.last_thumb_call = {'url': path, 'data': params}
        return {'thumbid': '3', 'token': '4'}
      elif path == '/table':
        # Hang on to the call arguments.
        self.last_table_call = {'url': path, 'data': params}
        return {'docid': '5', 'token': '6'}
      else:
        raise Exception('Unexpected API call to %s with %s' % (path, params))
    # Monkey-patch the transport, then re-initialize so the library
    # fetches its algorithm list through MockSend.
    ee.data.send_ = MockSend
    ee.Reset()
    ee.Initialize(None, '')
BUILTIN_FUNCTIONS = {
'Image.constant': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'value',
'type': 'Object'
}
],
'description': '',
'returns': 'Image'
},
'Image.load': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'id',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'version',
'type': 'Long'
}
],
'description': '',
'returns': 'Image'
},
'Image.addBands': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'dstImg',
'type': 'Image'
},
{
'description': '',
'name': 'srcImg',
'type': 'Image'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'names',
'type': 'List<String>'
},
{
'default': False,
'description': '',
'optional': True,
'name': 'overwrite',
'type': 'boolean'
}
],
'description': '',
'returns': 'Image'
},
'Image.clip': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'input',
'type': 'Image'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'geometry',
'type': 'Object'
}
],
'description': '',
'returns': 'Image'
},
'Image.select': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'input',
'type': 'Image'
},
{
'description': '',
'name': 'bandSelectors',
'type': 'List<Object>'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'newNames',
'type': 'List<String>'
}
],
'description': '',
'returns': 'Image'
},
'Image.parseExpression': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'expression',
'type': 'String'
},
{
'default': 'image',
'description': '',
'optional': True,
'name': 'argName',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'vars',
'type': 'List<String>'
}
],
'description': '',
'returns': 'Algorithm'
},
'Feature': {
'type': 'Algorithm',
'args': [
{
'default': None,
'description': '',
'optional': True,
'name': 'geometry',
'type': 'Geometry'
},
{
'default': {},
'description': '',
'optional': True,
'name': 'metadata',
'type': 'Dictionary<Object>'
}
],
'description': '',
'returns': 'Feature'
},
'Feature.get': {
'type': 'Algorithm',
'returns': '<any>',
'hidden': False,
'args': [
{
'type': 'Element',
'description': '',
'name': 'object'
},
{
'type': 'String',
'description': '',
'name': 'property'
}
],
'description': ''
},
'Collection': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'features',
'type': 'List<Feature>'
}
],
'description': '',
'returns': 'FeatureCollection'
},
'Collection.loadTable': {
'type': 'Algorithm',
'args': [
{
'default': None,
'description': '',
'optional': True,
'name': 'tableId',
'type': 'Object'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'geometryColumn',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'version',
'type': 'Long'
}
],
'description': '',
'returns': 'FeatureCollection'
},
'Collection.filter': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'collection',
'type': 'FeatureCollection'
},
{
'description': '',
'name': 'filter',
'type': 'Filter'
}
],
'description': '',
'returns': 'FeatureCollection'
},
'Collection.limit': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'collection',
'type': 'FeatureCollection'
},
{
'default': -1,
'description': '',
'optional': True,
'name': 'limit',
'type': 'int'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'key',
'type': 'String'
},
{
'default': True,
'description': '',
'optional': True,
'name': 'ascending',
'type': 'boolean'
}
],
'description': '',
'returns': 'FeatureCollection'
},
'Collection.map': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'collection',
'type': 'FeatureCollection'
},
{
'description': '',
'name': 'baseAlgorithm',
'type': 'Algorithm'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'dynamicArgs',
'type': 'Dictionary<String>'
},
{
'default': {},
'description': '',
'optional': True,
'name': 'constantArgs',
'type': 'Dictionary<Object>'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'destination',
'type': 'String'
}
],
'description': '',
'returns': 'FeatureCollection'
},
'Collection.iterate': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'collection',
'type': 'FeatureCollection'
},
{
'description': '',
'name': 'function',
'type': 'Algorithm'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'first',
'type': 'Object'
}
],
'description': '',
'returns': 'Object',
},
'ImageCollection.load': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'id',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'version',
'type': 'Long'
}
],
'description': '',
'returns': 'ImageCollection'
},
'ImageCollection.fromImages': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'images',
'type': 'List<Image>'
}
],
'description': '',
'returns': 'ImageCollection'
},
'ImageCollection.mosaic': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'collection',
'type': 'ImageCollection'
}
],
'description': '',
'returns': 'Image'
},
'Collection.geometry': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'collection',
'type': 'FeatureCollection'
},
{
'default': {
'type': 'ErrorMargin',
'unit': 'meters',
'value': 0
},
'description': '',
'optional': True,
'name': 'maxError',
'type': 'ErrorMargin'
}
],
'description': '',
'returns': 'Geometry'
},
'Collection.draw': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'collection',
'type': 'FeatureCollection'
},
{
'description': '',
'name': 'color',
'type': 'String'
},
{
'default': 3,
'description': '',
'optional': True,
'name': 'pointRadius',
'type': 'int'
},
{
'default': 2,
'description': '',
'optional': True,
'name': 'strokeWidth',
'type': 'int'
}
],
'description': '',
'returns': 'Image'
},
'DateRange': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'start',
'type': 'Date'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'end',
'type': 'Date'
}
],
'description': '',
'returns': 'DateRange'
},
'Date': {
'returns': 'Date',
'hidden': False,
'args': [
{
'type': 'Object',
'description': '',
'name': 'value'
},
{
'type': 'String',
'default': None,
'description': '',
'optional': True,
'name': 'timeZone'
}
],
'type': 'Algorithm',
'description': ''
},
'ErrorMargin': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'value',
'type': 'Double'
},
{
'default': 'meters',
'description': '',
'optional': True,
'name': 'unit',
'type': 'String'
}
],
'description': '',
'returns': 'ErrorMargin'
},
'Filter.intersects': {
'type': 'Algorithm',
'args': [
{
'default': None,
'description': '',
'optional': True,
'name': 'leftField',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'rightValue',
'type': 'Object'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'rightField',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'leftValue',
'type': 'Object'
},
{
'default': {
'type': 'ErrorMargin',
'unit': 'meters',
'value': 0.1
},
'description': '',
'optional': True,
'name': 'maxError',
'type': 'ErrorMargin'
}
],
'description': '',
'returns': 'Filter'
},
'Filter.dateRangeContains': {
'type': 'Algorithm',
'args': [
{
'default': None,
'description': '',
'optional': True,
'name': 'leftField',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'rightValue',
'type': 'Object'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'rightField',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'leftValue',
'type': 'Object'
}
],
'description': '',
'returns': 'Filter'
},
'Filter.or': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'filters',
'type': 'List<Filter>'
}
],
'description': '',
'returns': 'Filter'
},
'Filter.and': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'filters',
'type': 'List<Filter>'
}
],
'description': '',
'returns': 'Filter'
},
'Filter.not': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'filter',
'type': 'Filter'
}
],
'description': '',
'returns': 'Filter'
},
'Filter.equals': {
'type': 'Algorithm',
'args': [
{
'default': None,
'description': '',
'optional': True,
'name': 'leftField',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'rightValue',
'type': 'Object'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'rightField',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'leftValue',
'type': 'Object'
}
],
'description': '',
'returns': 'Filter'
},
'Filter.lessThan': {
'type': 'Algorithm',
'args': [
{
'default': None,
'description': '',
'optional': True,
'name': 'leftField',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'rightValue',
'type': 'Object'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'rightField',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'leftValue',
'type': 'Object'
}
],
'description': '',
'returns': 'Filter'
},
'Filter.greaterThan': {
'type': 'Algorithm',
'args': [
{
'default': None,
'description': '',
'optional': True,
'name': 'leftField',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'rightValue',
'type': 'Object'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'rightField',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'leftValue',
'type': 'Object'
}
],
'description': '',
'returns': 'Filter'
},
'Filter.stringContains': {
'type': 'Algorithm',
'args': [
{
'default': None,
'description': '',
'optional': True,
'name': 'leftField',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'rightValue',
'type': 'Object'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'rightField',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'leftValue',
'type': 'Object'
}
],
'description': '',
'returns': 'Filter'
},
'Filter.stringStartsWith': {
'type': 'Algorithm',
'args': [
{
'default': None,
'description': '',
'optional': True,
'name': 'leftField',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'rightValue',
'type': 'Object'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'rightField',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'leftValue',
'type': 'Object'
}
],
'description': '',
'returns': 'Filter'
},
'Filter.stringEndsWith': {
'type': 'Algorithm',
'args': [
{
'default': None,
'description': '',
'optional': True,
'name': 'leftField',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'rightValue',
'type': 'Object'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'rightField',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'leftValue',
'type': 'Object'
}
],
'description': '',
'returns': 'Filter'
},
'Filter.listContains': {
'type': 'Algorithm',
'args': [
{
'default': None,
'description': '',
'optional': True,
'name': 'leftField',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'rightValue',
'type': 'Object'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'rightField',
'type': 'String'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'leftValue',
'type': 'Object'
}
],
'description': '',
'returns': 'Filter'
},
'Image.mask': {
'type': 'Algorithm',
'args': [
{
'name': 'image',
'type': 'Image',
'description': ''
},
{
'name': 'mask',
'type': 'Image',
'description': '',
'optional': True,
'default': None
}
],
'description': '',
'returns': 'Image'
},
# These two functions (Dictionary.get and Image.reduceRegion) are here
# to force the creation of the Dictionary class.
'Dictionary.get': {
'returns': 'Object',
'args': [
{
'type': 'Dictionary<Object>',
'description': '',
'name': 'map'
},
{
'type': 'String',
'description': '',
'name': 'property'
}
],
'type': 'Algorithm',
'description': '',
},
'Image.reduceRegion': {
'returns': 'Dictionary<Object>',
'hidden': False,
'args': [
{
'type': 'Image',
'description': '',
'name': 'image'
},
{
'type': 'ReducerOld',
'description': '',
'name': 'reducer'
},
{
'default': None,
'type': 'Geometry',
'optional': True,
'description': '',
'name': 'geometry'
},
{
'default': None,
'type': 'Double',
'optional': True,
'description': '',
'name': 'scale'
},
{
'default': 'EPSG:4326',
'type': 'String',
'optional': True,
'description': '',
'name': 'crs'
},
{
'default': None,
'type': 'double[]',
'optional': True,
'description': '',
'name': 'crsTransform'
},
{
'default': False,
'type': 'boolean',
'optional': True,
'description': '',
'name': 'bestEffort'
}
],
'type': 'Algorithm',
'description': ''
},
# Algorithms for testing ee.String.
'String': {
'returns': 'String',
'hidden': False,
'args': [
{
'type': 'Object',
'description': '',
'name': 'input'
}
],
'type': 'Algorithm',
'description': ''
},
'String.cat': {
'returns': 'String',
'hidden': False,
'args': [
{
'type': 'String',
'description': '',
'name': 'string1'
},
{
'type': 'String',
'description': '',
'name': 'string2'
}
],
'type': 'Algorithm',
'description': ''
},
# An algorithm for testing computed Geometries.
'Geometry.bounds': {
'returns': 'Geometry',
'hidden': False,
'args': [
{
'type': 'Geometry',
'description': '',
'name': 'geometry'
},
{
'default': None,
'type': 'ErrorMargin',
'optional': True,
'description': '',
'name': 'maxError'
},
{
'default': None,
'type': 'Projection',
'optional': True,
'description': '',
'name': 'proj'
}
],
'type': 'Algorithm',
'description': ''
},
'Geometry.centroid': {
'returns': 'Geometry',
'args': [
{
'description': '',
'name': 'geometry',
'type': 'Geometry'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'maxError',
'type': 'ErrorMargin'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'proj',
'type': 'Projection'
}
],
'description': '',
'type': 'Algorithm',
},
# Element property setting, used by the client-side override.
'Element.set': {
'returns': 'Element',
'hidden': False,
'args': [
{
'type': 'Element',
'description': '',
'name': 'object'
},
{
'type': 'String',
'description': '',
'name': 'key'
},
{
'type': 'Object',
'description': '',
'name': 'value'
}
],
'type': 'Algorithm',
'description': ''
},
'Element.setMulti': {
'returns': 'Element',
'hidden': False,
'args': [
{
'type': 'Element',
'description': '',
'name': 'object'
},
{
'type': 'Dictionary<Object>',
'description': '',
'name': 'properties'
}
],
'type': 'Algorithm',
'description': ''
},
'Image.geometry': {
'returns': 'Geometry',
'args': [
{
'description': '',
'name': 'feature',
'type': 'Element'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'maxError',
'type': 'ErrorMargin'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'proj',
'type': 'Projection'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'geodesics',
'type': 'Boolean'
}
],
'type': 'Algorithm',
'description': '',
},
'Number.add': {
'returns': 'Number',
'hidden': False,
'args': [
{
'type': 'Number',
'description': '',
'name': 'left'
},
{
'type': 'Number',
'description': '',
'name': 'right'
}
],
'type': 'Algorithm',
'description': ''
},
'Array': {
'returns': 'Array',
'hidden': False,
'args': [
{
'name': 'values',
'type': 'Object'
},
{
'name': 'pixelType',
'type': 'PixelType',
'optional': True,
'default': None
}
],
'type': 'Algorithm',
'description': ''
},
'List.slice': {
'returns': 'List<Object>',
'args': [
{
'type': 'List<Object>',
'name': 'list'
},
{
'type': 'Integer',
'name': 'start'
},
{
'default': None,
'type': 'Integer',
'optional': True,
'name': 'end'
}
],
'type': 'Algorithm',
'description': '',
},
'List.map': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'list',
'type': 'List'
},
{
'description': '',
'name': 'baseAlgorithm',
'type': 'Algorithm'
},
],
'description': '',
'returns': 'List'
},
'Projection': {
'returns': 'Projection',
'type': 'Algorithm',
'description': '',
'args': [
{
'name': 'crs',
'type': 'Object',
'description': ''
},
{
'name': 'transform',
'default': None,
'type': 'List<Number>',
'optional': True,
'description': ''
},
{
'name': 'transformWkt',
'default': None,
'type': 'String',
'optional': True,
'description': '',
}
]
},
'Image.cast': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'image',
'type': 'Image'
},
{
'description': '',
'name': 'bandTypes',
'type': 'Dictionary'
},
{
'default': None,
'description': '',
'optional': True,
'name': 'bandOrder',
'type': 'List'
}
],
'description': '',
'returns': 'Image'
},
'Describe': {
'type': 'Algorithm',
'args': [
{
'description': '',
'name': 'input',
'type': 'Object'
}
],
'description': '',
'returns': 'Object',
},
}
# A sample of encoded EE API JSON, used by SerializerTest and DeserializerTest.
ENCODED_JSON_SAMPLE = {
    'type': 'CompoundValue',
    # 'scope' is a list of [name, value] pairs; later entries may refer to
    # earlier ones with {'type': 'ValueRef', 'value': <name>}.
    'scope': [
        # A function invocation node.
        ['0', {
            'type': 'Invocation',
            'functionName': 'Date',
            'arguments': {
                'value': 1234567890000
            }
        }],
        # GeoJSON-style geometries with an optional CRS.
        ['1', {
            'type': 'LineString',
            'coordinates': [[1, 2], [3, 4]],
            'crs': {
                'type': 'name',
                'properties': {
                    'name': 'SR-ORG:6974'
                }
            }
        }],
        ['2', {
            'type': 'Polygon',
            'coordinates': [
                [[0, 0], [10, 0], [10, 10], [0, 10], [0, 0]],
                [[5, 6], [7, 6], [7, 8], [5, 8]],
                [[1, 1], [2, 1], [2, 2], [1, 2]]
            ]
        }],
        # Base64-encoded binary payload.
        ['3', {
            'type': 'Bytes',
            'value': 'aGVsbG8='
        }],
        ['4', {
            'type': 'Invocation',
            'functionName': 'String.cat',
            'arguments': {
                'string1': 'x',
                'string2': 'y'
            }
        }],
        # A dictionary whose value references scope entry '4'.
        ['5', {
            'type': 'Dictionary',
            'value': {
                'foo': 'bar',
                'baz': {'type': 'ValueRef', 'value': '4'}
            }
        }],
        # An anonymous function whose body is one of its arguments.
        ['6', {
            'type': 'Function',
            'argumentNames': ['x', 'y'],
            'body': {'type': 'ArgumentRef', 'value': 'y'}
        }],
        # A heterogeneous list mixing primitives and references to the
        # entries above.
        ['7', [
            None,
            True,
            5,
            7,
            3.4,
            2.5,
            'hello',
            {'type': 'ValueRef', 'value': '0'},
            {'type': 'ValueRef', 'value': '1'},
            {'type': 'ValueRef', 'value': '2'},
            {'type': 'ValueRef', 'value': '3'},
            {'type': 'ValueRef', 'value': '5'},
            {'type': 'ValueRef', 'value': '4'},
            {'type': 'ValueRef', 'value': '6'}
        ]]
    ],
    # The overall value of the compound expression.
    'value': {'type': 'ValueRef', 'value': '7'}
}
|
|
try:
from . import generic as g
except BaseException:
import generic as g
class PlyTest(g.unittest.TestCase):
    """Load and export round-trip tests for the PLY mesh format."""
    def test_ply_dtype(self):
        # make sure all ply dtype strings are valid dtypes
        dtypes = g.trimesh.exchange.ply.dtypes
        for d in dtypes.values():
            # will raise if dtype string not valid
            g.np.dtype(d)
    def test_ply(self):
        # Face colors should survive an export/reload round trip.
        m = g.get_mesh('machinist.XAML')
        assert m.visual.kind == 'face'
        # source colors must actually vary across faces for a meaningful test
        assert m.visual.face_colors.ptp(axis=0).max() > 0
        export = m.export(file_type='ply')
        reconstructed = g.wrapload(export, file_type='ply')
        assert reconstructed.visual.kind == 'face'
        assert g.np.allclose(reconstructed.visual.face_colors,
                             m.visual.face_colors)
        # Same round trip for vertex colors.
        m = g.get_mesh('reference.ply')
        assert m.visual.kind == 'vertex'
        assert m.visual.vertex_colors.ptp(axis=0).max() > 0
        export = m.export(file_type='ply')
        reconstructed = g.wrapload(export, file_type='ply')
        assert reconstructed.visual.kind == 'vertex'
        assert g.np.allclose(reconstructed.visual.vertex_colors,
                             m.visual.vertex_colors)
    def test_points(self):
        # Test reading point clouds from PLY files
        m = g.get_mesh('points_ascii.ply')
        assert isinstance(m, g.trimesh.PointCloud)
        assert m.vertices.shape == (5, 3)
        m = g.get_mesh('points_bin.ply')
        assert m.vertices.shape == (5, 3)
        assert isinstance(m, g.trimesh.PointCloud)
        # presumably a file declaring an empty face element -- it should
        # still load as a point cloud (TODO confirm fixture contents)
        m = g.get_mesh('points_emptyface.ply')
        assert m.vertices.shape == (1024, 3)
        assert isinstance(m, g.trimesh.PointCloud)
    def test_list_properties(self):
        """
        Test reading point clouds with the following metadata:
        - lists of differing length
        - multiple list properties
        - single-element properties that come after list properties
        """
        m = g.get_mesh('points_ascii_with_lists.ply')
        # raw parsed PLY data is kept under metadata['ply_raw']
        point_list = m.metadata['ply_raw']['point_list']['data']
        assert g.np.array_equal(
            point_list['point_indices1'][0], g.np.array([10, 11, 12], dtype=g.np.uint32))
        assert g.np.array_equal(
            point_list['point_indices1'][1], g.np.array([10, 11], dtype=g.np.uint32))
        assert g.np.array_equal(
            point_list['point_indices2'][0], g.np.array([13, 14], dtype=g.np.uint32))
        assert g.np.array_equal(
            point_list['point_indices2'][1], g.np.array([12, 13, 14], dtype=g.np.uint32))
        assert g.np.array_equal(
            point_list['some_float'], g.np.array([1.1, 2.2], dtype=g.np.float32))
    def test_vertex_attributes(self):
        """
        Test writing vertex attributes to a ply, by reading them back and asserting the
        written attributes array matches
        """
        m = g.get_mesh('box.STL')
        test_1d_attribute = g.np.copy(m.vertices[:, 0])
        test_nd_attribute = g.np.copy(m.vertices)
        m.vertex_attributes['test_1d_attribute'] = test_1d_attribute
        m.vertex_attributes['test_nd_attribute'] = test_nd_attribute
        export = m.export(file_type='ply')
        reconstructed = g.wrapload(export,
                                   file_type='ply')
        vertex_attributes = reconstructed.metadata['ply_raw']['vertex']['data']
        result_1d = vertex_attributes['test_1d_attribute']
        # n-d attributes come back as a structured record; 'f1' holds the values
        result_nd = vertex_attributes['test_nd_attribute']['f1']
        g.np.testing.assert_almost_equal(result_1d, test_1d_attribute)
        g.np.testing.assert_almost_equal(result_nd, test_nd_attribute)
    def test_face_attributes(self):
        # Test writing face attributes to a ply, by reading
        # them back and asserting the written attributes array matches
        m = g.get_mesh('box.STL')
        test_1d_attribute = g.np.copy(m.face_angles[:, 0])
        test_nd_attribute = g.np.copy(m.face_angles)
        m.face_attributes['test_1d_attribute'] = test_1d_attribute
        m.face_attributes['test_nd_attribute'] = test_nd_attribute
        export = m.export(file_type='ply')
        reconstructed = g.wrapload(export, file_type='ply')
        face_attributes = reconstructed.metadata['ply_raw']['face']['data']
        result_1d = face_attributes['test_1d_attribute']
        result_nd = face_attributes['test_nd_attribute']['f1']
        g.np.testing.assert_almost_equal(result_1d, test_1d_attribute)
        g.np.testing.assert_almost_equal(result_nd, test_nd_attribute)
        # omitting attributes must produce a strictly smaller export
        no_attr = m.export(file_type='ply', include_attributes=False)
        assert len(no_attr) < len(export)
    def test_cases(self):
        # The same geometry as STL and PLY should have matching face counts.
        a = g.get_mesh('featuretype.STL')
        b = g.get_mesh('featuretype.ply')
        assert a.faces.shape == b.faces.shape
        # has mixed quads and triangles
        m = g.get_mesh('suzanne.ply')
        assert len(m.faces) > 0
    def test_ascii_color(self):
        mesh = g.trimesh.creation.box()
        # a colorless box exported as ascii PLY reloads with no color kind
        en = g.wrapload(mesh.export(file_type='ply', encoding="ascii"),
                        file_type='ply')
        assert en.visual.kind is None
        color = [255, 0, 0, 255]
        mesh.visual.vertex_colors = color
        # try exporting and reloading raw
        eb = g.wrapload(mesh.export(file_type='ply'), file_type='ply')
        assert g.np.allclose(eb.visual.vertex_colors[0], color)
        assert eb.visual.kind == 'vertex'
        # the ascii encoding should preserve vertex colors as well
        ea = g.wrapload(mesh.export(file_type='ply', encoding='ascii'),
                        file_type='ply')
        assert g.np.allclose(ea.visual.vertex_colors, color)
        assert ea.visual.kind == 'vertex'
    def test_empty_or_pointcloud(self):
        # demo files to check
        empty_files = ['ply_empty_ascii.ply',
                       'ply_empty_bin.ply',
                       'ply_empty_header.ply',
                       'ply_points_ascii.ply',
                       'ply_points_bin.ply']
        for empty_file in empty_files:
            e = g.get_mesh('emptyIO/' + empty_file)
            # create export
            export = e.export(file_type='ply')
            reconstructed = g.wrapload(export, file_type='ply')
            if 'empty' in empty_file:
                # result should be an empty scene without vertices
                assert isinstance(e, g.trimesh.Scene)
                assert not hasattr(e, 'vertices')
                # export should not contain geometry
                assert isinstance(reconstructed, g.trimesh.Scene)
                assert not hasattr(reconstructed, 'vertices')
            elif 'points' in empty_file:
                # result should be a point cloud instance
                assert isinstance(e, g.trimesh.PointCloud)
                assert hasattr(e, 'vertices')
                # point cloud export should contain vertices
                assert isinstance(reconstructed, g.trimesh.PointCloud)
                assert hasattr(reconstructed, 'vertices')
    def test_blender_uv(self):
        # test texture coordinate loading for Blender exported ply files
        mesh_names = []
        # test texture coordinate loading for simple triangulated Blender-export
        mesh_names.append('cube_blender_uv.ply')
        # same mesh but re-exported from meshlab as binary ply (and with changed header)
        mesh_names.append('cube_blender_uv_meshlab.ply')
        # test texture coordinate loading for mesh with mixed quads and triangles
        mesh_names.append('suzanne.ply')
        for mesh_name in mesh_names:
            m = g.get_mesh(mesh_name)
            # every vertex must have received a UV coordinate
            assert hasattr(m, 'visual') and hasattr(m.visual, 'uv')
            assert m.visual.uv.shape[0] == m.vertices.shape[0]
    def test_fix_texture(self):
        # test loading of face indices when uv-coordinates are also contained
        m1 = g.get_mesh('plane.ply')
        m2 = g.get_mesh('plane_tri.ply')
        # both variants should triangulate to the same 2x3 face array
        assert m1.faces.shape == (2, 3)
        assert m2.faces.shape == (2, 3)
if __name__ == '__main__':
    # attach trimesh logging before running the suite directly
    g.trimesh.util.attach_to_log()
    g.unittest.main()
|
|
import gzip
import logging
import os
import pickle
from email.utils import mktime_tz, parsedate_tz
from importlib import import_module
from time import time
from weakref import WeakKeyDictionary
from w3lib.http import headers_raw_to_dict, headers_dict_to_raw
from scrapy.http import Headers, Response
from scrapy.responsetypes import responsetypes
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.project import data_path
from scrapy.utils.python import to_bytes, to_unicode
from scrapy.utils.request import request_fingerprint
logger = logging.getLogger(__name__)
class DummyPolicy:
    """Cache everything except configured schemes and HTTP status codes."""

    def __init__(self, settings):
        self.ignore_schemes = settings.getlist('HTTPCACHE_IGNORE_SCHEMES')
        self.ignore_http_codes = [
            int(code)
            for code in settings.getlist('HTTPCACHE_IGNORE_HTTP_CODES')
        ]

    def should_cache_request(self, request):
        # cacheable unless the URL scheme is in the ignore list
        return urlparse_cached(request).scheme not in self.ignore_schemes

    def should_cache_response(self, response, request):
        # cacheable unless the status code is in the ignore list
        return response.status not in self.ignore_http_codes

    def is_cached_response_fresh(self, cachedresponse, request):
        # the dummy policy never revalidates cached responses
        return True

    def is_cached_response_valid(self, cachedresponse, response, request):
        return True
class RFC2616Policy:
    """Cache policy implementing RFC 2616 (HTTP/1.1) caching semantics.

    Decides what gets stored and when a stored response may be reused,
    based on Cache-Control directives, Expires/Date/Age headers and
    response status codes.
    """

    MAXAGE = 3600 * 24 * 365  # one year

    def __init__(self, settings):
        # When set, store every response regardless of cache headers.
        self.always_store = settings.getbool('HTTPCACHE_ALWAYS_STORE')
        self.ignore_schemes = settings.getlist('HTTPCACHE_IGNORE_SCHEMES')
        # Memoized Cache-Control parses, keyed weakly so entries die
        # together with their request/response objects.
        self._cc_parsed = WeakKeyDictionary()
        self.ignore_response_cache_controls = [
            to_bytes(cc) for cc in settings.getlist('HTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS')
        ]

    def _parse_cachecontrol(self, r):
        """Return parsed Cache-Control directives for *r* (memoized)."""
        if r not in self._cc_parsed:
            cch = r.headers.get(b'Cache-Control', b'')
            parsed = parse_cachecontrol(cch)
            if isinstance(r, Response):
                # Drop response directives the user configured to ignore.
                for key in self.ignore_response_cache_controls:
                    parsed.pop(key, None)
            self._cc_parsed[r] = parsed
        return self._cc_parsed[r]

    def should_cache_request(self, request):
        """Return True if the outgoing request is eligible for caching."""
        if urlparse_cached(request).scheme in self.ignore_schemes:
            return False
        cc = self._parse_cachecontrol(request)
        # obey user-agent directive "Cache-Control: no-store"
        if b'no-store' in cc:
            return False
        # Any other is eligible for caching
        return True

    def should_cache_response(self, response, request):
        """Return True if the response may be stored in the cache."""
        # What is cacheable - https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.1
        # Response cacheability - https://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.4
        # Status code 206 is not included because cache can not deal with partial contents
        cc = self._parse_cachecontrol(response)
        # obey directive "Cache-Control: no-store"
        if b'no-store' in cc:
            return False
        # Never cache 304 (Not Modified) responses
        elif response.status == 304:
            return False
        # Cache unconditionally if configured to do so
        elif self.always_store:
            return True
        # Any hint on response expiration is good
        elif b'max-age' in cc or b'Expires' in response.headers:
            return True
        # Firefox fallbacks this statuses to one year expiration if none is set
        elif response.status in (300, 301, 308):
            return True
        # Other statuses without expiration requires at least one validator
        elif response.status in (200, 203, 401):
            return b'Last-Modified' in response.headers or b'ETag' in response.headers
        # Any other is probably not eligible for caching
        # Makes no sense to cache responses that does not contain expiration
        # info and can not be revalidated
        else:
            return False

    def is_cached_response_fresh(self, cachedresponse, request):
        """Return True when the cached response can be used as-is.

        When the cached response is stale, conditional validators are
        copied onto *request* so the server can reply 304 Not Modified.
        """
        cc = self._parse_cachecontrol(cachedresponse)
        ccreq = self._parse_cachecontrol(request)
        if b'no-cache' in cc or b'no-cache' in ccreq:
            return False
        now = time()
        freshnesslifetime = self._compute_freshness_lifetime(cachedresponse, request, now)
        currentage = self._compute_current_age(cachedresponse, request, now)
        # the request's own max-age can only shorten the lifetime
        reqmaxage = self._get_max_age(ccreq)
        if reqmaxage is not None:
            freshnesslifetime = min(freshnesslifetime, reqmaxage)
        if currentage < freshnesslifetime:
            return True
        if b'max-stale' in ccreq and b'must-revalidate' not in cc:
            # From RFC2616: "Indicates that the client is willing to
            # accept a response that has exceeded its expiration time.
            # If max-stale is assigned a value, then the client is
            # willing to accept a response that has exceeded its
            # expiration time by no more than the specified number of
            # seconds. If no value is assigned to max-stale, then the
            # client is willing to accept a stale response of any age."
            staleage = ccreq[b'max-stale']
            if staleage is None:
                return True
            try:
                if currentage < freshnesslifetime + max(0, int(staleage)):
                    return True
            except ValueError:
                pass
        # Cached response is stale, try to set validators if any
        self._set_conditional_validators(request, cachedresponse)
        return False

    def is_cached_response_valid(self, cachedresponse, response, request):
        """Return True if the cached response should be used over *response*."""
        # Use the cached response if the new response is a server error,
        # as long as the old response didn't specify must-revalidate.
        if response.status >= 500:
            cc = self._parse_cachecontrol(cachedresponse)
            if b'must-revalidate' not in cc:
                return True
        # Use the cached response if the server says it hasn't changed.
        return response.status == 304

    def _set_conditional_validators(self, request, cachedresponse):
        # Copy cached validators onto the outgoing request.
        if b'Last-Modified' in cachedresponse.headers:
            request.headers[b'If-Modified-Since'] = cachedresponse.headers[b'Last-Modified']
        if b'ETag' in cachedresponse.headers:
            request.headers[b'If-None-Match'] = cachedresponse.headers[b'ETag']

    def _get_max_age(self, cc):
        """Return the max-age directive as a non-negative int, or None."""
        try:
            return max(0, int(cc[b'max-age']))
        except (KeyError, ValueError):
            return None

    def _compute_freshness_lifetime(self, response, request, now):
        """Return seconds the response stays fresh, counted from its Date."""
        # Reference nsHttpResponseHead::ComputeFreshnessLifetime
        # https://dxr.mozilla.org/mozilla-central/source/netwerk/protocol/http/nsHttpResponseHead.cpp#706
        cc = self._parse_cachecontrol(response)
        maxage = self._get_max_age(cc)
        if maxage is not None:
            return maxage
        # Parse date header or synthesize it if none exists
        date = rfc1123_to_epoch(response.headers.get(b'Date')) or now
        # Try HTTP/1.0 Expires header
        if b'Expires' in response.headers:
            expires = rfc1123_to_epoch(response.headers[b'Expires'])
            # When parsing Expires header fails RFC 2616 section 14.21 says we
            # should treat this as an expiration time in the past.
            return max(0, expires - date) if expires else 0
        # Fallback to heuristic using last-modified header
        # This is not in RFC but on Firefox caching implementation
        lastmodified = rfc1123_to_epoch(response.headers.get(b'Last-Modified'))
        if lastmodified and lastmodified <= date:
            return (date - lastmodified) / 10
        # This request can be cached indefinitely
        if response.status in (300, 301, 308):
            return self.MAXAGE
        # Insufficient information to compute freshness lifetime
        return 0

    def _compute_current_age(self, response, request, now):
        """Return the current age (seconds) of the cached response."""
        # Reference nsHttpResponseHead::ComputeCurrentAge
        # https://dxr.mozilla.org/mozilla-central/source/netwerk/protocol/http/nsHttpResponseHead.cpp#658
        currentage = 0
        # If Date header is not set we assume it is a fast connection, and
        # clock is in sync with the server
        date = rfc1123_to_epoch(response.headers.get(b'Date')) or now
        if now > date:
            currentage = now - date
        if b'Age' in response.headers:
            try:
                age = int(response.headers[b'Age'])
                currentage = max(currentage, age)
            except ValueError:
                pass
        return currentage
class DbmCacheStorage:
    """HTTP cache storage backed by a DBM-style key/value database."""

    def __init__(self, settings):
        self.cachedir = data_path(settings['HTTPCACHE_DIR'], createdir=True)
        self.expiration_secs = settings.getint('HTTPCACHE_EXPIRATION_SECS')
        # the concrete dbm implementation is configurable
        self.dbmodule = import_module(settings['HTTPCACHE_DBM_MODULE'])
        self.db = None

    def open_spider(self, spider):
        dbpath = os.path.join(self.cachedir, f'{spider.name}.db')
        self.db = self.dbmodule.open(dbpath, 'c')
        logger.debug("Using DBM cache storage in %(cachepath)s", {'cachepath': dbpath}, extra={'spider': spider})

    def close_spider(self, spider):
        self.db.close()

    def retrieve_response(self, spider, request):
        """Rebuild a Response from stored data, or None when absent/expired."""
        data = self._read_data(spider, request)
        if data is None:
            return  # not cached
        headers = Headers(data['headers'])
        # choose the response class from the stored headers and url
        respcls = responsetypes.from_args(headers=headers, url=data['url'])
        return respcls(
            url=data['url'],
            headers=headers,
            status=data['status'],
            body=data['body'],
        )

    def store_response(self, spider, request, response):
        """Persist response data plus a timestamp under the request key."""
        key = self._request_key(request)
        payload = {
            'status': response.status,
            'url': response.url,
            'headers': dict(response.headers),
            'body': response.body,
        }
        self.db[f'{key}_data'] = pickle.dumps(payload, protocol=4)
        self.db[f'{key}_time'] = str(time())

    def _read_data(self, spider, request):
        key = self._request_key(request)
        tkey = f'{key}_time'
        if tkey not in self.db:
            return  # not found
        ts = self.db[tkey]
        # an expiration of 0 means entries never expire
        if 0 < self.expiration_secs < time() - float(ts):
            return  # expired
        return pickle.loads(self.db[f'{key}_data'])

    def _request_key(self, request):
        return request_fingerprint(request)
class FilesystemCacheStorage:
    """HTTP cache storage keeping each response as a set of files on disk."""

    def __init__(self, settings):
        self.cachedir = data_path(settings['HTTPCACHE_DIR'])
        self.expiration_secs = settings.getint('HTTPCACHE_EXPIRATION_SECS')
        self.use_gzip = settings.getbool('HTTPCACHE_GZIP')
        # transparently gzip all cache files when configured
        self._open = gzip.open if self.use_gzip else open

    def open_spider(self, spider):
        logger.debug("Using filesystem cache storage in %(cachedir)s", {'cachedir': self.cachedir},
                     extra={'spider': spider})

    def close_spider(self, spider):
        pass

    def retrieve_response(self, spider, request):
        """Return response if present in cache, or None otherwise."""
        metadata = self._read_meta(spider, request)
        if metadata is None:
            return  # not cached
        rpath = self._get_request_path(spider, request)
        with self._open(os.path.join(rpath, 'response_body'), 'rb') as f:
            body = f.read()
        with self._open(os.path.join(rpath, 'response_headers'), 'rb') as f:
            rawheaders = f.read()
        headers = Headers(headers_raw_to_dict(rawheaders))
        url = metadata.get('response_url')
        respcls = responsetypes.from_args(headers=headers, url=url)
        return respcls(
            url=url,
            headers=headers,
            status=metadata['status'],
            body=body,
        )

    def store_response(self, spider, request, response):
        """Store the given response in the cache."""
        rpath = self._get_request_path(spider, request)
        if not os.path.exists(rpath):
            os.makedirs(rpath)
        metadata = {
            'url': request.url,
            'method': request.method,
            'status': response.status,
            'response_url': response.url,
            'timestamp': time(),
        }
        # metadata is written twice: human-readable repr and a pickle
        with self._open(os.path.join(rpath, 'meta'), 'wb') as f:
            f.write(to_bytes(repr(metadata)))
        with self._open(os.path.join(rpath, 'pickled_meta'), 'wb') as f:
            pickle.dump(metadata, f, protocol=4)
        with self._open(os.path.join(rpath, 'response_headers'), 'wb') as f:
            f.write(headers_dict_to_raw(response.headers))
        with self._open(os.path.join(rpath, 'response_body'), 'wb') as f:
            f.write(response.body)
        with self._open(os.path.join(rpath, 'request_headers'), 'wb') as f:
            f.write(headers_dict_to_raw(request.headers))
        with self._open(os.path.join(rpath, 'request_body'), 'wb') as f:
            f.write(request.body)

    def _get_request_path(self, spider, request):
        # shard entries by the first two fingerprint characters
        key = request_fingerprint(request)
        return os.path.join(self.cachedir, spider.name, key[0:2], key)

    def _read_meta(self, spider, request):
        rpath = self._get_request_path(spider, request)
        metapath = os.path.join(rpath, 'pickled_meta')
        if not os.path.exists(metapath):
            return  # not found
        mtime = os.stat(metapath).st_mtime
        # the file's own mtime decides expiration
        if 0 < self.expiration_secs < time() - mtime:
            return  # expired
        with self._open(metapath, 'rb') as f:
            return pickle.load(f)
def parse_cachecontrol(header):
    """Parse Cache-Control header
    https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
    >>> parse_cachecontrol(b'public, max-age=3600') == {b'public': None,
    ... b'max-age': b'3600'}
    True
    >>> parse_cachecontrol(b'') == {}
    True
    """
    parsed = {}
    for part in header.split(b','):
        name, eq, value = part.strip().partition(b'=')
        if not name:
            continue
        # directives without an '=' map to None
        parsed[name.lower()] = value if eq else None
    return parsed
def rfc1123_to_epoch(date_str):
    """Convert an RFC 1123 date string to a Unix timestamp, or None."""
    # best-effort conversion: any decode/parse failure yields None
    try:
        parsed = parsedate_tz(to_unicode(date_str, encoding='ascii'))
        return mktime_tz(parsed)
    except Exception:
        return None
|
|
import unittest
from emma.model.account import Account
from emma.enumerations import Report, DeliveryType
from emma import get_report
from tests.model import MockAdapter
class ReportingTest(unittest.TestCase):
    """Verify that get_report issues the expected adapter calls.

    Each test checks the adapter was called exactly once with the
    expected (method, path, params) triple.  Uses ``assertEqual``:
    the ``assertEquals`` alias is deprecated and removed in
    Python 3.12.
    """

    def setUp(self):
        Account.default_adapter = MockAdapter
        self.account = Account(
            account_id="100",
            public_key="xxx",
            private_key="yyy")

    def test_can_get_response_summary(self):
        MockAdapter.expected = []
        report = get_report(self.account, Report.ResponseSummary)
        self.assertIsInstance(report, list)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response', {}))

    def test_can_get_response_summary2(self):
        MockAdapter.expected = []
        report = get_report(self.account, Report.ResponseSummary, params={'include_archived': True})
        self.assertIsInstance(report, list)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response', {'include_archived': True}))

    def test_can_get_response_summary3(self):
        MockAdapter.expected = []
        report = get_report(self.account, Report.ResponseSummary, params={'range': "2011-04-01~2011-09-01"})
        self.assertIsInstance(report, list)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response', {'range': "2011-04-01~2011-09-01"}))

    def test_can_get_response_summary_for_mailing(self):
        MockAdapter.expected = {}
        report = get_report(self.account, Report.MailingSummary, 123)
        self.assertIsInstance(report, dict)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response/123', {}))

    def test_can_get_sent_list_for_mailing(self):
        MockAdapter.expected = []
        report = get_report(self.account, Report.SentList, 123)
        self.assertIsInstance(report, list)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response/123/sends', {}))

    def test_can_get_in_progress_list_for_mailing(self):
        MockAdapter.expected = []
        report = get_report(self.account, Report.InProgressList, 123)
        self.assertIsInstance(report, list)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response/123/in_progress', {}))

    def test_can_get_deliveries_list_for_mailing(self):
        MockAdapter.expected = []
        report = get_report(self.account, Report.DeliveredList, 123)
        self.assertIsInstance(report, list)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response/123/deliveries', {}))

    def test_can_get_deliveries_list_for_mailing2(self):
        MockAdapter.expected = []
        report = get_report(self.account, Report.DeliveredList, 123, {'del_status': DeliveryType.Delivered})
        self.assertIsInstance(report, list)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response/123/deliveries', {'del_status': 'd'}))

    def test_can_get_opens_list_for_mailing(self):
        MockAdapter.expected = []
        report = get_report(self.account, Report.OpenList, 123)
        self.assertIsInstance(report, list)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response/123/opens', {}))

    def test_can_get_links_list_for_mailing(self):
        MockAdapter.expected = []
        report = get_report(self.account, Report.LinkList, 123)
        self.assertIsInstance(report, list)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response/123/links', {}))

    def test_can_get_clicks_list_for_mailing(self):
        MockAdapter.expected = []
        report = get_report(self.account, Report.ClickList, 123)
        self.assertIsInstance(report, list)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response/123/clicks', {}))

    def test_can_get_clicks_list_for_mailing2(self):
        MockAdapter.expected = []
        report = get_report(self.account, Report.ClickList, 123, {'member_id': 1024})
        self.assertIsInstance(report, list)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response/123/clicks', {'member_id': 1024}))

    def test_can_get_clicks_list_for_mailing3(self):
        MockAdapter.expected = []
        report = get_report(self.account, Report.ClickList, 123, {'link_id': 1024})
        self.assertIsInstance(report, list)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response/123/clicks', {'link_id': 1024}))

    def test_can_get_forwards_list_for_mailing(self):
        MockAdapter.expected = []
        report = get_report(self.account, Report.ForwardList, 123)
        self.assertIsInstance(report, list)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response/123/forwards', {}))

    def test_can_get_optouts_list_for_mailing(self):
        MockAdapter.expected = []
        report = get_report(self.account, Report.OptOutList, 123)
        self.assertIsInstance(report, list)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response/123/optouts', {}))

    def test_can_get_signups_list_for_mailing(self):
        MockAdapter.expected = []
        report = get_report(self.account, Report.SignUpList, 123)
        self.assertIsInstance(report, list)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response/123/signups', {}))

    def test_can_get_shares_list_for_mailing(self):
        MockAdapter.expected = []
        report = get_report(self.account, Report.SharesList, 123)
        self.assertIsInstance(report, list)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response/123/shares', {}))

    def test_can_get_customer_shares_list_for_mailing(self):
        MockAdapter.expected = []
        report = get_report(self.account, Report.CustomerSharesList, 123)
        self.assertIsInstance(report, list)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response/123/customer_shares', {}))

    def test_can_get_customer_share_clicks_list_for_mailing(self):
        MockAdapter.expected = []
        report = get_report(self.account, Report.CustomerShareClicksList, 123)
        self.assertIsInstance(report, list)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response/123/customer_share_clicks', {}))

    def test_can_get_customer_share_for_mailing(self):
        MockAdapter.expected = {}
        report = get_report(self.account, Report.CustomerShare, 123)
        self.assertIsInstance(report, dict)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response/123/customer_share', {}))

    def test_can_get_shares_overview_for_mailing(self):
        MockAdapter.expected = []
        report = get_report(self.account, Report.SharesOverview, 123)
        self.assertIsInstance(report, list)
        self.assertEqual(self.account.adapter.called, 1)
        self.assertEqual(
            self.account.adapter.call,
            ('GET', '/response/123/shares/overview', {}))
|
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import collections
import contextlib
import fnmatch
import hashlib
import logging
import os
import platform
import posixpath
import shutil
import string
import subprocess
import sys
import tempfile
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
ROOT_DIR = os.path.abspath(os.path.join(THIS_DIR, '..'))
DEVNULL = open(os.devnull, 'w')
IS_WIN = sys.platform.startswith('win')
BAT_EXT = '.bat' if IS_WIN else ''
# Top-level stubs to generate that fall through to executables within the Git
# directory.
WIN_GIT_STUBS = {
'git.bat': 'cmd\\git.exe',
'gitk.bat': 'cmd\\gitk.exe',
'ssh.bat': 'usr\\bin\\ssh.exe',
'ssh-keygen.bat': 'usr\\bin\\ssh-keygen.exe',
}
# Accumulated template parameters for generated stubs.
class Template(collections.namedtuple('Template', (
    'PYTHON_RELDIR', 'PYTHON_BIN_RELDIR', 'PYTHON_BIN_RELDIR_UNIX',
    'PYTHON3_BIN_RELDIR', 'PYTHON3_BIN_RELDIR_UNIX', 'GIT_BIN_RELDIR',
    'GIT_BIN_RELDIR_UNIX', 'GIT_PROGRAM',
))):
  """Accumulated template parameters for generated stubs."""

  @classmethod
  def empty(cls):
    """Return a Template with every field set to None."""
    return cls(**dict.fromkeys(cls._fields))

  def maybe_install(self, name, dst_path):
    """Installs template |name| to |dst_path| if it has changed.

    Loads the template |name| from THIS_DIR, resolves the template
    parameters from this instance, and installs the result to
    |dst_path|.  See `maybe_update` for more information.

    Args:
      name (str): The name of the template to install.
      dst_path (str): The destination filesystem path.

    Returns (bool): True if |dst_path| was updated, False otherwise.
    """
    src = os.path.join(THIS_DIR, name)
    with open(src, 'r', encoding='utf8') as fd:
      resolved = string.Template(fd.read()).safe_substitute(self._asdict())
    return maybe_update(resolved, dst_path)
def maybe_update(content, dst_path):
  """Ensure the file at |dst_path| contains exactly |content|.

  Nothing is written when the destination already matches, which keeps
  filesystem modification times intact and avoids potential write
  contention.

  Args:
    content (str): The file content.
    dst_path (str): The destination filesystem path.

  Returns (bool): True if |dst_path| was updated, False otherwise.
  """
  try:
    with open(dst_path, 'r', encoding='utf-8') as fd:
      if fd.read() == content:
        return False
  except FileNotFoundError:
    pass
  logging.debug('Updating %r', dst_path)
  with open(dst_path, 'w', encoding='utf-8') as fd:
    fd.write(content)
  os.chmod(dst_path, 0o755)
  return True
def maybe_copy(src_path, dst_path):
  """Writes the content of |src_path| to |dst_path| if needed.

  See `maybe_update` for more information.

  Args:
    src_path (str): The content source filesystem path.
    dst_path (str): The destination filesystem path.

  Returns (bool): True if |dst_path| was updated, False otherwise.
  """
  with open(src_path, 'r', encoding='utf-8') as fd:
    return maybe_update(fd.read(), dst_path)
def call_if_outdated(stamp_path, stamp_version, fn):
  """Invokes |fn| if the stamp at |stamp_path| doesn't match |stamp_version|.

  Keeps a filesystem record of whether an operation has been performed
  at |stamp_path|; change |stamp_version| to invalidate the record.
  After |fn| completes successfully, |stamp_path| is rewritten with
  |stamp_version| so the update is not repeated.

  Args:
    stamp_path (str): The filesystem path of the stamp file.
    stamp_version (str): The desired stamp version.
    fn (callable): A callable to invoke if the current stamp version doesn't
        match |stamp_version|.

  Returns (bool): True if an update occurred.
  """
  wanted = stamp_version.strip()
  if os.path.isfile(stamp_path):
    with open(stamp_path, 'r', encoding='utf-8') as fd:
      if fd.read().strip() == wanted:
        return False
  # Stamp missing or stale: run the update, then record the new version.
  fn()
  with open(stamp_path, 'w', encoding='utf-8') as fd:
    fd.write(wanted)
  return True
def _in_use(path):
"""Checks if a Windows file is in use.
When Windows is using an executable, it prevents other writers from
modifying or deleting that executable. We can safely test for an in-use
file by opening it in write mode and checking whether or not there was
an error.
Returns (bool): True if the file was in use, False if not.
"""
try:
with open(path, 'r+'):
return False
except IOError:
return True
def _toolchain_in_use(toolchain_path):
"""Returns (bool): True if a toolchain rooted at |path| is in use.
"""
# Look for Python files that may be in use.
for python_dir in (
os.path.join(toolchain_path, 'python', 'bin'), # CIPD
toolchain_path, # Legacy ZIP distributions.
):
for component in (
os.path.join(python_dir, 'python.exe'),
os.path.join(python_dir, 'DLLs', 'unicodedata.pyd'),
):
if os.path.isfile(component) and _in_use(component):
return True
# Look for Pytho:n 3 files that may be in use.
python_dir = os.path.join(toolchain_path, 'python3', 'bin')
for component in (
os.path.join(python_dir, 'python3.exe'),
os.path.join(python_dir, 'DLLs', 'unicodedata.pyd'),
):
if os.path.isfile(component) and _in_use(component):
return True
return False
def _check_call(argv, stdin_input=None, **kwargs):
"""Wrapper for subprocess.check_call that adds logging."""
logging.info('running %r', argv)
if stdin_input is not None:
kwargs['stdin'] = subprocess.PIPE
proc = subprocess.Popen(argv, **kwargs)
proc.communicate(input=stdin_input)
if proc.returncode:
raise subprocess.CalledProcessError(proc.returncode, argv, None)
def _safe_rmtree(path):
if not os.path.exists(path):
return
def _make_writable_and_remove(path):
st = os.stat(path)
new_mode = st.st_mode | 0o200
if st.st_mode == new_mode:
return False
try:
os.chmod(path, new_mode)
os.remove(path)
return True
except Exception:
return False
def _on_error(function, path, excinfo):
if not _make_writable_and_remove(path):
logging.warning('Failed to %s: %s (%s)', function, path, excinfo)
shutil.rmtree(path, onerror=_on_error)
def clean_up_old_installations(skip_dir):
  """Removes Python installations other than |skip_dir|.

  Performs an "in-use" check on each candidate directory so we never
  delete a Python executable that is currently running: our bootstrap
  may be run after (and by) other software that is using the
  bootstrapped Python!
  """
  patterns = ('win_tools-*_bin', 'python27*_bin', 'git-*_bin',
              'bootstrap-*_bin')
  entries = os.listdir(ROOT_DIR)
  for pattern in patterns:
    for entry in fnmatch.filter(entries, pattern):
      full_entry = os.path.join(ROOT_DIR, entry)
      if full_entry == skip_dir or not os.path.isdir(full_entry):
        continue
      logging.info('Cleaning up old installation %r', entry)
      if _toolchain_in_use(full_entry):
        logging.info('Toolchain at %r is in-use; skipping', full_entry)
      else:
        _safe_rmtree(full_entry)
# Version of "git_postprocess" system configuration (see |git_postprocess|).
# Bump this value whenever the system Git settings applied there change.
GIT_POSTPROCESS_VERSION = '2'
def git_get_mingw_dir(git_directory):
  """Returns (str) The "mingw" directory in a Git installation, or None."""
  # Prefer the 64-bit layout, falling back to 32-bit.
  candidates = (os.path.join(git_directory, name)
                for name in ('mingw64', 'mingw32'))
  return next((p for p in candidates if os.path.isdir(p)), None)
def git_postprocess(template, git_directory):
  """Installs Git helper stubs and applies system-level Git configuration."""
  # Mirror depot_tools HTML docs into Git's doc directory so that
  # "git help <command>" can find the depot_tools pages.
  mingw_dir = git_get_mingw_dir(git_directory)
  if not mingw_dir:
    logging.info('Could not find mingw directory for %r.', git_directory)
  else:
    docsrc = os.path.join(ROOT_DIR, 'man', 'html')
    git_docs_dir = os.path.join(mingw_dir, 'share', 'doc', 'git-doc')
    for name in os.listdir(docsrc):
      maybe_copy(
          os.path.join(docsrc, name),
          os.path.join(git_docs_dir, name))

  # Generate the top-level .bat stubs that forward into the Git directory.
  for stub_name, relpath in WIN_GIT_STUBS.items():
    template._replace(GIT_PROGRAM=relpath).maybe_install(
        'git.template.bat',
        os.path.join(ROOT_DIR, stub_name))

  # Set-up our system configuration environment. The following set of
  # parameters is versioned by "GIT_POSTPROCESS_VERSION". If they change,
  # update "GIT_POSTPROCESS_VERSION" accordingly.
  def configure_git_system():
    git_bat_path = os.path.join(ROOT_DIR, 'git.bat')
    for name, value in (('core.autocrlf', 'false'),
                        ('core.filemode', 'false'),
                        ('core.preloadindex', 'true'),
                        ('core.fscache', 'true'),
                        ('protocol.version', '2')):
      _check_call([git_bat_path, 'config', '--system', name, value])

  call_if_outdated(
      os.path.join(git_directory, '.git_postprocess'),
      GIT_POSTPROCESS_VERSION,
      configure_git_system)
def main(argv):
  """Entry point: regenerate bootstrap stubs and Python pointer files."""
  parser = argparse.ArgumentParser()
  parser.add_argument('--verbose', action='store_true')
  parser.add_argument('--bootstrap-name', required=True,
                      help='The directory of the Python installation.')
  args = parser.parse_args(argv)
  logging.basicConfig(level=logging.DEBUG if args.verbose else logging.WARN)

  name = args.bootstrap_name
  template = Template.empty()._replace(
      PYTHON_RELDIR=os.path.join(name, 'python'),
      PYTHON_BIN_RELDIR=os.path.join(name, 'python', 'bin'),
      PYTHON_BIN_RELDIR_UNIX=posixpath.join(name, 'python', 'bin'),
      PYTHON3_BIN_RELDIR=os.path.join(name, 'python3', 'bin'),
      PYTHON3_BIN_RELDIR_UNIX=posixpath.join(name, 'python3', 'bin'),
      GIT_BIN_RELDIR=os.path.join(name, 'git'),
      GIT_BIN_RELDIR_UNIX=posixpath.join(name, 'git'))
  bootstrap_dir = os.path.join(ROOT_DIR, name)

  # Clean up any old Python and Git installations.
  clean_up_old_installations(bootstrap_dir)

  if IS_WIN:
    git_postprocess(template, os.path.join(bootstrap_dir, 'git'))
    # Re-evaluate and regenerate our root templated files.
    for src_name, dst_name, dst_dir in (
        ('git-bash.template.sh', 'git-bash', ROOT_DIR),
        ('python27.bat', 'python.bat', ROOT_DIR),
        ('python3.bat', 'python3.bat', ROOT_DIR)):
      template.maybe_install(src_name, os.path.join(dst_dir, dst_name))

  # Emit our Python bin depot-tools-relative directory. This is read by
  # python.bat, python3.bat, vpython[.bat] and vpython3[.bat] to identify the
  # path of the current Python installation.
  #
  # We use this indirection so that upgrades can change this pointer to
  # redirect "python.bat" to a new Python installation. We can't just update
  # "python.bat" because batch file executions reload the batch file and seek
  # to the previous cursor in between every command, so changing the batch
  # file contents could invalidate any existing executions.
  #
  # The intention is that the batch file itself never needs to change when
  # switching Python versions.
  maybe_update(
      template.PYTHON_BIN_RELDIR,
      os.path.join(ROOT_DIR, 'python_bin_reldir.txt'))
  maybe_update(
      template.PYTHON3_BIN_RELDIR,
      os.path.join(ROOT_DIR, 'python3_bin_reldir.txt'))
  return 0
|
|
"""Tests for autocompletion."""
import imp
import sublime
import platform
from os import path
from EasyClangComplete.plugin.settings import settings_manager
from EasyClangComplete.plugin.utils import action_request
from EasyClangComplete.plugin.utils.subl import row_col
from EasyClangComplete.plugin.view_config import view_config_manager
from EasyClangComplete.tests import gui_test_wrapper
# Reload the plugin modules under test so edits to them take effect when
# the tests re-run inside a live Sublime Text session.
imp.reload(gui_test_wrapper)
imp.reload(row_col)
imp.reload(settings_manager)
imp.reload(view_config_manager)
imp.reload(action_request)
# Short aliases for the classes exercised by these tests.
SettingsManager = settings_manager.SettingsManager
ActionRequest = action_request.ActionRequest
ViewConfigManager = view_config_manager.ViewConfigManager
GuiTestWrapper = gui_test_wrapper.GuiTestWrapper
ZeroIndexedRowCol = row_col.ZeroIndexedRowCol
OneIndexedRowCol = row_col.OneIndexedRowCol
def has_libclang():
    """Check that libclang tests can run on the current platform.

    Returns:
        bool: True when libclang tests are supported; False on 64-bit
            Windows builds of Sublime Text older than build 3123, which
            have a ctypes crash bug.
    """
    on_win_x64 = platform.system() == "Windows" and sublime.arch() == "x64"
    if on_win_x64 and int(sublime.version()) < 3123:
        return False
    return True
# TODO(@kjteske): For now the tests seem to not be working for binary completer
def should_run_objc_tests():
    """Decide whether the Objective-C tests should execute.

    Getting a working GNUstep + clang environment on the Windows and
    Linux CIs proved too difficult, and nearly all ObjC development
    happens on Mac OS anyway, so these tests run on Darwin only.
    """
    return platform.system() == "Darwin"
class BaseTestCompleter(object):
    """Base class for tests independent of the Completer implementation.

    Subclasses pick the implementation via `use_libclang`; the GUI
    harness mixin supplies `self.view`, `set_up_view`, `check_view` and
    `get_row` (presumably from GuiTestWrapper — confirm in that module).

    Attributes:
        view (sublime.View): view
        use_libclang (bool): decides if we use libclang in tests
    """
    def set_up_completer(self):
        """Set up a completer for the current view.

        Returns:
            BaseCompleter: completer for the current view.
        """
        manager = SettingsManager()
        settings = manager.settings_for_view(self.view)
        # Force the completer implementation chosen by the subclass.
        settings.use_libclang = self.use_libclang
        view_config_manager = ViewConfigManager()
        view_config = view_config_manager.load_for_view(self.view, settings)
        completer = view_config.completer
        return completer
    def tear_down_completer(self):
        """Drop the cached view config (and its completer) for the view."""
        view_config_manager = ViewConfigManager()
        view_config_manager.clear_for_view(self.view.buffer_id())
    def test_setup_view(self):
        """Test that setup view correctly sets up the view."""
        file_name = path.join(path.dirname(__file__),
                              'test_files',
                              'test.cpp')
        self.check_view(file_name)
    def test_init(self):
        """Test that the completer is properly initialized."""
        file_name = path.join(path.dirname(__file__),
                              'test_files',
                              'test.cpp')
        self.set_up_view(file_name)
        completer = self.set_up_completer()
        # A completer that initialized correctly reports a version string.
        self.assertIsNotNone(completer.version_str)
        self.tear_down_completer()
    def test_complete(self):
        """Test autocompletion for user type."""
        file_name = path.join(path.dirname(__file__),
                              'test_files',
                              'test.cpp')
        self.set_up_view(file_name)
        completer = self.set_up_completer()
        # Check the current cursor position is completable.
        cursor_row_col = ZeroIndexedRowCol.from_one_indexed(
            OneIndexedRowCol(9, 5))
        self.assertEqual(self.get_row(cursor_row_col.row), " a.")
        location = cursor_row_col.as_1d_location(self.view)
        current_word = self.view.substr(self.view.word(location))
        self.assertEqual(current_word, ".\n")
        # Load the completions.
        request = ActionRequest(self.view, location)
        (_, completions) = completer.complete(request)
        # Verify that we got the expected completions back.
        self.assertIsNotNone(completions)
        expected = ['foo\tvoid foo(double a)', 'foo(${1:double a})']
        self.assertIn(expected, completions)
        self.tear_down_completer()
    def test_excluded_private(self):
        """Test that private members are excluded from completions."""
        file_name = path.join(path.dirname(__file__),
                              'test_files',
                              'test.cpp')
        self.set_up_view(file_name)
        completer = self.set_up_completer()
        # Check the current cursor position is completable.
        cursor_row_col = ZeroIndexedRowCol.from_one_indexed(
            OneIndexedRowCol(9, 5))
        self.assertEqual(self.get_row(cursor_row_col.row), " a.")
        location = cursor_row_col.as_1d_location(self.view)
        current_word = self.view.substr(self.view.word(location))
        self.assertEqual(current_word, ".\n")
        # Load the completions.
        request = ActionRequest(self.view, location)
        (_, completions) = completer.complete(request)
        # Verify that we got the expected completions back.
        self.assertIsNotNone(completions)
        expected = ['foo\tvoid foo(double a)', 'foo(${1:double a})']
        unexpected = ['foo\tvoid foo(int a)', 'foo(${1:int a})']
        # Only the libclang completer filters private members.
        if self.use_libclang:
            self.assertIn(expected, completions)
            self.assertNotIn(unexpected, completions)
        self.tear_down_completer()
    def test_excluded_destructor(self):
        """Test that the destructor is excluded by the libclang completer."""
        file_name = path.join(path.dirname(__file__),
                              'test_files',
                              'test.cpp')
        self.set_up_view(file_name)
        completer = self.set_up_completer()
        # Check the current cursor position is completable.
        cursor_row_col = ZeroIndexedRowCol.from_one_indexed(
            OneIndexedRowCol(9, 5))
        self.assertEqual(self.get_row(cursor_row_col.row), " a.")
        location = cursor_row_col.as_1d_location(self.view)
        current_word = self.view.substr(self.view.word(location))
        self.assertEqual(current_word, ".\n")
        # Load the completions.
        request = ActionRequest(self.view, location)
        (_, completions) = completer.complete(request)
        # Verify that we got the expected completions back.
        self.assertIsNotNone(completions)
        destructor = ['~A\tvoid ~A()', '~A()']
        # The binary completer does not filter the destructor out.
        if self.use_libclang:
            self.assertNotIn(destructor, completions)
        else:
            self.assertIn(destructor, completions)
        self.tear_down_completer()
    def test_complete_vector(self):
        """Test that we can complete vector members."""
        file_name = path.join(path.dirname(__file__),
                              'test_files',
                              'test_vector.cpp')
        self.set_up_view(file_name)
        completer = self.set_up_completer()
        # Check the current cursor position is completable.
        cursor_row_col = ZeroIndexedRowCol.from_one_indexed(
            OneIndexedRowCol(4, 7))
        self.assertEqual(self.get_row(cursor_row_col.row), " vec.")
        location = cursor_row_col.as_1d_location(self.view)
        current_word = self.view.substr(self.view.word(location))
        self.assertEqual(current_word, ".\n")
        # Load the completions.
        request = ActionRequest(self.view, location)
        (_, completions) = completer.complete(request)
        # Verify that we got the expected completions back.
        self.assertIsNotNone(completions)
        expected = ['clear\tvoid clear()', 'clear()']
        self.assertIn(expected, completions)
        self.tear_down_completer()
    def test_complete_objc_property(self):
        """Test that we can complete Objective C properties."""
        if not should_run_objc_tests() or not self.use_libclang:
            return
        file_name = path.join(path.dirname(__file__),
                              'test_files',
                              'test_property.m')
        self.set_up_view(file_name)
        completer = self.set_up_completer()
        # Check the current cursor position is completable.
        self.assertEqual(self.get_row(6), " foo.")
        pos = self.view.text_point(6, 6)
        current_word = self.view.substr(self.view.word(pos))
        self.assertEqual(current_word, ".\n")
        # Load the completions.
        request = ActionRequest(self.view, pos)
        (_, completions) = completer.complete(request)
        # Verify that we got the expected completions back.
        self.assertIsNotNone(completions)
        expected = ['boolProperty\tBOOL boolProperty', 'boolProperty']
        self.assertIn(expected, completions)
        self.tear_down_completer()
    def test_complete_objc_void_method(self):
        """Test that we can complete Objective C void methods."""
        if not should_run_objc_tests() or not self.use_libclang:
            return
        file_name = path.join(path.dirname(__file__),
                              'test_files',
                              'test_void_method.m')
        self.set_up_view(file_name)
        completer = self.set_up_completer()
        # Check the current cursor position is completable.
        self.assertEqual(self.get_row(6), " [foo ")
        pos = self.view.text_point(6, 7)
        current_word = self.view.substr(self.view.word(pos))
        self.assertEqual(current_word, " \n")
        # Load the completions.
        request = ActionRequest(self.view, pos)
        (_, completions) = completer.complete(request)
        # Verify that we got the expected completions back.
        self.assertIsNotNone(completions)
        expected = ['voidMethod\tvoid voidMethod', 'voidMethod']
        self.assertIn(expected, completions)
        self.tear_down_completer()
    def test_complete_objc_method_one_parameter(self):
        """Test that we can complete Objective C methods with one parameter."""
        if not should_run_objc_tests() or not self.use_libclang:
            return
        file_name = path.join(path.dirname(__file__),
                              'test_files',
                              'test_method_one_parameter.m')
        self.set_up_view(file_name)
        completer = self.set_up_completer()
        # Check the current cursor position is completable.
        self.assertEqual(self.get_row(6), " [foo ")
        pos = self.view.text_point(6, 7)
        current_word = self.view.substr(self.view.word(pos))
        self.assertEqual(current_word, " \n")
        # Load the completions.
        request = ActionRequest(self.view, pos)
        (_, completions) = completer.complete(request)
        # Verify that we got the expected completions back.
        self.assertIsNotNone(completions)
        expected = ['oneParameterMethod:\tvoid oneParameterMethod:(BOOL)',
                    'oneParameterMethod:${1:(BOOL)}']
        self.assertIn(expected, completions)
        self.tear_down_completer()
    def test_complete_objc_method_multiple_parameters(self):
        """Test that we can complete Objective C methods with 2+ parameters."""
        if not should_run_objc_tests() or not self.use_libclang:
            return
        file_name = path.join(path.dirname(__file__),
                              'test_files',
                              'test_method_two_parameters.m')
        self.set_up_view(file_name)
        completer = self.set_up_completer()
        # Check the current cursor position is completable.
        self.assertEqual(self.get_row(6), " [foo ")
        pos = self.view.text_point(6, 7)
        current_word = self.view.substr(self.view.word(pos))
        self.assertEqual(current_word, " \n")
        # Load the completions.
        request = ActionRequest(self.view, pos)
        (_, completions) = completer.complete(request)
        # Verify that we got the expected completions back.
        self.assertIsNotNone(completions)
        expected = [
            'bar:strParam:\tNSInteger * bar:(BOOL) strParam:(NSString *)',
            'bar:${1:(BOOL)} strParam:${2:(NSString *)}']
        self.assertIn(expected, completions)
        self.tear_down_completer()
    def test_complete_objcpp(self):
        """Test that we can complete code in Objective-C++ files."""
        if not should_run_objc_tests() or not self.use_libclang:
            return
        file_name = path.join(path.dirname(__file__),
                              'test_files',
                              'test_objective_cpp.mm')
        self.set_up_view(file_name)
        completer = self.set_up_completer()
        # Check the current cursor position is completable.
        self.assertEqual(self.get_row(3), " str.")
        pos = self.view.text_point(3, 6)
        current_word = self.view.substr(self.view.word(pos))
        self.assertEqual(current_word, ".\n")
        # Load the completions.
        request = ActionRequest(self.view, pos)
        (_, completions) = completer.complete(request)
        # Verify that we got the expected completions back.
        self.assertIsNotNone(completions)
        expected = [
            'clear\tvoid clear()', 'clear()']
        self.assertIn(expected, completions)
        self.tear_down_completer()
    def test_unsaved_views(self):
        """Test that we gracefully handle unsaved views."""
        # Construct an unsaved scratch view.
        self.view = sublime.active_window().new_file()
        self.view.set_scratch(True)
        # Manually set up a completer.
        manager = SettingsManager()
        settings = manager.settings_for_view(self.view)
        view_config_manager = ViewConfigManager()
        view_config = view_config_manager.load_for_view(self.view, settings)
        # A view without a file on disk must not produce a config.
        self.assertIsNone(view_config)
    def test_cooperation_with_default_completions(self):
        """Empty clang completions should not hide default completions."""
        file_name = path.join(path.dirname(__file__),
                              'test_files',
                              'test_errors.cpp')
        self.set_up_view(file_name)
        self.set_up_completer()
        # Undefined foo object has no completions.
        self.assertEqual(self.get_row(1), " foo.")
        pos = self.view.text_point(1, 6)
        current_word = self.view.substr(self.view.word(pos))
        self.assertEqual(current_word, ".\n")
        # Trigger default completions popup.
        self.view.run_command('auto_complete')
        self.assertTrue(self.view.is_auto_complete_visible())
        self.tear_down_completer()
    def test_get_declaration_location(self):
        """Test getting declaration location."""
        if not self.use_libclang:
            return
        file_name = path.join(path.dirname(__file__),
                              'test_files',
                              'test_location.cpp')
        self.set_up_view(file_name)
        completer = self.set_up_completer()
        # Check the current cursor position is completable.
        row = 10
        col = 15
        cursor_row_col = ZeroIndexedRowCol.from_one_indexed(
            OneIndexedRowCol(row, col))
        self.assertEqual(self.get_row(cursor_row_col.row),
                         " cool_class.foo();")
        location = cursor_row_col.as_1d_location(self.view)
        current_word = self.view.substr(self.view.word(location))
        self.assertEqual(current_word, "foo")
        # The declaration of foo() lives in the same file at line 3, col 8.
        loc = completer.get_declaration_location(self.view, cursor_row_col)
        self.assertEqual(loc.file.name, file_name)
        self.assertEqual(loc.line, 3)
        self.assertEqual(loc.column, 8)
        self.tear_down_completer()
class TestBinCompleter(BaseTestCompleter, GuiTestWrapper):
    """Test class for the binary based completer."""
    # Run every BaseTestCompleter test against the clang-binary completer.
    use_libclang = False
# Define the libclang-backed test class only where libclang is usable
# (see has_libclang() for the excluded platforms).
if has_libclang():
    class TestLibCompleter(BaseTestCompleter, GuiTestWrapper):
        """Test class for the library based completer."""
        # Run every BaseTestCompleter test against the libclang completer.
        use_libclang = True
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Implementation of ``DataChannel``s to communicate across the data plane."""
# pytype: skip-file
# mypy: disallow-untyped-defs
import abc
import collections
import logging
import queue
import sys
import threading
import time
from types import TracebackType
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import Collection
from typing import DefaultDict
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Set
from typing import Tuple
from typing import Type
from typing import Union
import grpc
from apache_beam.coders import coder_impl
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
# At type-check time use the pure-Python OutputStream so the alias is a
# real class; at runtime derive it from whatever implementation
# (compiled or slow) coder_impl actually provides.
if TYPE_CHECKING:
  import apache_beam.coders.slow_stream
  OutputStream = apache_beam.coders.slow_stream.OutputStream
  DataOrTimers = Union[beam_fn_api_pb2.Elements.Data,
                       beam_fn_api_pb2.Elements.Timers]
else:
  OutputStream = type(coder_impl.create_OutputStream())
# Shape of sys.exc_info(): populated on failure, all-None otherwise.
ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
OptExcInfo = Union[ExcInfo, Tuple[None, None, None]]
# This module is experimental. No backwards-compatibility guarantees.
_LOGGER = logging.getLogger(__name__)
_DEFAULT_SIZE_FLUSH_THRESHOLD = 10 << 20  # 10MB
_DEFAULT_TIME_FLUSH_THRESHOLD_MS = 0  # disable time-based flush by default
class ClosableOutputStream(OutputStream):
  """An OutputStream for use with CoderImpls that adds a close() method.

  close() hands the bytes accumulated so far to an optional callback.
  Flushing is a no-op at this level; the buffering subclasses give it
  meaning.
  """
  def __init__(
      self,
      close_callback=None  # type: Optional[Callable[[bytes], None]]
  ):
    # type: (...) -> None
    super().__init__()
    self._close_callback = close_callback

  def close(self):
    # type: () -> None
    # Deliver everything written so far to the close callback, if any.
    callback = self._close_callback
    if callback:
      callback(self.get())

  def maybe_flush(self):
    # type: () -> None
    pass  # no buffering policy at this level

  def flush(self):
    # type: () -> None
    pass  # no buffering policy at this level

  @staticmethod
  def create(close_callback,  # type: Optional[Callable[[bytes], None]]
             flush_callback,  # type: Optional[Callable[[bytes], None]]
             data_buffer_time_limit_ms  # type: int
            ):
    # type: (...) -> ClosableOutputStream
    """Pick a buffering implementation based on the time limit."""
    if data_buffer_time_limit_ms > 0:
      return TimeBasedBufferingClosableOutputStream(
          close_callback,
          flush_callback=flush_callback,
          time_flush_threshold_ms=data_buffer_time_limit_ms)
    return SizeBasedBufferingClosableOutputStream(
        close_callback, flush_callback=flush_callback)
class SizeBasedBufferingClosableOutputStream(ClosableOutputStream):
  """A ClosableOutputStream that flushes once the buffer grows large enough."""
  def __init__(self,
               close_callback=None,  # type: Optional[Callable[[bytes], None]]
               flush_callback=None,  # type: Optional[Callable[[bytes], None]]
               size_flush_threshold=_DEFAULT_SIZE_FLUSH_THRESHOLD  # type: int
              ):
    super().__init__(close_callback)
    self._flush_callback = flush_callback
    self._size_flush_threshold = size_flush_threshold

  # This must be called explicitly to avoid flushing partial elements.
  def maybe_flush(self):
    # type: () -> None
    if self.size() > self._size_flush_threshold:
      self.flush()

  def flush(self):
    # type: () -> None
    # Without a flush callback the buffer is kept and keeps growing.
    if self._flush_callback:
      self._flush_callback(self.get())
      self._clear()
class TimeBasedBufferingClosableOutputStream(
    SizeBasedBufferingClosableOutputStream):
  """A buffering OutputStream with both time-based and size-based flushing."""
  # Background thread that triggers periodic flushes; None once closed.
  _periodic_flusher = None  # type: Optional[PeriodicThread]
  def __init__(
      self,
      close_callback=None,  # type: Optional[Callable[[bytes], None]]
      flush_callback=None,  # type: Optional[Callable[[bytes], None]]
      size_flush_threshold=_DEFAULT_SIZE_FLUSH_THRESHOLD,  # type: int
      time_flush_threshold_ms=_DEFAULT_TIME_FLUSH_THRESHOLD_MS  # type: int
  ):
    # type: (...) -> None
    super(TimeBasedBufferingClosableOutputStream,
          self).__init__(close_callback, flush_callback, size_flush_threshold)
    # A non-positive threshold would mean "no periodic flush"; callers
    # must use the size-based stream for that (see create()).
    assert time_flush_threshold_ms > 0
    self._time_flush_threshold_ms = time_flush_threshold_ms
    # _flush_lock serializes flushes between the periodic thread and
    # direct callers; _schedule_lock guards _closed/_periodic_flusher.
    self._flush_lock = threading.Lock()
    self._schedule_lock = threading.Lock()
    self._closed = False
    self._schedule_periodic_flush()
  def flush(self):
    # type: () -> None
    with self._flush_lock:
      super(TimeBasedBufferingClosableOutputStream, self).flush()
  def close(self):
    # type: () -> None
    # Stop the periodic flusher before delivering the final bytes so no
    # flush can race with the close callback.
    with self._schedule_lock:
      self._closed = True
      if self._periodic_flusher:
        self._periodic_flusher.cancel()
        self._periodic_flusher = None
    super(TimeBasedBufferingClosableOutputStream, self).close()
  def _schedule_periodic_flush(self):
    # type: () -> None
    def _flush():
      # type: () -> None
      # Skip the flush if close() won the race for _schedule_lock.
      with self._schedule_lock:
        if not self._closed:
          self.flush()
    self._periodic_flusher = PeriodicThread(
        self._time_flush_threshold_ms / 1000.0, _flush)
    self._periodic_flusher.daemon = True
    self._periodic_flusher.start()
class PeriodicThread(threading.Thread):
  """Invoke a callable every `interval` seconds until cancel() is called."""
  def __init__(self,
               interval,  # type: float
               function,  # type: Callable
               args=None,  # type: Optional[Iterable]
               kwargs=None  # type: Optional[Mapping[str, Any]]
              ):
    # type: (...) -> None
    threading.Thread.__init__(self)
    self._interval = interval
    self._function = function
    self._args = [] if args is None else args
    self._kwargs = {} if kwargs is None else kwargs
    self._finished = threading.Event()

  def run(self):
    # type: () -> None
    # Track an absolute deadline so the period does not drift by the
    # time the callback itself takes.
    deadline = time.time() + self._interval
    while not self._finished.wait(deadline - time.time()):
      deadline += self._interval
      self._function(*self._args, **self._kwargs)

  def cancel(self):
    # type: () -> None
    """Stop the thread if it hasn't finished yet."""
    self._finished.set()
class DataChannel(metaclass=abc.ABCMeta):
  """Represents a channel for reading and writing data over the data plane.

  Read data and timer from this channel with the input_elements method::

    for elements_data in data_channel.input_elements(
        instruction_id, transform_ids, timers):
      [process elements_data]

  Write data to this channel using the output_stream method::

    out1 = data_channel.output_stream(instruction_id, transform_id)
    out1.write(...)
    out1.close()

  Write timer to this channel using the output_timer_stream method::

    out1 = data_channel.output_timer_stream(instruction_id,
                                            transform_id,
                                            timer_family_id)
    out1.write(...)
    out1.close()

  When all data/timer for all instructions is written, close the channel::

    data_channel.close()
  """
  @abc.abstractmethod
  def input_elements(self,
                     instruction_id,  # type: str
                     expected_inputs,  # type: Collection[Union[str, Tuple[str, str]]]
                     abort_callback=None  # type: Optional[Callable[[], bool]]
                    ):
    # type: (...) -> Iterator[DataOrTimers]
    """Returns an iterable of all Element.Data and Element.Timers bundles for
    instruction_id.

    This iterable terminates only once the full set of data has been received
    for each of the expected transforms. It may block waiting for more data.

    Args:
        instruction_id: which instruction the results must belong to
        expected_inputs: which transforms to wait on for completion
        abort_callback: a callback to invoke if blocking returning whether
            to abort before consuming all the data
    """
    raise NotImplementedError(type(self))
  @abc.abstractmethod
  def output_stream(
      self,
      instruction_id,  # type: str
      transform_id  # type: str
  ):
    # type: (...) -> ClosableOutputStream
    """Returns an output stream writing elements to transform_id.

    Args:
        instruction_id: which instruction this stream belongs to
        transform_id: the transform_id of the returned stream
    """
    raise NotImplementedError(type(self))
  @abc.abstractmethod
  def output_timer_stream(self,
                          instruction_id,  # type: str
                          transform_id,  # type: str
                          timer_family_id  # type: str
                         ):
    # type: (...) -> ClosableOutputStream
    """Returns an output stream for writing timers to transform_id.

    Args:
        instruction_id: which instruction this stream belongs to
        transform_id: the transform_id of the returned stream
        timer_family_id: the timer family of the written timer
    """
    raise NotImplementedError(type(self))
  @abc.abstractmethod
  def close(self):
    # type: () -> None
    """Closes this channel, indicating that all data has been written.

    Data can continue to be read.

    If this channel is shared by many instructions, should only be called on
    worker shutdown.
    """
    raise NotImplementedError(type(self))
class InMemoryDataChannel(DataChannel):
  """An in-memory implementation of a DataChannel.

  This channel is two-sided. What is written to one side is read by the other.
  The inverse() method returns the other side of an instance.
  """
  def __init__(self, inverse=None, data_buffer_time_limit_ms=0):
    # type: (Optional[InMemoryDataChannel], int) -> None
    self._inputs = []  # type: List[DataOrTimers]
    self._data_buffer_time_limit_ms = data_buffer_time_limit_ms
    # Create the paired channel lazily: writes on either side land in
    # the other side's _inputs list.
    self._inverse = inverse or InMemoryDataChannel(
        self, data_buffer_time_limit_ms=data_buffer_time_limit_ms)
  def inverse(self):
    # type: () -> InMemoryDataChannel
    return self._inverse
  def input_elements(self,
                     instruction_id,  # type: str
                     unused_expected_inputs,  # type: Any
                     abort_callback=None  # type: Optional[Callable[[], bool]]
                    ):
    # type: (...) -> Iterator[DataOrTimers]
    # Yield the elements belonging to this instruction and keep the rest
    # queued for later input_elements calls.
    other_inputs = []
    for element in self._inputs:
      if element.instruction_id == instruction_id:
        if isinstance(element, beam_fn_api_pb2.Elements.Timers):
          # Terminal timer markers are dropped; only payloads are yielded.
          if not element.is_last:
            yield element
        if isinstance(element, beam_fn_api_pb2.Elements.Data):
          # Empty non-final data blocks carry no information; skip them.
          if element.data or element.is_last:
            yield element
      else:
        other_inputs.append(element)
    self._inputs = other_inputs
  def output_timer_stream(self,
                          instruction_id,  # type: str
                          transform_id,  # type: str
                          timer_family_id  # type: str
                         ):
    # type: (...) -> ClosableOutputStream
    def add_to_inverse_output(timer):
      # type: (bytes) -> None
      # Flush callback: forward buffered timer bytes to the other side.
      if timer:
        self._inverse._inputs.append(
            beam_fn_api_pb2.Elements.Timers(
                instruction_id=instruction_id,
                transform_id=transform_id,
                timer_family_id=timer_family_id,
                timers=timer,
                is_last=False))
    def close_stream(timer):
      # type: (bytes) -> None
      # Close callback: send remaining bytes plus an is_last marker.
      add_to_inverse_output(timer)
      self._inverse._inputs.append(
          beam_fn_api_pb2.Elements.Timers(
              instruction_id=instruction_id,
              transform_id=transform_id,
              timer_family_id='',
              is_last=True))
    return ClosableOutputStream.create(
        add_to_inverse_output, close_stream, self._data_buffer_time_limit_ms)
  def output_stream(self, instruction_id, transform_id):
    # type: (str, str) -> ClosableOutputStream
    def add_to_inverse_output(data):
      # type: (bytes) -> None
      self._inverse._inputs.append(  # pylint: disable=protected-access
          beam_fn_api_pb2.Elements.Data(
              instruction_id=instruction_id,
              transform_id=transform_id,
              data=data))
    # The same callback serves both flush and close for plain data.
    return ClosableOutputStream.create(
        add_to_inverse_output,
        add_to_inverse_output,
        self._data_buffer_time_limit_ms)
  def close(self):
    # type: () -> None
    # Nothing to release for the in-memory channel.
    pass
class _GrpcDataChannel(DataChannel):
  """Base class for implementing a BeamFnData-based DataChannel."""
  # Sentinel queued on _to_send to tell _write_outputs to terminate.
  _WRITES_FINISHED = object()
  def __init__(self, data_buffer_time_limit_ms=0):
    # type: (int) -> None
    self._data_buffer_time_limit_ms = data_buffer_time_limit_ms
    # Outgoing elements, drained by _write_outputs (the gRPC request side).
    self._to_send = queue.Queue()  # type: queue.Queue[DataOrTimers]
    # Incoming elements keyed by instruction_id; bounded queues apply
    # backpressure to the reader thread.
    self._received = collections.defaultdict(
        lambda: queue.Queue(maxsize=5)
    )  # type: DefaultDict[str, queue.Queue[DataOrTimers]]
    self._receive_lock = threading.Lock()
    self._reads_finished = threading.Event()
    self._closed = False
    # Last exception seen by the reader thread, sys.exc_info() shaped.
    self._exc_info = (None, None, None)  # type: OptExcInfo
  def close(self):
    # type: () -> None
    self._to_send.put(self._WRITES_FINISHED)  # type: ignore[arg-type]
    self._closed = True
  def wait(self, timeout=None):
    # type: (Optional[int]) -> None
    # Block until the reader thread has consumed the input stream.
    self._reads_finished.wait(timeout)
  def _receiving_queue(self, instruction_id):
    # type: (str) -> queue.Queue[DataOrTimers]
    with self._receive_lock:
      return self._received[instruction_id]
  def _clean_receiving_queue(self, instruction_id):
    # type: (str) -> None
    with self._receive_lock:
      self._received.pop(instruction_id)
  def input_elements(self,
                     instruction_id,  # type: str
                     expected_inputs,  # type: Collection[Union[str, Tuple[str, str]]]
                     abort_callback=None  # type: Optional[Callable[[], bool]]
                    ):
    # type: (...) -> Iterator[DataOrTimers]
    """
    Generator to retrieve elements for an instruction_id
    input_elements should be called only once for an instruction_id

    Args:
      instruction_id(str): instruction_id for which data is read
      expected_inputs(collection): expected inputs, include both data and timer.
    """
    received = self._receiving_queue(instruction_id)
    done_inputs = set()  # type: Set[Union[str, Tuple[str, str]]]
    abort_callback = abort_callback or (lambda: False)
    try:
      while len(done_inputs) < len(expected_inputs):
        try:
          # Wake up once a second so channel closure, abort requests and
          # reader-thread failures are noticed even without new elements.
          element = received.get(timeout=1)
        except queue.Empty:
          if self._closed:
            raise RuntimeError('Channel closed prematurely.')
          if abort_callback():
            return
          # Re-raise any exception captured by the reader thread.
          t, v, tb = self._exc_info
          if t:
            raise t(v).with_traceback(tb)
        else:
          # Timers complete per (transform, timer family); data per
          # transform. is_last marks completion rather than payload.
          if isinstance(element, beam_fn_api_pb2.Elements.Timers):
            if element.is_last:
              done_inputs.add((element.transform_id, element.timer_family_id))
            else:
              yield element
          elif isinstance(element, beam_fn_api_pb2.Elements.Data):
            if element.is_last:
              done_inputs.add(element.transform_id)
            else:
              assert element.transform_id not in done_inputs
              yield element
          else:
            raise ValueError('Unexpected input element type %s' % type(element))
    finally:
      # Instruction_ids are not reusable so Clean queue once we are done with
      # an instruction_id
      self._clean_receiving_queue(instruction_id)
  def output_stream(self, instruction_id, transform_id):
    # type: (str, str) -> ClosableOutputStream
    def add_to_send_queue(data):
      # type: (bytes) -> None
      # Flush callback: enqueue non-empty buffered data for sending.
      if data:
        self._to_send.put(
            beam_fn_api_pb2.Elements.Data(
                instruction_id=instruction_id,
                transform_id=transform_id,
                data=data))
    def close_callback(data):
      # type: (bytes) -> None
      add_to_send_queue(data)
      # End of stream marker.
      self._to_send.put(
          beam_fn_api_pb2.Elements.Data(
              instruction_id=instruction_id,
              transform_id=transform_id,
              is_last=True))
    return ClosableOutputStream.create(
        close_callback, add_to_send_queue, self._data_buffer_time_limit_ms)
  def output_timer_stream(self,
                          instruction_id,  # type: str
                          transform_id,  # type: str
                          timer_family_id  # type: str
                         ):
    # type: (...) -> ClosableOutputStream
    def add_to_send_queue(timer):
      # type: (bytes) -> None
      # Flush callback: enqueue non-empty buffered timers for sending.
      if timer:
        self._to_send.put(
            beam_fn_api_pb2.Elements.Timers(
                instruction_id=instruction_id,
                transform_id=transform_id,
                timer_family_id=timer_family_id,
                timers=timer,
                is_last=False))
    def close_callback(timer):
      # type: (bytes) -> None
      add_to_send_queue(timer)
      # End of stream marker for this timer family.
      self._to_send.put(
          beam_fn_api_pb2.Elements.Timers(
              instruction_id=instruction_id,
              transform_id=transform_id,
              timer_family_id=timer_family_id,
              is_last=True))
    return ClosableOutputStream.create(
        close_callback, add_to_send_queue, self._data_buffer_time_limit_ms)
  def _write_outputs(self):
    # type: () -> Iterator[beam_fn_api_pb2.Elements]
    # Drains _to_send, batching queued elements into Elements protos,
    # until the _WRITES_FINISHED sentinel is seen.
    stream_done = False
    while not stream_done:
      # Block for at least one element per iteration.
      streams = [self._to_send.get()]
      try:
        # Coalesce up to 100 other items.
        for _ in range(100):
          streams.append(self._to_send.get_nowait())
      except queue.Empty:
        pass
      if streams[-1] is self._WRITES_FINISHED:
        stream_done = True
        streams.pop()
      if streams:
        data_stream = []
        timer_stream = []
        for stream in streams:
          if isinstance(stream, beam_fn_api_pb2.Elements.Timers):
            timer_stream.append(stream)
          elif isinstance(stream, beam_fn_api_pb2.Elements.Data):
            data_stream.append(stream)
          else:
            raise ValueError('Unexpected output element type %s' % type(stream))
        yield beam_fn_api_pb2.Elements(data=data_stream, timers=timer_stream)
  def _read_inputs(self, elements_iterator):
    # type: (Iterable[beam_fn_api_pb2.Elements]) -> None
    # Runs on the reader thread: fan incoming elements out to the
    # per-instruction queues.
    try:
      for elements in elements_iterator:
        for timer in elements.timers:
          self._receiving_queue(timer.instruction_id).put(timer)
        for data in elements.data:
          self._receiving_queue(data.instruction_id).put(data)
    except:  # pylint: disable=bare-except
      if not self._closed:
        _LOGGER.exception('Failed to read inputs in the data plane.')
        # Stash the exception so input_elements can re-raise it on the
        # consuming thread.
        self._exc_info = sys.exc_info()
        raise
    finally:
      self._closed = True
      self._reads_finished.set()
  def set_inputs(self, elements_iterator):
    # type: (Iterable[beam_fn_api_pb2.Elements]) -> None
    # Consume the input stream on a daemon thread so reads don't block
    # the caller.
    reader = threading.Thread(
        target=lambda: self._read_inputs(elements_iterator),
        name='read_grpc_client_inputs')
    reader.daemon = True
    reader.start()
class GrpcClientDataChannel(_GrpcDataChannel):
  """A DataChannel wrapping the client side of a BeamFnData connection."""
  def __init__(self,
               data_stub,  # type: beam_fn_api_pb2_grpc.BeamFnDataStub
               data_buffer_time_limit_ms=0  # type: int
              ):
    # type: (...) -> None
    super().__init__(data_buffer_time_limit_ms)
    # Open the bidirectional Data stream: our outgoing queue feeds the
    # request side, and the response side is consumed on a reader thread.
    response_iterator = data_stub.Data(self._write_outputs())
    self.set_inputs(response_iterator)
class BeamFnDataServicer(beam_fn_api_pb2_grpc.BeamFnDataServicer):
  """Implementation of BeamFnDataServicer for any number of clients"""
  def __init__(
      self,
      data_buffer_time_limit_ms=0  # type: int
  ):
    self._lock = threading.Lock()
    # One lazily-created data channel per worker id.
    self._connections_by_worker_id = collections.defaultdict(
        lambda: _GrpcDataChannel(data_buffer_time_limit_ms)
    )  # type: DefaultDict[str, _GrpcDataChannel]

  def get_conn_by_worker_id(self, worker_id):
    # type: (str) -> _GrpcDataChannel
    # Serialize defaultdict creation across servicer threads.
    with self._lock:
      return self._connections_by_worker_id[worker_id]

  def Data(self,
           elements_iterator,  # type: Iterable[beam_fn_api_pb2.Elements]
           context  # type: Any
          ):
    # type: (...) -> Iterator[beam_fn_api_pb2.Elements]
    # Route the stream to the channel of the worker named in the
    # request metadata, then serve that channel's outgoing elements.
    metadata = dict(context.invocation_metadata())
    data_conn = self.get_conn_by_worker_id(metadata['worker_id'])
    data_conn.set_inputs(elements_iterator)
    yield from data_conn._write_outputs()
class DataChannelFactory(metaclass=abc.ABCMeta):
  """An abstract factory for creating ``DataChannel``."""
  @abc.abstractmethod
  def create_data_channel(self, remote_grpc_port):
    # type: (beam_fn_api_pb2.RemoteGrpcPort) -> GrpcClientDataChannel
    """Returns a ``DataChannel`` from the given RemoteGrpcPort."""
    raise NotImplementedError(type(self))
  @abc.abstractmethod
  def create_data_channel_from_url(self, url):
    # type: (str) -> Optional[GrpcClientDataChannel]
    """Returns a ``DataChannel`` from the given url."""
    raise NotImplementedError(type(self))
  @abc.abstractmethod
  def close(self):
    # type: () -> None
    """Close all channels that this factory owns."""
    raise NotImplementedError(type(self))
class GrpcClientDataChannelFactory(DataChannelFactory):
  """A factory for ``GrpcClientDataChannel``.

  Caches the created channels by ``data descriptor url``.
  """
  def __init__(self,
               credentials=None,  # type: Any
               worker_id=None,  # type: Optional[str]
               data_buffer_time_limit_ms=0  # type: int
              ):
    # type: (...) -> None
    self._data_channel_cache = {}  # type: Dict[str, GrpcClientDataChannel]
    self._lock = threading.Lock()
    self._worker_id = worker_id
    self._data_buffer_time_limit_ms = data_buffer_time_limit_ms
    if credentials is None:
      self._credentials = None
    else:
      _LOGGER.info('Using secure channel creds.')
      self._credentials = credentials

  def create_data_channel_from_url(self, url):
    # type: (str) -> Optional[GrpcClientDataChannel]
    """Return the cached channel for ``url``, creating it on first use."""
    if not url:
      return None
    if url not in self._data_channel_cache:
      with self._lock:
        # Re-check under the lock: another thread may have created the
        # channel while we were waiting.
        if url not in self._data_channel_cache:
          _LOGGER.info('Creating client data channel for %s', url)
          # Options to have no limits (-1) on the size of the messages
          # received or sent over the data plane. The actual buffer size
          # is controlled in a layer above.
          options = [("grpc.max_receive_message_length", -1),
                     ("grpc.max_send_message_length", -1)]
          if self._credentials is None:
            channel = GRPCChannelFactory.insecure_channel(
                url, options=options)
          else:
            channel = GRPCChannelFactory.secure_channel(
                url, self._credentials, options=options)
          # Add workerId to the grpc channel
          channel = grpc.intercept_channel(
              channel, WorkerIdInterceptor(self._worker_id))
          self._data_channel_cache[url] = GrpcClientDataChannel(
              beam_fn_api_pb2_grpc.BeamFnDataStub(channel),
              self._data_buffer_time_limit_ms)
    return self._data_channel_cache[url]

  def create_data_channel(self, remote_grpc_port):
    # type: (beam_fn_api_pb2.RemoteGrpcPort) -> GrpcClientDataChannel
    """Resolve the port's url and delegate to create_data_channel_from_url."""
    url = remote_grpc_port.api_service_descriptor.url
    # TODO(BEAM-7746): this can return None if url is falsey, but this seems
    # incorrect, as code that calls this method seems to always expect
    # non-Optional values.
    return self.create_data_channel_from_url(url)  # type: ignore[return-value]

  def close(self):
    # type: () -> None
    """Close and forget every cached channel."""
    _LOGGER.info('Closing all cached grpc data channels.')
    for channel in self._data_channel_cache.values():
      channel.close()
    self._data_channel_cache.clear()
class InMemoryDataChannelFactory(DataChannelFactory):
  """A singleton factory for ``InMemoryDataChannel``."""
  def __init__(self, in_memory_data_channel):
    # type: (GrpcClientDataChannel) -> None
    """Remember the one channel every request will be answered with."""
    self._in_memory_data_channel = in_memory_data_channel

  def create_data_channel(self, unused_remote_grpc_port):
    # type: (beam_fn_api_pb2.RemoteGrpcPort) -> GrpcClientDataChannel
    """The port is ignored; the shared in-memory channel is returned."""
    return self._in_memory_data_channel

  def create_data_channel_from_url(self, url):
    # type: (Any) -> GrpcClientDataChannel
    """The url is ignored; the shared in-memory channel is returned."""
    return self._in_memory_data_channel

  def close(self):
    # type: () -> None
    """Nothing to close: the channel is owned by the caller."""
    pass
|
|
# coding=utf-8
import hashlib
from raco import compile
from raco.algebra import Store, Select, Apply, Scan, CrossProduct, Sequence, \
ProjectingJoin, UnionAll, Sink, GroupBy, \
Limit, Intersection, Difference, Distinct, OrderBy, EmptyRelation, FileScan
from raco.backends.logical import OptLogicalAlgebra
from raco.backends.myria import MyriaLeftDeepTreeAlgebra
from raco.backends.myria import compile_to_json
from raco.backends.myria.catalog import MyriaCatalog
from raco.expression import UnnamedAttributeRef, NamedAttributeRef, COUNT, \
COUNTALL, SUM, AVG, STDEV, MAX, MIN, PYUDF, StringLiteral
from raco.python import convert
from raco.python.exceptions import PythonConvertException
from raco.relation_key import RelationKey
from raco.scheme import Scheme
from raco.types import STRING_TYPE, BOOLEAN_TYPE
from myria.udf import MyriaPythonFunction, MyriaFunction
def _get_column_index(inputs, aliases, attribute):
    """
    Resolve an attribute reference to a positional column reference.

    :param inputs: Input queries with schema used to match against
    :param aliases: An alias for each input for dotted references
    :param attribute: The attribute to map to an index (int position,
        dotted name, bare name, or callable)
    :return: An UnnamedAttributeRef mapped to the given attribute
    """
    # Positional reference, e.g. $0
    if isinstance(attribute, (int, long)):
        return UnnamedAttributeRef(attribute)
    # Dotted reference, e.g. alias.attribute
    elif isinstance(attribute, basestring) and '.' in attribute:
        assert aliases
        alias, attribute = map(str.strip, attribute.split('.'))
        position = aliases.index(alias)
        # Offset by the width of every schema preceding the aliased input.
        preceding = sum(len(q.query.scheme()) for q in inputs[:position])
        # ProjectingJoin will not accept a NamedAttributeRef :(
        return UnnamedAttributeRef(
            preceding + NamedAttributeRef(attribute).get_position(
                inputs[position].query.scheme()))
    # Bare attribute name
    elif isinstance(attribute, basestring):
        combined = sum((q.query.scheme() for q in inputs), Scheme())
        # ProjectingJoin will not accept a NamedAttributeRef :(
        return UnnamedAttributeRef(
            NamedAttributeRef(attribute).get_position(combined))
    # Callable, e.g. lambda t1, t2: t1.attribute
    elif callable(attribute):
        converted = convert(attribute, [q.query.scheme() for q in inputs])
        combined = sum((q.query.scheme() for q in inputs), Scheme())
        return UnnamedAttributeRef(converted.get_position(combined))
def _unique_name(query):
""" Generate a unique relation name """
return 'result_%s' % hashlib.md5(str(query)).hexdigest()
def _create_udf(source_or_ast_or_callable, schema, connection,
                name=None, out_type=None, multivalued=False):
    """Register a Python UDF with Myria and return a PYUDF expression
    applying it to every column of the given schemas."""
    udf_name = name or _unique_name(str(source_or_ast_or_callable))
    udf_type = out_type or STRING_TYPE
    MyriaPythonFunction(source_or_ast_or_callable,
                        str(udf_type),
                        udf_name,
                        multivalued,
                        connection=connection).register()
    # Arguments: one literal per column name across all input schemas.
    column_names = [column
                    for scheme in schema
                    for column in scheme.get_names()]
    return PYUDF(
        StringLiteral(udf_name),
        udf_type,
        *[StringLiteral(column) for column in column_names])
class MyriaFluentQuery(object):
    """A chainable (fluent) builder for Myria relational algebra queries."""

    def __init__(self, parent, query, connection=None):
        """
        Create a new fluent query
        :param parent: The parent fluent instance used to create this one
        :param query: The query associated with this fluent instance
        :param connection: The connection associated with this fluent instance
        """
        self.parent = parent
        self.query = query
        self.connection = connection if connection else parent.connection
        self.catalog = MyriaCatalog(self.connection)
        self.result = None
        self.udfs = [f.to_dict()
                     for f in MyriaFunction.get_all(self.connection)]

    def _scan(self, components):
        """ Scan a relation with the given name components """
        return Scan(RelationKey(*components),
                    MyriaCatalog(self.connection).get_scheme(
                        RelationKey(*components)))

    @staticmethod
    def _load(url, scheme, data_format='CSV', **kwargs):
        """ Scan a file at the given url with the given scheme """
        return FileScan(url, data_format,
                        Scheme(zip(scheme.names, scheme.types)),
                        options=kwargs)

    def _store(self, relation):
        """ Store the result of a query """
        return MyriaFluentQuery(self, Store(
            RelationKey(relation if isinstance(relation, basestring)
                        else relation.name),
            self.query))

    def _empty(self, schema):
        """ Create an empty relation with the given schema """
        return MyriaFluentQuery(self, EmptyRelation(Scheme(zip(schema.names,
                                                               schema.types))))

    def _sink(self):
        """ Abandon the results of a query """
        return MyriaFluentQuery(self, Sink(self.query))

    def _group_refs(self, attributes):
        """ Normalize a scalar-or-list of attributes into column references.

        Accepts None (no grouping), a single attribute, or a list of
        attributes; this is the convention shared by all the aggregate
        methods below (previously duplicated in each of them).
        """
        normalized = ((attributes or [])
                      if isinstance(attributes or [], list)
                      else [attributes])
        return [_get_column_index([self], [], g) for g in normalized]

    def select(self, *args, **kwargs):
        """ Perform a projection over the underlying query """
        types = kwargs.pop('types', {})
        multivalued = kwargs.pop('multivalued', {})
        positional_attributes = (
            [(arg, NamedAttributeRef(arg)) if isinstance(arg, basestring)
             else ('_' + str(index),
                   self._convert(arg,
                                 out_type=types.get(index),
                                 multivalued=multivalued.get(index)))
             for index, arg in enumerate(args)])
        named_attributes = (
            [(n, NamedAttributeRef(v)) if isinstance(v, basestring)
             else (n, self._convert(v,
                                    out_type=types.get(n),
                                    multivalued=multivalued.get(n)))
             for (n, v) in kwargs.items()])
        return MyriaFluentQuery(self,
                                Apply(positional_attributes + named_attributes,
                                      self.query))

    def where(self, predicate):
        """ Filter the query given a predicate """
        return MyriaFluentQuery(self, Select(
            self._convert(predicate, out_type=BOOLEAN_TYPE),
            self.query))

    def product(self, other):
        """ Generate the cross product of two queries """
        return MyriaFluentQuery(self, CrossProduct(left=self.query,
                                                   right=other.query))

    def join(self, other, predicate=None, aliases=None, projection=None):
        """
        Join two queries
        :param other: The query to join on
        :param predicate: A predicate used to select tuples in the result
        :param aliases: A set of input aliases for attribute selection
        :param projection: A set of columns to output from the join result
        """
        if not predicate:
            return self.product(other)

        attributes = [_get_column_index([self, other], aliases, attribute)
                      for attribute in projection or
                      xrange(len(self.query.scheme() + other.query.scheme()))]
        predicate = self._convert(predicate,
                                  [self.query.scheme(), other.query.scheme()],
                                  out_type=BOOLEAN_TYPE)
        return MyriaFluentQuery(
            self,
            ProjectingJoin(
                condition=predicate,
                output_columns=attributes,
                left=self.query,
                right=other.query))

    def count(self, attribute=None, groups=None):
        """ Count the tuples in the query """
        return MyriaFluentQuery(self, GroupBy(
            input=self.query,
            grouping_list=self._group_refs(groups),
            aggregate_list=[COUNT(_get_column_index([self], [], attribute))
                            if attribute else COUNTALL()]))

    def sum(self, attribute, groups=None):
        """ Generate the sum of an attribute """
        return MyriaFluentQuery(self, GroupBy(
            input=self.query,
            grouping_list=self._group_refs(groups),
            aggregate_list=[SUM(_get_column_index([self], [], attribute))]))

    def mean(self, attribute, groups=None):
        """ Generate the arithmetic mean of an attribute"""
        return MyriaFluentQuery(self, GroupBy(
            input=self.query,
            grouping_list=self._group_refs(groups),
            aggregate_list=[AVG(_get_column_index([self], [], attribute))]))

    def average(self, attribute, groups=None):
        """ Generate the arithmetic mean of an attribute """
        return self.mean(attribute, groups)

    def stdev(self, attribute, groups=None):
        """ Generate the standard deviation of an attribute """
        return MyriaFluentQuery(self, GroupBy(
            input=self.query,
            grouping_list=self._group_refs(groups),
            aggregate_list=[STDEV(_get_column_index([self], [], attribute))]))

    def max(self, attribute, groups=None):
        """ Generate the maximum value of an attribute """
        return MyriaFluentQuery(self, GroupBy(
            input=self.query,
            grouping_list=self._group_refs(groups),
            aggregate_list=[MAX(_get_column_index([self], [], attribute))]))

    def min(self, attribute, groups=None):
        """ Generate the minimum value of an attribute """
        return MyriaFluentQuery(self, GroupBy(
            input=self.query,
            grouping_list=self._group_refs(groups),
            aggregate_list=[MIN(_get_column_index([self], [], attribute))]))

    def limit(self, n):
        """ Limit the query to n results """
        return MyriaFluentQuery(self, Limit(n, self.query))

    def intersect(self, other):
        """ Generate the intersection of two queries """
        # Bug fix: pass the underlying algebra expression rather than the
        # fluent wrapper itself, mirroring product/__add__/__sub__.
        return MyriaFluentQuery(self, Intersection(self.query, other.query))

    def distinct(self):
        """ Generate the distinct values in a query """
        return MyriaFluentQuery(self, Distinct(self.query))

    def order(self, attribute, *args, **kwargs):
        """
        Order a query by one or more attributes
        :param attribute An attribute on which to order
        :param args: Other attributes on which to order
        :param kwargs: ascending=[True|False]
        """
        attributes = [attribute] + list(args)
        return MyriaFluentQuery(self, OrderBy(
            self.query,
            sort_columns=self._group_refs(attributes),
            ascending=kwargs.get('ascending', True)))

    def __add__(self, other):
        """ Generate the union of tuples in a query """
        return MyriaFluentQuery(self, UnionAll([self.query, other.query]))

    def __sub__(self, other):
        """ Generate the difference of tuples in a query """
        return MyriaFluentQuery(self, Difference(self.query, other.query))

    def __str__(self):
        return str(self.query)

    def __repr__(self):
        return repr(self.query)

    def to_dict(self):
        """ Execute the query and return its result as a dictionary """
        return self.execute().to_dict()

    def to_dataframe(self, index=None):
        """ Execute the query and return its result as a dataframe """
        return self.execute().to_dataframe(index)

    def execute(self, relation=None):
        """
        Execute a query
        :param relation: The name of a relation in which the result is stored
        :return: A MyriaQuery instance that represents the executing query
        """
        from myria.query import MyriaQuery
        if not self.result:
            json = self._store(relation or _unique_name(self.query)).to_json()
            self.result = MyriaQuery.submit_plan(json, self.connection)
        return self.result

    def sink(self):
        """ Execute the query but ignore its results """
        from myria.query import MyriaQuery
        return MyriaQuery.submit_plan(self._sink().to_json(), self.connection)

    def to_json(self):
        """ Convert this query into an optimized JSON plan """
        # TODO deep copy, since optimize mutates
        sequence = Sequence([self.query])
        optimized = compile.optimize(sequence, OptLogicalAlgebra())
        myria = compile.optimize(optimized, MyriaLeftDeepTreeAlgebra())
        return compile_to_json(str(self.query), optimized, myria)

    def _convert(self, source_or_ast_or_callable,
                 scheme=None, out_type=None, multivalued=False):
        """ Convert a predicate/expression, registering a UDF on failure """
        scheme = scheme or [self.query.scheme()]
        try:
            return convert(source_or_ast_or_callable, scheme, udfs=self.udfs)
        except PythonConvertException:
            # Not convertible to native expressions: fall back to a Python UDF.
            udf = _create_udf(source_or_ast_or_callable, scheme,
                              connection=self.connection,
                              out_type=out_type,
                              multivalued=multivalued)
            self.udfs.append([udf.name, len(udf.arguments), udf.typ])
            return udf
|
|
DOMAIN_REGULAR = 'ibsear.ch/'
DOMAIN_NSFW = 'ibsearch.xxx/'
IMAGES_PATH = 'api/v1/images.json'
import random
import asyncio
import io
from urllib.parse import urlencode, unquote
class NoResults(Exception):
    """Raised when a search returns an empty result list."""
    pass
class UnexpectedResponseCode(Exception):
    """Raised with (status_code, body_text) on any non-200 HTTP response."""
    pass
class Image:
    """A single image result returned by the IbSearch API."""

    def __init__(self, loop, baseurl, **kwargs):
        """
        :param loop: asyncio event loop (may be None for sync-only use)
        :param baseurl: API domain this image belongs to
        :param kwargs: raw attribute dict as returned by the API
        """
        self.format = kwargs.get("format")
        self.height = kwargs.get("height")
        self.width = kwargs.get("width")
        self.id = kwargs.get("id")
        self.path = kwargs.get("path")
        self.url = "http://" + kwargs.get("server") + "." + baseurl + self.path
        self.tags = kwargs.get("tags").split()
        self.loop = loop

    @asyncio.coroutine
    def _async_request(self):
        """Asynchronously download and return the raw image bytes."""
        try:
            import aiohttp
        except ImportError:
            raise Exception("Aiohttp has to be installed to use this function.")
        else:
            with aiohttp.ClientSession(loop=self.loop) as session:
                res = yield from session.get(self.url)
                try:
                    if not res.status == 200:
                        raise UnexpectedResponseCode(res.status, (yield from res.text()))
                    result = yield from res.read()
                finally:
                    yield from res.release()
                return result

    def _request(self):
        """Synchronously download and return the raw image bytes."""
        try:
            import requests
        except ImportError:
            print("Requests has to be installed to use this function.")
        else:
            res = requests.get(self.url)
            if not res.status_code == 200:
                raise UnexpectedResponseCode(res.status_code, res.text)
            result = res.content
            return result

    def get_image_bytes(self, async_=False):
        """ Download the image and return the bytes

        With ``async_=True`` this is driven via ``yield from`` and *returns*
        a BytesIO; otherwise it is a generator that *yields* the BytesIO.
        """
        if async_:
            im_bytes = yield from self._async_request()
            bio = io.BytesIO()
            bio.write(im_bytes)
            bio.seek(0)
            return bio
        else:
            im_bytes = self._request()
            bio = io.BytesIO()
            bio.write(im_bytes)
            bio.seek(0)
            yield bio

    def save(self, async_=False, file=None):
        """Download the image and write it to ``file``.

        ``file`` defaults to the basename of the image's path.
        """
        file = file or self.path.split("/")[-1]
        if async_:
            # Bug fix: must request the async variant explicitly. Without
            # async_=True the generator path runs, which yields the buffer
            # instead of returning it, leaving `bio` as None.
            bio = yield from self.get_image_bytes(async_=True)
        else:
            bio = next(self.get_image_bytes())
        with open(file, "wb") as f:
            f.write(bio.read())
        if async_:
            return file
        else:
            yield file
class IbSearch:
    """Thin client for the IbSearch / IbSearch XXX image search APIs."""

    def __init__(self, api_key, loop=None):
        """
        :param api_key: API key sent as the ``X-IbSearch-Key`` header
        :param loop: optional asyncio event loop used by the async helpers
        """
        self.api_key = api_key
        self.headers = {'X-IbSearch-Key': api_key}
        self.session = None
        # Bug fix: always define the attribute. The synchronous search path
        # also reads self.loop when constructing Image objects, which raised
        # AttributeError whenever no loop was supplied.
        self.loop = loop

    def build_url(self, url, params):
        """ Due to `params` as kwarg being % escaped """
        return url + "?" + unquote(urlencode(params))

    @asyncio.coroutine
    def _async_request(self, url, params=None):
        """Asynchronous GET returning the decoded JSON body."""
        params = params or {}
        url = self.build_url(url, params)
        try:
            import aiohttp
        except ImportError:
            raise Exception("Aiohttp has to be installed to use this function.")
        else:
            with aiohttp.ClientSession(loop=self.loop) as session:
                res = yield from session.get(url, params=params, headers=self.headers)
                try:
                    if not res.status == 200:
                        raise UnexpectedResponseCode(res.status, (yield from res.text()))
                    result = yield from res.json()
                finally:
                    yield from res.release()
                return result

    def _request(self, url, params=None):
        """Synchronous GET returning the decoded JSON body."""
        params = params or {}
        url = self.build_url(url, params)
        try:
            import requests
        except ImportError:
            print("Requests has to be installed to use this function.")
        else:
            # Bug fix: the HTTP request itself was missing, so `res` was
            # referenced before assignment (NameError).
            res = requests.get(url, headers=self.headers)
            if not res.status_code == 200:
                raise UnexpectedResponseCode(res.status_code, res.text)
            result = res.json()
            return result

    @staticmethod
    def _build_params(query, limit, page, shuffle, shuffle_limit):
        """Build the querystring parameters for a search request."""
        params = {
            'q': query.replace(" ", "+")
        }
        if limit:
            params['limit'] = limit
        # Bug fix: `page is not 1` compared identity, relying on CPython's
        # small-int cache; use value inequality instead.
        if page != 1:
            params['page'] = page
        if shuffle:
            params['shuffle'] = True
        if shuffle_limit:
            params['shuffle'] = shuffle_limit
        # Because this does not appear to work on IbSearch's end at the time
        # of writing, it needs to be done locally (see below).
        return params

    @asyncio.coroutine
    def async_search(self, *args, **kwargs):
        """Coroutine wrapper around :meth:`search`.

        Bug fix: forward keyword arguments and propagate the returned image
        list; the original discarded the result of ``search``.
        """
        kwargs['async_'] = True
        return (yield from self.search(*args, **kwargs))

    def search(self, query, *, limit=None, page=1, nsfw_allowed=False,
               shuffle=False, shuffle_limit=None, async_=False):
        """Search for images.

        In sync mode this is a generator that yields the image list once;
        in async mode it is driven via ``yield from`` and returns the list.
        Raises NoResults when the API returns an empty result set.
        """
        if nsfw_allowed:
            domain = DOMAIN_NSFW
        else:
            domain = DOMAIN_REGULAR
        params = self._build_params(query, limit, page, shuffle, shuffle_limit)
        url = "http://" + domain + IMAGES_PATH
        if async_:
            result = yield from self._async_request(url, params)
        else:
            result = self._request(url, params)
        if len(result) == 0:
            raise NoResults
        images = [Image(self.loop, domain, **d) for d in result]
        if shuffle:
            # Shuffle locally; see the note in _build_params.
            images = self.shuffle(images, shuffle_limit)
        if async_:
            return images
        else:
            yield images

    def shuffle(self, images, limit):
        """Shuffle the image list in place, optionally truncating it."""
        random.shuffle(images)
        if limit:
            try:
                images = images[0:limit]
            except IndexError:
                pass  # Just return the shuffled list
        return images

    def get_random_image(self, query, nsfw_allowed=False, async_=False):
        """Return a single random image matching the query."""
        if async_:
            return self.async_get_random_image(query, nsfw_allowed=nsfw_allowed)
        image_list = self.search(query, limit=100, nsfw_allowed=nsfw_allowed, shuffle=True,
                                 shuffle_limit=1, async_=False)
        try:
            image = next(image_list)[0]
            return image
        except IndexError:
            # Not supposed to happen but here just in case
            raise NoResults

    @asyncio.coroutine
    def async_get_random_image(self, query, nsfw_allowed=False):
        """Coroutine returning a single random image matching the query."""
        image_list = yield from self.search(query, limit=100, nsfw_allowed=nsfw_allowed,
                                            shuffle=True, shuffle_limit=1, async_=True)
        try:
            image = image_list[0]
            return image
        except IndexError:
            # Not supposed to happen but here just in case
            raise NoResults
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pprint
import sys
import uuid
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from google.cloud import pubsub_v1
from impl.database.database import JsonDatabase
PROJECT_ID = os.environ['GOOGLE_CLOUD_PROJECT']
PUBSUB_SUBSCRIPTION = 'codelab'
PROCUREMENT_API = 'cloudcommerceprocurement'
def _generate_internal_account_id():
### TODO: Replace with whatever ID generation code already exists. ###
return str(uuid.uuid4())
class Procurement(object):
    """Utilities for interacting with the Procurement API."""

    def __init__(self, database):
        self.service = build(PROCUREMENT_API, 'v1', cache_discovery=False)
        self.database = database

    ##########################
    ### Account operations ###
    ##########################

    def _get_account_id(self, name):
        """Extract the bare account id from a fully-qualified resource name."""
        return name[len('providers/DEMO-{}/accounts/'.format(PROJECT_ID)):]

    def _get_account_name(self, account_id):
        """Build the fully-qualified resource name for an account."""
        return 'providers/DEMO-{}/accounts/{}'.format(PROJECT_ID,
                                                      account_id)

    def get_account(self, account_id):
        """Gets an account from the Procurement Service.

        Returns None only when the account does not exist (HTTP 404).
        """
        name = self._get_account_name(account_id)
        request = self.service.providers().accounts().get(name=name)
        try:
            response = request.execute()
            return response
        except HttpError as err:
            if err.resp.status == 404:
                return None
            # Bug fix: previously any non-404 error fell through and
            # silently returned None, making transient API failures look
            # like deleted accounts.
            raise

    def approve_account(self, account_id):
        """Approves the account in the Procurement Service."""
        name = self._get_account_name(account_id)
        request = self.service.providers().accounts().approve(
            name=name, body={'approvalName': 'signup'})
        request.execute()

    def handle_account_message(self, message):
        """Handles incoming Pub/Sub messages about account resources."""
        account_id = message['id']
        customer = self.database.read(account_id)
        account = self.get_account(account_id)

        ############################## IMPORTANT ##############################
        ### In true integrations, Pub/Sub messages for new accounts should ###
        ### be ignored. Account approvals are granted as a one-off action  ###
        ### during customer sign up. This codelab does not include the sign###
        ### up flow, so it chooses to approve accounts here instead.       ###
        ### Production code for real, non-codelab services should never    ###
        ### blindly approve these. The following should be done as a result###
        ### of a user signing up.                                          ###
        #######################################################################
        if account:
            approval = None
            for account_approval in account['approvals']:
                if account_approval['name'] == 'signup':
                    approval = account_approval
                    break

            if approval:
                if approval['state'] == 'PENDING':
                    # See above note. Actual production integrations should not
                    # approve blindly when receiving a message.
                    self.approve_account(account_id)
                elif approval['state'] == 'APPROVED':
                    # Now that it's approved, store a record in the database.
                    internal_id = _generate_internal_account_id()
                    customer = {
                        'procurement_account_id': account_id,
                        'internal_account_id': internal_id,
                        'products': {}
                    }
                    self.database.write(account_id, customer)
        else:
            # The account has been deleted, so delete the database record.
            if customer:
                self.database.delete(account_id)

        # Always ack account messages. We only care about the above scenarios.
        return True

    ##############################
    ### Entitlement operations ###
    ##############################

    def _get_entitlement_name(self, entitlement_id):
        """Build the fully-qualified resource name for an entitlement."""
        return 'providers/DEMO-{}/entitlements/{}'.format(PROJECT_ID,
                                                          entitlement_id)

    def get_entitlement(self, entitlement_id):
        """Gets an entitlement from the Procurement Service.

        Returns None only when the entitlement does not exist (HTTP 404).
        """
        name = self._get_entitlement_name(entitlement_id)
        request = self.service.providers().entitlements().get(name=name)
        try:
            response = request.execute()
            return response
        except HttpError as err:
            if err.resp.status == 404:
                return None
            # Bug fix: re-raise non-404 errors instead of silently
            # returning None (see get_account).
            raise

    def approve_entitlement(self, entitlement_id):
        """Approves the entitlement in the Procurement Service."""
        name = self._get_entitlement_name(entitlement_id)
        request = self.service.providers().entitlements().approve(
            name=name, body={})
        request.execute()

    def approve_entitlement_plan_change(self, entitlement_id, new_pending_plan):
        """Approves the entitlement plan change in the Procurement Service."""
        name = self._get_entitlement_name(entitlement_id)
        body = {'pendingPlanName': new_pending_plan}
        request = self.service.providers().entitlements().approvePlanChange(
            name=name, body=body)
        request.execute()

    def handle_active_entitlement(self, entitlement, customer, account_id):
        """Updates the database to match the active entitlement."""
        product = {
            'product_id': entitlement['product'],
            'plan_id': entitlement['plan'],
            'start_time': entitlement['createTime'],
        }
        if 'usageReportingId' in entitlement:
            product['consumer_id'] = entitlement['usageReportingId']
        customer['products'][entitlement['product']] = product

        ### TODO: Set up the service for the customer to use. ###

        self.database.write(account_id, customer)

    def handle_entitlement_message(self, message, event_type):
        """Handles incoming Pub/Sub messages about entitlement resources.

        Returns True when the message should be acked.
        """
        entitlement_id = message['id']

        entitlement = self.get_entitlement(entitlement_id)
        if not entitlement:
            # Do nothing. The entitlement has to be canceled to be deleted, so
            # this has already been handled by a cancellation message.
            return True

        account_id = self._get_account_id(entitlement['account'])
        customer = self.database.read(account_id)
        state = entitlement['state']

        if not customer:
            # If the record for this customer does not exist, don't ack the
            # message and wait until an account message is handled and a record
            # is created.
            return False

        if event_type == 'ENTITLEMENT_CREATION_REQUESTED':
            if state == 'ENTITLEMENT_ACTIVATION_REQUESTED':
                # Approve the entitlement and wait for another message for when
                # it becomes active before setting up the service for the
                # customer and updating our records.
                self.approve_entitlement(entitlement_id)
                return True

        elif event_type == 'ENTITLEMENT_ACTIVE':
            if state == 'ENTITLEMENT_ACTIVE':
                # Handle an active entitlement by writing to the database.
                self.handle_active_entitlement(entitlement, customer,
                                               account_id)
                return True

        elif event_type == 'ENTITLEMENT_PLAN_CHANGE_REQUESTED':
            if state == 'ENTITLEMENT_PENDING_PLAN_CHANGE_APPROVAL':
                # Don't write anything to our database until the entitlement
                # becomes active within the Procurement Service.
                self.approve_entitlement_plan_change(
                    entitlement_id, entitlement['newPendingPlan'])
                return True

        elif event_type == 'ENTITLEMENT_PLAN_CHANGED':
            if state == 'ENTITLEMENT_ACTIVE':
                # Handle an active entitlement after a plan change.
                self.handle_active_entitlement(entitlement, customer,
                                               account_id)
                return True

        elif event_type == 'ENTITLEMENT_PLAN_CHANGE_CANCELLED':
            # Do nothing. We approved the original change, but we never recorded
            # it or changed the service level since it hadn't taken effect yet.
            return True

        elif event_type == 'ENTITLEMENT_CANCELLED':
            if state == 'ENTITLEMENT_CANCELLED':
                # Clear out our records of the customer's plan.
                if entitlement['product'] in customer['products']:
                    del customer['products'][entitlement['product']]

                ### TODO: Turn off customer's service. ###

                self.database.write(account_id, customer)
                return True

        elif event_type == 'ENTITLEMENT_PENDING_CANCELLATION':
            # Do nothing. We want to cancel once it's truly canceled. For now
            # it's just set to not renew at the end of the billing cycle.
            return True

        elif event_type == 'ENTITLEMENT_CANCELLATION_REVERTED':
            # Do nothing. The service was already active, but now it's set to
            # renew automatically at the end of the billing cycle.
            return True

        elif event_type == 'ENTITLEMENT_DELETED':
            # Do nothing. The entitlement has to be canceled to be deleted, so
            # this has already been handled by a cancellation message.
            return True

        return False
def main(argv):
    """Main entrypoint to the integration with the Procurement Service."""
    if len(argv) != 1:
        print('Usage: python -m impl.step_5_entitlement_cancel.app')
        return

    # Construct a service for the Partner Procurement API.
    procurement = Procurement(JsonDatabase())

    # Get the subscription object in order to perform actions on it.
    subscriber = pubsub_v1.SubscriberClient()
    subscription_path = subscriber.subscription_path(PROJECT_ID,
                                                     PUBSUB_SUBSCRIPTION)

    def callback(message):
        """Callback for handling Cloud Pub/Sub messages."""
        payload = json.loads(message.data)

        print('Received message:')
        pprint.pprint(payload)
        print()

        if 'entitlement' in payload:
            should_ack = procurement.handle_entitlement_message(
                payload['entitlement'], payload['eventType'])
        elif 'account' in payload:
            should_ack = procurement.handle_account_message(payload['account'])
        else:
            # If there's no account or entitlement, then just ack and ignore
            # the message. This should never happen.
            should_ack = True

        if should_ack:
            message.ack()

    subscription = subscriber.subscribe(subscription_path, callback=callback)
    print('Listening for messages on {}'.format(subscription_path))
    print('Exit with Ctrl-\\')

    # Block forever, restarting the listen on any streaming error.
    while True:
        try:
            subscription.result()
        except Exception as exception:
            print('Listening for messages on {} threw an Exception: {}.'.format(
                subscription_path, exception))
# Script entrypoint: start the Pub/Sub listener loop.
if __name__ == '__main__':
    main(sys.argv)
|
|
"""
Tasks for volume operations.
"""
import re
import time
from datetime import datetime
from celery import current_app as app
from celery.result import allow_join_result
from celery.decorators import task
from celery import chain
from threepio import celery_logger
from rtwo.driver import EucaDriver, OSDriver
from rtwo.exceptions import LibcloudDeploymentError
from atmosphere.settings.local import ATMOSPHERE_PRIVATE_KEYFILE
from core.email import send_instance_email
from service.driver import get_driver
from service.deploy import mount_volume, check_volume, mkfs_volume,\
check_mount, umount_volume, lsof_location
from service.exceptions import DeviceBusyException
@task(name="check_volume_task",
      max_retries=0,
      default_retry_delay=20,
      ignore_result=False)
def check_volume_task(driverCls, provider, identity,
                      instance_id, volume_id, *args, **kwargs):
    """Verify an attached volume is visible on the instance and carries a
    filesystem, running mkfs on the device when it is blank.

    Remote checks are executed over SSH via ``driver.deploy_to``; extra
    kwargs are forwarded to it. Deployment errors are logged and swallowed;
    any other error is routed through celery's retry machinery.
    """
    try:
        celery_logger.debug("check_volume task started at %s." % datetime.now())
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        volume = driver.get_volume(volume_id)
        # The first attachment record holds the device path (e.g. /dev/vdc).
        attach_data = volume.extra['attachments'][0]
        device = attach_data['device']

        private_key = ATMOSPHERE_PRIVATE_KEYFILE
        kwargs.update({'ssh_key': private_key})
        kwargs.update({'timeout': 120})

        # One script to make two checks:
        # 1. Volume exists 2. Volume has a filesystem
        cv_script = check_volume(device)
        # NOTE: non_zero_deploy needed to stop LibcloudDeploymentError from being
        # raised
        kwargs.update({'deploy': cv_script,
                       'non_zero_deploy': True})
        driver.deploy_to(instance, **kwargs)
        # Drop the flag so later deploys fail loudly again.
        kwargs.pop('non_zero_deploy', None)
        # Script execute
        if cv_script.exit_status != 0:
            if 'No such file' in cv_script.stdout:
                raise Exception('Volume check failed: %s. '
                                'Device %s does not exist on instance %s'
                                % (volume, device, instance))
            elif 'Bad magic number' in cv_script.stdout:
                # Filesystem needs to be created for this device
                celery_logger.info("Mkfs needed")
                mkfs_script = mkfs_volume(device)
                kwargs.update({'deploy': mkfs_script})
                driver.deploy_to(instance, **kwargs)
            else:
                raise Exception('Volume check failed: Something weird')

        celery_logger.debug("check_volume task finished at %s." % datetime.now())
    except LibcloudDeploymentError as exc:
        # Deployment failures are logged but not retried.
        celery_logger.exception(exc)
    except Exception as exc:
        # max_retries=0, so this re-raises through celery rather than looping.
        celery_logger.warn(exc)
        check_volume_task.retry(exc=exc)
def _parse_mount_location(mount_output, device_location):
"""
GENERAL ASSUMPTION:
Mount output is ALWAYS the same, and it looks like this:
<DEV_LOCATION> on <MOUNT_LOCATION> type (Disk Specs ...)
By splitting ' on ' AND ' type '
we can always retrieve <MOUNT_LOCATION>
"""
for line in mount_output.split("\n"):
if device_location not in line:
continue
before_text_idx = line.find(" on ") + 4
after_text_idx = line.find(" type ")
if before_text_idx == -1 or after_text_idx == -1:
return ""
return line[before_text_idx:after_text_idx]
@task(name="mount_task",
      max_retries=0,
      default_retry_delay=20,
      ignore_result=False)
def mount_task(driverCls, provider, identity, instance_id, volume_id,
               device=None, mount_location=None, *args, **kwargs):
    """Mount an attached volume on `instance_id` and return the mount location.

    Returns early (with the existing location) when the device is already
    mounted; otherwise picks the next free /vol<N> directory when no
    `mount_location` is given.
    """
    try:
        celery_logger.debug("mount task started at %s." % datetime.now())
        celery_logger.debug("mount_location: %s" % (mount_location, ))
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        volume = driver.get_volume(volume_id)
        username = identity.get_username()
        # DEV NOTE: Set as 'users' because this is a GUARANTEED group
        # and we know our 'user' will exist (if atmo_init_full was executed)
        # in case the VM does NOT rely on iPlant LDAP
        groupname = "users"
        celery_logger.debug(volume)
        try:
            attach_data = volume.extra['attachments'][0]
            if not device:
                device = attach_data['device']
        # BUGFIX: was `except KeyError as IndexError:`, which only caught
        # KeyError and shadowed the IndexError builtin. Catch both, since
        # either the 'attachments' key or element [0] may be missing.
        except (KeyError, IndexError):
            celery_logger.warn("Volume %s missing attachments in Extra"
                               % (volume,))
            device = None
        if not device:
            celery_logger.warn("Device never attached. Nothing to mount")
            return None
        # NOTE(review): hard-coded keyfile; check_volume_task uses
        # ATMOSPHERE_PRIVATE_KEYFILE instead -- confirm which is intended.
        private_key = "/opt/dev/atmosphere/extras/ssh/id_rsa"
        kwargs.update({'ssh_key': private_key})
        kwargs.update({'timeout': 120})
        # Step 2. Check the volume is not already mounted
        cm_script = check_mount()
        kwargs.update({'deploy': cm_script})
        driver.deploy_to(instance, **kwargs)
        if device in cm_script.stdout:
            mount_location = _parse_mount_location(cm_script.stdout, device)
            if not mount_location:
                raise Exception("Device already mounted, "
                                "but mount location could not be determined!"
                                "Check _parse_mount_location()!")
            celery_logger.warn(
                "Device already mounted. Mount output:%s" %
                cm_script.stdout)
            # Device has already been mounted. Move along..
            return mount_location
        # Step 3. Find a suitable location to mount the volume
        celery_logger.info("Original mount location - %s" % mount_location)
        if not mount_location:
            # Pick the first /vol<N> not present in the current mount table.
            inc = 1
            while True:
                if '/vol%s' % inc in cm_script.stdout:
                    inc += 1
                else:
                    break
            mount_location = '/vol%s' % inc
        celery_logger.info("Device location - %s" % device)
        celery_logger.info("New mount location - %s" % mount_location)
        mv_script = mount_volume(device, mount_location, username, groupname)
        kwargs.update({'deploy': mv_script})
        driver.deploy_to(instance, **kwargs)
        celery_logger.debug("mount task finished at %s." % datetime.now())
        return mount_location
    except Exception as exc:
        celery_logger.warn(exc)
        mount_task.retry(exc=exc)
@task(name="umount_task",
      max_retries=3,
      default_retry_delay=32,
      ignore_result=False)
def umount_task(driverCls, provider, identity, instance_id,
                volume_id, *args, **kwargs):
    """Unmount the device backing `volume_id` on `instance_id`.

    Raises:
        DeviceBusyException: when the device cannot be unmounted because it
            is in use; carries the mount location and offending processes.
    """
    try:
        celery_logger.debug("umount_task started at %s." % datetime.now())
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        volume = driver.get_volume(volume_id)
        attach_data = volume.extra['attachments'][0]
        device = attach_data['device']
        # Check mount to find the mount_location for device
        private_key = "/opt/dev/atmosphere/extras/ssh/id_rsa"
        kwargs.update({'ssh_key': private_key})
        kwargs.update({'timeout': 120})
        mount_location = None
        cm_script = check_mount()
        kwargs.update({'deploy': cm_script})
        driver.deploy_to(instance, **kwargs)
        # BUGFIX: raw string, so `\w` is a regex class rather than an
        # invalid (deprecated) string escape sequence.
        regex = re.compile(r"(?P<device>[\w/]+) on (?P<location>.*) type")
        for line in cm_script.stdout.split('\n'):
            res = regex.search(line)
            if not res:
                continue
            search_dict = res.groupdict()
            dev_found = search_dict['device']
            if device == dev_found:
                mount_location = search_dict['location']
                break
        # Volume not mounted, move along..
        if not mount_location:
            return
        um_script = umount_volume(device)
        kwargs.update({'deploy': um_script})
        driver.deploy_to(instance, **kwargs)
        if 'is busy' in um_script.stdout:
            # Show all processes that are making device busy..
            lsof_script = lsof_location(mount_location)
            kwargs.update({'deploy': lsof_script})
            driver.deploy_to(instance, **kwargs)
            # BUGFIX: raw string for the same escape-sequence reason.
            regex = re.compile(r"(?P<name>[\w]+)\s*(?P<pid>[\d]+)")
            offending_processes = []
            for line in lsof_script.stdout.split('\n'):
                res = regex.search(line)
                if not res:
                    continue
                search_dict = res.groupdict()
                offending_processes.append(
                    (search_dict['name'], search_dict['pid']))
            raise DeviceBusyException(mount_location, offending_processes)
        # Return here if no errors occurred..
        celery_logger.debug("umount_task finished at %s." % datetime.now())
    except DeviceBusyException:
        raise
    except Exception as exc:
        celery_logger.warn(exc)
        umount_task.retry(exc=exc)
@task(name="attach_task",
      default_retry_delay=20,
      ignore_result=False,
      max_retries=1)
def attach_task(driverCls, provider, identity, instance_id, volume_id,
                device_choice=None, *args, **kwargs):
    """Attach `volume_id` to `instance_id` and return the device path.

    Polls (with exponential backoff, up to ~1 minute) until the volume
    leaves the 'attaching' state. Returns the device path, or None when the
    volume disappears or no device can be determined.
    """
    try:
        celery_logger.debug("attach_task started at %s." % datetime.now())
        driver = get_driver(driverCls, provider, identity)
        from service.volume import attach_volume  # TODO: Test pulling this up -- out of band
        attach_volume(driver, instance_id, volume_id, device_choice=device_choice)
        # When the result returns the volume will be 'attaching'.
        # We can't do anything until the volume is 'available/in-use'.
        attempts = 0
        while True:
            volume = driver.get_volume(volume_id)
            # Give up if you can't find the volume
            if not volume:
                return None
            if attempts > 6:  # After 6 attempts (~1min)
                break
            # Openstack Check
            if isinstance(driver, OSDriver) and\
                'attaching' not in volume.extra.get('status', ''):
                break
            # Eucalyptus check (same status convention as Openstack here)
            if isinstance(driver, EucaDriver) and\
                'attaching' not in volume.extra.get('status', ''):
                break
            # Exponential backoff..
            attempts += 1
            sleep_time = 2**attempts
            celery_logger.debug("Volume %s is not ready (%s). Sleep for %s"
                         % (volume.id, volume.extra.get('status', 'no-status'),
                            sleep_time))
            time.sleep(sleep_time)
        # Still 'available' after the wait => the attach never took effect.
        if 'available' in volume.extra.get('status', ''):
            raise Exception("Volume %s failed to attach to instance %s"
                            % (volume.id, instance_id))
        # Device path for euca == openstack
        try:
            attach_data = volume.extra['attachments'][0]
            device = attach_data['device']
        except (IndexError, KeyError) as bad_fetch:
            celery_logger.warn("Could not find 'device' in "
                        "volume.extra['attachments']: "
                        "Volume:%s Extra:%s" % (volume.id, volume.extra))
            device = None

        celery_logger.debug("attach_task finished at %s." % datetime.now())
        return device
    except Exception as exc:
        celery_logger.exception(exc)
        attach_task.retry(exc=exc)
@task(name="detach_task",
      max_retries=1,
      default_retry_delay=20,
      ignore_result=False)
def detach_task(driverCls, provider, identity,
                instance_id, volume_id, *args, **kwargs):
    """Detach `volume_id` from `instance_id`.

    Polls (with exponential backoff, up to ~1 minute) until the volume
    leaves the 'detaching' state; raises if it settles back to 'in-use'.
    """
    try:
        celery_logger.debug("detach_task started at %s." % datetime.now())
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        volume = driver.get_volume(volume_id)
        driver.detach_volume(volume)
        # When the result returns the volume will be 'detaching'.
        # We will ensure the volume does not return to 'in-use'.
        attempts = 0
        while True:
            volume = driver.get_volume(volume_id)
            if attempts > 6:  # After 6 attempts (~1min)
                break
            # The Openstack way
            if isinstance(driver, OSDriver)\
                    and 'detaching' not in volume.extra['status']:
                break
            # The Eucalyptus way
            attach_data = volume.extra['attachments'][0]
            if isinstance(driver, EucaDriver) and attach_data\
                    and 'detaching' not in attach_data.get('status'):
                break
            # Exponential backoff..
            attempts += 1
            sleep_time = 2**attempts
            celery_logger.debug("Volume %s is not ready (%s). Sleep for %s"
                                % (volume.id, volume.extra['status'],
                                   sleep_time))
            time.sleep(sleep_time)
        if 'in-use' in volume.extra['status']:
            raise Exception("Failed to detach Volume %s to instance %s"
                            % (volume, instance))
        celery_logger.debug("detach_task finished at %s." % datetime.now())
    except DeviceBusyException:
        # We should NOT retry if the device is busy
        raise
    except Exception as exc:
        # If the volume is NOT attached, do not retry.
        # BUGFIX: `exc.message` does not exist on Python 3 exceptions;
        # inspect str(exc) instead.
        if 'Volume is not attached' in str(exc):
            return
        celery_logger.exception(exc)
        detach_task.retry(exc=exc)
@task(name="update_mount_location", max_retries=2, default_retry_delay=15)
def update_mount_location(new_mount_location,
                          driverCls, provider, identity,
                          volume_alias):
    """Record `new_mount_location` in the volume's 'mount_location' metadata."""
    from service import volume as volume_service
    try:
        celery_logger.debug(
            "update_mount_location task started at %s." %
            datetime.now())
        driver = get_driver(driverCls, provider, identity)
        volume = driver.get_volume(volume_alias)
        if not volume:
            return
        if not new_mount_location:
            return
        # BUGFIX: the completion log used to sit *after* the return and was
        # unreachable; log before returning. Also dropped the unused
        # `volume_metadata` local.
        result = volume_service._update_volume_metadata(
            driver, volume,
            metadata={'mount_location': new_mount_location})
        celery_logger.debug(
            "update_mount_location task finished at %s." %
            datetime.now())
        return result
    except Exception as exc:
        celery_logger.exception(exc)
        update_mount_location.retry(exc=exc)
@task(name="update_volume_metadata", max_retries=2, default_retry_delay=15)
def update_volume_metadata(driverCls, provider,
                           identity, volume_alias,
                           metadata):
    """Apply the given metadata dict to the volume identified by `volume_alias`."""
    from service import volume as volume_service
    try:
        celery_logger.debug(
            "update_volume_metadata task started at %s." %
            datetime.now())
        driver = get_driver(driverCls, provider, identity)
        volume = driver.get_volume(volume_alias)
        if not volume:
            return
        # BUGFIX: the completion log used to sit *after* the return and was
        # unreachable; log before returning.
        result = volume_service._update_volume_metadata(
            driver, volume,
            metadata=metadata)
        celery_logger.debug("volume_metadata task finished at %s." % datetime.now())
        return result
    except Exception as exc:
        celery_logger.exception(exc)
        update_volume_metadata.retry(exc=exc)
# Deploy and Destroy tasks
@task(name="mount_failed")
def mount_failed(task_uuid, driverCls, provider, identity, volume_id,
                 unmount=False, **celery_task_args):
    """Errback: tag the volume's metadata with a mount/umount error status.

    Looks up the failed task's result/traceback for logging, then writes
    'tmp_status' = 'mount_error' (or 'umount_error' when `unmount` is True).
    """
    from service import volume as volume_service
    try:
        celery_logger.debug("mount_failed task started at %s." % datetime.now())
        celery_logger.info("task_uuid=%s" % task_uuid)
        result = app.AsyncResult(task_uuid)
        with allow_join_result():
            exc = result.get(propagate=False)
        err_str = "Mount Error Traceback:%s" % (result.traceback,)
        celery_logger.error(err_str)
        driver = get_driver(driverCls, provider, identity)
        volume = driver.get_volume(volume_id)
        if unmount:
            tmp_status = 'umount_error'
        else:
            tmp_status = 'mount_error'
        # BUGFIX: the completion log used to sit *after* the return and was
        # unreachable; log before returning.
        update = volume_service._update_volume_metadata(
            driver, volume,
            metadata={'tmp_status': tmp_status})
        celery_logger.debug("mount_failed task finished at %s." % datetime.now())
        return update
    except Exception as exc:
        celery_logger.warn(exc)
        mount_failed.retry(exc=exc)
|
|
from baseModule import SimpleBlackbox, BaseImplementation, execType
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QLineEdit, QCheckBox, QSpacerItem, QSizePolicy
import os
__nodes__ = ["IfThenElse", "FileWatch", "MonitorName", "Compare", "Bool", "String"]
class IfThenElseImplementation(BaseImplementation):
    """Runtime half of the If-Then-Else node: routes execution by condition."""

    def defineIO(self):
        # Expose `execute` under the node's "exec" input.
        self.registerFunc("exec", self.execute)

    def execute(self):
        # Fire exactly one of the two exec outputs based on the condition input.
        branch = "true" if self.getReturnOfFirstFunction("conditionin") else "false"
        self.fireExec(branch)
class IfThenElse(SimpleBlackbox):
    """Node definition for If-Then-Else: exec + condition in, two exec outs."""
    author = "DrLuke"
    name = "If Then Else"
    modulename = "drluke.builtin.ifthenelse"
    Category = ["Builtin"]
    placeable = True
    # Runtime counterpart class instantiated per placed node.
    implementation = IfThenElseImplementation

    def defineIO(self):
        self.addInput(execType, "exec", "Execute")
        # NOTE(review): an empty type list appears to mean "any type" --
        # confirm against SimpleBlackbox.addInput.
        self.addInput([], "conditionin", "Condition")
        self.addOutput(execType, "true", "True")
        self.addOutput(execType, "false", "False")
class FileWatchImplementation(BaseImplementation):
    """Runtime half of the Filewatch node: caches file content pushed from the UI."""

    def __init__(self, *args, **kwargs):
        super(FileWatchImplementation, self).__init__(*args, **kwargs)
        self.changed = False      # one-shot "file changed" flag
        self.filecontent = ""     # latest file content received

    def defineIO(self):
        self.registerFunc("change", self.changeOut)
        self.registerFunc("file", lambda: self.filecontent)

    def changeOut(self):
        # Report the flag once, then clear it so a change only fires one time.
        was_changed, self.changed = self.changed, False
        return was_changed

    def receiveNodedata(self, data):
        if "filecontent" in data:
            self.filecontent = data["filecontent"]
            self.changed = True
class FileWatch(SimpleBlackbox):
    """Node that polls a file path and outputs its content when it changes."""
    author = "DrLuke"
    name = "Filewatch"
    modulename = "drluke.builtin.filewatch"
    Category = ["Builtin"]
    placeable = True
    implementation = FileWatchImplementation

    def __init__(self, *args, **kwargs):
        super(FileWatch, self).__init__(*args, **kwargs)
        # Watched path, mtime at the last read, and cached file content.
        self.filePath = ""
        self.lastEdited = 0
        self.fileContent = ""
        # Properties panel: a single line edit for the file path.
        self.propertiesWidget = QWidget()
        self.vlayout = QVBoxLayout()
        self.lineEdit = QLineEdit()
        self.lineEdit.textChanged.connect(self.lineEditTextChanges)
        self.vlayout.addWidget(self.lineEdit)
        self.vlayout.addItem(QSpacerItem(40, 20, QSizePolicy.Minimum, QSizePolicy.Expanding))
        self.propertiesWidget.setLayout(self.vlayout)
        # Poll the watched file every 200 ms.
        self.timer = QTimer()
        self.timer.timeout.connect(self.checkFileChange)
        self.timer.start(200)

    def getPropertiesWidget(self):
        return self.propertiesWidget

    def lineEditTextChanges(self, text):
        # Path edited in the UI: adopt it and check immediately.
        self.filePath = text
        self.checkFileChange()

    def defineIO(self):
        self.addOutput(bool, "change", "File Changed")
        self.addOutput(str, "file", "File content")

    def checkFileChange(self):
        # Re-read the file when its mtime advances past the last seen value,
        # then push the new content to all implementations.
        if os.path.exists(self.filePath) and os.path.isfile(self.filePath):
            if os.path.getmtime(self.filePath) > self.lastEdited:
                self.lastEdited = os.path.getmtime(self.filePath)
                with open(self.filePath, "r") as f:
                    self.fileContent = f.read()
                self.sendDataToImplementations({"filecontent": self.fileContent})
        else:
            # Path vanished or is not a regular file: reset so a reappearing
            # file is treated as changed.
            self.lastEdited = 0

    def serialize(self):
        return {"filepath": self.filePath}

    def deserialize(self, data):
        if "filepath" in data:
            self.filePath = data["filepath"]
            self.checkFileChange()
class MonitorNameImplementation(BaseImplementation):
    """Runtime half of the Monitor Name node."""
    def defineIO(self):
        # Serve the runtime's current monitor name on demand.
        self.registerFunc("name", lambda: self.runtime.monitorname)
class MonitorName(SimpleBlackbox):
    """Node that outputs the name of the monitor the sheet is running on."""
    author = "DrLuke"
    name = "Monitor Name"
    modulename = "drluke.builtin.monitorname"
    Category = ["Builtin"]
    placeable = True
    implementation = MonitorNameImplementation

    def defineIO(self):
        self.addOutput(str, "name", "Name")
class CompareImplementation(BaseImplementation):
    """Runtime half of the Compare node: equality of its two inputs."""

    def defineIO(self):
        self.registerFunc("out", self.compare)

    def compare(self):
        # Resolve in1 first, then in2 (same order as before), compare equal.
        return (self.getReturnOfFirstFunction("in1")
                == self.getReturnOfFirstFunction("in2"))
class Compare(SimpleBlackbox):
    """Node definition for Compare: two generic inputs, one bool output."""
    author = "DrLuke"
    name = "Compare"
    modulename = "drluke.builtin.compare"
    Category = ["Builtin"]
    placeable = True
    implementation = CompareImplementation

    def defineIO(self):
        # NOTE(review): an empty type list appears to mean "any type" --
        # confirm against SimpleBlackbox.addInput.
        self.addInput([], "in1", "In 1")
        self.addInput([], "in2", "In 2")
        self.addOutput(bool, "out", "Equal")
class BoolImplementation(BaseImplementation):
    """Runtime half of the Bool node: stores and serves a boolean value."""

    def init(self):
        # Default output until the UI sends a toggle state.
        self.value = True

    def defineIO(self):
        self.registerFunc("boolout", self._currentValue)

    def receiveNodedata(self, data):
        self.value = data

    def _currentValue(self):
        # Accessor handed to registerFunc instead of an inline lambda.
        return self.value
class Bool(SimpleBlackbox):
    """Node that outputs a constant boolean toggled by a properties checkbox."""
    author = "DrLuke"
    name = "Bool"
    modulename = "drluke.builtin.bool"
    Category = ["Builtin"]
    placeable = True
    implementation = BoolImplementation

    def __init__(self, *args, **kwargs):
        super(Bool, self).__init__(*args, **kwargs)
        # Properties panel: a single checkbox controlling the output value.
        self.propertiesWidget = QWidget()
        self.vlayout = QVBoxLayout()
        self.toggle = QCheckBox("Output")
        self.toggle.toggled.connect(self.toggleTrueFalse)
        self.vlayout.addWidget(self.toggle)
        self.vlayout.addItem(QSpacerItem(40, 20, QSizePolicy.Minimum, QSizePolicy.Expanding))
        self.propertiesWidget.setLayout(self.vlayout)

    def toggleTrueFalse(self, checked):
        # FIX: parameter renamed from `bool`, which shadowed the builtin.
        # Qt's toggled signal passes the value positionally, so the rename
        # cannot break callers.
        self.sendDataToImplementations(checked)

    def getPropertiesWidget(self):
        return self.propertiesWidget

    def defineIO(self):
        self.addOutput(bool, "boolout", "Bool out")
class StringImplementation(BaseImplementation):
    """Runtime half of the String node: stores and serves a string value."""

    def init(self):
        # Empty string until the UI pushes edited text.
        self.value = ""

    def defineIO(self):
        self.registerFunc("strout", self._currentValue)

    def receiveNodedata(self, data):
        self.value = data

    def _currentValue(self):
        # Accessor handed to registerFunc instead of an inline lambda.
        return self.value
class String(SimpleBlackbox):
    """Node that outputs a constant string edited in the properties panel."""
    author = "DrLuke"
    name = "String"
    modulename = "drluke.builtin.string"
    Category = ["Builtin"]
    placeable = True
    implementation = StringImplementation

    def __init__(self, *args, **kwargs):
        super(String, self).__init__(*args, **kwargs)
        self.text = ""
        # Properties panel: a single line edit holding the output string.
        self.propertiesWidget = QWidget()
        self.vlayout = QVBoxLayout()
        self.lineEdit = QLineEdit()
        self.lineEdit.textChanged.connect(self.textChanged)
        self.vlayout.addWidget(self.lineEdit)
        self.vlayout.addItem(QSpacerItem(40, 20, QSizePolicy.Minimum, QSizePolicy.Expanding))
        self.propertiesWidget.setLayout(self.vlayout)

    def textChanged(self, text):
        # Mirror the edited text into state and broadcast to implementations.
        self.text = text
        self.sendDataToImplementations(text)

    def getPropertiesWidget(self):
        return self.propertiesWidget

    def defineIO(self):
        self.addOutput(str, "strout", "String out")

    def serialize(self):
        return self.text

    def deserialize(self, data):
        # Restore the saved text and reflect it in the UI (which in turn
        # re-triggers textChanged via the signal connection).
        if type(data) is str:
            self.text = data
            self.lineEdit.setText(self.text)
|
|
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.test import TestCase
from django.urls import reverse
from wagtail.core.models import Page, Site
from wagtail.tests.utils import WagtailTestUtils
class TestSiteIndexView(TestCase, WagtailTestUtils):
    """Tests for the site listing view."""

    def setUp(self):
        self.login()
        self.home_page = Page.objects.get(id=2)

    def get(self, params=None):
        # FIX: avoid a mutable default argument; behavior is unchanged.
        return self.client.get(reverse('wagtailsites:index'), params or {})

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsites/index.html')

    def test_pagination(self):
        # Out-of-range and junk page numbers must not 500.
        pages = ['0', '1', '-1', '9999', 'Not a page']
        for page in pages:
            response = self.get({'p': page})
            self.assertEqual(response.status_code, 200)
class TestSiteCreateView(TestCase, WagtailTestUtils):
    """Tests for the site create view."""

    def setUp(self):
        self.login()
        self.home_page = Page.objects.get(id=2)
        self.localhost = Site.objects.all()[0]

    def get(self, params=None):
        # FIX: avoid a mutable default argument; behavior is unchanged.
        return self.client.get(reverse('wagtailsites:add'), params or {})

    def post(self, post_data=None):
        # FIX: avoid a mutable default argument; behavior is unchanged.
        return self.client.post(reverse('wagtailsites:add'), post_data or {})

    def create_site(self, hostname='testsite', port=80, is_default_site=False, root_page=None):
        # Helper: create a Site directly, bypassing the view under test.
        root_page = root_page or self.home_page
        Site.objects.create(
            hostname=hostname,
            port=port,
            is_default_site=is_default_site,
            root_page=root_page)

    def test_default_fixtures_present(self):
        # we should have loaded with a single site
        self.assertEqual(self.localhost.hostname, 'localhost')
        self.assertEqual(self.localhost.port, 80)
        self.assertEqual(self.localhost.is_default_site, True)
        self.assertEqual(self.localhost.root_page, self.home_page)

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsites/create.html')

    def test_create(self):
        response = self.post({
            'hostname': "testsite",
            'port': "80",
            'root_page': str(self.home_page.id),
        })
        # Should redirect back to index
        self.assertRedirects(response, reverse('wagtailsites:index'))
        # Check that the site was created
        self.assertEqual(Site.objects.filter(hostname='testsite').count(), 1)

    def test_duplicate_defaults_not_allowed(self):
        response = self.post({
            'hostname': "also_default",
            'port': "80",
            'is_default_site': "on",
            'root_page': str(self.home_page.id),
        })
        # Should return the form with errors
        self.assertEqual(response.status_code, 200)
        self.assertEqual(bool(response.context['form'].errors), True)
        # Check that the site was not created
        sites = Site.objects.filter(hostname='also_default')
        self.assertEqual(sites.count(), 0)

    def test_duplicate_hostnames_on_different_ports_allowed(self):
        response = self.post({
            'hostname': "localhost",
            'port': "8000",
            'root_page': str(self.home_page.id),
        })
        # Should redirect back to index
        self.assertRedirects(response, reverse('wagtailsites:index'))
        # Check that the site was created
        self.assertEqual(Site.objects.filter(hostname='localhost').count(), 2)

    def test_duplicate_hostnames_on_same_port_not_allowed(self):
        # Confirm there's one localhost already
        self.assertEqual(Site.objects.filter(hostname='localhost').count(), 1)
        response = self.post({
            'hostname': "localhost",
            'port': "80",
            'root_page': str(self.home_page.id),
        })
        # Should return the form with errors
        self.assertEqual(response.status_code, 200)
        self.assertEqual(bool(response.context['form'].errors), True)
        # Check that the site was not created, still only one localhost entry
        self.assertEqual(Site.objects.filter(hostname='localhost').count(), 1)
class TestSiteEditView(TestCase, WagtailTestUtils):
    """Tests for the site edit view."""

    def setUp(self):
        self.login()
        self.home_page = Page.objects.get(id=2)
        self.localhost = Site.objects.all()[0]

    def get(self, params=None, site_id=None):
        # FIX: avoid a mutable default argument; behavior is unchanged.
        return self.client.get(reverse('wagtailsites:edit', args=(site_id or self.localhost.id, )), params or {})

    def post(self, post_data=None, site_id=None):
        # BUGFIX: the old `post_data={}` default was *mutated* below, so keys
        # (e.g. 'is_default_site') leaked between calls sharing the default
        # dict, and callers' dicts were modified. Work on a copy instead.
        post_data = dict(post_data or {})
        site_id = site_id or self.localhost.id
        site = Site.objects.get(id=site_id)
        # Fill in any field the caller didn't supply from the current site.
        post_defaults = {
            'hostname': site.hostname,
            'port': site.port,
            'root_page': site.root_page.id,
        }
        for k, v in post_defaults.items():
            post_data[k] = post_data.get(k, v)
        if 'default' in post_data:
            if post_data['default']:  # only include the is_default_site key if we want to set it
                post_data['is_default_site'] = 'on'
        elif site.is_default_site:
            post_data['is_default_site'] = 'on'
        return self.client.post(reverse('wagtailsites:edit', args=(site_id,)), post_data)

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsites/edit.html')

    def test_nonexistant_redirect(self):
        self.assertEqual(self.get(site_id=100000).status_code, 404)

    def test_edit(self):
        edited_hostname = 'edited'
        response = self.post({
            'hostname': edited_hostname,
        })
        # Should redirect back to index
        self.assertRedirects(response, reverse('wagtailsites:index'))
        # Check that the site was edited
        self.assertEqual(Site.objects.get(id=self.localhost.id).hostname, edited_hostname)

    def test_changing_the_default_site_workflow(self):
        # First create a second, non-default, site
        second_site = Site.objects.create(
            hostname="not_yet_default",
            port=80,
            is_default_site=False,
            root_page=self.home_page)
        # Make the original default no longer default
        response = self.post(
            {
                'default': False,
            },
            site_id=self.localhost.id
        )
        # Should redirect back to index
        self.assertRedirects(response, reverse('wagtailsites:index'))
        # Check that the site is no longer default
        self.assertEqual(Site.objects.get(id=self.localhost.id).is_default_site, False)
        # Now make the second site default
        response = self.post(
            {
                'default': True,
            },
            site_id=second_site.id
        )
        # Should redirect back to index
        self.assertRedirects(response, reverse('wagtailsites:index'))
        # Check that the second site is now set as default
        self.assertEqual(Site.objects.get(id=second_site.id).is_default_site, True)

    def test_making_a_second_site_the_default_not_allowed(self):
        second_site = Site.objects.create(
            hostname="also_default",
            port=80,
            is_default_site=False,
            root_page=self.home_page)
        response = self.post(
            {
                'default': True,
            },
            site_id=second_site.id
        )
        # Should return the form with errors
        self.assertEqual(response.status_code, 200)
        self.assertEqual(bool(response.context['form'].errors), True)
        # Check that the site was not edited
        self.assertEqual(Site.objects.get(id=second_site.id).is_default_site, False)
class TestSiteDeleteView(TestCase, WagtailTestUtils):
    """Tests for the site delete view."""

    def setUp(self):
        self.login()
        self.home_page = Page.objects.get(id=2)
        self.localhost = Site.objects.all()[0]

    def get(self, params=None, site_id=None):
        # FIX: avoid a mutable default argument; behavior is unchanged.
        return self.client.get(reverse('wagtailsites:delete', args=(site_id or self.localhost.id, )), params or {})

    def post(self, post_data=None, site_id=None):
        # FIX: avoid a mutable default argument; behavior is unchanged.
        return self.client.post(reverse('wagtailsites:delete', args=(site_id or self.localhost.id, )), post_data or {})

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/generic/confirm_delete.html')

    def test_nonexistant_redirect(self):
        self.assertEqual(self.get(site_id=100000).status_code, 404)

    def test_posting_deletes_site(self):
        response = self.post()
        # Should redirect back to index
        self.assertRedirects(response, reverse('wagtailsites:index'))
        # Check that the site was deleted
        with self.assertRaises(Site.DoesNotExist):
            Site.objects.get(id=self.localhost.id)
class TestLimitedPermissions(TestCase, WagtailTestUtils):
    """Site CRUD views must work for a user with only site permissions
    (access_admin + add/change/delete_site), i.e. a non-superuser."""

    def setUp(self):
        # Create a user with only the site-management permissions.
        user = get_user_model().objects.create_user(username='test', email='test@email.com', password='password')
        user.user_permissions.add(
            Permission.objects.get(codename='access_admin'),
            Permission.objects.get(codename='add_site'),
            Permission.objects.get(codename='change_site'),
            Permission.objects.get(codename='delete_site')
        )
        # Login
        self.assertTrue(self.client.login(username='test', password='password'))
        self.home_page = Page.objects.get(id=2)
        self.localhost = Site.objects.all()[0]

    def test_get_index(self):
        response = self.client.get(reverse('wagtailsites:index'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsites/index.html')

    def test_get_create_view(self):
        response = self.client.get(reverse('wagtailsites:add'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsites/create.html')

    def test_create(self):
        response = self.client.post(reverse('wagtailsites:add'), {
            'hostname': "testsite",
            'port': "80",
            'root_page': str(self.home_page.id),
        })
        # Should redirect back to index
        self.assertRedirects(response, reverse('wagtailsites:index'))
        # Check that the site was created
        self.assertEqual(Site.objects.filter(hostname='testsite').count(), 1)

    def test_get_edit_view(self):
        edit_url = reverse('wagtailsites:edit', args=(self.localhost.id,))
        response = self.client.get(edit_url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailsites/edit.html')

    def test_edit(self):
        edit_url = reverse('wagtailsites:edit', args=(self.localhost.id,))
        edited_hostname = 'edited'
        response = self.client.post(edit_url, {
            'hostname': edited_hostname,
            'port': 80,
            'root_page': self.home_page.id,
        })
        # Should redirect back to index
        self.assertRedirects(response, reverse('wagtailsites:index'))
        # Check that the site was edited
        self.assertEqual(Site.objects.get(id=self.localhost.id).hostname, edited_hostname)

    def test_get_delete_view(self):
        delete_url = reverse('wagtailsites:delete', args=(self.localhost.id,))
        response = self.client.get(delete_url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/generic/confirm_delete.html')

    def test_delete(self):
        delete_url = reverse('wagtailsites:delete', args=(self.localhost.id,))
        response = self.client.post(delete_url)
        # Should redirect back to index
        self.assertRedirects(response, reverse('wagtailsites:index'))
        # Check that the site was deleted
        with self.assertRaises(Site.DoesNotExist):
            Site.objects.get(id=self.localhost.id)
|
|
# coding=utf-8
# Copyright 2019 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A tensor supporting reduction over irregularly grouped entries.
A segmented tensor is a tensor with a set of indices {0, ..., num_segments - 1}
and an index map that assigns an index to each element of the tensor. Two
elements with the same index are considered grouped together. The set of all
elements with index `k` is called the segment over k.
Segmented tensors support reductions over segments (reduce_mean, reduce_sum,
etc.). A typical example is performing reductions over table cells:
```
# Prepare a tf.Tensor with table values.
values = ...
# Prepare the table indices, either by rows or columns. The shape of `row_ids`
# and `col_ids` has to be a prefix of the shape of `values`.
row_index = segmented_tensor.IndexMap(indices=row_ids, num_segments=max_rows)
col_index = segmented_tensor.IndexMap(indices=col_ids, num_segments=max_cols)
# Combine the indices to get a table indexed by cell. The result has
# `num_segments` equal to row_ids * col_ids.
cell_index = segmented_tensor.ProductIndexMap(row_index, col_index)
# Compute the averages per cell. The result is a `Tensor` with shape
# [max_rows * max_cols, ..] together with an index map on it. The index map is
# equal to range(max_rows * max_cols).
cell_averages, _ = segmented_tensor.reduce_mean(values, cell_index)
# Gather the results to get back a Tensor with the same shape as `cell_index`.
# If there are multiple elements in the same cell they will have the same value.
token_to_its_cell_average = segmented_tensor.gather(cell_averages, cell_index)
```
Batching is supported by setting `batch_dims`. The first `batch_dims` dimensions
will be treated as the batch. Elements of different batches are never grouped
together, not even if they have the same index.
"""
import tensorflow.compat.v1 as tf
class IndexMap(object):
  """Index grouping entries within a tensor."""

  def __init__(self, indices, num_segments, batch_dims=0):
    """Creates an index.

    Args:
      indices: <int32> Tensor of indices, same shape as `values`.
      num_segments: <int32> Scalar tensor, the number of segments. All elements
        in a batched segmented tensor must have the same number of segments
        (although many segments can be empty).
      batch_dims: Python integer, the number of batch dimensions. The first
        `batch_dims` dimensions of a SegmentedTensor are treated as batch
        dimensions. Segments in different batch elements are always distinct
        even if they have the same index.
    """
    self.indices = tf.convert_to_tensor(indices)
    self.num_segments = tf.convert_to_tensor(num_segments)
    self.batch_dims = batch_dims

  def batch_shape(self):
    # The leading `batch_dims` dimensions of `indices` form the batch shape.
    return tf.shape(self.indices)[:self.batch_dims]
class ProductIndexMap(IndexMap):
  """The product of two indices."""

  def __init__(self, outer_index, inner_index):
    """Combines indices i and j into pairs (i, j).

    The result is an index where each segment (i, j) is the intersection of
    segments i and j. For example if the inputs represent table cells indexed by
    respectively rows and columns the output will be a table indexed by
    (row, column) pairs, i.e. by cell.

    The implementation combines indices {0, .., n - 1} and {0, .., m - 1} into
    {0, .., nm - 1}. The output has `num_segments` equal to
    `outer_index.num_segments` * `inner_index.num_segments`.

    Args:
      outer_index: IndexMap.
      inner_index: IndexMap, must have the same shape as `outer_index`.
    """
    if outer_index.batch_dims != inner_index.batch_dims:
      raise ValueError('outer_index.batch_dims and inner_index.batch_dims '
                       'must be the same.')
    # Pair (i, j) is encoded as the single integer i * m + j.
    super(ProductIndexMap, self).__init__(
        indices=(inner_index.indices +
                 outer_index.indices * inner_index.num_segments),
        num_segments=inner_index.num_segments * outer_index.num_segments,
        batch_dims=inner_index.batch_dims)
    self.outer_index = outer_index
    self.inner_index = inner_index

  def project_outer(self, index):
    """Projects an index with the same index set onto the outer components."""
    # i = encoded // m.  NOTE(review): tf.floor_div is deprecated in newer
    # TF releases in favor of tf.math.floordiv -- same semantics here.
    return IndexMap(
        indices=tf.floor_div(index.indices, self.inner_index.num_segments),
        num_segments=self.outer_index.num_segments,
        batch_dims=index.batch_dims)

  def project_inner(self, index):
    """Projects an index with the same index set onto the inner components."""
    # j = encoded % m.
    return IndexMap(
        indices=tf.floormod(index.indices, self.inner_index.num_segments),
        num_segments=self.inner_index.num_segments,
        batch_dims=index.batch_dims)
def gather(values, index, name='segmented_gather'):
  """Gathers from `values` using the index map.

  For each element in the domain of the index map this operation looks up a
  value for that index in `values`. Two elements from the same segment always
  get assigned the same value.

  Args:
    values: [B1, ..., Bn, num_segments, V1, ...] Tensor with segment values.
    index: [B1, ..., Bn, I1, ..., Ik] IndexMap.
    name: Name for the TensorFlow operation.

  Returns:
    [B1, ..., Bn, I1, ..., Ik, V1, ...] Tensor with the gathered values.
  """
  gathered = tf.gather(values,
                       index.indices,
                       batch_dims=index.batch_dims,
                       name=name)
  return gathered
def flatten(index, name='segmented_flatten'):
  """Flattens a batched index map to a 1d index map.

  This operation relabels the segments to keep batch elements distinct. The k-th
  batch element will have indices shifted by `num_segments` * (k - 1). The
  result is a tensor with `num_segments` multiplied by the number of elements
  in the batch.

  Args:
    index: IndexMap to flatten.
    name: Name for the TensorFlow operation.

  Returns:
    The flattened IndexMap.
  """
  with tf.variable_scope(name):
    # Offset each batch element's indices by batch_position * num_segments so
    # segments from different batch elements never collide after flattening.
    batch_size = tf.reduce_prod(index.batch_shape())
    offset = tf.range(batch_size) * index.num_segments
    offset = tf.reshape(offset, index.batch_shape())
    # Broadcast the per-batch offset over the remaining (non-batch) dims.
    for _ in range(index.batch_dims, index.indices.shape.rank):
      offset = tf.expand_dims(offset, -1)
    indices = offset + index.indices
    return IndexMap(
        indices=tf.reshape(indices, [-1]),
        num_segments=index.num_segments * batch_size,
        batch_dims=0)
def range_index_map(batch_shape, num_segments, name='range_index_map'):
  """Constructs an index map equal to range(num_segments) tiled over a batch.

  Args:
    batch_shape: rank-1 tensor giving the batch dimensions.
    num_segments: scalar tensor, number of segments.
    name: Name for the TensorFlow operation.

  Returns:
    IndexMap whose indices are range(num_segments) for every batch element.
  """
  with tf.variable_scope(name):
    num_segments = tf.convert_to_tensor(num_segments)
    num_segments.shape.assert_has_rank(0)
    batch_shape = tf.convert_to_tensor(batch_shape)
    batch_shape.shape.assert_has_rank(1)
    # Reshape range(num_segments) to [1, ..., 1, num_segments] ...
    target_shape = tf.concat(
        [tf.ones_like(batch_shape, dtype=tf.int32),
         tf.expand_dims(num_segments, axis=0)],
        axis=0)
    segment_ids = tf.reshape(tf.range(num_segments), target_shape)
    # ... then tile it across every batch dimension.
    tiling = tf.concat([batch_shape, [1]], axis=0)
    segment_ids = tf.tile(segment_ids, tiling)
    return IndexMap(
        indices=segment_ids,
        num_segments=num_segments,
        batch_dims=batch_shape.shape.as_list()[0])
def _segment_reduce(values, index, segment_reduce_fn, name):
  """Applies a segment reduction segment-wise.

  Args:
    values: [B1, ..., Bn, I1, ..., Ik, V1, ...] tensor to reduce.
    index: IndexMap with batch shape [B1, ..., Bn] matching `values`.
    segment_reduce_fn: an unsorted segment op, e.g.
      tf.math.unsorted_segment_mean (see the reduce_* wrappers below).
    name: Name for the TensorFlow ops.

  Returns:
    A pair (output_values, output_index): the reduced values of shape
    [B1, ..., Bn, num_segments, V1, ...] and a range IndexMap over them.
  """
  with tf.variable_scope(name):
    # Flatten the batch dimensions, as segments ops do not support batching.
    # However if `values` has extra dimensions to the right keep them
    # unflattened. Segmented ops support vector-valued operations.
    flat_index = flatten(index)
    vector_shape = tf.shape(values)[index.indices.shape.rank:]
    flattened_shape = tf.concat([[-1], vector_shape], axis=0)
    flat_values = tf.reshape(values, flattened_shape)
    segment_means = segment_reduce_fn(
        data=flat_values,
        segment_ids=flat_index.indices,
        num_segments=flat_index.num_segments)
    # Unflatten the values.
    new_shape = tf.concat(
        [index.batch_shape(), [index.num_segments], vector_shape], axis=0)
    output_values = tf.reshape(segment_means, new_shape)
    output_index = range_index_map(index.batch_shape(), index.num_segments)
    return output_values, output_index
def reduce_mean(values, index, name='segmented_reduce_mean'):
  """Averages a tensor over its segments; empty segments yield 0.

  Batching over the leading dimensions [B1, ..., Bn] is supported (each
  batch element may use different indices), as are vector values in the
  trailing dimensions [V1, V2, ...]. Only the middle index dimensions
  [I1, ..., Ik] are reduced.

  Args:
    values: [B1, ..., Bn, I1, ..., Ik, V1, V2, ...] tensor of values to be
      averaged.
    index: IndexMap [B1, ..., Bn, I1, ..., Ik] defining the segments.
    name: Name for the TensorFlow ops.

  Returns:
    A pair (output_values, output_index): a tensor of shape
    [B1, ..., Bn, num_segments, V1, V2, ...] and an IndexMap of shape
    [B1, ..., Bn, num_segments].
  """
  return _segment_reduce(values, index, tf.math.unsorted_segment_mean, name)
def reduce_sum(values, index, name='segmented_reduce_sum'):
  """Sums a tensor over its segments; empty segments yield 0.

  Batching over the leading dimensions [B1, ..., Bn] is supported (each
  batch element may use different indices), as are vector values in the
  trailing dimensions [V1, V2, ...]. Only the middle index dimensions
  [I1, ..., Ik] are reduced.

  Args:
    values: [B1, ..., Bn, I1, ..., Ik, V1, V2, ...] tensor of values to be
      summed.
    index: IndexMap [B1, ..., Bn, I1, ..., Ik] defining the segments.
    name: Name for the TensorFlow ops.

  Returns:
    A pair (output_values, output_index): a tensor of shape
    [B1, ..., Bn, num_segments, V1, V2, ...] and an IndexMap of shape
    [B1, ..., Bn, num_segments].
  """
  return _segment_reduce(values, index, tf.math.unsorted_segment_sum, name)
def reduce_max(values, index, name='segmented_reduce_max'):
  """Computes the element-wise maximum over segments.

  Batching over the leading dimensions [B1, ..., Bn] is supported (each
  batch element may use different indices), as are vector values in the
  trailing dimensions [V1, V2, ...]. Only the middle index dimensions
  [I1, ..., Ik] are reduced.

  Args:
    values: [B1, ..., Bn, I1, ..., Ik, V1, V2, ...] tensor of values to be
      reduced.
    index: IndexMap [B1, ..., Bn, I1, ..., Ik] defining the segments.
    name: Name for the TensorFlow ops.

  Returns:
    A pair (output_values, output_index): a tensor of shape
    [B1, ..., Bn, num_segments, V1, V2, ...] and an IndexMap of shape
    [B1, ..., Bn, num_segments].
  """
  return _segment_reduce(values, index, tf.math.unsorted_segment_max, name)
def reduce_min(values, index, name='segmented_reduce_min'):
  """Computes the element-wise minimum over segments.

  Batch and vector dimensions are handled the same way as in the other
  segmented reductions in this module.
  """
  return _segment_reduce(values, index, tf.math.unsorted_segment_min, name)
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by GitHub's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.12 (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"  # expanded by git-archive to the ref names
git_full = "$Format:%H$"  # expanded by git-archive to the full commit hash
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = "v"  # version tags look like "v1.2.3"
parentdir_prefix = "frontera-"  # unpacked sdists look like "frontera-<ver>/"
versionfile_source = "frontera/_version.py"  # path of this file from repo root
import os, sys, re, subprocess, errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Run the first launchable command from `commands` with `args`.

    Each candidate in `commands` is tried in order (useful on Windows,
    where git may be installed as "git.cmd").

    Args:
        commands: list of candidate executable names.
        args: argument list passed to the executable.
        cwd: working directory for the subprocess, or None.
        verbose: if True, print diagnostics on failure.
        hide_stderr: if True, capture (and discard) the child's stderr.

    Returns:
        The command's stripped stdout as text, or None if no candidate
        could be launched or the command exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # This candidate is not installed; try the next one.
                continue
            if verbose:
                print("unable to run %s" % args[0])
                print(e)
            return None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = p.communicate()[0].strip()
    # Fix: the original tested `sys.version >= '3'`, a fragile string
    # comparison; check the actual pipe payload type instead.
    if isinstance(stdout, bytes):
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % args[0])
        return None
    return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
    """Derive the version from the name of the unpacked source directory.

    Source tarballs conventionally unpack into "<name>-<version>/"; when
    the basename of `root` starts with `parentdir_prefix`, the remainder
    is taken as the version.  Returns None when the prefix doesn't match.
    """
    dirname = os.path.basename(root)
    if dirname.startswith(parentdir_prefix):
        return {"version": dirname[len(parentdir_prefix):], "full": ""}
    if verbose:
        print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
              (root, dirname, parentdir_prefix))
    return None
def git_get_keywords(versionfile_abs):
    """Extract the git_refnames/git_full keyword values from a _version.py.

    The code embedded in _version.py can just fetch the value of these
    keywords. When used from setup.py, we don't want to import _version.py,
    so we do it with a regexp instead. This function is not used from
    _version.py.

    Args:
        versionfile_abs: absolute path of the _version.py file to scan.

    Returns:
        Dict with "refnames" and/or "full" keys; empty if the file is
        missing or unreadable.
    """
    keywords = {}
    try:
        # Fix: use a context manager so the handle is closed even if
        # parsing raises (the original left the file open on error).
        with open(versionfile_abs, "r") as f:
            for line in f:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        # Best-effort: a missing file just yields an empty dict.
        pass
    return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
    """Build version info from expanded git-archive keyword values.

    Returns {} when the keywords are absent or unexpanded; otherwise a
    dict with "version" (tag without `tag_prefix`, or the full revision
    id as a fallback) and "full" (the revision id).
    """
    if not keywords:
        return {}  # keyword-finding function failed to find keywords
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        return {}  # unexpanded, so not in an unpacked git-archive tarball
    refs = {r.strip() for r in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = {r for r in refs if re.search(r'\d', r)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    # sorting will prefer e.g. "2.0" over "2.0rc1"
    for ref in sorted(tags):
        if not ref.startswith(tag_prefix):
            continue
        r = ref[len(tag_prefix):]
        if verbose:
            print("picking %s" % r)
        return {"version": r, "full": keywords["full"].strip()}
    # no suitable tags, so we use the full revision id
    if verbose:
        print("no suitable tags, using full revision id")
    full = keywords["full"].strip()
    return {"version": full, "full": full}
def git_versions_from_vcs(tag_prefix, root, verbose=False):
    """Compute version info by running git in a checked-out source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Args:
        tag_prefix: prefix (e.g. "v") that version tags must carry.
        root: directory expected to contain the ".git" directory.
        verbose: if True, print diagnostics.

    Returns:
        {"version": ..., "full": ...} on success, {} on any failure.
    """
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}
    GITS = ["git"]
    if sys.platform == "win32":
        # shell=False in run_command, so the .cmd/.exe wrappers are needed.
        GITS = ["git.cmd", "git.exe"]
    stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
                         cwd=root)
    if stdout is None:
        return {}
    if not stdout.startswith(tag_prefix):
        if verbose:
            print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
        return {}
    tag = stdout[len(tag_prefix):]
    stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if stdout is None:
        return {}
    full = stdout.strip()
    # Propagate the dirty marker from "describe" onto the full revision id.
    if tag.endswith("-dirty"):
        full += "-dirty"
    return {"version": tag, "full": full}
def get_versions(default=None, verbose=False):
    """Return {"version": ..., "full": ...} for this checkout/tarball.

    Tries, in order: expanded git-archive keywords, running git in the
    source tree, and the parent-directory naming convention; falls back
    to `default` ({"version": "unknown", "full": ""} if not given).
    """
    # Fix: the original used a mutable dict as the default argument; the
    # fallback dict is handed to rep_by_pep440, which mutates its argument
    # in place, so the shared default could be silently modified.
    if default is None:
        default = {"version": "unknown", "full": ""}
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    keywords = {"refnames": git_refnames, "full": git_full}
    ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
    if ver:
        return rep_by_pep440(ver)
    try:
        root = os.path.abspath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in range(len(versionfile_source.split('/'))):
            root = os.path.dirname(root)
    except NameError:
        return default
    return rep_by_pep440(
        git_versions_from_vcs(tag_prefix, root, verbose)
        or versions_from_parentdir(parentdir_prefix, root, verbose)
        or default)
def git2pep440(ver_str):
    """Translate a 'git describe' string into this project's PEP-440-ish form.

    Accepts the four shapes "tag", "tag-dirty", "tag-N-ghash" and
    "tag-N-ghash-dirty" (0 to 3 dashes); anything else raises RuntimeError.
    """
    parts = ver_str.split('-')
    extra = len(parts) - 1  # number of dashes
    if extra == 0:
        # Plain tag.
        return ver_str
    if extra == 1:
        # "tag-dirty": one uncommitted change on top of the tag.
        return parts[0] + ".post.dev1.pre"
    if extra == 2:
        # "tag-N-ghash": N commits past the tag.
        return parts[0] + ".post.dev" + parts[1]
    if extra == 3:
        # "tag-N-ghash-dirty": count the dirty tree as one more commit.
        return parts[0] + ".post.dev" + str(int(parts[1]) + 1) + ".pre"
    raise RuntimeError("Invalid version string")
def rep_by_pep440(ver):
    """Normalize the "version" field of `ver` in place and return it."""
    if not ver["full"]:
        # Parent-dir fallback was used: just drop any dash suffix.
        ver["version"] = ver["version"].split('-')[0]
    else:
        # Git-derived version: convert the describe output.
        ver["version"] = git2pep440(ver["version"])
    return ver
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.layers import convolutional
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Returns true iff the two initializers produce the same tensor to
# within a tiny tolerance.
def identicaltest(tc, init1, init2, shape=None):
  """Returns True iff two initializers produce near-identical tensors.

  Each initializer is evaluated in its own fresh graph so graph-level
  state cannot leak between the two draws.

  Args:
    tc: An instance of TensorFlowTestCase.
    init1: An Initializer that generates a tensor of a given shape
    init2: An Initializer that generates a tensor of a given shape
    shape: Shape of the tensor to initialize or `None` to use a vector of
      length 100.

  Returns:
    True or False as determined by test.
  """
  shape = [100] if shape is None else shape
  with tc.test_session(graph=ops.Graph()):
    first = init1(shape).eval()
  with tc.test_session(graph=ops.Graph()):
    second = init2(shape).eval()
  return np.allclose(first, second, rtol=1e-15, atol=1e-15)
def duplicated_initializer(tc, init, graph_seed, shape=None):
  """Tests duplicated random initializer within the same graph.

  This test generates two random kernels from the same initializer to the same
  graph, and checks if the results are close enough. Even given the same global
  seed, two different instances of random kernels should generate different
  results.

  Args:
    tc: An instance of TensorFlowTestCase.
    init: An Initializer that generates a tensor of a given shape
    graph_seed: A graph-level seed to use.
    shape: Shape of the tensor to initialize or `None` to use a vector of length
      100.

  Returns:
    True or False as determined by test.
  """
  if shape is None:
    shape = [100]
  with tc.test_session(graph=ops.Graph()):
    # Both draws share one graph and one graph-level seed on purpose.
    random_seed.set_random_seed(graph_seed)
    t1 = init(shape).eval()
    t2 = init(shape).eval()
    return np.allclose(t1, t2, rtol=1e-15, atol=1e-15)
def _init_sampler(tc, init, num):
  """Returns a thunk that evaluates `init([num])` in a fresh session.

  Args:
    tc: An instance of TensorFlowTestCase.
    init: An Initializer that generates a tensor of a given shape
    num: Size of 1D tensor to create.

  Returns:
    Function to generate a random tensor.
  """
  def sample():
    with tc.test_session(use_gpu=True):
      return init([num]).eval()
  return sample
class ConstantInitializersTest(test.TestCase):
  """Tests zeros/ones/constant initializers, including N-D constant values
  that are shorter or longer than the variable being initialized."""

  def testZerosInitializer(self):
    with self.test_session(use_gpu=True):
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x", shape=shape, initializer=init_ops.zeros_initializer())
      x.initializer.run()
      self.assertAllEqual(x.eval(), np.zeros(shape))

  def testOnesInitializer(self):
    with self.test_session(use_gpu=True):
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x", shape=shape, initializer=init_ops.ones_initializer())
      x.initializer.run()
      self.assertAllEqual(x.eval(), np.ones(shape))

  def testConstantZeroInitializer(self):
    with self.test_session(use_gpu=True):
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x", shape=shape, initializer=init_ops.constant_initializer(0.0))
      x.initializer.run()
      self.assertAllEqual(x.eval(), np.zeros(shape))

  def testConstantOneInitializer(self):
    with self.test_session(use_gpu=True):
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x", shape=shape, initializer=init_ops.constant_initializer(1.0))
      x.initializer.run()
      self.assertAllEqual(x.eval(), np.ones(shape))

  def testConstantIntInitializer(self):
    with self.test_session(use_gpu=True):
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x",
          shape=shape,
          dtype=dtypes.int32,
          initializer=init_ops.constant_initializer(7))
      x.initializer.run()
      self.assertEqual(x.dtype.base_dtype, dtypes.int32)
      self.assertAllEqual(x.eval(), 7 * np.ones(shape, dtype=np.int32))

  def testConstantTupleInitializer(self):
    with self.test_session(use_gpu=True):
      shape = [3]
      x = variable_scope.get_variable(
          "x",
          shape=shape,
          dtype=dtypes.int32,
          initializer=init_ops.constant_initializer((10, 20, 30)))
      x.initializer.run()
      self.assertEqual(x.dtype.base_dtype, dtypes.int32)
      self.assertAllEqual(x.eval(), [10, 20, 30])

  def _testNDimConstantInitializer(self, name, value, shape, expected):
    # Helper: initialize a variable from `value` and compare the flattened
    # result with `expected` element-by-element.
    with self.test_session(use_gpu=True):
      init = init_ops.constant_initializer(value, dtype=dtypes.int32)
      x = variable_scope.get_variable(name, shape=shape, initializer=init)
      x.initializer.run()
      actual = array_ops.reshape(x, [-1]).eval()
      self.assertEqual(len(actual), len(expected))
      for a, e in zip(actual, expected):
        self.assertEqual(a, e)

  def testNDimConstantInitializer(self):
    value = [0, 1, 2, 3, 4, 5]
    shape = [2, 3]
    expected = list(value)
    self._testNDimConstantInitializer("list", value, shape, expected)
    self._testNDimConstantInitializer("ndarray",
                                      np.asarray(value), shape, expected)
    self._testNDimConstantInitializer("2D-ndarray",
                                      np.asarray(value).reshape(tuple(shape)),
                                      shape, expected)

  def _testNDimConstantInitializerLessValues(self, name, value, shape,
                                             expected):
    # Helper: when `value` has fewer elements than the variable, the last
    # element is expected to be repeated to fill the remainder.
    with self.test_session(use_gpu=True):
      init = init_ops.constant_initializer(value, dtype=dtypes.int32)
      x = variable_scope.get_variable(name, shape=shape, initializer=init)
      x.initializer.run()
      actual = array_ops.reshape(x, [-1]).eval()
      self.assertGreater(len(actual), len(expected))
      for i in xrange(len(actual)):
        a = actual[i]
        e = expected[i] if i < len(expected) else expected[-1]
        self.assertEqual(a, e)

  def testNDimConstantInitializerLessValues(self):
    value = [0, 1, 2, 3, 4, 5]
    shape = [2, 4]
    expected = list(value)
    self._testNDimConstantInitializerLessValues("list", value, shape, expected)
    self._testNDimConstantInitializerLessValues("ndarray",
                                                np.asarray(value), shape,
                                                expected)
    self._testNDimConstantInitializerLessValues(
        "2D-ndarray", np.asarray(value).reshape(tuple([2, 3])), shape, expected)

  def _testNDimConstantInitializerMoreValues(self, value, shape):
    # Helper: more constant elements than the variable holds must raise.
    ops.reset_default_graph()
    with self.test_session(use_gpu=True):
      init = init_ops.constant_initializer(value, dtype=dtypes.int32)
      self.assertRaises(
          ValueError,
          variable_scope.get_variable,
          "x",
          shape=shape,
          initializer=init)

  def testNDimConstantInitializerMoreValues(self):
    value = [0, 1, 2, 3, 4, 5, 6, 7]
    shape = [2, 3]
    self._testNDimConstantInitializerMoreValues(value, shape)
    self._testNDimConstantInitializerMoreValues(np.asarray(value), shape)
    self._testNDimConstantInitializerMoreValues(
        np.asarray(value).reshape(tuple([2, 4])), shape)

  def testInvalidValueTypeForConstantInitializerCausesTypeError(self):
    c = constant_op.constant([1.0, 2.0, 3.0])
    with self.assertRaisesRegexp(
        TypeError, r"Invalid type for initial value: .*Tensor.*"):
      init_ops.constant_initializer(c, dtype=dtypes.float32)
    v = variables.Variable([3.0, 2.0, 1.0])
    with self.assertRaisesRegexp(
        TypeError, r"Invalid type for initial value: .*Variable.*"):
      init_ops.constant_initializer(v, dtype=dtypes.float32)
class RandomNormalInitializationTest(test.TestCase):
  """Seed determinism and dtype validation for random_normal_initializer."""

  def testInitializerIdentical(self):
    for dtype in (dtypes.float32, dtypes.float64):
      first = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
      second = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
      self.assertTrue(identicaltest(self, first, second))

  def testInitializerDifferent(self):
    for dtype in (dtypes.float32, dtypes.float64):
      first = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
      second = init_ops.random_normal_initializer(0.0, 1.0, seed=2, dtype=dtype)
      self.assertFalse(identicaltest(self, first, second))

  def testDuplicatedInitializer(self):
    initializer = init_ops.random_normal_initializer(0.0, 1.0)
    self.assertFalse(duplicated_initializer(self, initializer, 1))

  def testInvalidDataType(self):
    # Normal draws are undefined for string tensors.
    with self.assertRaises(ValueError):
      init_ops.random_normal_initializer(0.0, 1.0, dtype=dtypes.string)
class TruncatedNormalInitializationTest(test.TestCase):
  """Seed determinism and dtype validation for truncated_normal_initializer."""

  def testInitializerIdentical(self):
    for dtype in (dtypes.float32, dtypes.float64):
      first = init_ops.truncated_normal_initializer(
          0.0, 1.0, seed=1, dtype=dtype)
      second = init_ops.truncated_normal_initializer(
          0.0, 1.0, seed=1, dtype=dtype)
      self.assertTrue(identicaltest(self, first, second))

  def testInitializerDifferent(self):
    for dtype in (dtypes.float32, dtypes.float64):
      first = init_ops.truncated_normal_initializer(
          0.0, 1.0, seed=1, dtype=dtype)
      second = init_ops.truncated_normal_initializer(
          0.0, 1.0, seed=2, dtype=dtype)
      self.assertFalse(identicaltest(self, first, second))

  def testDuplicatedInitializer(self):
    initializer = init_ops.truncated_normal_initializer(0.0, 1.0)
    self.assertFalse(duplicated_initializer(self, initializer, 1))

  def testInvalidDataType(self):
    # Truncated-normal draws are undefined for string tensors.
    with self.assertRaises(ValueError):
      init_ops.truncated_normal_initializer(0.0, 1.0, dtype=dtypes.string)
class RandomUniformInitializationTest(test.TestCase):
  """Seed determinism for random_uniform_initializer.

  Note the dtype lists intentionally differ between the two seed tests
  (the "different" test additionally covers int32).
  """

  def testInitializerIdentical(self):
    for dtype in (dtypes.float32, dtypes.float64, dtypes.int64):
      first = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype)
      second = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype)
      self.assertTrue(identicaltest(self, first, second))

  def testInitializerDifferent(self):
    for dtype in (dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64):
      first = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype)
      second = init_ops.random_uniform_initializer(0, 7, seed=2, dtype=dtype)
      self.assertFalse(identicaltest(self, first, second))

  def testDuplicatedInitializer(self):
    initializer = init_ops.random_uniform_initializer(0.0, 1.0)
    self.assertFalse(duplicated_initializer(self, initializer, 1))
class UniformUnitScalingInitializationTest(test.TestCase):
  """Seed/factor determinism, zero-size shapes, and dtype validation for
  uniform_unit_scaling_initializer."""

  def testInitializerIdentical(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
      init2 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
      self.assertTrue(identicaltest(self, init1, init2))
      # Same seed plus the same explicit factor must also agree.
      init3 = init_ops.uniform_unit_scaling_initializer(
          1.5, seed=1, dtype=dtype)
      init4 = init_ops.uniform_unit_scaling_initializer(
          1.5, seed=1, dtype=dtype)
      self.assertTrue(identicaltest(self, init3, init4))

  def testInitializerDifferent(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
      init2 = init_ops.uniform_unit_scaling_initializer(seed=2, dtype=dtype)
      # A different factor changes the output even with an identical seed.
      init3 = init_ops.uniform_unit_scaling_initializer(
          1.5, seed=1, dtype=dtype)
      self.assertFalse(identicaltest(self, init1, init2))
      self.assertFalse(identicaltest(self, init1, init3))
      self.assertFalse(identicaltest(self, init2, init3))

  def testZeroSize(self):
    # A dimension of size 0 must initialize cleanly to an empty tensor.
    shape = [0, 2]
    with self.test_session():
      x = variable_scope.get_variable(
          "x",
          shape=shape,
          initializer=init_ops.uniform_unit_scaling_initializer())
      variables.global_variables_initializer().run()
      self.assertAllEqual(shape, x.eval().shape)

  def testDuplicatedInitializer(self):
    init = init_ops.uniform_unit_scaling_initializer()
    self.assertFalse(duplicated_initializer(self, init, 1))

  def testInvalidDataType(self):
    self.assertRaises(
        ValueError,
        init_ops.uniform_unit_scaling_initializer,
        dtype=dtypes.string)
# TODO(vrv): move to sequence_ops_test?
class RangeTest(test.TestCase):
  """Tests math_ops.range: shapes, values, empty/negative deltas, dtype
  inference and explicit dtype selection."""

  def _Range(self, start, limit, delta):
    # Helper: builds the op, checks the static shape against np.arange,
    # and returns the evaluated values.
    with self.test_session(use_gpu=True):
      tf_ans = math_ops.range(start, limit, delta, name="range")
      self.assertEqual([len(np.arange(start, limit, delta))],
                       tf_ans.get_shape())
      return tf_ans.eval()

  def testBasic(self):
    self.assertTrue(
        np.array_equal(self._Range(0, 5, 1), np.array([0, 1, 2, 3, 4])))
    self.assertTrue(np.array_equal(self._Range(0, 5, 2), np.array([0, 2, 4])))
    self.assertTrue(np.array_equal(self._Range(0, 6, 2), np.array([0, 2, 4])))
    self.assertTrue(
        np.array_equal(self._Range(13, 32, 7), np.array([13, 20, 27])))
    self.assertTrue(
        np.array_equal(
            self._Range(100, 500, 100), np.array([100, 200, 300, 400])))
    self.assertEqual(math_ops.range(0, 5, 1).dtype, dtypes.int32)

  def testLimitOnly(self):
    # Single-argument form: range(limit) with implicit start=0, delta=1.
    with self.test_session(use_gpu=True):
      self.assertAllEqual(np.arange(5), math_ops.range(5).eval())

  def testEmpty(self):
    for start in 0, 5:
      self.assertTrue(np.array_equal(self._Range(start, start, 1), []))

  def testNonInteger(self):
    self.assertTrue(
        np.allclose(self._Range(0, 2, 0.5), np.array([0, 0.5, 1, 1.5])))
    self.assertTrue(np.allclose(self._Range(0, 5, 2.5), np.array([0, 2.5])))
    self.assertTrue(
        np.allclose(self._Range(0, 3, 0.9), np.array([0, 0.9, 1.8, 2.7])))
    self.assertTrue(
        np.allclose(
            self._Range(100., 500., 100.), np.array([100, 200, 300, 400])))
    self.assertEqual(math_ops.range(0., 5., 1.).dtype, dtypes.float32)

  def testNegativeDelta(self):
    self.assertTrue(
        np.array_equal(self._Range(5, -1, -1), np.array([5, 4, 3, 2, 1, 0])))
    self.assertTrue(
        np.allclose(self._Range(2.5, 0, -0.5), np.array([2.5, 2, 1.5, 1, 0.5])))
    self.assertTrue(
        np.array_equal(self._Range(-5, -10, -3), np.array([-5, -8])))

  def testDType(self):
    # Mixed-dtype arguments must promote to the widest involved dtype.
    zero_int32 = math_ops.cast(0, dtypes.int32)
    zero_int64 = math_ops.cast(0, dtypes.int64)
    zero_float32 = math_ops.cast(0, dtypes.float32)
    zero_float64 = math_ops.cast(0, dtypes.float64)
    self.assertEqual(math_ops.range(zero_int32, 0, 1).dtype, dtypes.int32)
    self.assertEqual(math_ops.range(zero_int64, 0, 1).dtype, dtypes.int64)
    self.assertEqual(math_ops.range(zero_float32, 0, 1).dtype, dtypes.float32)
    self.assertEqual(math_ops.range(zero_float64, 0, 1).dtype, dtypes.float64)
    self.assertEqual(
        math_ops.range(zero_int32, zero_int64, 1).dtype, dtypes.int64)
    self.assertEqual(
        math_ops.range(zero_int64, zero_float32, 1).dtype, dtypes.float32)
    self.assertEqual(
        math_ops.range(zero_float32, zero_float64, 1).dtype, dtypes.float64)
    self.assertEqual(
        math_ops.range(zero_float64, zero_int32, 1).dtype, dtypes.float64)
    # An explicit dtype= argument always wins.
    self.assertEqual(
        math_ops.range(
            0, 0, 1, dtype=dtypes.int32).dtype, dtypes.int32)
    self.assertEqual(
        math_ops.range(
            0, 0, 1, dtype=dtypes.int64).dtype, dtypes.int64)
    self.assertEqual(
        math_ops.range(
            0, 0, 1, dtype=dtypes.float32).dtype, dtypes.float32)
    self.assertEqual(
        math_ops.range(
            0, 0, 1, dtype=dtypes.float64).dtype, dtypes.float64)
# TODO(vrv): move to sequence_ops_test?
class LinSpaceTest(test.TestCase):
  """Tests math_ops.linspace on CPU and (when available) GPU for positive,
  negative, sign-crossing, and degenerate (start == stop) ranges."""

  def _gpu_modes(self):
    # Run every test once with force_gpu=False and, if a GPU exists, again
    # with force_gpu=True.
    if test.is_gpu_available():
      return [False, True]
    else:
      return [False]

  def _LinSpace(self, start, stop, num):
    # NOTE(touts): Needs to pass a graph to get a new session each time.
    # Reads self.force_gpu, which the test methods set in their loops.
    with ops.Graph().as_default() as graph:
      with self.test_session(graph=graph, force_gpu=self.force_gpu):
        tf_ans = math_ops.linspace(start, stop, num, name="linspace")
        self.assertEqual([num], tf_ans.get_shape())
        return tf_ans.eval()

  def testPositive(self):
    for self.force_gpu in self._gpu_modes():
      self.assertArrayNear(self._LinSpace(1., 5., 1), np.array([1.]), 1e-5)
      self.assertArrayNear(self._LinSpace(1., 5., 2), np.array([1., 5.]), 1e-5)
      self.assertArrayNear(
          self._LinSpace(1., 5., 3), np.array([1., 3., 5.]), 1e-5)
      self.assertArrayNear(
          self._LinSpace(1., 5., 4), np.array([1., 7. / 3., 11. / 3., 5.]),
          1e-5)

  def testNegative(self):
    for self.force_gpu in self._gpu_modes():
      self.assertArrayNear(self._LinSpace(-1., -5., 1), np.array([-1.]), 1e-5)
      self.assertArrayNear(
          self._LinSpace(-1., -5., 2), np.array([-1., -5.]), 1e-5)
      self.assertArrayNear(
          self._LinSpace(-1., -5., 3), np.array([-1., -3., -5.]), 1e-5)
      self.assertArrayNear(
          self._LinSpace(-1., -5., 4),
          np.array([-1., -7. / 3., -11. / 3., -5.]), 1e-5)

  def testNegativeToPositive(self):
    for self.force_gpu in self._gpu_modes():
      self.assertArrayNear(self._LinSpace(-1., 5., 1), np.array([-1.]), 1e-5)
      self.assertArrayNear(
          self._LinSpace(-1., 5., 2), np.array([-1., 5.]), 1e-5)
      self.assertArrayNear(
          self._LinSpace(-1., 5., 3), np.array([-1., 2., 5.]), 1e-5)
      self.assertArrayNear(
          self._LinSpace(-1., 5., 4), np.array([-1., 1., 3., 5.]), 1e-5)

  def testPoint(self):
    # start == stop: every element equals the single point.
    for self.force_gpu in self._gpu_modes():
      self.assertArrayNear(self._LinSpace(5., 5., 1), np.array([5.]), 1e-5)
      self.assertArrayNear(self._LinSpace(5., 5., 2), np.array([5.] * 2), 1e-5)
      self.assertArrayNear(self._LinSpace(5., 5., 3), np.array([5.] * 3), 1e-5)
      self.assertArrayNear(self._LinSpace(5., 5., 4), np.array([5.] * 4), 1e-5)
class DeviceTest(test.TestCase):
  """Checks that a Variable and its initializer land on the right device."""

  def testNoDevice(self):
    with ops.Graph().as_default():
      v = variables.Variable([[1.0, 1.0]])
      # Without a device scope, no placement is recorded.
      self.assertDeviceEqual(None, v.device)
      self.assertDeviceEqual(None, v.initializer.device)

  def testDevice(self):
    with ops.Graph().as_default():
      with ops.device("/job:ps"):
        v = variables.Variable([[1.0, 1.0]])
      # The variable and its initializer inherit the enclosing device scope.
      self.assertDeviceEqual("/job:ps", v.device)
      self.assertDeviceEqual("/job:ps", v.initializer.device)
class OrthogonalInitializerTest(test.TestCase):
  """Tests determinism, shape validation, gain scaling, and orthogonality
  of init_ops.orthogonal_initializer."""

  def testInitializerIdentical(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
      init2 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
      self.assertTrue(identicaltest(self, init1, init2, (10, 10)))

  def testInitializerDifferent(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
      init2 = init_ops.orthogonal_initializer(seed=2, dtype=dtype)
      self.assertFalse(identicaltest(self, init1, init2, (10, 10)))

  def testDuplicatedInitializer(self):
    init = init_ops.orthogonal_initializer()
    self.assertFalse(duplicated_initializer(self, init, 1, (10, 10)))

  def testInvalidDataType(self):
    self.assertRaises(
        ValueError, init_ops.orthogonal_initializer, dtype=dtypes.string)

  def testInvalidShape(self):
    # Orthogonal matrices need at least 2 dimensions.
    init1 = init_ops.orthogonal_initializer()
    with self.test_session(graph=ops.Graph(), use_gpu=True):
      self.assertRaises(ValueError, init1, shape=[5])

  def testGain(self):
    shape = (10, 10)
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
      init2 = init_ops.orthogonal_initializer(gain=3.14, seed=1, dtype=dtype)
      with self.test_session(graph=ops.Graph(), use_gpu=True):
        t1 = init1(shape).eval()
      with self.test_session(graph=ops.Graph(), use_gpu=True):
        t2 = init2(shape).eval()
      # Bug fix: the original ended with `return np.allclose(...)`, so the
      # comparison was never asserted (the test passed vacuously) and the
      # float64 iteration was skipped. Assert with default tolerances —
      # rtol/atol=1e-15 is unattainable for a float32 scale round-trip.
      self.assertAllClose(t1, t2 / 3.14)

  def testShapesValues(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      for shape in [(10, 10), (10, 9, 8), (100, 5, 5), (50, 40), (40, 50)]:
        init = init_ops.orthogonal_initializer(dtype=dtype)
        tol = 1e-5 if dtype == dtypes.float32 else 1e-12
        with self.test_session(graph=ops.Graph(), use_gpu=True):
          # Check the shape
          t = init(shape).eval()
          self.assertAllEqual(shape, t.shape)
          # Check orthogonality by computing the inner product
          t = t.reshape((np.prod(t.shape[:-1]), t.shape[-1]))
          if t.shape[0] > t.shape[1]:
            self.assertAllClose(
                np.dot(t.T, t), np.eye(t.shape[1]), rtol=tol, atol=tol)
          else:
            self.assertAllClose(
                np.dot(t, t.T), np.eye(t.shape[0]), rtol=tol, atol=tol)
class ConvolutionDeltaOrthogonalInitializerTest(test.TestCase):
    """Tests for init_ops.convolutional_delta_orthogonal."""

    def testInitializerIdentical(self):
        # Same seed must yield identical kernels for every supported dtype.
        for dtype in [dtypes.float32, dtypes.float64]:
            init1 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)
            init2 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)
            self.assertTrue(identicaltest(self, init1, init2, (3, 3, 10, 10)))

    def testInitializerDifferent(self):
        # Different seeds must yield different kernels.
        for dtype in [dtypes.float32, dtypes.float64]:
            init1 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)
            init2 = init_ops.convolutional_delta_orthogonal(seed=2, dtype=dtype)
            self.assertFalse(identicaltest(self, init1, init2, (3, 3, 10, 10)))

    def testDuplicatedInitializer(self):
        # Two applications of one initializer object must not repeat values.
        init = init_ops.convolutional_delta_orthogonal()
        self.assertFalse(duplicated_initializer(self, init, 1, (3, 3, 10, 10)))

    def testInvalidDataType(self):
        self.assertRaises(
            ValueError, init_ops.convolutional_delta_orthogonal,
            dtype=dtypes.string)

    def testInvalidShape(self):
        # Output channels (5) < input channels (6) is rejected.
        init1 = init_ops.convolutional_delta_orthogonal()
        with self.test_session(graph=ops.Graph(), use_gpu=True):
            self.assertRaises(ValueError, init1, shape=[3, 3, 6, 5])

    def testGain(self):
        """A gain-g draw must equal the gain-1 draw scaled by g."""
        shape = (3, 3, 10, 10)
        for dtype in [dtypes.float32, dtypes.float64]:
            init1 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)
            init2 = init_ops.convolutional_delta_orthogonal(gain=3.14,
                                                            seed=1, dtype=dtype)
            with self.test_session(graph=ops.Graph(), use_gpu=True):
                t1 = init1(shape).eval()
            with self.test_session(graph=ops.Graph(), use_gpu=True):
                t2 = init2(shape).eval()
            # Bug fix: the original `return np.allclose(...)` exited on the
            # first dtype without asserting, so a broken gain could never fail
            # the test. Assert with a dtype-appropriate tolerance instead
            # (1e-15 is below float32 resolution).
            tol = 1e-5 if dtype == dtypes.float32 else 1e-12
            self.assertAllClose(t1, t2 / 3.14, rtol=tol, atol=tol)

    def testShapesValues(self):
        # The delta-orthogonal kernel should act as an isometry (up to gain):
        # compare 2-norms of random inputs and their convolved outputs.
        for dtype in [dtypes.float32]:
            for kernel_size in [[3], [8], [3, 5], [2, 4], [3, 3, 3], [2, 2, 2]]:
                tol = 1e-2
                # Check orthogonality by computing the 2-norms of the inputs
                # and outputs.
                if len(kernel_size) == 1:
                    shape = [4, 32, 64]
                    convolution = convolutional.conv1d
                elif len(kernel_size) == 2:
                    convolution = convolutional.conv2d
                    shape = [4, 32, 32, 64]
                else:
                    shape = [4, 16, 16, 16, 64]
                    convolution = convolutional.conv3d
                inputs = random_ops.random_normal(shape, dtype=dtype)
                inputs_2norm = linalg_ops.norm(inputs)
                outputs = convolution(
                    inputs, padding="same", filters=128,
                    kernel_size=kernel_size, use_bias=False,
                    kernel_initializer=init_ops.convolutional_delta_orthogonal(
                        gain=3.14))
                outputs_shape = shape[0:-1] + [128]
                outputs_2norm = linalg_ops.norm(outputs)
                my_ops = variables.global_variables_initializer()
                with self.test_session(use_gpu=True) as sess:
                    sess.run(my_ops)
                    # Check the shape of the outputs
                    t = outputs.eval()
                    self.assertAllEqual(t.shape, outputs_shape)
                    # Check isometry of the delta-orthogonal kernel.
                    self.assertAllClose(
                        sess.run(inputs_2norm) / np.sqrt(np.prod(shape)),
                        sess.run(outputs_2norm) / (np.sqrt(np.prod(shape)) * np.sqrt(3.14)),
                        rtol=tol, atol=tol)

    def testNonuniformity(self):
        # Draw many kernels; the central slice should always be orthogonal
        # (|det| == 1), while the determinant signs should vary.
        value = 0
        abs_value = 0
        shape = [3, 3, 10, 10]
        count = 70
        tol = 1e-5
        with self.test_session(use_gpu=True):  # as sess:
            for i in range(count):
                x = variable_scope.get_variable("{}".format(i), shape=shape,
                                                initializer=
                                                init_ops.convolutional_delta_orthogonal)
                x.initializer.run()
                y = x.eval()[1, 1, :, :]
                determinant = np.linalg.det(y)
                value += determinant
                abs_value += np.abs(determinant)
        # Check there is some variation in the signs of the determinants
        self.assertLess(value, count - tol)
        self.assertLess(-count + tol, value)
        # Check all determinants have absolute value 1
        # Compute the sum of the absolute values of 'count' determinants
        self.assertAllClose(abs_value, count, rtol=tol, atol=tol)
class IdentityInitializerTest(test.TestCase):
    """Tests for init_ops.identity_initializer."""

    def testInvalidDataType(self):
        # Bug fix: this case previously exercised orthogonal_initializer
        # (a copy-paste slip from the class above); it must validate
        # identity_initializer's dtype handling.
        self.assertRaises(
            ValueError, init_ops.identity_initializer, dtype=dtypes.string)

    def testInvalidShape(self):
        # Identity requires a rank-2 shape.
        init = init_ops.identity_initializer()
        with self.test_session(graph=ops.Graph(), use_gpu=True):
            self.assertRaises(ValueError, init, shape=[5, 7, 7])
            self.assertRaises(ValueError, init, shape=[5])
            self.assertRaises(ValueError, init, shape=[])

    def testNonSquare(self):
        # Rectangular shapes produce a rectangular identity (np.eye semantics).
        init = init_ops.identity_initializer()
        shape = (10, 5)
        with self.test_session(graph=ops.Graph(), use_gpu=True):
            self.assertAllClose(init(shape).eval(), np.eye(*shape))

    def testGain(self):
        shape = (10, 10)
        for dtype in [dtypes.float32, dtypes.float64]:
            init_default = init_ops.identity_initializer(dtype=dtype)
            init_custom = init_ops.identity_initializer(gain=0.9, dtype=dtype)
            with self.test_session(graph=ops.Graph(), use_gpu=True):
                self.assertAllClose(init_default(shape).eval(), np.eye(*shape))
            with self.test_session(graph=ops.Graph(), use_gpu=True):
                self.assertAllClose(init_custom(shape).eval(), np.eye(*shape) * 0.9)

    def testPartitions(self):
        # The initializer must still produce a global identity when the
        # variable is split by a partitioner.
        shape = (10, 10)
        init = init_ops.identity_initializer()
        partitioner = partitioned_variables.variable_axis_size_partitioner(1)
        with self.test_session(graph=ops.Graph(), use_gpu=True):
            with variable_scope.variable_scope(
                    "foo", partitioner=partitioner, initializer=init):
                v = array_ops.identity(variable_scope.get_variable("bar", shape=shape))
            variables.global_variables_initializer().run()
            self.assertAllClose(v.eval(), np.eye(*shape))
# Standard TensorFlow test harness entry point: run all test cases above.
if __name__ == "__main__":
    test.main()
|
|
# General Utility Methods for Algorithms
import random as rand
import numpy as np
import numpy.matlib
import matplotlib.pyplot as pyplot
import matplotlib.colors as plotcolors
# James
# James
def multiply_matrix(matrix_1, matrix_2):
    """Multiply two 2-D numpy arrays cell by cell.

    Each output entry is the dot product of a row of ``matrix_1`` with a
    column of ``matrix_2``. Returns a new float array, or ``None`` when the
    inner dimensions do not agree.
    """
    rows, inner = matrix_1.shape
    if inner != matrix_2.shape[0]:
        return None
    cols = matrix_2.shape[1]
    product = np.empty((rows, cols), dtype=float)
    for r in range(rows):
        left_row = matrix_1[r]
        for c in range(cols):
            product[r, c] = left_row.dot(matrix_2[:, c])
    return product
# Emeke
# works n x m matrices
def multiply_matrix2(matrix_1, matrix_2):
    """Multiply two n x m matrices, returning a ``numpy.matrix`` result.

    Fix: ``numpy.matlib`` was removed in NumPy 2.0; ``np.asmatrix`` over a
    plain ``np.empty`` buffer produces the same matrix-typed result without
    the removed module. Inner dimensions are assumed to agree (as before).
    """
    product = np.asmatrix(np.empty((matrix_1.shape[0], matrix_2.shape[1])))
    for i in range(product.shape[0]):
        for j in range(product.shape[1]):
            product[i, j] = matrix_1[i, :].dot(matrix_2[:, j])
    return product
# Seth
def lu_fact(matrix):
    """Doolittle LU factorization of a square matrix, without pivoting.

    Returns ``(L, U, error)`` where ``L`` is unit lower triangular, ``U`` is
    upper triangular, and ``error`` is the max-abs deviation of ``L*U`` from
    ``matrix`` as computed by :func:`matrix_error`. A zero on U's diagonal
    causes a division by zero (no pivoting is performed).

    Fix: replaced Python-2-only ``xrange`` with ``range`` (same iteration
    behavior, Python 3 compatible).
    """
    size = matrix.shape[0]
    L = np.identity(size, float)
    U = np.ndarray.astype(matrix, float)
    # Gaussian elimination: store each multiplier in L, eliminate in U.
    for row in range(1, size):
        for col in range(0, row):
            L[row][col] = U[row][col] / U[col][col]
            U[row] -= L[row][col] * U[col]
    error = matrix_error(multiply_matrix(L, U), matrix)
    return L, U, error
# Seth
def find_determinant(matrix):
    """Determinant via recursive cofactor (Laplace) expansion along row 0.

    Exponential in the matrix size; intended for the small matrices used in
    this module. Fix: replaced Python-2-only ``xrange`` with ``range``.
    """
    size = matrix.shape[0]
    if size == 1:
        return matrix[0][0]
    answer = 0
    modifier = 1  # alternating cofactor sign (+, -, +, ...)
    for i in range(size):
        element = matrix[0][i]
        # Build the minor: drop row 0 and column i.
        newMatrix = np.zeros((size - 1, size - 1))
        for row in range(1, size):
            newCol = 0
            for col in range(size):
                if col != i:
                    newMatrix[row - 1][newCol] = matrix[row][col]
                    newCol += 1
        answer += element * modifier * find_determinant(newMatrix)
        modifier *= -1
    return answer
# Seth
# Seth
def vector_error(array):
    """Return the largest absolute value in ``array`` (infinity norm).

    Returns ``None`` for an empty input.
    """
    if not len(array):
        return None
    largest = np.absolute(array[0])
    for element in array:
        magnitude = np.absolute(element)
        if magnitude > largest:
            largest = magnitude
    return largest
# Seth
# Seth
def getDiag(matrix):
    """Return a copy of ``matrix`` with every off-diagonal entry zeroed."""
    result = np.copy(matrix)
    row_idx, col_idx = np.indices(result.shape)
    result[row_idx != col_idx] = 0
    return result
# Seth
# Seth
def getLowerDiag(matrix):
    """Return a new matrix keeping only the lower triangle (incl. diagonal).

    Entries above the main diagonal are zeroed; the input is not modified.
    """
    return np.tril(np.copy(matrix))
# James
# James
def matrix_trace(matrix):
    """Sum of the main-diagonal entries; handles rectangular matrices by
    walking only the shorter dimension."""
    total = 0
    for index in range(min(matrix.shape)):
        total += matrix[index, index]
    return total
# James
# James
def vector_norm(vector):
    """Euclidean (L2) norm of ``vector``: sqrt of the sum of squares."""
    return np.sqrt(sum(component ** 2 for component in vector))
# James
# if [ a b c
# d e f
# g h i ] , cut_size = 1
# return [ e f
# h i ] , will return same matrix of cut_size = 0
#
def get_sub_matrix(matrix, cut_size=1):
    """Return a new float matrix with the first ``cut_size`` rows and
    columns removed.

    For ``cut_size <= 0`` the original matrix object is returned unchanged,
    e.g. for [[a b c], [d e f], [g h i]] and cut_size=1 the result is
    [[e f], [h i]].
    """
    if cut_size <= 0:
        return matrix
    m, n = matrix.shape
    # Allocate explicitly so an oversized cut_size raises, as before.
    trimmed = np.empty((m - cut_size, n - cut_size))
    trimmed[:, :] = matrix[cut_size:, cut_size:]
    return trimmed
# James
# James
def matrix_error(matrix, original_matrix):
    """Max absolute entry-wise difference between two same-shaped matrices.

    Returns ``None`` when the shapes differ.
    """
    if matrix.shape != original_matrix.shape:
        return None
    difference = matrix - original_matrix
    # Manual scan (course rules restricted the allowed built-ins here).
    worst = abs(difference[0, 0])
    for row in difference:
        for entry in row:
            deviation = abs(entry)
            if deviation > worst:
                worst = deviation
    return worst
# James
# This beautiful code took 3.5 hours T_T
def matrix_cofactor(matrix):
    """Cofactor matrix: each entry is the signed determinant of the minor
    obtained by deleting that entry's row and column (checkerboard signs)."""
    rows, cols = matrix.shape
    result = np.empty([rows, cols], dtype=float)
    for r in range(rows):
        for c in range(cols):
            minor = np.delete(np.delete(matrix, c, 1), r, 0)
            sign = 1.0 if (r + c) % 2 == 0 else -1.0
            result[r, c] = sign * find_determinant(minor)
    return result
# James
def matrix_inverse(matrix):
    # Adjugate-formula inverse: A^-1 = adj(A) / det(A), where the adjugate is
    # the transposed cofactor matrix. No singularity guard: a zero determinant
    # raises ZeroDivisionError (int) or yields inf/nan entries (float).
    return 1.0 / find_determinant(matrix) * matrix_cofactor(matrix).T
# Emeke
def matrix_inverse_22(matrix):
    """Invert a 2x2 matrix via the adjugate formula.

    Returns ``None`` when the matrix is singular (det == 0).

    Fix: the adjugate of [[a, b], [c, d]] is [[d, -b], [-c, a]] — the
    diagonal entries must be swapped. The original kept ``a`` and ``d`` in
    place, which is wrong whenever a != d. The zero-det guard now also runs
    before building the adjugate.
    """
    det = matrix[0, 0] * matrix[1, 1] - matrix[0, 1] * matrix[1, 0]
    if det == 0:
        return None
    adjugate = np.matrix([[matrix[1, 1], -matrix[0, 1]],
                          [-matrix[1, 0], matrix[0, 0]]])
    return (1.0 / det) * adjugate
""" Emeke
Generates 1000 random 2x2 matrices
Create a series of randomly generated matrices with uniformly distributed entries within a given range
shape (tuple(int, int)): Desired shape of matrices.
number (int): Requested number of matrices.
lower (Real): Lower bound for random range.
upper (Real): Upper bound for random range.
"""
def random_matrices(shape, number, lower, upper):
series = tuple()
while len(series) < number:
mat = np.matlib.empty(shape)
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
mat[i, j] = rand.uniform(lower, upper)
series += (mat,)
return series
# Emeke
def plot_colored(data, colors, color_label, xlabel, ylabel, title, xscale, yscale, cmap, fname):
    # Scatter-plot ``data`` (a pair [xs, ys]) colored by ``colors`` and save
    # the figure to ``fname``. ``cmap`` is an optional list of colors used to
    # build a linear-segmented colormap (None = matplotlib default);
    # ``xscale``/``yscale`` are passed to xlim/ylim as (min, max) axis limits.
    pyplot.clf()
    # Create colormap object if needed
    colormap = None if cmap is None else plotcolors.LinearSegmentedColormap.from_list('cplot', cmap)
    # Plot data
    pyplot.scatter(data[0], data[1], s=40, c=colors, cmap=colormap)
    # Create titles and legend, then render
    pyplot.colorbar().set_label(color_label)
    pyplot.title(title).set_size('xx-large')
    pyplot.xlabel(xlabel)
    pyplot.ylabel(ylabel)
    pyplot.xlim(xscale)
    pyplot.ylim(yscale)
    pyplot.savefig(fname)
|
|
# -*- coding: utf-8 -*-
import collections

import ecoxipy
from ecoxipy import _unicode
from ecoxipy import _helpers

from ._common import _string_repr
class NamespaceNameMixin(object):
    '''\
    Contains functionality implementing `Namespaces in XML
    <http://www.w3.org/TR/REC-xml-names/>`_.
    '''
    # Attribute names used by classes mixing this in, listed so slotted
    # subclasses can merge them into their own ``__slots__``. Members with a
    # ``_v_`` prefix are lazily computed cache values.
    _slots = {'_namespace_prefix', '_local_name',
              '_v_namespace_uri', '_v_namespace_source'}

    def _set_namespace_properties(self, index):
        # Split ``self.name`` into (prefix, local name), cache both, and
        # return the component selected by ``index`` (0=prefix, 1=local).
        components = _helpers.get_qualified_name_components(self.name)
        self._namespace_prefix, self._local_name = components
        return components[index]

    def _clear_namespace_uri(self):
        # Drop the cached namespace URI and unregister from the node that
        # resolved it, so a later lookup recomputes the value.
        if hasattr(self, '_v_namespace_uri'):
            namespace_source = self._v_namespace_source
            if namespace_source is not None:
                namespace_source._remove_namespace_target(self)
            del self._v_namespace_uri
            del self._v_namespace_source

    def _clear_namespace_properties(self):
        # Invalidate all cached name-derived data (used after a rename).
        self._clear_namespace_uri()
        del self._namespace_prefix
        del self._local_name

    @property
    def namespace_prefix(self):
        '''\
        The namespace prefix (the part before ``:``) of the node's name.
        '''
        try:
            return self._namespace_prefix
        except AttributeError:
            # Not computed yet - parse the qualified name lazily.
            return self._set_namespace_properties(0)

    @property
    def local_name(self):
        '''\
        The local name (the part after ``:``) of the node's name.
        '''
        try:
            return self._local_name
        except AttributeError:
            return self._set_namespace_properties(1)

    @property
    def namespace_uri(self):
        '''\
        The namespace URI the :attr:`namespace_prefix` refers to. It is
        :const:`None` if there is no namespace prefix and it is :const:`False`
        if the prefix lookup failed.
        '''
        try:
            return self._v_namespace_uri
        except AttributeError:
            if self.parent is None:
                # Detached node: a prefix cannot be resolved.
                namespace_source = None
                namespace_uri = False
            else:
                if isinstance(self, Attribute):
                    if self.namespace_prefix is None:
                        # Unprefixed attributes belong to no namespace.
                        namespace_source = None
                        namespace_uri = None
                    elif self.namespace_prefix == u'xml':
                        # ``xml`` and ``xmlns`` prefixes are bound by the spec.
                        namespace_source = None
                        namespace_uri = u'http://www.w3.org/XML/1998/namespace'
                    elif self.namespace_prefix == u'xmlns':
                        namespace_source = None
                        namespace_uri = u'http://www.w3.org/2000/xmlns/'
                    else:
                        # Resolve against the attribute's owning element.
                        namespace_source = self.parent.parent
                else:
                    namespace_source = self
                if namespace_source is not None:
                    namespace_source, namespace_uri = namespace_source._get_namespace(
                        self.namespace_prefix)
            if namespace_source is not None:
                # Register so the source can invalidate this cache entry
                # when the namespace declaration changes.
                namespace_source._register_namespace_target(self)
            self._v_namespace_source = namespace_source
            self._v_namespace_uri = namespace_uri
            return namespace_uri
class Attribute(NamespaceNameMixin):
    '''\
    Represents an item of an :class:`Element`'s :class:`Attributes`. It
    inherits from :class:`NamespaceNameMixin` and should not be
    instantiated on itself, rather use :meth:`Attributes.create_attribute`.
    '''
    __slots__ = {'_parent', '_name', '_value', '_check_well_formedness',
                 '_namespace_attribute_prefix'}

    def __init__(self, parent, name, value, check_well_formedness):
        # ``parent`` is the owning :class:`Attributes` collection; ``name``
        # and ``value`` are assumed to already be Unicode (callers convert).
        if check_well_formedness:
            _helpers.enforce_valid_xml_name(name)
        self._parent = parent
        # False = not a namespace declaration; None or a string = this is an
        # ``xmlns`` / ``xmlns:*`` declaration for that prefix.
        self._namespace_attribute_prefix = False
        self._name = name
        self._value = value
        self._check_well_formedness = check_well_formedness
        self._update_namespace_prefix()

    def _set_namespace(self, prefix, value):
        # Propagate a namespace declaration to the owning element; an empty
        # value undeclared the namespace (mapped to None).
        attributes = self.parent
        if len(value) == 0:
            value = None
        if attributes is not None:
            attributes.parent._set_namespace(prefix, value)

    def _remove_namespace(self, prefix):
        attributes = self.parent
        if attributes is not None:
            attributes.parent._remove_namespace(prefix)

    def _update_namespace_uri(self):
        # Re-propagate the value of a namespace-declaration attribute.
        prefix = self._namespace_attribute_prefix
        if prefix is not False:
            self._set_namespace(prefix, self._value)

    def _update_namespace_prefix(self):
        # Determine whether this attribute declares a namespace
        # (``xmlns`` -> default namespace, ``xmlns:foo`` -> prefix ``foo``)
        # and update the element's namespace registry on change.
        name = self._name
        if name == u'xmlns':
            prefix = None
        elif name.startswith(u'xmlns:') and len(name) > 6:
            prefix = name[6:]
            if self._check_well_formedness:
                if prefix == u'xmlns':
                    raise ecoxipy.XMLWellFormednessException(
                        u'The namespace prefix "xmlns" must not be redefined.'
                    )
                if prefix == u'xml':
                    raise ecoxipy.XMLWellFormednessException(
                        u'The namespace prefix "xml" must not be redefined.')
        else:
            prefix = False
        old_prefix = self._namespace_attribute_prefix
        if prefix != old_prefix:
            if old_prefix is not False:
                self._remove_namespace(old_prefix)
            if prefix is not False:
                self._set_namespace(prefix, self._value)
            self._namespace_attribute_prefix = prefix

    @property
    def parent(self):
        '''\
        The parent :class:`Attributes`.
        '''
        try:
            return self._parent
        except AttributeError:
            # The attribute was removed from its collection.
            return None

    @property
    def name(self):
        '''\
        The attribute's name. On setting the value is converted to an
        Unicode string, if there is already another attribute with the
        same name on the :attr:`parent` :class:`Attributes` instance a
        :class:`KeyError` is raised.
        '''
        return self._name

    @name.setter
    def name(self, name):
        name = _unicode(name)
        if name == self._name:
            return
        if self._check_well_formedness:
            _helpers.enforce_valid_xml_name(name)
        if name in self._parent._attributes:
            raise KeyError(
                u'An attribute with name "{}" does already exist in the parent.'.format(
                    name))
        # Re-key the parent mapping, then recompute cached namespace data.
        del self._parent._attributes[self._name]
        self._parent._attributes[name] = self
        self._name = name
        self._clear_namespace_properties()
        self._update_namespace_prefix()

    @property
    def value(self):
        '''\
        The attribute's value.
        '''
        return self._value

    @value.setter
    def value(self, value):
        value = _unicode(value)
        if value == self._value:
            return
        # Bug fix: assign the new value *before* re-propagating namespace
        # declarations. The original called _update_namespace_uri() first,
        # which propagated the stale (old) value to the namespace registry.
        self._value = value
        self._update_namespace_uri()

    def __repr__(self):
        return 'ecoxipy.pyxom.Attribute({}, {})'.format(
            _string_repr(self._name), _string_repr(self._value))

    def __eq__(self, other):
        return (isinstance(other, Attribute)
                and self.name == other.name
                and self.value == other.value)

    def __ne__(self, other):
        return (not(isinstance(other, Attribute))
                or self.name != other.name
                or self.value != other.value)

    def __hash__(self):
        # Identity-based hash (equal attributes may hash differently),
        # matching the original behavior.
        return object.__hash__(self)
class Attributes(collections.Mapping):
    '''\
    This mapping, containing :class:`Attribute` instances identified by their
    names, represents attributes of an :class:`Element`. It should not
    be instantiated on itself.
    '''
    # NOTE(review): ``collections.Mapping`` was removed in Python 3.10 in
    # favor of ``collections.abc.Mapping``; kept as-is here to preserve the
    # module's Python 2 compatibility - confirm the supported versions.
    __slots__ = {'_parent', '_attributes', '_check_well_formedness'}
    __slots__.update(NamespaceNameMixin._slots)

    def __init__(self, parent, attributes, check_well_formedness):
        # ``attributes`` is a name -> value mapping used to seed the
        # collection; each entry becomes an :class:`Attribute`.
        self._parent = parent
        self._attributes = {}
        for name in attributes:
            value = attributes[name]
            self._attributes[name] = Attribute(self, name, value,
                                               check_well_formedness)
        self._check_well_formedness = check_well_formedness

    def __len__(self):
        return len(self._attributes)

    def __iter__(self):
        return self._attributes.__iter__()

    def __contains__(self, name):
        name = _unicode(name)
        return name in self._attributes

    def __getitem__(self, name):
        name = _unicode(name)
        return self._attributes[name]

    def __delitem__(self, name):
        name = _unicode(name)
        item = self._attributes[name]
        # Invalidate cached namespace data before detaching the attribute.
        item._clear_namespace_uri()
        del self._attributes[name]
        del item._parent

    def create_attribute(self, name, value):
        '''\
        Create a new :class:`Attribute` as part of the instance.

        :param name: the attribute's name
        :param value: the attribute's value
        :returns: the created attribute
        :rtype: :class:`Attribute`
        :raises KeyError: If an attribute with ``name`` already exists in the
            instance.
        '''
        name = _unicode(name)
        if name in self._attributes:
            raise KeyError(
                u'An attribute with name "{}" already exists.'.format(name))
        value = _unicode(value)
        attribute = Attribute(self, name, value, self._check_well_formedness)
        self._attributes[name] = attribute
        return attribute

    def add(self, attribute):
        '''\
        Add an attribute to the instance. If the attribute is contained in an
        :class:`Attributes` instance it is first removed from that.

        :param attribute: the attribute to add
        :type attribute: :class:`Attribute`
        :raises ValueError: if attribute is no :class:`Attribute` instance
        :raises KeyError: If an attribute with the ``attribute``'s name
            already exists in the instance.
        '''
        if not isinstance(attribute, Attribute):
            raise ValueError(
                'The parameter "attribute" must be an "ecoxipy.pyxom.Attribute" instance.')
        if attribute.name in self._attributes:
            # Bug fix: the message referenced an undefined local ``name``
            # (NameError on this path); use ``attribute.name``.
            raise KeyError(
                u'An attribute with name "{}" already exists.'.format(
                    attribute.name))
        parent = attribute.parent
        attribute._clear_namespace_uri()
        if parent is not None:
            parent.remove(attribute)
        self._attributes[attribute.name] = attribute
        attribute._parent = self

    def remove(self, attribute):
        '''\
        Remove the given ``attribute``.

        :param attribute: the attribute to remove
        :type attribute: :class:`Attribute`
        :raises KeyError: If no attribute with the name of ``attribute``
            is contained in the instance.
        :raises ValueError: If there is an attribute with the name of
            ``attribute`` contained, but it is not ``attribute``.
        '''
        self_attribute = self._attributes[attribute.name]
        if self_attribute is not attribute:
            raise KeyError(
                'The parameter "attribute" must be contained within object.')
        del self[attribute.name]

    @property
    def parent(self):
        '''\
        The parent :class:`Element`.
        '''
        return self._parent

    def __repr__(self):
        return 'ecoxipy.pyxom.Attributes{}'.format(
            ', '.join([repr(attribute) for attribute in self.values()]))

    def to_dict(self):
        '''\
        Creates a :class:`dict` from the instance's :class:`Attribute`
        instances. The keys are the attribute's names, identifying the
        attribute's values.
        '''
        return {
            attribute.name: attribute.value
            for attribute in self.values()
        }

    def _attribute_value_mapping(self):
        # Lightweight read-only name -> value view (no Attribute objects).
        return _AttributeValueMapping(self)

    def __hash__(self):
        return object.__hash__(self)
class _AttributeValueMapping(collections.Mapping):
    '''\
    Read-only mapping view over an :class:`Attributes` instance exposing
    attribute names mapped to their plain string values.
    '''
    __slots__ = {'_attributes'}

    def __init__(self, attributes):
        self._attributes = attributes

    def __getitem__(self, name):
        return self._attributes[name].value

    def __len__(self):
        return len(self._attributes)

    def __iter__(self):
        return iter(self._attributes)

    def items(self):
        return ((name, self._attributes[name].value)
                for name in self._attributes)
del collections
|
|
import copy
import datetime
import unittest
from test import libvtd_test
import libvtd.node
from third_party import six
class TestNode(unittest.TestCase):
    """Test various "node" types (Project, Next Action, Comment, etc.)"""

    def testParsingDueDates(self):
        """ Check that valid due dates get parsed, and invalid ones remain."""
        n = libvtd.node.NextAction()
        n.AbsorbText('Test VTD <2013-06-31 <2013-06-29 18:59')
        # The invalid date June 31 should remain in the text.
        self.assertEqual('Test VTD <2013-06-31', n.text)
        # The valid datetime should have been parsed as the due date.
        self.assertEqual(datetime.datetime(2013, 6, 29, 18, 59), n.due_date)

    def testNestingUnderFile(self):
        """Check that any non-File Node can be nested under a File."""
        # AddChild returns a boolean indicating whether nesting is allowed.
        f = libvtd.node.File()
        self.assertFalse(f.AddChild(libvtd.node.File()))
        self.assertTrue(f.AddChild(libvtd.node.Section()))
        self.assertTrue(f.AddChild(libvtd.node.Project()))
        self.assertTrue(f.AddChild(libvtd.node.NextAction()))
        self.assertTrue(f.AddChild(libvtd.node.Comment()))

    def testNestingUnderSection(self):
        """Check that any non-File node can be nested under a Section."""
        s = libvtd.node.Section()
        # File can never nest under Section.
        self.assertFalse(s.AddChild(libvtd.node.File()))
        # Section can only be added if it's of a higher level.
        self.assertFalse(s.AddChild(libvtd.node.Section(level=s.level)))
        self.assertTrue(s.AddChild(libvtd.node.Section(level=s.level + 1)))
        # Project, NextAction, or Comment can always be added.
        self.assertTrue(s.AddChild(libvtd.node.Project()))
        self.assertTrue(s.AddChild(libvtd.node.NextAction()))
        self.assertTrue(s.AddChild(libvtd.node.Comment()))

    def testNestingUnderProject(self):
        """Check that anything except File or Section can nest under a Project.

        Also check that only sufficiently indented blocks can nest.
        """
        p = libvtd.node.Project()
        # File and Section can never nest under Project.
        self.assertFalse(p.AddChild(libvtd.node.File()))
        self.assertFalse(p.AddChild(libvtd.node.Section()))
        # Project can be added, but only if sufficiently indented.
        self.assertFalse(p.AddChild(libvtd.node.Project(indent=p.indent)))
        self.assertTrue(p.AddChild(libvtd.node.Project(indent=p.indent + 2)))
        # NextAction can be added, but only if sufficiently indented.
        self.assertFalse(p.AddChild(
            libvtd.node.NextAction(indent=p.indent)))
        self.assertTrue(p.AddChild(
            libvtd.node.NextAction(indent=p.indent + 2)))
        # Comment can be added, but only if sufficiently indented.
        self.assertFalse(p.AddChild(libvtd.node.Comment(indent=p.indent)))
        self.assertTrue(p.AddChild(libvtd.node.Comment(indent=p.indent + 2)))

    def testNestingUnderNextAction(self):
        """Check that only Comment can nest under a NextAction."""
        n = libvtd.node.NextAction()
        # File and Section can never nest under Project.
        self.assertFalse(n.AddChild(libvtd.node.File()))
        self.assertFalse(n.AddChild(libvtd.node.Section()))
        # Project cannot be added, regardless of indentation.
        self.assertFalse(n.AddChild(libvtd.node.Project(indent=n.indent)))
        self.assertFalse(n.AddChild(libvtd.node.Project(indent=n.indent + 2)))
        # NextAction cannot be added, regardless of indentation.
        self.assertFalse(n.AddChild(
            libvtd.node.NextAction(indent=n.indent)))
        self.assertFalse(n.AddChild(
            libvtd.node.NextAction(indent=n.indent + 2)))
        # Comment can be added, but only if sufficiently indented.
        self.assertFalse(n.AddChild(libvtd.node.Comment(indent=n.indent)))
        self.assertTrue(n.AddChild(libvtd.node.Comment(indent=n.indent + 2)))

    def testNestingUnderComment(self):
        """Check that only Comment can nest under a Comment."""
        c = libvtd.node.Comment()
        # File and Section can never nest under Project.
        self.assertFalse(c.AddChild(libvtd.node.File()))
        self.assertFalse(c.AddChild(libvtd.node.Section()))
        # Project cannot be added, regardless of indentation.
        self.assertFalse(c.AddChild(libvtd.node.Project(indent=c.indent)))
        self.assertFalse(c.AddChild(libvtd.node.Project(indent=c.indent + 2)))
        # NextAction cannot be added, regardless of indentation.
        self.assertFalse(c.AddChild(
            libvtd.node.NextAction(indent=c.indent)))
        self.assertFalse(c.AddChild(
            libvtd.node.NextAction(indent=c.indent + 2)))
        # Comment can be added, but only if sufficiently indented.
        self.assertFalse(c.AddChild(libvtd.node.Comment(indent=c.indent)))
        self.assertTrue(c.AddChild(libvtd.node.Comment(indent=c.indent + 2)))

    def testAtomicAbsorption(self):
        """Failed call to AbsorbText must leave Node in its original state.
        """
        action = libvtd.node.File.CreateNodeFromLine('  @ Action')
        test_action = copy.deepcopy(action)
        # This text should be invalid, because it's less indented than the
        # parent text.
        self.assertFalse(test_action.AbsorbText('@p:1 @work @t:15 to do'))
        self.maxDiff = None
        # Kind of ugly, but assertDictEqual fails for _diff_functions because
        # the (otherwise identical) functions are bound to different objects.
        keys_to_disregard = ['_diff_functions']
        for key in keys_to_disregard:
            test_action.__dict__.pop(key)
            action.__dict__.pop(key)
        self.assertDictEqual(test_action.__dict__, action.__dict__)

    def testAbsorption(self):
        # Exercises AbsorbText continuation rules for each node type.
        # File should not ever absorb text; its text should only come from the
        # file contents.
        self.assertFalse(libvtd.node.File(None).AbsorbText('More file text!'))
        # Section should absorb text only when new.
        section = libvtd.node.Section(3)
        self.assertTrue(section.AbsorbText('To do later'))
        self.assertEqual('To do later', section.text)
        self.assertFalse(section.AbsorbText('extra text'))
        # Project can absorb anything indented by enough (and blank lines).
        project = libvtd.node.File.CreateNodeFromLine('  # Project which')
        self.assertTrue(project.AbsorbText(''))
        self.assertFalse(project.AbsorbText('  is NOT indented enough'))
        self.assertTrue(project.AbsorbText('    IS indented enough'))
        self.assertEqual('Project which\n\nIS indented enough', project.text)
        # NextAction can also absorb anything indented by enough.
        action = libvtd.node.File.CreateNodeFromLine('  @ NextAction which')
        self.assertTrue(action.AbsorbText(''))
        self.assertFalse(action.AbsorbText('  is NOT indented enough'))
        self.assertTrue(action.AbsorbText('    IS indented enough'))
        self.assertEqual('NextAction which\n\nIS indented enough', action.text)
        # Comment can also absorb anything indented by enough.
        comment = libvtd.node.File.CreateNodeFromLine('  * Comment which')
        self.assertTrue(comment.AbsorbText(''))
        self.assertFalse(comment.AbsorbText('  is NOT indented enough'))
        self.assertTrue(comment.AbsorbText('    IS indented enough'))
        self.assertEqual('Comment which\n\nIS indented enough', comment.text)

    def testDateStatesNoDates(self):
        """Test DateStates for node with no dates."""
        action = libvtd.node.NextAction()
        self.assertEqual(libvtd.node.DateStates.ready,
                         action.DateState(datetime.datetime.now()))

    def testDateStatesDefaultReadyDate(self):
        """Test DateStates for node with due date, and implicit ready date."""
        action = libvtd.node.NextAction()
        self.assertTrue(action.AbsorbText(
            '@ test default ready date <2013-08-27'))
        # Tasks are 'ready' (i.e., not yet 'due') until the end of the day on
        # the ready date. The default ready date is 1 day before the due date.
        self.assertEqual(libvtd.node.DateStates.ready,
                         action.DateState(datetime.datetime(2013, 8, 26, 23)))
        # Tasks become due as soon as the ready date ends, and stay due until
        # the end of the day on the due date.
        self.assertEqual(libvtd.node.DateStates.due,
                         action.DateState(datetime.datetime(2013, 8, 27, 1)))
        self.assertEqual(libvtd.node.DateStates.due,
                         action.DateState(datetime.datetime(2013, 8, 27, 23)))
        # Anything after the due date is late.
        self.assertEqual(libvtd.node.DateStates.late,
                         action.DateState(datetime.datetime(2013, 8, 28, 1)))

    def testDateStatesExplicitReadyDate(self):
        """Test DateStates with explicit ready date."""
        # The '(2)' suffix sets the ready date 2 days before the due date.
        action = libvtd.node.NextAction()
        self.assertTrue(action.AbsorbText(
            '@ test explicit ready date <2013-08-27(2)'))
        # Tasks stay ready until the end of the day on the ready date.
        self.assertEqual(libvtd.node.DateStates.ready,
                         action.DateState(datetime.datetime(2013, 8, 25, 23)))
        # Tasks become due as soon as the ready date begins, and stay due until
        # the end of the day on the due date.
        self.assertEqual(libvtd.node.DateStates.due,
                         action.DateState(datetime.datetime(2013, 8, 26, 1)))
        self.assertEqual(libvtd.node.DateStates.due,
                         action.DateState(datetime.datetime(2013, 8, 27, 23)))
        # Anything after the due date is late.
        self.assertEqual(libvtd.node.DateStates.late,
                         action.DateState(datetime.datetime(2013, 8, 28, 1)))

    def testDateStatesVisibleDate(self):
        """Test DateStates with explicit ready date."""
        action = libvtd.node.NextAction()
        self.assertTrue(action.AbsorbText('@ test visible date >2013-08-20'))
        # Anything before the visible date is invisible.
        self.assertEqual(libvtd.node.DateStates.invisible,
                         action.DateState(datetime.datetime(2013, 8, 19, 23)))
        # Tasks become ready as soon as the visible date begins.
        self.assertEqual(libvtd.node.DateStates.ready,
                         action.DateState(datetime.datetime(2013, 8, 20, 1)))
class TestFile(unittest.TestCase):
    """Test the File class."""

    def testParseSimpleSection(self):
        """Parse a line corresponding to a section"""
        section = libvtd.node.File.CreateNodeFromLine('= A section =')
        self.assertEqual('A section', section.text)
        # A single '=' pair denotes a level-1 section.
        self.assertEqual(1, section.level)

    def testParseSectionWithAttributes(self):
        """Parse a section with default priority and contexts."""
        section = libvtd.node.File.CreateNodeFromLine(
            '== @@Home @p:3 relaxing @t:20 ==')
        # The time-tag does *not* get filtered out, because that only works for
        # NextAction objects.
        self.assertEqual('Home relaxing @t:20', section.text)
        self.assertEqual(2, section.level)
        six.assertCountEqual(self, ['home'], section.contexts)
        self.assertEqual(3, section.priority)

    def testParseNextAction(self):
        # A '@' line with priority, contexts, time estimate, visible ('>')
        # and due ('<') dates; all markup is parsed out of the text.
        action = libvtd.node.File.CreateNodeFromLine(
            '  @ @p:1 @@Read @t:15 chapter 8 >2013-06-28 13:00 '
            '@home <2013-07-05 22:30')
        self.assertEqual('NextAction', action.__class__.__name__)
        self.assertEqual('Read chapter 8', action.text)
        six.assertCountEqual(self, ['read', 'home'], action.contexts)
        self.assertEqual(datetime.datetime(2013, 6, 28, 13),
                         action.visible_date)
        self.assertEqual(datetime.datetime(2013, 7, 5, 22, 30),
                         action.due_date)
        self.assertEqual(2, action.indent)
        self.assertEqual(1, action.priority)
        self.assertEqual(15, action.minutes)

    def testFileLineNumbers(self):
        # Source() reports (file name, 1-based line number) for each node.
        with libvtd_test.TempInput([
            '= Section =',
            '',
            '# Project',
            '  @ Action',
        ]) as file_name:
            file = libvtd.node.File(file_name)
            self.assertTupleEqual((file_name, 1), file.Source())
            section = file.children[0]
            self.assertTupleEqual((file_name, 1), section.Source())
            project = section.children[0]
            self.assertTupleEqual((file_name, 3), project.Source())
            action = project.children[0]
            self.assertTupleEqual((file_name, 4), action.Source())
class TestRecurringActions(unittest.TestCase):
"""Test various kinds of recurring actions."""
def testDayRecurSimple(self):
    """Test a simple action which recurs every day."""
    recur = libvtd.node.NextAction()
    self.assertTrue(recur.AbsorbText("Check today's calendar EVERY day"))
    self.assertEqual("Check today's calendar", recur.text)
    self.assertTrue(recur.recurring)
    # With no LASTDONE stamp yet, the action is in the 'new' state.
    self.assertEqual(libvtd.node.DateStates.new,
                     recur.DateState(datetime.datetime.now()))
    # After it's been done at least once, its visible, due, and late dates
    # should be set accordingly.
    self.assertTrue(recur.AbsorbText("  (LASTDONE 2013-09-01 16:14)"))
    self.assertEqual(libvtd.node.DateStates.invisible,
                     recur.DateState(datetime.datetime(2013, 9, 1, 23)))
    self.assertEqual(libvtd.node.DateStates.due,
                     recur.DateState(datetime.datetime(2013, 9, 2, 1)))
    self.assertEqual(libvtd.node.DateStates.due,
                     recur.DateState(datetime.datetime(2013, 9, 2, 23)))
    self.assertEqual(libvtd.node.DateStates.late,
                     recur.DateState(datetime.datetime(2013, 9, 3, 1)))
def testDayRecurDifferentEndTime(self):
    """Test action which recurs every day, where 'days' begin at 9am."""
    # The '[9:00]' marker shifts the day boundary from midnight to 09:00.
    recur = libvtd.node.NextAction()
    self.assertTrue(recur.AbsorbText(
        "Pick out clothes EVERY day [9:00] (LASTDONE 2013-09-01 08:30)"))
    self.assertEqual("Pick out clothes", recur.text)
    self.assertEqual(libvtd.node.DateStates.invisible,
                     recur.DateState(datetime.datetime(2013, 9, 1, 8, 59)))
    self.assertEqual(libvtd.node.DateStates.due,
                     recur.DateState(datetime.datetime(2013, 9, 1, 9, 1)))
    self.assertEqual(libvtd.node.DateStates.due,
                     recur.DateState(datetime.datetime(2013, 9, 2, 8, 59)))
    self.assertEqual(libvtd.node.DateStates.late,
                     recur.DateState(datetime.datetime(2013, 9, 2, 9, 1)))
def testDayRecurDifferentStartAndEndTime(self):
    """Test action which recurs every day from 5pm to 9am."""
    # '[17:00 - 9:00]' defines a window: visible from 17:00, late after 9:00.
    recur = libvtd.node.NextAction()
    self.assertTrue(recur.AbsorbText(
        "Pick out clothes EVERY day [17:00 - 9:00] " +
        "(LASTDONE 2013-09-01 08:30)"))
    self.assertEqual("Pick out clothes", recur.text)
    self.assertEqual(libvtd.node.DateStates.invisible,
                     recur.DateState(datetime.datetime(2013, 9, 1, 16)))
    self.assertEqual(libvtd.node.DateStates.due,
                     recur.DateState(datetime.datetime(2013, 9, 1, 18)))
    self.assertEqual(libvtd.node.DateStates.due,
                     recur.DateState(datetime.datetime(2013, 9, 2, 8)))
    self.assertEqual(libvtd.node.DateStates.late,
                     recur.DateState(datetime.datetime(2013, 9, 2, 10)))
def testDayRecurMultipleDays(self):
"""Test action which occurs every 3 days."""
recur = libvtd.node.NextAction()
self.assertTrue(recur.AbsorbText(
'Shave EVERY 3 days (LASTDONE 2013-09-01 08:30)'))
self.assertEqual('Shave', recur.text)
self.assertEqual(libvtd.node.DateStates.invisible,
recur.DateState(datetime.datetime(2013, 9, 3, 23)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2013, 9, 4, 1)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2013, 9, 4, 23)))
self.assertEqual(libvtd.node.DateStates.late,
recur.DateState(datetime.datetime(2013, 9, 5, 1)))
def testDayRecurRange(self):
"""Test action which recurs within a given range of days."""
recur = libvtd.node.NextAction()
self.assertTrue(recur.AbsorbText(
'Check spare TP in bathrooms EVERY 3-5 days ' +
'(LASTDONE 2013-09-01 08:30)'))
self.assertEqual('Check spare TP in bathrooms', recur.text)
self.assertEqual(libvtd.node.DateStates.invisible,
recur.DateState(datetime.datetime(2013, 9, 3, 23)))
self.assertEqual(libvtd.node.DateStates.ready,
recur.DateState(datetime.datetime(2013, 9, 4, 1)))
self.assertEqual(libvtd.node.DateStates.ready,
recur.DateState(datetime.datetime(2013, 9, 5, 23)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2013, 9, 6, 1)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2013, 9, 6, 23)))
self.assertEqual(libvtd.node.DateStates.late,
recur.DateState(datetime.datetime(2013, 9, 7, 1)))
def testWeekRecurSimple(self):
"""Test a simple action which recurs every week."""
recur = libvtd.node.NextAction()
self.assertTrue(recur.AbsorbText(
'Call parents EVERY week (LASTDONE 2013-09-01 08:30)'))
self.assertEqual('Call parents', recur.text)
self.assertEqual(libvtd.node.DateStates.invisible,
recur.DateState(datetime.datetime(2013, 9, 7, 23)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2013, 9, 8, 1)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2013, 9, 14, 23)))
self.assertEqual(libvtd.node.DateStates.late,
recur.DateState(datetime.datetime(2013, 9, 15, 1)))
def testWeekRecurDueDate(self):
"""Recurs every 1-2 weeks; change boundary so we don't split weekend.
Also checks that the due date falls on the *end* of the day, if no time
is specified.
"""
recur = libvtd.node.NextAction()
self.assertTrue(recur.AbsorbText(
'Call parents EVERY 1-2 weeks[Sun] (LASTDONE 2013-09-03 08:30)'))
self.assertEqual('Call parents', recur.text)
self.assertEqual(libvtd.node.DateStates.invisible,
recur.DateState(datetime.datetime(2013, 9, 8, 23)))
self.assertEqual(libvtd.node.DateStates.ready,
recur.DateState(datetime.datetime(2013, 9, 9, 1)))
self.assertEqual(libvtd.node.DateStates.ready,
recur.DateState(datetime.datetime(2013, 9, 15, 23)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2013, 9, 16, 1)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2013, 9, 22, 23)))
self.assertEqual(libvtd.node.DateStates.late,
recur.DateState(datetime.datetime(2013, 9, 23, 1)))
def testWeekRecurTrickyDateBoundaries(self):
"""Check that visible date is start of day; due date is end of day."""
recur = libvtd.node.NextAction()
self.assertTrue(recur.AbsorbText(
'Call parents EVERY 1-2 weeks[Sat - Sun] ' +
'(LASTDONE 2013-09-03 08:30)'))
self.assertEqual('Call parents', recur.text)
self.assertEqual(libvtd.node.DateStates.invisible,
recur.DateState(datetime.datetime(2013, 9, 6, 23)))
self.assertEqual(libvtd.node.DateStates.ready,
recur.DateState(datetime.datetime(2013, 9, 7, 1)))
self.assertEqual(libvtd.node.DateStates.ready,
recur.DateState(datetime.datetime(2013, 9, 8, 23)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2013, 9, 14, 1)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2013, 9, 15, 23)))
self.assertEqual(libvtd.node.DateStates.late,
recur.DateState(datetime.datetime(2013, 9, 16, 1)))
def testWeekRecurVisibleDate(self):
"""Weekly recurring action with a custom visible date."""
recur = libvtd.node.NextAction()
self.assertTrue(recur.AbsorbText(
'Take out garbages EVERY week [Monday 12:00 - Tuesday 7:00] ' +
'(LASTDONE 2013-09-09 21:30)'))
self.assertEqual('Take out garbages', recur.text)
self.assertEqual(libvtd.node.DateStates.invisible,
recur.DateState(datetime.datetime(2013, 9, 16, 11)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2013, 9, 16, 13)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2013, 9, 17, 6)))
self.assertEqual(libvtd.node.DateStates.late,
recur.DateState(datetime.datetime(2013, 9, 17, 8)))
def testWeekRecurVisibleDateDoneLate(self):
"""
Actions done late (but before vis-date) should count for previous week.
Identical to previous test, except it's done late; this shouldn't
change the due dates at all.
"""
recur = libvtd.node.NextAction()
self.assertTrue(recur.AbsorbText(
'Take out garbages EVERY week [Monday 12:00 - Tuesday 7:00] ' +
'(LASTDONE 2013-09-10 07:30)'))
self.assertEqual('Take out garbages', recur.text)
self.assertEqual(libvtd.node.DateStates.invisible,
recur.DateState(datetime.datetime(2013, 9, 16, 11)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2013, 9, 16, 13)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2013, 9, 17, 6)))
self.assertEqual(libvtd.node.DateStates.late,
recur.DateState(datetime.datetime(2013, 9, 17, 8)))
def testMonthRecurSimple(self):
"""Simple task which recurs every month."""
recur = libvtd.node.NextAction()
self.assertTrue(recur.AbsorbText(
'Scrub toilets EVERY month (LASTDONE 2013-09-10 07:30)'))
self.assertEqual('Scrub toilets', recur.text)
self.assertEqual(libvtd.node.DateStates.invisible,
recur.DateState(datetime.datetime(2013, 9, 30, 23)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2013, 10, 1, 1)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2013, 10, 31, 23)))
self.assertEqual(libvtd.node.DateStates.late,
recur.DateState(datetime.datetime(2013, 11, 1, 1)))
def testMonthRecurTrickyDateBoundaries(self):
"""Check that visible date is start of day; due date is end of day."""
recur = libvtd.node.NextAction()
self.assertTrue(recur.AbsorbText(
'Clean out filing cabinets EVERY 4-6 months [4 - 15] ' +
'(LASTDONE 2013-09-08 21:00)'))
self.assertEqual(libvtd.node.DateStates.invisible,
recur.DateState(datetime.datetime(2013, 1, 3, 23)))
self.assertEqual(libvtd.node.DateStates.ready,
recur.DateState(datetime.datetime(2014, 1, 4, 1)))
self.assertEqual(libvtd.node.DateStates.ready,
recur.DateState(datetime.datetime(2014, 2, 15, 23)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2014, 2, 16, 1)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2014, 3, 15, 23)))
self.assertEqual(libvtd.node.DateStates.late,
recur.DateState(datetime.datetime(2014, 3, 16, 1)))
def testMonthRecurNegativeDates(self):
"""Negative dates go from the end of the month."""
recur = libvtd.node.NextAction()
self.assertTrue(recur.AbsorbText(
'Do budget for new month EVERY month [-3 - 0] ' +
'(LASTDONE 2014-02-08 22:00)'))
self.assertEqual(libvtd.node.DateStates.invisible,
recur.DateState(datetime.datetime(2013, 2, 24, 23)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2014, 2, 25, 1)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2014, 2, 28, 23)))
self.assertEqual(libvtd.node.DateStates.late,
recur.DateState(datetime.datetime(2014, 3, 1, 1)))
def testMonthRecurVisibleDueTimes(self):
"""Monthly recurring tasks with due times/visible times."""
recur = libvtd.node.NextAction()
self.assertTrue(recur.AbsorbText(
'Do budget for new month EVERY month [-3 8:00 - 0 18:00] ' +
'(LASTDONE 2014-02-08 22:00)'))
self.assertEqual(libvtd.node.DateStates.invisible,
recur.DateState(datetime.datetime(2013, 2, 25, 7)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2014, 2, 25, 9)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2014, 2, 28, 17)))
self.assertEqual(libvtd.node.DateStates.late,
recur.DateState(datetime.datetime(2014, 2, 28, 19)))
def testMonthRecurVisibleDateDoneLate(self):
"""Paying the rent late shouldn't skip next month's rent."""
recur = libvtd.node.NextAction()
self.assertTrue(recur.AbsorbText(
'Pay rent EVERY month [7 - 10] (LASTDONE 2013-09-12 22:00)'))
self.assertEqual(libvtd.node.DateStates.invisible,
recur.DateState(datetime.datetime(2013, 10, 6, 23)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2013, 10, 7, 1)))
self.assertEqual(libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2013, 10, 10, 23)))
self.assertEqual(libvtd.node.DateStates.late,
recur.DateState(datetime.datetime(2013, 10, 11, 1)))
def testMonthlyIsDoneIfLastdoneAtStartOfInterval(self):
"""Guards against chiphogg/vim-vtd#17."""
recur = libvtd.node.NextAction()
self.assertTrue(recur.AbsorbText(
'@ Run EVERY month [1 09:00 - 3] (LASTDONE 2019-09-01 09:00)'))
self.assertEqual(
libvtd.node.DateStates.invisible,
recur.DateState(datetime.datetime(2019, 9, 1, 9, 0)))
self.assertEqual(
libvtd.node.DateStates.invisible,
recur.DateState(datetime.datetime(2019, 10, 1, 8, 59)))
self.assertEqual(
libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2019, 10, 1, 9, 0)))
def testWeeklyIsDoneIfLastdoneAtStartOfInterval(self):
"""Guards against chiphogg/vim-vtd#17."""
recur = libvtd.node.NextAction()
self.assertTrue(recur.AbsorbText(
'@ Run EVERY week [Fri 09:00 - 17:00] (LASTDONE 2015-01-30 09:00)'))
self.assertEqual(
libvtd.node.DateStates.invisible,
recur.DateState(datetime.datetime(2015, 1, 30, 9, 0)))
self.assertEqual(
libvtd.node.DateStates.invisible,
recur.DateState(datetime.datetime(2015, 2, 6, 8, 59)))
self.assertEqual(
libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2015, 2, 6, 9, 0)))
def testDailyIsDoneIfLastdoneAtStartOfInterval(self):
"""Guards against chiphogg/vim-vtd#17."""
recur = libvtd.node.NextAction()
self.assertTrue(recur.AbsorbText(
'@ Run EVERY day [09:00 - 17:00] (LASTDONE 2015-01-30 09:00)'))
self.assertEqual(
libvtd.node.DateStates.invisible,
recur.DateState(datetime.datetime(2015, 1, 30, 9, 0)))
self.assertEqual(
libvtd.node.DateStates.invisible,
recur.DateState(datetime.datetime(2015, 1, 31, 8, 59)))
self.assertEqual(
libvtd.node.DateStates.due,
recur.DateState(datetime.datetime(2015, 1, 31, 9, 0)))
|
|
######################################################################
#
# Copyright (C) 2013
# Associated Universities, Inc. Washington DC, USA,
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
# License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA.
#
# Correspondence concerning VLA Pipelines should be addressed as follows:
# Please register and submit helpdesk tickets via: https://help.nrao.edu
# Postal address:
# National Radio Astronomy Observatory
# VLA Pipeline Support Office
# PO Box O
# Socorro, NM, USA
#
######################################################################
# EVLA pipeline
# For continuum modes (contiguous spws within a baseband)
# May work for other modes as well
#
# Runs on CASA 4.1.0
# 06/13/12 C. Chandler
# 07/20/12 B. Kent
# 02/05/13 C. Chandler
######################################################################
# Change version and date below with each svn commit. Note changes in the
# .../trunk/doc/CHANGELOG.txt and .../trunk/doc/bugs_features.txt files
version = "1.2.0"
svnrevision = '9744'
date = "2013Mar27"
print "Pipeline version "+version+" for use with CASA 4.1.0"
import sys
# Encode CASA's "major.minor.revision" string as 100*major + 10*minor +
# revision for a simple numeric comparison.
# NOTE(review): this encoding breaks if any component reaches 10 -- fine
# for the 4.1.0 requirement below, but not general.
[major,minor,revision] = casadef.casa_version.split('.')
casa_version = 100*int(major)+10*int(minor)+int(revision)
# Refuse to run on anything older than CASA 4.1.0 (requantizer gains etc.
# depend on 4.1 features; see the priorcals stage below).
if casa_version < 410:
    sys.exit("Your CASA version is "+casadef.casa_version+", please re-start using CASA 4.1.0")
# Define location of pipeline
pipepath='/home/ekoch/canfar_scripts/EVLA_pipeline1.2.0/'
#This is the default time-stamped casa log file, in case we
# need to return to it at any point in the script
# Per-stage logs are written under ./logs; create it on first run.
log_dir='logs'
if not os.path.exists(log_dir):
    os.makedirs(log_dir)
# Remember CASA's default time-stamped log file so logprint() can restore
# the main log after posting to a stage-specific one.
maincasalog = casalogger.func_globals['thelogfile']
def logprint(msg, logfileout=maincasalog):
    """Echo *msg* to stdout and post it to both *logfileout* and the main
    CASA log.

    The casalog singleton is temporarily redirected to *logfileout* and
    then pointed back at the main log, so the message lands in both files.
    """
    print (msg)
    casalog.setlogfile(logfileout)
    casalog.post(msg)
    # Restore the main log and duplicate the message there.
    casalog.setlogfile(maincasalog)
    casalog.post(msg)
    return
#Create timing profile list and file if they don't already exist
# (guarded so that re-executing this script keeps accumulated timings).
if 'time_list' not in globals():
    time_list = []
timing_file='logs/timing.log'
# NOTE(review): this handle is never used or closed here -- runtiming()
# reopens the file itself on every "end" event, shadowing this name.
if not os.path.exists(timing_file):
    timelog=open(timing_file,'w')
else:
    timelog=open(timing_file,'a')
def runtiming(pipestate, status):
    '''Record a timing event for a pipeline stage.

    Appends a {pipestate, time, status} record to the global time_list.
    When *status* is "end", the elapsed time between the last two records
    is appended to logs/timing.log.

    NOTE(review): assumes the matching "start" record is the second-to-last
    entry; an unpaired "end" would compute a wrong interval (or raise
    IndexError on an empty history).
    '''
    time_list.append({'pipestate':pipestate, 'time':time.time(), 'status':status})
    if (status == "end"):
        # Local handle; intentionally reopened per event so each interval
        # is flushed to disk even if the pipeline later crashes.
        timelog=open(timing_file,'a')
        timelog.write(pipestate+': '+str(time_list[-1]['time'] - time_list[-2]['time'])+' sec \n')
        timelog.flush()
        timelog.close()
        #with open(maincasalog, 'a') as casalogfile:
        #    tempfile = open('logs/'+pipestate+'.log','r')
        #    casalogfile.write(tempfile.read())
        #    tempfile.close()
        #casalogfile.close()
    return time_list
######################################################################
# The following script includes all the definitions and functions and
# prior inputs needed by a run of the pipeline.
# Each stage is execfile'd into this global namespace, so state set by
# one stage is visible to the next.
time_list=runtiming('startup', 'start')
execfile(pipepath+'EVLA_pipe_startup.py')
time_list=runtiming('startup', 'end')
pipeline_save()
######################################################################
try:
    # Stages run in fixed order; each execfile shares this global namespace.
    ######################################################################
    # IMPORT THE DATA TO CASA
    execfile(pipepath+'EVLA_pipe_import.py')
    ######################################################################
    # HANNING SMOOTH (OPTIONAL, MAY BE IMPORTANT IF THERE IS NARROWBAND RFI)
    execfile(pipepath+'EVLA_pipe_hanning.py')
    ######################################################################
    # GET SOME INFORMATION FROM THE MS THAT WILL BE NEEDED LATER, LIST
    # THE DATA, AND MAKE SOME PLOTS
    execfile(pipepath+'EVLA_pipe_msinfo.py')
    ######################################################################
    # DETERMINISTIC FLAGGING:
    # TIME-BASED: online flags, shadowed data, zeroes, pointing scans, quacking
    # CHANNEL-BASED: end 5% of channels of each spw, 10 end channels at
    # edges of basebands
    execfile(pipepath+'EVLA_pipe_flagall.py')
    ######################################################################
    # PREPARE FOR CALIBRATIONS
    # Fill model columns for primary calibrators
    execfile(pipepath+'EVLA_pipe_calprep.py')
    ######################################################################
    # PRIOR CALIBRATIONS
    # Gain curves, opacities, antenna position corrections,
    # requantizer gains (NB: requires CASA 4.1!). Also plots switched
    # power tables, but these are not currently used in the calibration
    execfile(pipepath+'EVLA_pipe_priorcals.py')
    #*********************************************************************
    # INITIAL TEST CALIBRATIONS USING BANDPASS AND DELAY CALIBRATORS
    execfile(pipepath+'EVLA_pipe_testBPdcals.py')
    #*********************************************************************
    # IDENTIFY AND FLAG BASEBANDS WITH BAD DEFORMATTERS OR RFI BASED ON
    # BP TABLE AMPS
    execfile(pipepath+'EVLA_pipe_flag_baddeformatters.py')
    #*********************************************************************
    # IDENTIFY AND FLAG BASEBANDS WITH BAD DEFORMATTERS OR RFI BASED ON
    # BP TABLE PHASES
    execfile(pipepath+'EVLA_pipe_flag_baddeformattersphase.py')
    ######################################################################
    # Flag spws that have no calibration at this point
    execfile(pipepath+'EVLA_pipe_flag_uncalspws1.py')
    #*********************************************************************
    # FLAG POSSIBLE RFI ON BP CALIBRATOR USING RFLAG
    execfile(pipepath+'EVLA_pipe_checkflag.py')
    ######################################################################
    # DO SEMI-FINAL DELAY AND BANDPASS CALIBRATIONS
    # (semi-final because we have not yet determined the spectral index
    # of the bandpass calibrator)
    execfile(pipepath+'EVLA_pipe_semiFinalBPdcals1.py')
    ######################################################################
    # Use flagdata again on calibrators
    execfile(pipepath+'EVLA_pipe_checkflag_semiFinal.py')
    ######################################################################
    # RE-RUN semiFinalBPdcals.py FOLLOWING rflag
    execfile(pipepath+'EVLA_pipe_semiFinalBPdcals2.py')
    ######################################################################
    # Flag spws that have no calibration at this point
    execfile(pipepath+'EVLA_pipe_flag_uncalspws1b.py')
    ######################################################################
    # DETERMINE SOLINT FOR SCAN-AVERAGE EQUIVALENT
    execfile(pipepath+'EVLA_pipe_solint.py')
    ######################################################################
    # DO TEST GAIN CALIBRATIONS TO ESTABLISH SHORT SOLINT
    execfile(pipepath+'EVLA_pipe_testgains.py')
    #*********************************************************************
    # MAKE GAIN TABLE FOR FLUX DENSITY BOOTSTRAPPING
    # Make a gain table that includes gain and opacity corrections for final
    # amp cal, for flux density bootstrapping
    execfile(pipepath+'EVLA_pipe_fluxgains.py')
    ######################################################################
    # FLAG GAIN TABLE PRIOR TO FLUX DENSITY BOOTSTRAPPING
    # NB: need to break here to flag the gain table interactively, if
    # desired; not included in real-time pipeline
    # execfile(pipepath+'EVLA_pipe_fluxflag.py')
    #*********************************************************************
    # DO THE FLUX DENSITY BOOTSTRAPPING -- fits spectral index of
    # calibrators with a power-law and puts fit in model column
    execfile(pipepath+'EVLA_pipe_fluxboot.py')
    ######################################################################
    # MAKE FINAL CALIBRATION TABLES
    execfile(pipepath+'EVLA_pipe_finalcals.py')
    ######################################################################
    # APPLY ALL CALIBRATIONS AND CHECK CALIBRATED DATA
    execfile(pipepath+'EVLA_pipe_applycals.py')
    ######################################################################
    # Flag spws that have no calibration at this point
    execfile(pipepath+'EVLA_pipe_flag_uncalspws2.py')
    ######################################################################
    # NOW RUN ALL CALIBRATED DATA (INCLUDING TARGET) THROUGH rflag
    execfile(pipepath+'EVLA_pipe_targetflag.py')
    ######################################################################
    # CALCULATE DATA WEIGHTS BASED ON ST. DEV. WITHIN EACH SPW
    execfile(pipepath+'EVLA_pipe_statwt.py')
    ######################################################################
    # COLLECT RELEVANT PLOTS AND TABLES
    execfile(pipepath+'EVLA_pipe_filecollect.py')
    ######################################################################
    # WRITE WEBLOG
    execfile(pipepath+'EVLA_pipe_weblog.py')
######################################################################
# Quit if there have been any exceptions caught:
except KeyboardInterrupt, keyboardException:
    logprint ("Keyboard Interrupt: " + str(keyboardException))
except Exception, generalException:
    logprint ("Exiting script: " + str(generalException))
|
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Some common functionality for beets' test cases."""
from __future__ import division, absolute_import, print_function
import time
import sys
import os
import tempfile
import shutil
import six
import unittest
from contextlib import contextmanager
# Mangle the search path to include the beets sources.
sys.path.insert(0, '..') # noqa
import beets.library
from beets import importer, logging
from beets.ui import commands
from beets import util
import beets
# Make sure the development versions of the plugins are used
import beetsplug
beetsplug.__path__ = [os.path.abspath(
    os.path.join(__file__, '..', '..', 'beetsplug')
)]
# Test resources path.
RSRC = util.bytestring_path(os.path.join(os.path.dirname(__file__), 'rsrc'))
PLUGINPATH = os.path.join(os.path.dirname(__file__), 'rsrc', 'beetsplug')
# Propagate to root logger so nosetest can capture it
log = logging.getLogger('beets')
log.propagate = True
log.setLevel(logging.DEBUG)
# Dummy item creation.
# Counter used by item() to generate unique dummy paths.
_item_ident = 0
# OS feature test.
HAVE_SYMLINK = hasattr(os, 'symlink')
def item(lib=None):
    """Build a dummy Item with fixed tag values and a unique path.

    Each call bumps the module-level ``_item_ident`` counter so that the
    generated ``path`` is unique.  If *lib* is given, the item is added
    to that library before being returned.
    """
    global _item_ident
    _item_ident += 1
    fields = dict(
        title=u'the title',
        artist=u'the artist',
        albumartist=u'the album artist',
        album=u'the album',
        genre=u'the genre',
        composer=u'the composer',
        grouping=u'the grouping',
        year=1,
        month=2,
        day=3,
        track=4,
        tracktotal=5,
        disc=6,
        disctotal=7,
        lyrics=u'the lyrics',
        comments=u'the comments',
        bpm=8,
        comp=True,
        path='somepath{0}'.format(_item_ident),
        length=60.0,
        bitrate=128000,
        format='FLAC',
        mb_trackid='someID-1',
        mb_albumid='someID-2',
        mb_artistid='someID-3',
        mb_albumartistid='someID-4',
        album_id=None,
    )
    i = beets.library.Item(**fields)
    if lib:
        lib.add(i)
    return i
# Counter for dummy albums (parallel to _item_ident for items).
_album_ident = 0
def album(lib=None):
    """Build a dummy Album; add it to *lib* if given.

    Fix: this previously incremented ``_item_ident`` (the *item* counter)
    and left ``_album_ident`` defined-but-unused; it now maintains its own
    counter so creating albums no longer perturbs item path numbering.
    """
    global _album_ident
    _album_ident += 1
    i = beets.library.Album(
        artpath=None,
        albumartist=u'some album artist',
        albumartist_sort=u'some sort album artist',
        albumartist_credit=u'some album artist credit',
        album=u'the album',
        genre=u'the genre',
        year=2014,
        month=2,
        day=5,
        tracktotal=0,
        disctotal=1,
        comp=False,
        mb_albumid='someID-1',
        mb_albumartistid='someID-1'
    )
    if lib:
        lib.add(i)
    return i
# Dummy import session.
def import_session(lib=None, loghandler=None, paths=None, query=None, cli=False):
    """Create an ImportSession (TerminalImportSession when *cli* is true).

    Fix: ``paths`` and ``query`` previously used mutable default arguments
    (``[]``); they now default to ``None`` and are replaced with a fresh
    empty list per call.
    """
    cls = commands.TerminalImportSession if cli else importer.ImportSession
    if paths is None:
        paths = []
    if query is None:
        query = []
    return cls(lib, loghandler, paths, query)
class Assertions(object):
    """A mixin with additional unit test assertions."""

    def assertExists(self, path):  # noqa
        exists = os.path.exists(util.syspath(path))
        self.assertTrue(exists, u'file does not exist: {!r}'.format(path))

    def assertNotExists(self, path):  # noqa
        exists = os.path.exists(util.syspath(path))
        self.assertFalse(exists, u'file exists: {!r}'.format(path))

    def assert_equal_path(self, a, b):
        """Check that two paths are equal."""
        if a == b:
            # Fast path: byte-identical paths need no normalization.
            return
        self.assertEqual(util.normpath(a), util.normpath(b),
                         u'paths are not equal: {!r} and {!r}'.format(a, b))
# A test harness for all beets tests.
# Provides temporary, isolated configuration.
class TestCase(unittest.TestCase, Assertions):
    """A unittest.TestCase subclass that saves and restores beets'
    global configuration. This allows tests to make temporary
    modifications that will then be automatically removed when the test
    completes. Also provides some additional assertion methods, a
    temporary directory, and a DummyIO.
    """
    def setUp(self):
        # A "clean" source list including only the defaults.
        beets.config.sources = []
        beets.config.read(user=False, defaults=True)
        # Direct paths to a temporary directory. Tests can also use this
        # temporary directory.
        self.temp_dir = util.bytestring_path(tempfile.mkdtemp())
        # Point state file, library database and music directory into the
        # per-test temp dir so tests never touch the real user config.
        beets.config['statefile'] = \
            util.py3_path(os.path.join(self.temp_dir, b'state.pickle'))
        beets.config['library'] = \
            util.py3_path(os.path.join(self.temp_dir, b'library.db'))
        beets.config['directory'] = \
            util.py3_path(os.path.join(self.temp_dir, b'libdir'))
        # Set $HOME, which is used by confit's `config_dir()` to create
        # directories.
        self._old_home = os.environ.get('HOME')
        os.environ['HOME'] = util.py3_path(self.temp_dir)
        # Initialize, but don't install, a DummyIO.
        self.io = DummyIO()
    def tearDown(self):
        # Remove the temp dir and restore the original $HOME (deleting it
        # if it was unset before setUp ran).
        if os.path.isdir(self.temp_dir):
            shutil.rmtree(self.temp_dir)
        if self._old_home is None:
            del os.environ['HOME']
        else:
            os.environ['HOME'] = self._old_home
        self.io.restore()
        # Reset global config so the next test re-reads it from scratch.
        beets.config.clear()
        beets.config._materialized = False
class LibTestCase(TestCase):
    """A test case that includes an in-memory library object (`lib`) and
    an item added to the library (`i`).
    """
    def setUp(self):
        super(LibTestCase, self).setUp()
        # In-memory SQLite library plus one dummy item for convenience.
        self.lib = beets.library.Library(':memory:')
        self.i = item(self.lib)
    def tearDown(self):
        # Close the DB connection before the base class cleans up.
        self.lib._connection().close()
        super(LibTestCase, self).tearDown()
# Mock timing.
class Timecop(object):
    """Mocks the timing system (namely time() and sleep()) for testing.

    Inspired by the Ruby timecop library: install() patches the `time`
    module so sleeps merely advance a fake clock; restore() undoes it.
    """

    def __init__(self):
        self.now = time.time()

    def time(self):
        return self.now

    def sleep(self, amount):
        # Advance the fake clock instead of actually sleeping.
        self.now = self.now + amount

    def install(self):
        self.orig = {'time': time.time, 'sleep': time.sleep}
        time.time, time.sleep = self.time, self.sleep

    def restore(self):
        time.time, time.sleep = self.orig['time'], self.orig['sleep']
# Mock I/O.
class InputException(Exception):
    """Raised when a DummyIn runs out of scripted input lines."""

    def __init__(self, output=None):
        self.output = output

    def __str__(self):
        base = "Attempt to read with no input provided."
        if self.output is None:
            return base
        return base + " Output: {!r}".format(self.output)
class DummyOut(object):
    """Collects everything written to it, for later inspection via get()."""

    encoding = 'utf8'

    def __init__(self):
        self.buf = []

    def write(self, s):
        self.buf.append(s)

    def get(self):
        # On Python 2 the buffer holds bytes; join with the matching type.
        joiner = b'' if six.PY2 else ''
        return joiner.join(self.buf)

    def flush(self):
        self.clear()

    def clear(self):
        self.buf = []
class DummyIn(object):
    """Serves pre-scripted input lines; raises InputException when empty."""

    encoding = 'utf8'

    def __init__(self, out=None):
        self.buf = []
        self.reads = 0
        self.out = out

    def add(self, s):
        # Terminate each scripted line with the type-appropriate newline.
        newline = b'\n' if six.PY2 else '\n'
        self.buf.append(s + newline)

    def readline(self):
        if not self.buf:
            # Include any captured output in the error for easier debugging.
            raise InputException(self.out.get() if self.out else None)
        self.reads += 1
        return self.buf.pop(0)
class DummyIO(object):
    """Mocks input and output streams for testing UI code."""

    def __init__(self):
        self.stdout = DummyOut()
        self.stdin = DummyIn(self.stdout)

    def addinput(self, s):
        self.stdin.add(s)

    def getoutput(self):
        text = self.stdout.get()
        self.stdout.clear()
        return text

    def readcount(self):
        return self.stdin.reads

    def install(self):
        sys.stdin, sys.stdout = self.stdin, self.stdout

    def restore(self):
        sys.stdin, sys.stdout = sys.__stdin__, sys.__stdout__
# Utility.
def touch(path):
    """Ensure a file exists at *path* by opening it in append mode."""
    with open(path, 'a'):
        pass
class Bag(object):
    """An object that exposes a set of fields given as keyword
    arguments. Any field not found in the dictionary appears to be None.
    Used for mocking Album objects and the like.
    """

    def __init__(self, **fields):
        self.fields = fields

    def __getattr__(self, key):
        try:
            return self.fields[key]
        except KeyError:
            # Unknown attributes read as None rather than raising.
            return None
# Platform mocking.
@contextmanager
def platform_windows():
    """Temporarily make os.path behave like Windows (ntpath)."""
    import ntpath
    saved = os.path
    os.path = ntpath
    try:
        yield
    finally:
        os.path = saved
@contextmanager
def platform_posix():
    """Temporarily make os.path behave like POSIX (posixpath)."""
    import posixpath
    saved = os.path
    os.path = posixpath
    try:
        yield
    finally:
        os.path = saved
@contextmanager
def system_mock(name):
    """Temporarily make platform.system() report *name*."""
    import platform
    saved = platform.system
    platform.system = lambda: name
    try:
        yield
    finally:
        platform.system = saved
def slow_test(unused=None):
    """Decorator factory marking a test as slow.

    Returns unittest.skip when SKIP_SLOW_TESTS is set in the environment;
    otherwise returns an identity decorator.
    """
    if 'SKIP_SLOW_TESTS' in os.environ:
        return unittest.skip(u'test is slow')
    return lambda obj: obj
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""recipe.py: the main functionality of this program"""
__author__ = "Daniel O'Connell"
__copyright__ = "BSD-3"
import logging
import wx
import wx.richtext as rt
import sqlalchemy.exc
import GUI
from widgets.recipePanel import RecipePanel
from widgets.editRecipe import EditRecipe
from widgets.treeControls import FILE_ICON, FOLDER_ICON, FOLDER_OPEN_ICON
from database import DBConfig
from database import session_scope
from models import *
import i18n
_ = i18n.language.ugettext
logging.basicConfig(filename='recipes.log', level=logging.DEBUG)
class RecipesWindow(GUI.MainWindow):
    def setup(self):
        """One-time initialisation: DB connection, tree icons, recipe list,
        ingredient search widget and the embedded edit-recipe tab."""
        # Map logical tab names to notebook page indices.
        self.tabs = {"recipes": 0,
                     "edit": 1}
        self.database = DBConfig('sqlite:///database.db')
        session = self.database.getSession()
        # NOTE(review): the user is hard-coded to 'dan' -- presumably a
        # development placeholder; confirm before wider use.
        self.userId = session.query(User)\
            .filter(User.name.like('dan')).first().id
        session.close()
        self.tabsContainer.SetSelection(self.tabs["recipes"])
        # 16x16 image list with folder/open-folder/file icons for the tree.
        isz = (16, 16)
        self.il = wx.ImageList(isz[0], isz[1])
        self.icons = {
            FOLDER_ICON: self.il.Add(wx.ArtProvider_GetBitmap(wx.ART_FOLDER,
                                     wx.ART_OTHER, isz)),
            FOLDER_OPEN_ICON: self.il.Add(wx.ArtProvider_GetBitmap(wx.ART_FILE_OPEN,
                                          wx.ART_OTHER,
                                          isz)),
            FILE_ICON: self.il.Add(wx.ArtProvider_GetBitmap(wx.ART_NORMAL_FILE,
                                   wx.ART_OTHER, isz))}
        self.recipesList.SetImageList(self.il)
        self.getRecipes()
        # Feed all known substance names to the ingredient-search widget.
        with session_scope(self.database) as session:
            substance_names = [name.name for name in
                               self.database.getSubstances(session)]
            self.searchIngredients.SetChoices(substance_names)
            self.searchIngredients.SetItemsCallback(self.filterIngredients)
        self.setupEditRecipe(self.edit_recipe_tab)
        self.recipes_menu_options = [_("open in new tab"), _("edit"),
                                     _("new"), _("delete")]
        self.setStatusBarText(_("setup: OK"))
def getRecipes(self, name=None, ingredients=None, groups=None):
print name, ingredients
with session_scope(self.database) as session:
self.setupRecipes(self.database.
getRecipesByGroups(session,
title=name,
ingredients=ingredients))
if name or ingredients:
self.recipesList.ExpandAllChildren(self.recipesList.GetRootItem())
else:
self.recipesList.CollapseAllChildren(self.recipesList.GetRootItem())
def filterIngredients(self, items):
name = None
if self.searchRecipeName.GetValue():
name = "%" + self.searchRecipeName.GetValue() + "%"
self.getRecipes(name, items)
def filterRecipes(self, event):
event.Skip()
name = None
if event.GetString():
name = '%' + event.GetString() + '%'
self.getRecipes(name, self.searchIngredients.GetItems())
    def filterGroups(self, event):
        # TODO: group-based filtering is not implemented yet; just let the
        # event propagate to other handlers.
        event.Skip()
    def saveRecipe(self, edit_recipe):
        """Persist the recipe currently held by the *edit_recipe* panel.

        Validates that a name and parsable ingredient list are present,
        creates or updates the Recipe row, replaces its ingredients, then
        switches back to the recipes tab and removes the edit page.
        """
        self.setStatusBarText(_("saving recipe"))
        errors = False
        # get_ingredients() returns None when the ingredient rows are invalid.
        ingredients = edit_recipe.get_ingredients()
        if ingredients is None:
            errors = True
        title = edit_recipe.get_name()
        if not title:
            errors = True
            #TODO: display error message
        if not errors:
            with session_scope(self.database) as session:
                logging.debug("user %s" % session.query(User).get(self.userId))
                # Reuse the existing row when the panel carries a recipe id;
                # otherwise create a fresh Recipe.
                recipe = None
                if edit_recipe.get_recipe_id():
                    recipe = session.query(Recipe)\
                        .get(edit_recipe.get_recipe_id())
                if not recipe:
                    recipe = Recipe()
                recipe.title = title
                recipe.description = edit_recipe.get_description()
                # NOTE(review): "algorythm" [sic] mirrors the model's
                # attribute spelling.
                recipe.algorythm = edit_recipe.get_algorythm()
                recipe.time = edit_recipe.get_time()
                recipe.difficulty = edit_recipe.get_difficulty()
                recipe.groups = session.query(Group)\
                    .filter(Group.id.in_(edit_recipe.get_groups())).all()
                recipe.user = session.query(User).get(self.userId)
                if not recipe.id:
                    # NOTE(review): session.add() returns None, so this
                    # clears the status bar text -- looks unintentional;
                    # confirm whether a message was meant here.
                    self.setStatusBarText(session.add(recipe))
                elif recipe.ingredients:
                    # Updating an existing recipe: drop its old ingredient
                    # rows before inserting the new ones.
                    for ingredient in recipe.ingredients:
                        session.delete(ingredient)
                    recipe.ingredients = []
                    session.flush()
                for substance, amount, unit in ingredients:
                    try:
                        session.add(Ingredient(recipe=recipe,
                                               substance=self.database
                                               .getSubstance(substance,
                                                             session),
                                               unit=self.database
                                               .getUnit(unit,
                                                        session),
                                               amount=amount))
                    except sqlalchemy.exc.IntegrityError as e:
                        # Duplicate/invalid ingredient rows are logged and
                        # skipped rather than aborting the whole save.
                        logging.error(e)
            current = self.tabsContainer.GetSelection()
            self.tabsContainer.SetSelection(self.tabs["recipes"])
            #TODO: this should be a delete, but that causes everything to blow up
            self.tabsContainer.RemovePage(current)
        else:
            self.setStatusBarText(_("errors"))
def setupEditRecipe(self, tab):
    """Populate an EditRecipe panel with choice data and hook up saving."""
    with session_scope(self.database) as session:
        # Offer every known substance and unit name for autocompletion.
        substance_names = [name.name for name in
                           self.database.getSubstances(session)]
        unit_names = [name.name for name in
                      self.database.getUnits(session)]
        groups = self.database.getGroups(session)
        tab.setup(groups, substance_names, unit_names)
        tab.set_save_action(self.saveRecipe)
def edit_recipe(self, recipe=None):
    """Open a new notebook tab for creating (recipe is None) or editing."""
    tab = wx.Panel(self.tabsContainer, wx.ID_ANY,
                   wx.DefaultPosition, wx.DefaultSize,
                   wx.TAB_TRAVERSAL)
    tabs_sizer = wx.BoxSizer(wx.VERTICAL)
    panel = EditRecipe(tab)
    tabs_sizer.Add(panel, 1, wx.ALL | wx.EXPAND, 5)
    tab.SetSizer(tabs_sizer)
    tab.Layout()
    tabs_sizer.Fit(tab)
    if recipe:
        title = _("edit recipe")
    else:
        title = _("add recipe")
    # True selects the freshly added page.
    self.tabsContainer.AddPage(tab, title, True, wx.NullBitmap)
    self.setupEditRecipe(panel)
    if recipe:
        panel.set_recipe(recipe)
def addNodeToTree(self, tree, parent, data, name, normalPic, expandedPic):
    """Append *name* under *parent*, tag it with data.id, set both icons.

    Returns the new tree item so callers can recurse into it.
    """
    item = tree.AppendItem(parent, name)
    tree.SetPyData(item, data.id)
    for icon, state in ((normalPic, wx.TreeItemIcon_Normal),
                        (expandedPic, wx.TreeItemIcon_Expanded)):
        tree.SetItemImage(item, icon, state)
    return item
def addRecipesToTree(self, tree, treeNode, groups, normalPic, expandedPic,
                     recipePic):
    """Recursively mirror the *groups* mapping into the tree widget.

    *groups* maps group objects to nested dicts of the same shape; the
    special key "recipes" holds the recipes that live directly in the
    current group.
    """
    for child, data in sorted(groups.items()):
        if child != "recipes":
            node = self.addNodeToTree(tree, treeNode, child, child.name,
                                      normalPic, expandedPic)
            self.addRecipesToTree(tree, node, data,
                                  normalPic, expandedPic, recipePic)
    # A group without direct recipes simply has no "recipes" key; the
    # previous bare except/pass also hid real errors from addNodeToTree.
    for recipe in groups.get("recipes", ()):
        self.addNodeToTree(tree, treeNode, recipe, recipe.title,
                           recipePic, expandedPic)
def setupRecipes(self, recipes, selected=None):
    """Rebuild the recipe tree from *recipes* and show the first leaf.

    :param recipes: nested group->dict mapping with a "recipes" list,
        as produced by database.getRecipesByGroups().
    :param selected: currently unused — TODO confirm intended behavior.
    """
    tree = self.recipesList
    try:
        tree.DeleteChildren(self.recipesRoot)
    except:
        # First call: self.recipesRoot does not exist yet, create it.
        self.recipesRoot = tree.AddRoot(_("Recipes"))
    if recipes["recipes"]:
        self.addRecipesToTree(tree, self.recipesRoot, recipes,
                              self.icons[FOLDER_ICON],
                              self.icons[FOLDER_OPEN_ICON],
                              self.icons[FILE_ICON])
        # Walk down the first-child chain until a leaf (a recipe) is found.
        item, cookie = tree.GetFirstChild(tree.GetRootItem())
        while item and tree.ItemHasChildren(item):
            item, cookie = tree.GetFirstChild(item)
        with session_scope(self.database) as session:
            recipe = session.query(Recipe)\
                .get(self.recipesList.GetPyData(item))
            self.setStatusBarText(recipe)
            self.showRecipe(recipe, self.recipe_panel)
    else:
        # No recipes at all: blank the detail panel.
        self.clearRecipe(self.recipe_panel)
def showRecipe(self, recipe, panel):
    """Fill *panel* with every displayable field of *recipe*.

    Description/algorythm starting with "<?xml" are treated as rich
    text, anything else as plain text (hence the boolean flag).
    """
    panel.set_title(recipe.title)
    panel.set_description(recipe.description,
                          not recipe.description.startswith("<?xml"))
    panel.set_algorythm(recipe.algorythm,
                        not recipe.algorythm.startswith("<?xml"))
    ingredients = ""
    for i in recipe.ingredients:
        # NOTE(review): leftover debug print (Python 2 syntax) — consider removing.
        print i.standardise_amount()
        ingredients += unicode(i) + "\r\n"
    panel.set_ingredients(ingredients)
    # Comma-separated list of group names.
    groups = ""
    for group in recipe.groups:
        if groups:
            groups += ", "
        groups += group.name
    panel.set_groups(groups)
    panel.set_time(recipe.time)
    panel.set_difficulty(recipe.difficulty)
def clearRecipe(self, panel):
    """Blank out every field of the recipe display panel."""
    panel.set_title('')
    # Empty description/algorithm fields are plain text, hence the True flag.
    panel.set_description('', True)
    panel.set_algorythm('', True)
    for setter in (panel.set_ingredients, panel.set_groups,
                   panel.set_time, panel.set_difficulty):
        setter('')
def showRecipesMenu(self, event):
    """Right-click handler: pop up the context menu for a recipe leaf."""
    event.Skip()
    item = event.GetItem()
    # Only leaf nodes are recipes; group folders get no context menu.
    if item and not self.recipesList.ItemHasChildren(item):
        self.selected_recipe = self.recipesList.GetPyData(item)
        self.selected_node = item
        menu = wx.Menu()
        # Menu item ids double as indices into recipes_menu_options,
        # which recipe_options() uses to dispatch.
        for id in range(0, len(self.recipes_menu_options)):
            menu.Append(id, self.recipes_menu_options[id])
            wx.EVT_MENU(menu, id, self.recipe_options)
        self.PopupMenu(menu, event.GetPoint())
        menu.Destroy()
def recipe_options(self, event):
    """Dispatch a recipe context-menu choice by menu item id."""
    with session_scope(self.database) as session:
        recipe = session.query(Recipe).get(self.selected_recipe)
        if event.GetId() == 0:  # open in new tab
            self.open_recipe_tab(recipe)
        elif event.GetId() == 1:  # edit
            self.edit_recipe(recipe)
        elif event.GetId() == 2:  # new
            self.edit_recipe()
        elif event.GetId() == 3:  # delete
            session.delete(recipe)
            session.commit()
            self.selected_recipe = None
            # Rebuild the tree so the deleted recipe disappears.
            self.setupRecipes(self.database.getRecipesByGroups(session))
        self.Layout()
def open_recipe_tab(self, recipe):
    """Show *recipe* read-only in a brand new notebook tab."""
    tab = wx.Panel(self.tabsContainer, wx.ID_ANY,
                   wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)
    tabs_sizer = wx.BoxSizer(wx.VERTICAL)
    recipe_panel = RecipePanel(tab)
    tabs_sizer.Add(recipe_panel, 1, wx.ALL | wx.EXPAND, 5)
    tab.SetSizer(tabs_sizer)
    tab.Layout()
    tabs_sizer.Fit(tab)
    # True selects the new page; the tab is titled after the recipe.
    self.tabsContainer.AddPage(tab, recipe.title, True, wx.NullBitmap)
    self.showRecipe(recipe, recipe_panel)
def selectRecipe(self, event):
    """Tree activation handler: display a recipe leaf, toggle folders."""
    event.Skip()
    item = event.GetItem()
    if item and not self.recipesList.ItemHasChildren(item):
        with session_scope(self.database) as session:
            recipe = session.query(Recipe)\
                .get(self.recipesList.GetPyData(item))
            self.setStatusBarText(recipe)
            self.showRecipe(recipe, self.recipe_panel)
    else:
        # Activating a group folder just expands/collapses it.
        self.recipesList.Toggle(item)
def tabChanged(self, event):
    """When the recipes tab becomes active, refresh it from the search fields."""
    event.Skip()
    if self.tabs["recipes"] == self.tabsContainer.GetSelection():
        # Same refresh logic as the search handlers — reuse
        # filterIngredients instead of duplicating the wildcard building.
        self.filterIngredients(self.searchIngredients.GetItems())
    # elif self.tabs["edit"] == self.tabsContainer.GetSelection():
    #     self.setupEditRecipe(self.edit_recipe_tab)
def setStatusBarText(self, text):
    """Show *text* (coerced to unicode) in the window's status bar."""
    self.m_statusBar1.SetStatusText(unicode(text))
def AddRTCHandlers():
    """Register the RichText HTML/XML handlers exactly once."""
    # make sure we haven't already added them.
    if rt.RichTextBuffer.FindHandlerByType(rt.RICHTEXT_TYPE_HTML) is not None:
        return
    rt.RichTextBuffer.AddHandler(rt.RichTextHTMLHandler())
    rt.RichTextBuffer.AddHandler(rt.RichTextXMLHandler())
    # This is needed for the view as HTML option since we tell it
    # to store the images in the memory file system.
    wx.FileSystem.AddHandler(wx.MemoryFSHandler())
# Application entry point: build the main window and run the wx event loop.
logging.debug("creating app")
app = wx.App()
AddRTCHandlers()
logging.debug("added RTC handlers")
mainWindow = RecipesWindow(None)
mainWindow.setup()
logging.debug("setup done")
mainWindow.Show()
logging.debug("showing window")
logging.debug("starting main loop")
app.MainLoop()
# MainLoop() returned: the window was closed; release the DB session.
session = mainWindow.database.getSession()
session.close()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC
# Copyright 2011 Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
# nova-manage lives in bin/ without a .py suffix, so it cannot be imported
# normally; locate it relative to this test file and load it via imp.
TOPDIR = os.path.normpath(os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    os.pardir,
    os.pardir))
NOVA_MANAGE_PATH = os.path.join(TOPDIR, 'bin', 'nova-manage')
# Temporarily disable bytecode writing so no stray .pyc lands next to
# bin/nova-manage.
sys.dont_write_bytecode = True
import imp
nova_manage = imp.load_source('nova_manage.py', NOVA_MANAGE_PATH)
sys.dont_write_bytecode = False
import mox
import stubout
import StringIO
from nova import context
from nova import db
from nova import exception
from nova import test
from nova.tests.db import fakes as db_fakes
class FixedIpCommandsTestCase(test.TestCase):
    """Tests for the nova-manage fixed-ip reserve/unreserve commands."""

    def setUp(self):
        super(FixedIpCommandsTestCase, self).setUp()
        # Stub out the network DB API so no real database is touched.
        self.stubs = stubout.StubOutForTesting()
        db_fakes.stub_out_db_network_api(self.stubs)
        self.commands = nova_manage.FixedIpCommands()

    def tearDown(self):
        super(FixedIpCommandsTestCase, self).tearDown()
        self.stubs.UnsetAll()

    def test_reserve(self):
        # Reserving a known address must flip its 'reserved' flag on.
        self.commands.reserve('192.168.0.100')
        address = db.fixed_ip_get_by_address(context.get_admin_context(),
                                             '192.168.0.100')
        self.assertEqual(address['reserved'], True)

    def test_reserve_nonexistent_address(self):
        # An unknown address makes the command exit with an error.
        self.assertRaises(SystemExit,
                          self.commands.reserve,
                          '55.55.55.55')

    def test_unreserve(self):
        # Unreserving a known address must flip its 'reserved' flag off.
        self.commands.unreserve('192.168.0.100')
        address = db.fixed_ip_get_by_address(context.get_admin_context(),
                                             '192.168.0.100')
        self.assertEqual(address['reserved'], False)

    def test_unreserve_nonexistent_address(self):
        self.assertRaises(SystemExit,
                          self.commands.unreserve,
                          '55.55.55.55')
class NetworkCommandsTestCase(test.TestCase):
    """Tests for the nova-manage network create/list/delete/modify commands."""

    def setUp(self):
        super(NetworkCommandsTestCase, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        self.commands = nova_manage.NetworkCommands()
        self.context = context.get_admin_context()
        # Canonical network record used by the list/delete/modify tests.
        self.net = {'id': 0,
                    'label': 'fake',
                    'injected': False,
                    'cidr': '192.168.0.0/24',
                    'cidr_v6': 'dead:beef::/64',
                    'multi_host': False,
                    'gateway_v6': 'dead:beef::1',
                    'netmask_v6': '64',
                    'netmask': '255.255.255.0',
                    'bridge': 'fa0',
                    'bridge_interface': 'fake_fa0',
                    'gateway': '192.168.0.1',
                    'broadcast': '192.168.0.255',
                    'dns1': '8.8.8.8',
                    'dns2': '8.8.4.4',
                    'vlan': 200,
                    'vpn_public_address': '10.0.0.2',
                    'vpn_public_port': '2222',
                    'vpn_private_address': '192.168.0.2',
                    'dhcp_start': '192.168.0.3',
                    'project_id': 'fake_project',
                    'host': 'fake_host',
                    'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}

        # Closure stubs shared by the delete/modify tests; each test
        # assigns self.fake_net / self.fake_update_value first.
        def fake_network_get_by_cidr(context, cidr):
            self.assertTrue(context.to_dict()['is_admin'])
            self.assertEqual(cidr, self.fake_net['cidr'])
            return db_fakes.FakeModel(self.fake_net)

        def fake_network_update(context, network_id, values):
            self.assertTrue(context.to_dict()['is_admin'])
            self.assertEqual(network_id, self.fake_net['id'])
            self.assertEqual(values, self.fake_update_value)

        self.fake_network_get_by_cidr = fake_network_get_by_cidr
        self.fake_network_update = fake_network_update

    def tearDown(self):
        super(NetworkCommandsTestCase, self).tearDown()
        self.stubs.UnsetAll()

    def test_create(self):
        # Verify every CLI argument reaches VlanManager.create_networks.
        def fake_create_networks(obj, context, **kwargs):
            self.assertTrue(context.to_dict()['is_admin'])
            self.assertEqual(kwargs['label'], 'Test')
            self.assertEqual(kwargs['cidr'], '10.2.0.0/24')
            self.assertEqual(kwargs['multi_host'], False)
            self.assertEqual(kwargs['num_networks'], 1)
            self.assertEqual(kwargs['network_size'], 256)
            self.assertEqual(kwargs['vlan_start'], 200)
            self.assertEqual(kwargs['vpn_start'], 2000)
            self.assertEqual(kwargs['cidr_v6'], 'fd00:2::/120')
            self.assertEqual(kwargs['gateway_v6'], 'fd00:2::22')
            self.assertEqual(kwargs['bridge'], 'br200')
            self.assertEqual(kwargs['bridge_interface'], 'eth0')
            self.assertEqual(kwargs['dns1'], '8.8.8.8')
            self.assertEqual(kwargs['dns2'], '8.8.4.4')
        self.flags(network_manager='nova.network.manager.VlanManager')
        from nova.network import manager as net_manager
        self.stubs.Set(net_manager.VlanManager, 'create_networks',
                       fake_create_networks)
        self.commands.create(
            label='Test',
            fixed_range_v4='10.2.0.0/24',
            num_networks=1,
            network_size=256,
            multi_host='F',
            vlan_start=200,
            vpn_start=2000,
            fixed_range_v6='fd00:2::/120',
            gateway_v6='fd00:2::22',
            bridge='br200',
            bridge_interface='eth0',
            dns1='8.8.8.8',
            dns2='8.8.4.4')

    def test_list(self):
        def fake_network_get_all(context):
            return [db_fakes.FakeModel(self.net)]
        self.stubs.Set(db, 'network_get_all', fake_network_get_all)
        # Capture stdout to compare against the expected table layout.
        output = StringIO.StringIO()
        sys.stdout = output
        self.commands.list()
        sys.stdout = sys.__stdout__
        result = output.getvalue()
        _fmt = "%(id)-5s\t%(cidr)-18s\t%(cidr_v6)-15s\t%(dhcp_start)-15s\t" +\
               "%(dns1)-15s\t%(dns2)-15s\t%(vlan)-15s\t%(project_id)-15s\t" +\
               "%(uuid)-15s"
        head = _fmt % {'id': _('id'),
                       'cidr': _('IPv4'),
                       'cidr_v6': _('IPv6'),
                       'dhcp_start': _('start address'),
                       'dns1': _('DNS1'),
                       'dns2': _('DNS2'),
                       'vlan': _('VlanID'),
                       'project_id': _('project'),
                       'uuid': _("uuid")}
        body = _fmt % {'id': self.net['id'],
                       'cidr': self.net['cidr'],
                       'cidr_v6': self.net['cidr_v6'],
                       'dhcp_start': self.net['dhcp_start'],
                       'dns1': self.net['dns1'],
                       'dns2': self.net['dns2'],
                       'vlan': self.net['vlan'],
                       'project_id': self.net['project_id'],
                       'uuid': self.net['uuid']}
        answer = '%s\n%s\n' % (head, body)
        self.assertEqual(result, answer)

    def test_delete(self):
        # Deleting requires the network to be disassociated first.
        self.fake_net = self.net
        self.fake_net['project_id'] = None
        self.fake_net['host'] = None
        self.stubs.Set(db, 'network_get_by_cidr',
                       self.fake_network_get_by_cidr)

        def fake_network_delete_safe(context, network_id):
            self.assertTrue(context.to_dict()['is_admin'])
            self.assertEqual(network_id, self.fake_net['id'])
        self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
        self.commands.delete(fixed_range=self.fake_net['cidr'])

    def _test_modify_base(self, update_value, project, host, dis_project=None,
                          dis_host=None):
        # Shared driver: modify() must send exactly *update_value* to the DB.
        self.fake_net = self.net
        self.fake_update_value = update_value
        self.stubs.Set(db, 'network_get_by_cidr',
                       self.fake_network_get_by_cidr)
        self.stubs.Set(db, 'network_update', self.fake_network_update)
        self.commands.modify(self.fake_net['cidr'], project=project, host=host,
                             dis_project=dis_project, dis_host=dis_host)

    def test_modify_associate(self):
        self._test_modify_base(update_value={'project_id': 'test_project',
                                             'host': 'test_host'},
                               project='test_project', host='test_host')

    def test_modify_unchanged(self):
        self._test_modify_base(update_value={}, project=None, host=None)

    def test_modify_disassociate(self):
        self._test_modify_base(update_value={'project_id': None, 'host': None},
                               project=None, host=None, dis_project=True,
                               dis_host=True)
|
|
from __future__ import absolute_import # Same name as silme library.
"""
Parser for silme-compatible translation formats.
"""
import codecs
import os
from collections import OrderedDict
from copy import copy
import silme
from silme.format.dtd import FormatParser as DTDParser
from silme.format.ini import FormatParser as IniParser
from silme.format.inc import FormatParser as IncParser
from silme.format.properties import FormatParser as PropertiesParser
from pontoon.sync import SyncError
from pontoon.sync.formats.base import ParsedResource
from pontoon.sync.vcs_models import VCSTranslation
class SilmeEntity(VCSTranslation):
    """One translatable entity wrapped around a silme object."""

    def __init__(self, silme_object, comments=None, order=0, copy_string=True):
        """
        :param copy_string:
            If True, copy the string from the silme_object. Otherwise,
            self.strings will be an empty dict. Used for creating empty
            copies of translations from source resources.
        """
        self.silme_object = silme_object
        self.comments = comments or []
        self.order = order
        self.last_translator = None
        self.last_update = None
        self.strings = {None: silme_object.value} if copy_string else {}

    @property
    def key(self):
        return self.silme_object.id

    @property
    def source_string(self):
        return self.silme_object.value

    @property
    def source_string_plural(self):
        # Silme-based formats have no plural forms.
        return ''

    @property
    def fuzzy(self):
        return False

    @fuzzy.setter
    def fuzzy(self, fuzzy):
        pass  # We don't use fuzzy in silme

    @property
    def source(self):
        return []

    def __eq__(self, other):
        if self.key != other.key:
            return False
        return self.strings.get(None) == other.strings.get(None)

    def __ne__(self, other):
        return not (self == other)

    def __nonzero__(self):
        # Python 2 truthiness: an entity is "set" when it carries strings.
        return len(self.strings) > 0
class SilmeResource(ParsedResource):
    """A parsed silme-compatible resource file (dtd/ini/inc/properties)."""

    def __init__(self, parser, path, source_resource=None):
        self.parser = parser
        self.path = path
        self.source_resource = source_resource
        self.entities = OrderedDict()  # Preserve entity order.

        # Copy entities from the source_resource if it's available.
        if source_resource:
            for key, entity in source_resource.entities.items():
                self.entities[key] = copy_source_entity(entity)

        try:
            # Only uncomment MOZ_LANGPACK_CONTRIBUTORS if this is a .inc
            # file and a source resource (i.e. it has no source resource
            # itself).
            self.structure = parser.get_structure(read_file(
                path,
                uncomment_moz_langpack=parser is IncParser and not source_resource
            ))
        except IOError:
            # If the file doesn't exist, but we have a source resource,
            # we can keep going, we'll just not have any translations.
            if source_resource:
                return
            else:
                raise

        # Pair each entity with the comment lines that precede it.
        comments = []
        current_order = 0
        for obj in self.structure:
            if isinstance(obj, silme.core.entity.Entity):
                entity = SilmeEntity(obj, comments, current_order)
                self.entities[entity.key] = entity
                current_order += 1
                comments = []
            elif isinstance(obj, silme.core.structure.Comment):
                for comment in obj:
                    # Silme groups comments together, so we strip
                    # whitespace and split them up.
                    lines = unicode(comment).strip().split('\n')
                    comments += [line.strip() for line in lines]

    @property
    def translations(self):
        return self.entities.values()

    def save(self, locale):
        """
        Load the source resource, modify it with changes made to this
        Resource instance, and save it over the locale-specific
        resource.

        :raises SyncError: if this resource has no source resource.
        """
        if self.source_resource is None:
            raise SyncError('Cannot save silme resource {0}: No source resource given.'
                            .format(self.path))

        # Only uncomment MOZ_LANGPACK_CONTRIBUTORS if we have a
        # translation for it (the entity itself serves as the flag).
        new_structure = self.parser.get_structure(read_file(
            self.source_resource.path,
            uncomment_moz_langpack=self.entities.get('MOZ_LANGPACK_CONTRIBUTORS', False)
        ))

        # Update translations in the copied resource.
        entities = [
            SilmeEntity(obj) for obj in new_structure if isinstance(obj, silme.core.entity.Entity)
        ]
        for silme_entity in entities:
            key = silme_entity.key
            translated_entity = self.entities.get(key)
            if translated_entity and None in translated_entity.strings:
                new_structure.modify_entity(key, translated_entity.strings[None])
            else:
                # Remove untranslated entity and following newline
                pos = new_structure.entity_pos(key)
                new_structure.remove_entity(key)
                try:
                    line = new_structure[pos]
                except IndexError:
                    # No newline at end of file
                    continue
                if type(line) == unicode and line.startswith('\n'):
                    line = line[len('\n'):]
                    new_structure[pos] = line
                    # Fixed: was `len(line) is 0` — identity comparison with
                    # an int literal; use equality.
                    if len(line) == 0:
                        new_structure.remove_element(pos)

        # Create parent directory if it doesn't exist.
        try:
            os.makedirs(os.path.dirname(self.path))
        except OSError:
            pass  # Already exists, phew!

        with codecs.open(self.path, 'w', 'utf-8') as f:
            f.write(self.parser.dump_structure(new_structure))
def read_file(path, uncomment_moz_langpack=False):
    """Read the resource at the given path.

    .inc files carry a special commented-out MOZ_LANGPACK_CONTRIBUTORS
    entity; when requested, the leading '# ' is stripped from that line
    before parsing so locales can translate it.
    """
    with codecs.open(path, 'r', 'utf-8') as source:
        if not uncomment_moz_langpack:
            return source.read()
        processed = []
        for line in source:
            if line.startswith('# #define MOZ_LANGPACK_CONTRIBUTORS'):
                line = line[2:]
            processed.append(line)
        return ''.join(processed)
def copy_source_entity(entity):
    """
    Copy an entity from a source file to a new SilmeEntity instance.

    The copy keeps the silme object, comments and order, but starts with
    an empty strings dict so that entities that are copied but never
    modified during sync are not written to the translated resource.
    """
    duplicated_comments = copy(entity.comments)  # Members are strings, shallow is fine.
    return SilmeEntity(
        entity.silme_object,
        duplicated_comments,
        entity.order,
        copy_string=False
    )
def parse(parser, path, source_path=None):
    """Parse *path* with *parser*, optionally against a source resource."""
    # TODO: Cache the source resource to avoid re-parsing it a bunch.
    source_resource = (SilmeResource(parser, source_path)
                       if source_path is not None else None)
    return SilmeResource(parser, path, source_resource=source_resource)
# Convenience wrappers binding parse() to each supported silme format.
def parse_properties(path, source_path=None):
    return parse(PropertiesParser, path, source_path)


def parse_ini(path, source_path=None):
    return parse(IniParser, path, source_path)


def parse_inc(path, source_path=None):
    return parse(IncParser, path, source_path)


def parse_dtd(path, source_path=None):
    return parse(DTDParser, path, source_path)
|
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = 'vladl@google.com (Vlad Losev)'

import os

import gtest_test_utils

# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'

# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_catch_exceptions_ex_test_')

# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_catch_exceptions_no_ex_test_')

# SEH exception support is detected from the binary's test list rather
# than from the platform.
TEST_LIST = gtest_test_utils.Subprocess([EXE_PATH, LIST_TESTS_FLAG]).output

SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST

# Run each binary once up front and verify its output in the tests below.
if SUPPORTS_SEH_EXCEPTIONS:
  BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH]).output

EX_BINARY_OUTPUT = gtest_test_utils.Subprocess([EX_EXE_PATH]).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
  # pylint:disable-msg=C6302
  class CatchSehExceptionsTest(gtest_test_utils.TestCase):
    """Tests exception-catching behavior."""

    def TestSehExceptions(self, test_output):
      # An SEH exception must be reported from every fixture phase.
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s constructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s destructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUp()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDown()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in the test body'
                   in test_output)

    def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
      self.TestSehExceptions(EX_BINARY_OUTPUT)

    def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
      self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
  """Tests C++ exception-catching behavior.

     Tests in this test case verify that:
     * C++ exceptions are caught and logged as C++ (not SEH) exceptions
     * Exception thrown affect the remainder of the test work flow in the
       expected manner.
  """

  def testCatchesCxxExceptionsInFixtureConstructor(self):
    self.assert_('C++ exception with description '
                 '"Standard C++ exception" thrown '
                 'in the test fixture\'s constructor'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInConstructorTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')

  def testCatchesCxxExceptionsInFixtureDestructor(self):
    self.assert_('C++ exception with description '
                 '"Standard C++ exception" thrown '
                 'in the test fixture\'s destructor'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInSetUpTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUpTestCase()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest test body '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInTearDownTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDownTestCase()'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInSetUp(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUp()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInSetUpTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')

  def testCatchesCxxExceptionsInTearDown(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDown()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInTestBody(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in the test body'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesNonStdCxxExceptions(self):
    self.assert_('Unknown C++ exception thrown in the test body'
                 in EX_BINARY_OUTPUT)

  def testUnhandledCxxExceptionsAbortTheProgram(self):
    # Filters out SEH exception tests on Windows. Unhandled SEH exceptions
    # cause tests to show pop-up windows there.
    # NOTE(review): "FITLER" is a typo for "FILTER" (local name only).
    FITLER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
    # By default, Google Test doesn't catch the exceptions.
    uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
        [EX_EXE_PATH,
         NO_CATCH_EXCEPTIONS_FLAG,
         FITLER_OUT_SEH_TESTS_FLAG]).output

    self.assert_('Unhandled C++ exception terminating the program'
                 in uncaught_exceptions_ex_binary_output)
    self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
# Script entry point: run all test cases via Google Test's utility runner.
if __name__ == '__main__':
  gtest_test_utils.Main()
|
|
# By Samhan Salahuddin
# 25th November 2016
from midiutil.MidiFile import MIDIFile
from random import randint
from random import randrange
class MIDIGenerator(object):
    """Thin wrapper around midiutil's MIDIFile for single-track output."""

    def __init__(self, fileName):
        self.outputFileName = fileName
        self.MIDIObject = MIDIFile(1)  # a single track
        self.track = 0
        self.MIDIObject.addTrackName(self.track, 0, "Sample Track")
        self.MIDIObject.addTempo(self.track, 0, 420)
        self.volume = 100
        self.channel = 0
        # Chromatic scale; note index + basePitchOfC yields the MIDI pitch.
        self.notes = ['C', 'C#', 'D', 'D#', 'E', 'F',
                      'F#', 'G', 'G#', 'A', 'A#', 'B']
        self.basePitchOfC = 50

    def addNote(self, note, time, duration, octave, volume):
        """Add one note; the pseudo-note "S" is silence (velocity 0)."""
        if note == "S":
            self.MIDIObject.addNote(self.track, self.channel, 50,
                                    time, duration, 0)
        else:
            self.MIDIObject.addNote(self.track, self.channel,
                                    self.notePitch(note, octave),
                                    time, duration, volume)

    def addChord(self, notes, time, duration):
        """Sound every (note, octave) pair in *notes* simultaneously."""
        for entry in notes:
            pitch = self.notePitch(entry[0], entry[1])
            self.MIDIObject.addNote(self.track, self.channel, pitch,
                                    time, duration, self.volume)

    def notePitch(self, note, octave):
        """MIDI pitch of *note* at *octave*, relative to basePitchOfC."""
        return self.basePitchOfC + self.notes.index(note) + 12 * octave

    def addMelody(self, melody):
        """Append (note, octave, duration, volume) tuples back to back."""
        elapsed = 0
        for entry in melody:
            note, octave = entry[0], entry[1]
            duration, volume = entry[2], entry[3]
            if note != '':
                self.addNote(note, elapsed, duration, octave, volume)
            elapsed = elapsed + duration

    def writeMidiToFile(self):
        """Serialise the accumulated MIDI data to outputFileName."""
        binfile = open(self.outputFileName, 'wb')
        self.MIDIObject.writeFile(binfile)
        binfile.close()
class Composer(object):
    """Generates a melody over a scale using tension-driven randomness."""

    def compose(self, scaleNotes, duration):
        """Return a melody of (note, octave, duration, volume) tuples.

        :param scaleNotes: note names (subset of the chromatic scale)
            the melody may use.
        :param duration: number of note-generation steps to run.

        NOTE(review): relies on Python 2 integer division in several
        places (e.g. ``fraction = tension_count / tension_cycle`` and
        ``finalOffset / 12``); behavior differs under Python 3.
        NOTE(review): ``finalOctaveOffset`` may be referenced in the
        tension-reset branch before its first assignment below — verify
        it cannot trigger a NameError on early cycles.
        """
        scaleLen = len(scaleNotes)
        melody = []
        octave = 1
        cycle_length = 2**randint(0,3)
        octaveOffset = 0
        octaveMod = 0
        tension = 0
        tension_cycle = randint(1,3)
        tension_direction = 1
        tension_count = 0
        meta_tension_cycle = randint(2,5)
        meta_tension = 0
        meta_tension_direction = 1
        beat_length = 4
        beat_accumulated = 0
        total_meta_cycles = 0
        meta_cycle_max = 10
        cycle_counter = 0
        counter = 0
        previous_cycle_length = cycle_length
        # parameters to the random note offset generator
        multiplier = randint(2,55000)
        noteCounter = randint(20,55000)
        base = randint(1,51000)
        counter = 0
        duration_sequence = generateDurationSequence(cycle_length,beat_length,tension,tension_direction,counter,noteCounter,multiplier,base)
        while counter < duration:
            finalOffset = 0
            previousOctave = 0
            # suppress the note offset lower proportional to tension
            noteMod = 3
            if tension >= 2:
                noteMod = 2
            if tension >= 3:
                noteMod = 2
            if tension >= 5:
                noteMod = 2
            if tension >= 7:
                noteMod = 1
            if tension >=9:
                noteMod = 1
            noteOffset = generateNoteDelta(noteCounter+counter,multiplier,base) / (noteMod)
            # extend durations by even multiples every now and then
            noteDuration = duration_sequence[(noteCounter + counter) % len(duration_sequence)]
            num_cycles = 0
            #switch things up every now and then ie cycles
            if counter % cycle_length == 0 and counter != 0:
                # this makes a repetition
                melody_stringified = melody_string(melody[-min(len(melody),40):])
                compress_ratio = float(len(compress(melody_stringified))) / len(melody_stringified)
                # Highly compressible output means the melody is repeating
                # itself; shift the generator and try again.
                if compress_ratio <= 0.2 :
                    noteCounter = noteCounter - cycle_length - 1
                    counter = counter + 1
                    continue
                octaveOffset = 0
                # this actually makes the output more varied . Need to see why it's required
                if randint(1,10) % 2 == 0 :
                    octaveMod = (octaveMod + 1) % randint(2,4)
                # don't get too varied when tension has fallen
                if tension_direction == -1 and tension <= 5:
                    octaveMod = max(0,octaveMod-2)
                tension_count = tension_count + 1
                tension_ended = False
                # When cycle of rising and falling tension is complete
                if tension_count % tension_cycle == 0:
                    #Reset note generator when lowering tension from peak
                    if tension_direction == -1 :
                        # reset the random variables only after a "phrase"
                        tension_ended = True
                        multiplier = randint(2,1000)
                        noteCounter = randint(1,5000)
                        base = randint(1,1000)
                        # End the phrase on a lengthened (often tonic) note
                        # followed by a rest.
                        if len(melody) > 1:
                            noteIn = melody.pop()
                            n = noteIn[0]
                            o = noteIn[1]
                            d = noteIn[2]
                            v = noteIn[3]
                            if d <= 2:
                                d = 4
                            melody.append(("C" if randint(1,2) % 3 == 0 else n,o,d, v))
                            melody.append(("S",finalOctaveOffset,8, 25 + 20*int(tension/9) + randint(4,8)))
                    # Restart tension cycle
                    tension_count = 0
                    tension_direction = tension_direction * -1
                    # This adds long range structure . Starts and ends slow. Can increase random upper range if
                    # output is too dull.
                    tension_cycle = meta_tension + randint(1,3)
                    meta_tension = meta_tension + randint(1,2)
                    # same thing as tension but meta
                    if meta_tension_cycle % meta_tension == 0:
                        meta_tension = 0
                        meta_tension_direction = meta_tension_direction * -1
                        meta_tension_cycle = randint(2,5)
                        total_meta_cycles = total_meta_cycles + 1
                        octaveMod = (octaveMod + 1) % 3
                        # Composition is long enough: lengthen the final
                        # note and stop.
                        if total_meta_cycles == meta_cycle_max:
                            if len(melody) > 1:
                                noteIn = melody.pop()
                                n = noteIn[0]
                                o = noteIn[1]
                                d = noteIn[2]
                                v = noteIn[3]
                                melody.append((n,o,8, v))
                            return melody
                meta_term = meta_tension if meta_tension_direction == 1 else -1*meta_tension
                # this is a hack to force tension back down. Need to see if it can be avoided
                fraction = tension_count / tension_cycle
                tension = (meta_term + (tension + tension_direction*randint(1,2) )) % randint(6,8)
                if fraction >= 0.75 and tension_direction == -1:
                    tension = randint(1,3)
                elif (tension_count + 1) % tension_cycle == 0:
                    tension = randint(1,2)
                # generate note lengths based on cycle length , beat length and tension . beat length not used
                # need to fix to accommodate both odd and even rhythms
                duration_sequence = generateDurationSequence(cycle_length,beat_length,tension,tension_direction,counter,noteCounter,multiplier,base)
                # Every now and then make the phrases longer or shorter based on the tension
                melody_stringified = melody_string(melody[-len(melody):])
                compress_ratio = float(len(compress(melody_stringified))) / len(melody_stringified)
                if compress_ratio >= 0.2:
                    prev_cyc = cycle_length
                    cycle_length = 2 + 2**randint(1,2)
                    if tension >= 4:
                        cycle_length = 2**randint(2,2)
                    if tension >= 6:
                        cycle_length = 2**randint(2,3)
                    if tension >= 7:
                        cycle_length = 2**randint(1,2)
                    octaveOffset = randint(0,1)
                    tension = 0
                    if prev_cyc > 4:
                        duration_sequence = generateDurationSequence(cycle_length,beat_length,tension,tension_direction,counter,noteCounter,multiplier,base)
                counter = counter + 1
                continue
            finalOctaveOffset = 0 if octaveMod == 0 else (octave + octaveOffset) % octaveMod
            finalOctaveOffset = finalOctaveOffset + 2
            previousOctave = finalOctaveOffset
            previousOffset = finalOffset
            chromatic = ['C','C#','D','D#','E','F','F#','G','G#','A','A#','B']
            finalOffset = chromatic.index(scaleNotes[noteOffset % scaleLen]) + 12*finalOctaveOffset
            # Clamp to octave 5 so pitches stay in a playable range.
            if finalOffset / 12 >= 6:
                finalOctaveOffset = 5
            melody.append((scaleNotes[noteOffset % scaleLen],finalOctaveOffset,noteDuration, 30 + 20*int(tension/9) + randint(4,8) + meta_tension))
            counter = counter + 1
            previousOctave = finalOffset / 12
        return melody
def melody_string(notes):
    """Serialize a melody into a compact token string.

    Each note tuple contributes its pitch name (index 0) immediately followed
    by its duration (index 2); octave and velocity fields are ignored.
    """
    tokens = []
    for note in notes:
        tokens.append(note[0] + str(note[2]))
    return ''.join(tokens)
def numberToBase(n, b):
    """Return the digits of non-negative integer ``n`` written in base ``b``.

    Digits are most-significant first, e.g. numberToBase(10, 2) -> [1, 0, 1, 0].
    """
    if n == 0:
        return [0]
    digits = []
    while n:
        digits.append(int(n % b))
        # Floor division: the original `n /= b` true-divides under Python 3,
        # turning n into a float and producing wrong digit sequences.
        n //= b
    return digits[::-1]
def sumOfDigits(num):
    """Return the sum of an iterable of digits, coercing each to int."""
    return sum(int(digit) for digit in num)
def generateNoteDelta(counter, base, multiplier):
    """Digit sum of (counter * multiplier) written in the given base.

    Used to derive a fractal-ish sequence that varies note durations.
    """
    digits = numberToBase(counter * multiplier, base)
    return sumOfDigits(digits)
def generateDurationSequence(cycle_length, beat_length, tension, tension_direction, counter, noteCounter, multiplier, base):
    """Generate ``cycle_length`` random even note durations aligned with the beat.

    Durations are drawn as 2*randint(1, max_power), where max_power comes either
    from a tension-indexed power table or a fractal digit-sum sequence (longer
    notes for lower tension and vice versa).  Candidate sequences are
    regenerated until their total is a multiple of ``target``; after every 5000
    failed attempts the alignment requirement is relaxed by halving ``target``
    (floored at 1, which always succeeds).

    ``beat_length`` and ``tension_direction`` are currently unused but kept for
    interface compatibility with callers.
    """
    fractal_seq = [generateNoteDelta(noteCounter + counter + x, multiplier, base) for x in range(0, 9)]
    # NOTE(review): assumes no digit sum is 0; a 0 entry would raise
    # ZeroDivisionError here -- confirm counter*multiplier is never 0.
    fractal_seq = [round(float(max(fractal_seq)) / float(x)) for x in fractal_seq]
    ascending_rhythm_powers = [randint(2, 3), randint(2, 3), randint(2, 3), randint(3, 4), randint(3, 4), randint(2, 3), randint(2, 3), randint(1, 2), randint(1, 2)]
    max_power = int(ascending_rhythm_powers[tension] if randint(1, 2) % 2 == 0 else fractal_seq[tension])
    if 0 <= tension <= 2:
        target = 8 * randint(1, 3)
    elif tension <= 4:
        target = 8 * randint(1, 2)
    else:
        target = 4 * randint(1, 3)
    attempts = 0
    # This is what keeps it on beat: keep generating random note lengths until
    # the total lines up with the beat.  On repeated failure accept worse and
    # worse alignments.  (The original never incremented its counter, so the
    # target was halved on *every* iteration, and `/` produced floats on py3.)
    while True:
        uniform_beats = [2 * randint(1, max_power) for x in range(0, cycle_length)]
        attempts += 1
        if attempts % 5000 == 0:
            target = max(1, target // 2)
        if sum(uniform_beats) % target == 0:
            break
    return uniform_beats
def compress(uncompressed):
    """LZW-compress a string into a list of integer codes.

    Used to estimate how repetitive a melody is: the more repetitive the
    input, the shorter the output relative to the input.
    Based on the Rosetta Code LZW implementation.
    """
    dict_size = 256
    # range() instead of xrange() so this also runs under Python 3;
    # semantics are identical for this use.
    dictionary = dict((chr(i), i) for i in range(dict_size))
    w = ""
    result = []
    for c in uncompressed:
        wc = w + c
        if wc in dictionary:
            w = wc
        else:
            result.append(dictionary[w])
            # Add wc to the dictionary.
            dictionary[wc] = dict_size
            dict_size += 1
            w = c
    # Emit the code for the final pending sequence, if any.
    if w:
        result.append(dictionary[w])
    return result
# Note pools (pitch classes rooted on C) that can be passed to
# composeAndWriteToFile as the `scale` argument.
majorScaleNotes = ['C','D','E','F','G','A']
pentatonic = ['C','D','E','G','A']
bluesScaleNotes = ['C','D#','F','F#','A#']
arabScaleNotes = ['C','C#','E','F','G','G#']
spanish = ['C', 'C#', 'E' ,'F' ,'G' , 'G#' ,'A#']
def composeAndWriteToFile(scale, duration, fileName):
    """Compose a melody over ``scale`` for ``duration`` steps and save it as a MIDI file."""
    composer = Composer()
    melody = composer.compose(scale, duration)
    midi_generator = MIDIGenerator(fileName)
    midi_generator.addMelody(melody)
    midi_generator.writeMidiToFile()
composeAndWriteToFile(pentatonic,500,"output.mid")
|
|
from pathlib import Path
import pandas as pd
from sqlalchemy import create_engine, inspect, Table, Column
from collections import defaultdict
from ilxutils.tools import light_degrade, open_pickle, create_pickle
import os
#ELASTIC = 'https://5f86098ac2b28a982cebf64e82db4ea2.us-west-2.aws.found.io:9243/interlex/term/'
# Local pickle backups of the InterLex tables; read by IlxSql(from_backup=True)
# and refreshed by the get_* methods after each live query.
TERMS_COMPLETE_BACKUP_PATH = Path.home()/'Dropbox/interlex_backups/ilx_db_terms_complete_backup.pickle'
TERMS_BACKUP_PATH = Path.home()/'Dropbox/interlex_backups/ilx_db_terms_backup.pickle'
ANNOS_BACKUP_PATH = Path.home()/'Dropbox/interlex_backups/ilx_db_annotations_backup.pickle'
RELAS_BACKUP_PATH = Path.home()/'Dropbox/interlex_backups/ilx_db_relationships_backup.pickle'
SUPER_BACKUP_PATH = Path.home()/'Dropbox/interlex_backups/ilx_db_superclasses_backup.pickle'
SYNOS_BACKUP_PATH = Path.home()/'Dropbox/interlex_backups/ilx_db_synonyms_backup.pickle'
EXIDS_BACKUP_PATH = Path.home()/'Dropbox/interlex_backups/ilx_db_ex_backup.pickle'
class IlxSql():
    """Helper for pulling InterLex tables into pandas DataFrames.

    Each table (terms, synonyms, annotations, relationships, superclasses,
    existing ids) is loaded lazily via get_*/fetch_* and cached on the
    instance; with ``from_backup=True`` the pickled local backups are read
    instead of querying MySQL.  The remaining methods build lookup dicts
    (label -> rows, ilx -> synonyms, ...) from those frames.
    """
    def __init__(self, db_url, pre_load=False, from_backup=False):
        """
        db_url: SQLAlchemy database URL.
        pre_load: eagerly load every table now instead of on first use.
        from_backup: read tables from the pickled backups instead of the db.
        """
        self.db_url = db_url
        self.engine = create_engine(self.db_url)
        self.local_degrade = lambda string: string.lower().strip() # current degrade of choice for sql
        self.from_backup = from_backup
        # Empty DataFrame *instances* act as not-loaded sentinels so the
        # fetch_* methods can test `.empty`.  (Previously the pd.DataFrame
        # class itself was stored, which made every `.empty` check
        # meaningless, and `self.get_superclasses` was never actually called.)
        self.terms = pd.DataFrame()
        self.superclasses = pd.DataFrame()
        self.annotations = pd.DataFrame()
        self.existing_ids = pd.DataFrame()
        self.relationships = pd.DataFrame()
        self.synonyms = pd.DataFrame()
        self.terms_complete = pd.DataFrame()
        if pre_load:
            self.terms = self.get_terms()
            self.superclasses = self.get_superclasses()
            self.annotations = self.get_annotations()
            self.existing_ids = self.get_existing_ids()
            self.relationships = self.get_relationships()
            self.synonyms = self.get_synonyms()
            # Built from the frames above, so it must be loaded last.
            self.terms_complete = self.get_terms_complete()
    def fetch_terms_complete(self):
        ''' Cached terms_complete frame, loading it on first use. '''
        if self.terms_complete.empty:
            return self.get_terms_complete()
        return self.terms_complete
    def fetch_terms(self):
        ''' Cached terms frame, loading it on first use. '''
        if self.terms.empty:
            return self.get_terms()
        return self.terms
    def fetch_annotations(self):
        ''' Cached annotations frame, loading it on first use. '''
        if self.annotations.empty:
            return self.get_annotations()
        return self.annotations
    def fetch_existing_ids(self):
        ''' Cached existing_ids frame, loading it on first use. '''
        if self.existing_ids.empty:
            return self.get_existing_ids()
        return self.existing_ids
    def fetch_relationships(self):
        ''' Cached relationships frame, loading it on first use. '''
        if self.relationships.empty:
            return self.get_relationships()
        return self.relationships
    def fetch_synonyms(self):
        ''' Cached synonyms frame, loading it on first use. '''
        if self.synonyms.empty:
            return self.get_synonyms()
        return self.synonyms
    def fetch_superclasses(self):
        ''' Cached superclasses frame, loading it on first use. '''
        if self.superclasses.empty:
            return self.get_superclasses()
        return self.superclasses
    def get_terms(self):
        ''' One row per ilx; GROUP BY is a shortcut to only getting the first
            in every list of group '''
        if not self.terms.empty:
            return self.terms
        if self.from_backup:
            self.terms = open_pickle(TERMS_BACKUP_PATH)
            return self.terms
        engine = create_engine(self.db_url)
        data = """
        SELECT t.id as tid, t.ilx, t.label, t.definition, t.type, t.comment, t.version, t.uid, t.time
        FROM terms t
        GROUP BY t.ilx
        """
        self.terms = pd.read_sql(data, engine)
        create_pickle(self.terms, TERMS_BACKUP_PATH)
        return self.terms
    def get_annotations(self):
        ''' Annotations joined with their term and annotation-type ilx/label. '''
        # BUG FIX: the original `if not self.annotations:` returned
        # fetch_annotations(), which called straight back into this method
        # (infinite mutual recursion) and never used the cache.
        if not self.annotations.empty:
            return self.annotations
        if self.from_backup:
            self.annotations = open_pickle(ANNOS_BACKUP_PATH)
            return self.annotations
        engine = create_engine(self.db_url)
        data = """
        SELECT
            ta.tid, ta.annotation_tid as annotation_type_tid,
            t1.ilx as term_ilx, t2.ilx as annotation_type_ilx,
            t2.label as annotation_type_label,
            ta.value
        FROM term_annotations AS ta
        JOIN (
            SELECT *
            FROM terms
            GROUP BY terms.ilx
        ) AS t1 ON ta.tid=t1.id
        JOIN (
            SELECT *
            FROM terms
            GROUP BY terms.ilx
        ) AS t2 ON ta.annotation_tid=t2.id
        """
        self.annotations = pd.read_sql(data, engine)
        create_pickle(self.annotations, ANNOS_BACKUP_PATH)
        return self.annotations
    def get_existing_ids(self):
        ''' Existing ids (curie/iri) joined with their term row. '''
        if not self.existing_ids.empty:
            return self.existing_ids
        if self.from_backup:
            self.existing_ids = open_pickle(EXIDS_BACKUP_PATH)
            return self.existing_ids
        engine = create_engine(self.db_url)
        data = """
        SELECT tei.tid, tei.curie, tei.iri, tei.preferred, t.ilx, t.label, t.definition
        FROM (
            SELECT *
            FROM terms
            GROUP BY terms.ilx
        ) as t
        JOIN term_existing_ids AS tei
        ON t.id = tei.tid
        """
        self.existing_ids = pd.read_sql(data, engine)
        create_pickle(self.existing_ids, EXIDS_BACKUP_PATH)
        return self.existing_ids
    def get_relationships(self):
        ''' Term-to-term relationships with both endpoints and the relation term. '''
        if not self.relationships.empty:
            return self.relationships
        if self.from_backup:
            self.relationships = open_pickle(RELAS_BACKUP_PATH)
            return self.relationships
        engine = create_engine(self.db_url)
        # NOTE(review): joins use tr.term1_id/tr.term2_id but
        # tr.relationship_tid -- confirm the column names against the schema.
        data = """
        SELECT
           t1.id as term1_tid, t1.ilx AS term1_ilx, t1.type as term1_type,
           t2.id as term2_tid, t2.ilx AS term2_ilx, t2.type as term2_type,
           t3.id as relationship_tid, t3.ilx AS relationship_ilx, t3.label as relationship_label
        FROM term_relationships AS tr
        JOIN (
            SELECT *
            FROM terms
            GROUP BY terms.ilx
        ) t1 ON t1.id = tr.term1_id
        JOIN (
            SELECT *
            FROM terms
            GROUP BY terms.ilx
        ) AS t2 ON t2.id = tr.term2_id
        JOIN (
            SELECT *
            FROM terms
            GROUP BY terms.ilx
        ) AS t3 ON t3.id = tr.relationship_tid
        """
        self.relationships = pd.read_sql(data, engine)
        create_pickle(self.relationships, RELAS_BACKUP_PATH)
        return self.relationships
    def get_superclasses(self):
        ''' Superclass links with both the term and its superclass label/ilx. '''
        if not self.superclasses.empty:
            return self.superclasses
        if self.from_backup:
            self.superclasses = open_pickle(SUPER_BACKUP_PATH)
            return self.superclasses
        engine = create_engine(self.db_url)
        data = """
        SELECT
            ts.tid, ts.superclass_tid,
            t1.label as term_label, t1.ilx as term_ilx,
            t2.label as superclass_label, t2.ilx as superclass_ilx
        FROM term_superclasses AS ts
        JOIN (
            SELECT *
            FROM terms
            GROUP BY terms.ilx
        ) as t1
        ON t1.id = ts.tid
        JOIN (
            SELECT *
            FROM terms
            GROUP BY terms.ilx
        ) AS t2
        ON t2.id = ts.superclass_tid
        """
        self.superclasses = pd.read_sql(data, engine)
        create_pickle(self.superclasses, SUPER_BACKUP_PATH)
        return self.superclasses
    def get_synonyms(self):
        ''' Synonym literals joined with their term ilx. '''
        if not self.synonyms.empty:
            return self.synonyms
        if self.from_backup:
            self.synonyms = open_pickle(SYNOS_BACKUP_PATH)
            return self.synonyms
        engine = create_engine(self.db_url)
        data = """
        SELECT ts.tid as tid, t.ilx, ts.literal, ts.type
        FROM term_synonyms AS ts
        JOIN (
            SELECT *
            FROM terms
            GROUP BY terms.ilx
        ) AS t
        WHERE ts.tid=t.id
        """
        self.synonyms = pd.read_sql(data, engine)
        create_pickle(self.synonyms, SYNOS_BACKUP_PATH)
        return self.synonyms
    def get_terms_complete(self) -> pd.DataFrame:
        ''' Gets complete entity data like term/view: each term row plus its
            synonyms, existing ids, annotations and superclass. '''
        if not self.terms_complete.empty:
            return self.terms_complete
        if self.from_backup:
            self.terms_complete = open_pickle(TERMS_COMPLETE_BACKUP_PATH)
            return self.terms_complete
        ilx2synonyms = self.get_ilx2synonyms()
        ilx2existing_ids = self.get_ilx2existing_ids()
        ilx2annotations = self.get_ilx2annotations()
        ilx2superclass = self.get_ilx2superclass()
        ilx_complete = []
        header = ['Index'] + list(self.fetch_terms().columns)
        for row in self.fetch_terms().itertuples():
            row = {header[i]:val for i, val in enumerate(row)}
            row['synonyms'] = ilx2synonyms.get(row['ilx'])
            row['existing_ids'] = ilx2existing_ids[row['ilx']] # if breaks we have worse problems
            row['annotations'] = ilx2annotations.get(row['ilx'])
            row['superclass'] = ilx2superclass.get(row['ilx'])
            ilx_complete.append(row)
        # Cache on the instance like every other getter (previously the
        # result was returned without being stored).
        self.terms_complete = pd.DataFrame(ilx_complete)
        create_pickle(self.terms_complete, TERMS_COMPLETE_BACKUP_PATH)
        return self.terms_complete
    def get_label2id(self):
        ''' Map degraded label -> {'term'|'cde'|'fde': [term ids]}, keeping the
            first row seen per (label, type, ilx). '''
        self.terms = self.fetch_terms()
        visited = {}
        label_to_id = defaultdict(lambda: defaultdict(list))
        for row in self.terms.itertuples():
            label = self.local_degrade(row.label)
            if not visited.get((label, row.type, row.ilx)):
                if row.type in ('term', 'cde', 'fde'):
                    # terms are selected as `t.id as tid`, so the tuple
                    # attribute is `tid` (the original `row.id` raised
                    # AttributeError).
                    label_to_id[label][row.type].append(int(row.tid))
                    visited[(label, row.type, row.ilx)] = True
        return label_to_id
    def get_label2ilxs(self):
        ''' Map degraded label -> [ilx ids], first row per (label, type, ilx). '''
        self.terms = self.fetch_terms()
        visited = {}
        label_to_ilx = defaultdict(list)
        for row in self.terms.itertuples():
            label = self.local_degrade(row.label)
            if not visited.get((label, row.type, row.ilx)):
                label_to_ilx[label].append(str(row.ilx))
                visited[(label, row.type, row.ilx)] = True
        return label_to_ilx
    def get_label2rows(self):
        ''' Map degraded label -> [complete term rows]. '''
        self.terms_complete = self.fetch_terms_complete()
        visited = {}
        label2rows = defaultdict(list)
        header = ['Index'] + list(self.terms_complete.columns)
        for row in self.terms_complete.itertuples():
            row = {header[i]:val for i, val in enumerate(row)}
            label = self.local_degrade(row['label'])
            if not visited.get((label, row['type'], row['ilx'])):
                label2rows[label].append(row)
                visited[(label, row['type'], row['ilx'])] = True
        return label2rows
    def get_definition2rows(self):
        ''' Map degraded definition -> [term rows]; empty definitions skipped. '''
        self.terms = self.fetch_terms()
        visited = {}
        definition2rows = defaultdict(list)
        header = ['Index'] + list(self.terms.columns)
        for row in self.terms.itertuples():
            row = {header[i]:val for i, val in enumerate(row)}
            # Guard None/empty before degrading; local_degrade would raise on
            # a NULL definition.
            if not row['definition']:
                continue
            definition = self.local_degrade(row['definition'])
            if not definition:
                continue
            if not visited.get((definition, row['type'], row['ilx'])):
                definition2rows[definition].append(row)
                visited[(definition, row['type'], row['ilx'])] = True
        return definition2rows
    def get_tid2row(self):
        ''' Map term id -> term row. '''
        tid2row = {}
        header = ['Index'] + list(self.fetch_terms().columns)
        for row in self.fetch_terms().itertuples():
            row = {header[i]:val for i, val in enumerate(row)}
            tid2row[row['tid']] = row
        return tid2row
    def get_ilx2row(self):
        ''' Map ilx id -> term row. '''
        ilx2row = {}
        header = ['Index'] + list(self.fetch_terms().columns)
        for row in self.fetch_terms().itertuples():
            row = {header[i]:val for i, val in enumerate(row)}
            ilx2row[row['ilx']] = row
        return ilx2row
    def get_ilx2superclass(self, clean:bool=True):
        ''' Map term ilx -> [superclass dicts]; clean: for list of literals only '''
        ilx2superclass = defaultdict(list)
        header = ['Index'] + list(self.fetch_superclasses().columns)
        for row in self.fetch_superclasses().itertuples():
            row = {header[i]:val for i, val in enumerate(row)}
            if clean:
                superclass = {
                    'tid': row['superclass_tid'],
                    'ilx': row['superclass_ilx'],
                }
                ilx2superclass[row['term_ilx']].append(superclass)
            elif not clean:
                ilx2superclass[row['term_ilx']].append(row)
        return ilx2superclass
    def get_tid2annotations(self, clean:bool=True):
        ''' Map term id -> [annotation dicts]; clean: for list of literals only '''
        tid2annotations = defaultdict(list)
        header = ['Index'] + list(self.fetch_annotations().columns)
        for row in self.fetch_annotations().itertuples():
            row = {header[i]:val for i, val in enumerate(row)}
            if clean:
                annotation = {
                    'tid': row['tid'],
                    'annotation_type_tid': row['annotation_type_tid'],
                    'value': row['value'],
                    'annotation_type_label': row['annotation_type_label'],
                }
                tid2annotations[row['tid']].append(annotation)
            elif not clean:
                tid2annotations[row['tid']].append(row)
        return tid2annotations
    def get_ilx2annotations(self, clean:bool=True):
        ''' Map term ilx -> [annotation dicts]; clean: for list of literals only '''
        ilx2annotations = defaultdict(list)
        header = ['Index'] + list(self.fetch_annotations().columns)
        for row in self.fetch_annotations().itertuples():
            row = {header[i]:val for i, val in enumerate(row)}
            if clean:
                annotation = {
                    'tid': row['tid'],
                    'annotation_type_tid': row['annotation_type_tid'],
                    'value': row['value'],
                    'annotation_type_label': row['annotation_type_label'],
                }
                ilx2annotations[row['term_ilx']].append(annotation)
            elif not clean:
                ilx2annotations[row['term_ilx']].append(row)
        return ilx2annotations
    def get_tid2synonyms(self, clean:bool=True):
        ''' Map term id -> [synonym dicts]; clean: for list of literals only '''
        # BUG FIX: was a plain dict, so the .append below raised KeyError on
        # the first synonym of every term.
        tid2synonyms = defaultdict(list)
        header = ['Index'] + list(self.fetch_synonyms().columns)
        for row in self.fetch_synonyms().itertuples():
            row = {header[i]:val for i, val in enumerate(row)}
            if clean:
                synonym = {'literal':row['literal'], 'type':row['type']}
                tid2synonyms[row['tid']].append(synonym)
            elif not clean:
                tid2synonyms[row['tid']].append(row)
        return tid2synonyms
    def get_ilx2synonyms(self, clean:bool=True):
        ''' Map term ilx -> [synonym dicts]; clean: for list of literals only '''
        ilx2synonyms = defaultdict(list)
        header = ['Index'] + list(self.fetch_synonyms().columns)
        for row in self.fetch_synonyms().itertuples():
            row = {header[i]:val for i, val in enumerate(row)}
            if clean:
                synonym = {'literal':row['literal'], 'type':row['type']}
                ilx2synonyms[row['ilx']].append(synonym)
            elif not clean:
                ilx2synonyms[row['ilx']].append(row)
        return ilx2synonyms
    def get_iri2row(self):
        ''' Map existing-id iri -> existing-id row. '''
        iri2row = {}
        header = ['Index'] + list(self.fetch_existing_ids().columns)
        for row in self.fetch_existing_ids().itertuples():
            row = {header[i]:val for i, val in enumerate(row)}
            iri2row[row['iri']] = row
        return iri2row
    def get_tid2existing_ids(self, clean=True):
        ''' Map term id -> [existing-id dicts]; clean: for list of literals only. '''
        tid2existing_ids = defaultdict(list)
        header = ['Index'] + list(self.fetch_existing_ids().columns)
        for row in self.fetch_existing_ids().itertuples():
            row = {header[i]:val for i, val in enumerate(row)}
            if clean:
                existing_id = {'iri':row['iri'], 'curie':row['curie']}
                tid2existing_ids[row['tid']].append(existing_id)
            elif not clean:
                tid2existing_ids[row['tid']].append(row)
        return tid2existing_ids
    def get_ilx2existing_ids(self, clean=True):
        ''' Map term ilx -> [existing-id dicts]; clean: for list of literals only. '''
        ilx2existing_ids = defaultdict(list)
        header = ['Index'] + list(self.fetch_existing_ids().columns)
        for row in self.fetch_existing_ids().itertuples():
            row = {header[i]:val for i, val in enumerate(row)}
            if clean:
                existing_id = {'iri':row['iri'], 'curie':row['curie']}
                ilx2existing_ids[row['ilx']].append(existing_id)
            elif not clean:
                ilx2existing_ids[row['ilx']].append(row)
        return ilx2existing_ids
    def get_curie2row(self):
        ''' Map existing-id curie -> existing-id row. '''
        curie2row = {}
        header = ['Index'] + list(self.fetch_existing_ids().columns)
        for row in self.fetch_existing_ids().itertuples():
            row = {header[i]:val for i, val in enumerate(row)}
            curie2row[row['curie']] = row
        return curie2row
    def get_fragment2rows(self):
        ''' Map curie fragment (text after the colon) -> [existing-id rows]. '''
        fragment2rows = defaultdict(list)
        header = ['Index'] + list(self.fetch_existing_ids().columns)
        for row in self.fetch_existing_ids().itertuples():
            row = {header[i]:val for i, val in enumerate(row)}
            if not row['curie']: # there are a few with no curies that will cause a false positive
                continue
            fragment = row['curie'].split(':')[-1]
            fragment2rows[fragment].append(row)
        return fragment2rows
    def show_tables(self):
        ''' List the tables in the connected database. '''
        data = "SHOW tables;"
        return pd.read_sql(data, self.engine)
    def get_table(self, tablename, limit=5):
        ''' Return the first ``limit`` rows of ``tablename``. '''
        data = """
            SELECT *
            FROM {tablename}
            LIMIT {limit}
        """.format(tablename=tablename, limit=limit)
        return pd.read_sql(data, self.engine)
    def get_custom(self, data):
        ''' Run an arbitrary SQL query and return it as a DataFrame. '''
        return pd.read_sql(data, self.engine)
def main():
    """Smoke test: connect using the production URL and print a few relationship rows."""
    connection_url = os.environ.get('SCICRUNCH_DB_URL_PRODUCTION')
    ilx_sql = IlxSql(connection_url)
    relationships = ilx_sql.get_relationships()
    print(relationships.head())
if __name__ == '__main__':
    main()
|
|
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import Prefetch, QuerySet
from django.db.models.query import get_prefetcher
from django.test import TestCase, override_settings
from django.test.utils import CaptureQueriesContext
from .models import (
Author, Author2, AuthorAddress, AuthorWithAge, Bio, Book, Bookmark,
BookReview, BookWithYear, Comment, Department, Employee, FavoriteAuthors,
House, LessonEntry, Person, Qualification, Reader, Room, TaggedItem,
Teacher, WordEntry,
)
class PrefetchRelatedTests(TestCase):
    """Behaviour of QuerySet.prefetch_related() across m2m relations,
    forward/reverse foreign keys, and reverse one-to-one relations."""
    @classmethod
    def setUpTestData(cls):
        # Four books / four authors / two readers; book1 gets three authors so
        # the m2m prefetches exercise multi-object result lists.
        cls.book1 = Book.objects.create(title='Poems')
        cls.book2 = Book.objects.create(title='Jane Eyre')
        cls.book3 = Book.objects.create(title='Wuthering Heights')
        cls.book4 = Book.objects.create(title='Sense and Sensibility')
        cls.author1 = Author.objects.create(name='Charlotte', first_book=cls.book1)
        cls.author2 = Author.objects.create(name='Anne', first_book=cls.book1)
        cls.author3 = Author.objects.create(name='Emily', first_book=cls.book1)
        cls.author4 = Author.objects.create(name='Jane', first_book=cls.book4)
        cls.book1.authors.add(cls.author1, cls.author2, cls.author3)
        cls.book2.authors.add(cls.author1)
        cls.book3.authors.add(cls.author3)
        cls.book4.authors.add(cls.author4)
        cls.reader1 = Reader.objects.create(name='Amy')
        cls.reader2 = Reader.objects.create(name='Belinda')
        cls.reader1.books_read.add(cls.book1, cls.book4)
        cls.reader2.books_read.add(cls.book2, cls.book4)
    def assertWhereContains(self, sql, needle):
        # Assert that `needle` appears exactly once in the WHERE clause of `sql`.
        where_idx = sql.index('WHERE')
        self.assertEqual(
            sql.count(str(needle), where_idx), 1,
            msg="WHERE clause doesn't contain %s, actual SQL: %s" % (needle, sql[where_idx:])
        )
    def test_m2m_forward(self):
        with self.assertNumQueries(2):
            lists = [list(b.authors.all()) for b in Book.objects.prefetch_related('authors')]
        normal_lists = [list(b.authors.all()) for b in Book.objects.all()]
        self.assertEqual(lists, normal_lists)
    def test_m2m_reverse(self):
        with self.assertNumQueries(2):
            lists = [list(a.books.all()) for a in Author.objects.prefetch_related('books')]
        normal_lists = [list(a.books.all()) for a in Author.objects.all()]
        self.assertEqual(lists, normal_lists)
    def test_foreignkey_forward(self):
        with self.assertNumQueries(2):
            books = [a.first_book for a in Author.objects.prefetch_related('first_book')]
        normal_books = [a.first_book for a in Author.objects.all()]
        self.assertEqual(books, normal_books)
    def test_foreignkey_reverse(self):
        with self.assertNumQueries(2):
            [list(b.first_time_authors.all())
             for b in Book.objects.prefetch_related('first_time_authors')]
        self.assertQuerysetEqual(self.book2.authors.all(), ["<Author: Charlotte>"])
    def test_onetoone_reverse_no_match(self):
        # Regression for #17439
        with self.assertNumQueries(2):
            book = Book.objects.prefetch_related('bookwithyear').all()[0]
        # A prefetched-but-missing reverse o2o must raise without re-querying.
        with self.assertNumQueries(0):
            with self.assertRaises(BookWithYear.DoesNotExist):
                book.bookwithyear
    def test_survives_clone(self):
        with self.assertNumQueries(2):
            [list(b.first_time_authors.all())
             for b in Book.objects.prefetch_related('first_time_authors').exclude(id=1000)]
    def test_len(self):
        with self.assertNumQueries(2):
            qs = Book.objects.prefetch_related('first_time_authors')
            len(qs)
            [list(b.first_time_authors.all()) for b in qs]
    def test_bool(self):
        with self.assertNumQueries(2):
            qs = Book.objects.prefetch_related('first_time_authors')
            bool(qs)
            [list(b.first_time_authors.all()) for b in qs]
    def test_count(self):
        with self.assertNumQueries(2):
            qs = Book.objects.prefetch_related('first_time_authors')
            [b.first_time_authors.count() for b in qs]
    def test_exists(self):
        with self.assertNumQueries(2):
            qs = Book.objects.prefetch_related('first_time_authors')
            [b.first_time_authors.exists() for b in qs]
    def test_in_and_prefetch_related(self):
        """
        Regression test for #20242 - QuerySet "in" didn't work the first time
        when using prefetch_related. This was fixed by the removal of chunked
        reads from QuerySet iteration in
        70679243d1786e03557c28929f9762a119e3ac14.
        """
        qs = Book.objects.prefetch_related('first_time_authors')
        self.assertIn(qs[0], qs)
    def test_clear(self):
        # prefetch_related(None) clears previously registered lookups.
        with self.assertNumQueries(5):
            with_prefetch = Author.objects.prefetch_related('books')
            without_prefetch = with_prefetch.prefetch_related(None)
            [list(a.books.all()) for a in without_prefetch]
    def test_m2m_then_m2m(self):
        """A m2m can be followed through another m2m."""
        with self.assertNumQueries(3):
            qs = Author.objects.prefetch_related('books__read_by')
            lists = [[[str(r) for r in b.read_by.all()]
                      for b in a.books.all()]
                     for a in qs]
            self.assertEqual(lists, [
                [["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
                [["Amy"]], # Anne - Poems
                [["Amy"], []], # Emily - Poems, Wuthering Heights
                [["Amy", "Belinda"]], # Jane - Sense and Sensibility
            ])
    def test_overriding_prefetch(self):
        with self.assertNumQueries(3):
            qs = Author.objects.prefetch_related('books', 'books__read_by')
            lists = [[[str(r) for r in b.read_by.all()]
                      for b in a.books.all()]
                     for a in qs]
            self.assertEqual(lists, [
                [["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
                [["Amy"]], # Anne - Poems
                [["Amy"], []], # Emily - Poems, Wuthering Heights
                [["Amy", "Belinda"]], # Jane - Sense and Sensibility
            ])
        with self.assertNumQueries(3):
            qs = Author.objects.prefetch_related('books__read_by', 'books')
            lists = [[[str(r) for r in b.read_by.all()]
                      for b in a.books.all()]
                     for a in qs]
            self.assertEqual(lists, [
                [["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
                [["Amy"]], # Anne - Poems
                [["Amy"], []], # Emily - Poems, Wuthering Heights
                [["Amy", "Belinda"]], # Jane - Sense and Sensibility
            ])
    def test_get(self):
        """
        Objects retrieved with .get() get the prefetch behavior.
        """
        # Need a double
        with self.assertNumQueries(3):
            author = Author.objects.prefetch_related('books__read_by').get(name="Charlotte")
            lists = [[str(r) for r in b.read_by.all()] for b in author.books.all()]
            self.assertEqual(lists, [["Amy"], ["Belinda"]]) # Poems, Jane Eyre
    def test_foreign_key_then_m2m(self):
        """
        A m2m relation can be followed after a relation like ForeignKey that
        doesn't have many objects.
        """
        with self.assertNumQueries(2):
            qs = Author.objects.select_related('first_book').prefetch_related('first_book__read_by')
            lists = [[str(r) for r in a.first_book.read_by.all()]
                     for a in qs]
        self.assertEqual(lists, [["Amy"], ["Amy"], ["Amy"], ["Amy", "Belinda"]])
    def test_reverse_one_to_one_then_m2m(self):
        """
        A m2m relation can be followed after going through the select_related
        reverse of an o2o.
        """
        qs = Author.objects.prefetch_related('bio__books').select_related('bio')
        with self.assertNumQueries(1):
            list(qs.all())
        Bio.objects.create(author=self.author1)
        with self.assertNumQueries(2):
            list(qs.all())
    def test_attribute_error(self):
        qs = Reader.objects.all().prefetch_related('books_read__xyz')
        with self.assertRaises(AttributeError) as cm:
            list(qs)
        self.assertIn('prefetch_related', str(cm.exception))
    def test_invalid_final_lookup(self):
        qs = Book.objects.prefetch_related('authors__name')
        with self.assertRaises(ValueError) as cm:
            list(qs)
        self.assertIn('prefetch_related', str(cm.exception))
        self.assertIn("name", str(cm.exception))
    def test_forward_m2m_to_attr_conflict(self):
        msg = 'to_attr=authors conflicts with a field on the Book model.'
        authors = Author.objects.all()
        with self.assertRaisesMessage(ValueError, msg):
            list(Book.objects.prefetch_related(
                Prefetch('authors', queryset=authors, to_attr='authors'),
            ))
        # Without the ValueError, an author was deleted due to the implicit
        # save of the relation assignment.
        self.assertEqual(self.book1.authors.count(), 3)
    def test_reverse_m2m_to_attr_conflict(self):
        msg = 'to_attr=books conflicts with a field on the Author model.'
        poems = Book.objects.filter(title='Poems')
        with self.assertRaisesMessage(ValueError, msg):
            list(Author.objects.prefetch_related(
                Prefetch('books', queryset=poems, to_attr='books'),
            ))
        # Without the ValueError, a book was deleted due to the implicit
        # save of reverse relation assignment.
        self.assertEqual(self.author1.books.count(), 2)
    def test_m2m_then_reverse_fk_object_ids(self):
        with CaptureQueriesContext(connection) as queries:
            list(Book.objects.prefetch_related('authors__addresses'))
        sql = queries[-1]['sql']
        self.assertWhereContains(sql, self.author1.name)
    def test_m2m_then_m2m_object_ids(self):
        with CaptureQueriesContext(connection) as queries:
            list(Book.objects.prefetch_related('authors__favorite_authors'))
        sql = queries[-1]['sql']
        self.assertWhereContains(sql, self.author1.name)
    def test_m2m_then_reverse_one_to_one_object_ids(self):
        with CaptureQueriesContext(connection) as queries:
            list(Book.objects.prefetch_related('authors__authorwithage'))
        sql = queries[-1]['sql']
        self.assertWhereContains(sql, self.author1.id)
class CustomPrefetchTests(TestCase):
    @classmethod
    def traverse_qs(cls, obj_iter, path):
        """
        Helper method that returns a list of (object, related objects) pairs
        for the objects in obj_iter. For each object, the attribute names in
        ``path`` are followed recursively and every object found along the
        way is collected into the second element of the pair.
        """
        ret_val = []
        # Accept managers/querysets as well as plain iterables or single objects.
        if hasattr(obj_iter, 'all'):
            obj_iter = obj_iter.all()
        try:
            iter(obj_iter)
        except TypeError:
            obj_iter = [obj_iter]
        for obj in obj_iter:
            rel_objs = []
            for part in path:
                if not part:
                    continue
                try:
                    related = getattr(obj, part[0])
                except ObjectDoesNotExist:
                    continue
                if related is not None:
                    # Recurse into the remainder of this path segment.
                    rel_objs.extend(cls.traverse_qs(related, [part[1:]]))
            ret_val.append((obj, rel_objs))
        return ret_val
    @classmethod
    def setUpTestData(cls):
        # Two people owning two houses each; every house has three rooms and
        # its main_room set to the dining room.
        cls.person1 = Person.objects.create(name='Joe')
        cls.person2 = Person.objects.create(name='Mary')
        # Set main_room for each house before creating the next one for
        # databases where supports_nullable_unique_constraints is False.
        cls.house1 = House.objects.create(name='House 1', address='123 Main St', owner=cls.person1)
        cls.room1_1 = Room.objects.create(name='Dining room', house=cls.house1)
        cls.room1_2 = Room.objects.create(name='Lounge', house=cls.house1)
        cls.room1_3 = Room.objects.create(name='Kitchen', house=cls.house1)
        cls.house1.main_room = cls.room1_1
        cls.house1.save()
        cls.person1.houses.add(cls.house1)
        cls.house2 = House.objects.create(name='House 2', address='45 Side St', owner=cls.person1)
        cls.room2_1 = Room.objects.create(name='Dining room', house=cls.house2)
        cls.room2_2 = Room.objects.create(name='Lounge', house=cls.house2)
        cls.room2_3 = Room.objects.create(name='Kitchen', house=cls.house2)
        cls.house2.main_room = cls.room2_1
        cls.house2.save()
        cls.person1.houses.add(cls.house2)
        cls.house3 = House.objects.create(name='House 3', address='6 Downing St', owner=cls.person2)
        cls.room3_1 = Room.objects.create(name='Dining room', house=cls.house3)
        cls.room3_2 = Room.objects.create(name='Lounge', house=cls.house3)
        cls.room3_3 = Room.objects.create(name='Kitchen', house=cls.house3)
        cls.house3.main_room = cls.room3_1
        cls.house3.save()
        cls.person2.houses.add(cls.house3)
        cls.house4 = House.objects.create(name='house 4', address="7 Regents St", owner=cls.person2)
        cls.room4_1 = Room.objects.create(name='Dining room', house=cls.house4)
        cls.room4_2 = Room.objects.create(name='Lounge', house=cls.house4)
        cls.room4_3 = Room.objects.create(name='Kitchen', house=cls.house4)
        cls.house4.main_room = cls.room4_1
        cls.house4.save()
        cls.person2.houses.add(cls.house4)
    def test_traverse_qs(self):
        """Sanity-check the traverse_qs helper against a plain comprehension."""
        qs = Person.objects.prefetch_related('houses')
        # NOTE: the trailing comma makes this a 1-tuple; the assertion below
        # compensates by wrapping the other side in a 1-tuple too.
        related_objs_normal = [list(p.houses.all()) for p in qs],
        related_objs_from_traverse = [[inner[0] for inner in o[1]]
                                      for o in self.traverse_qs(qs, [['houses']])]
        self.assertEqual(related_objs_normal, (related_objs_from_traverse,))
    def test_ambiguous(self):
        """Conflicting or unresolvable prefetch lookups raise; compatible ones don't."""
        # Ambiguous: Lookup was already seen with a different queryset.
        with self.assertRaises(ValueError):
            self.traverse_qs(
                Person.objects.prefetch_related('houses__rooms', Prefetch('houses', queryset=House.objects.all())),
                [['houses', 'rooms']]
            )
        # Ambiguous: Lookup houses_lst doesn't yet exist when performing houses_lst__rooms.
        with self.assertRaises(AttributeError):
            self.traverse_qs(
                Person.objects.prefetch_related(
                    'houses_lst__rooms',
                    Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
                ),
                [['houses', 'rooms']]
            )
        # Not ambiguous.
        self.traverse_qs(
            Person.objects.prefetch_related('houses__rooms', 'houses'),
            [['houses', 'rooms']]
        )
        self.traverse_qs(
            Person.objects.prefetch_related(
                'houses__rooms',
                Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
            ),
            [['houses', 'rooms']]
        )
    def test_m2m(self):
        """Prefetch objects over an m2m behave like plain string lookups."""
        # Control lookups.
        with self.assertNumQueries(2):
            lst1 = self.traverse_qs(
                Person.objects.prefetch_related('houses'),
                [['houses']]
            )
        # Test lookups.
        with self.assertNumQueries(2):
            lst2 = self.traverse_qs(
                Person.objects.prefetch_related(Prefetch('houses')),
                [['houses']]
            )
        self.assertEqual(lst1, lst2)
        with self.assertNumQueries(2):
            lst2 = self.traverse_qs(
                Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst')),
                [['houses_lst']]
            )
        self.assertEqual(lst1, lst2)
    def test_reverse_m2m(self):
        """Prefetch objects over a reverse m2m behave like plain string lookups."""
        # Control lookups.
        with self.assertNumQueries(2):
            lst1 = self.traverse_qs(
                House.objects.prefetch_related('occupants'),
                [['occupants']]
            )
        # Test lookups.
        with self.assertNumQueries(2):
            lst2 = self.traverse_qs(
                House.objects.prefetch_related(Prefetch('occupants')),
                [['occupants']]
            )
        self.assertEqual(lst1, lst2)
        with self.assertNumQueries(2):
            lst2 = self.traverse_qs(
                House.objects.prefetch_related(Prefetch('occupants', to_attr='occupants_lst')),
                [['occupants_lst']]
            )
        self.assertEqual(lst1, lst2)
    def test_m2m_through_fk(self):
        """Prefetch objects spanning a FK then an m2m behave like string lookups."""
        # Control lookups.
        with self.assertNumQueries(3):
            lst1 = self.traverse_qs(
                Room.objects.prefetch_related('house__occupants'),
                [['house', 'occupants']]
            )
        # Test lookups.
        with self.assertNumQueries(3):
            lst2 = self.traverse_qs(
                Room.objects.prefetch_related(Prefetch('house__occupants')),
                [['house', 'occupants']]
            )
        self.assertEqual(lst1, lst2)
        with self.assertNumQueries(3):
            lst2 = self.traverse_qs(
                Room.objects.prefetch_related(Prefetch('house__occupants', to_attr='occupants_lst')),
                [['house', 'occupants_lst']]
            )
        self.assertEqual(lst1, lst2)
    def test_m2m_through_gfk(self):
        """Prefetch objects spanning a GenericForeignKey then an m2m work."""
        TaggedItem.objects.create(tag="houses", content_object=self.house1)
        TaggedItem.objects.create(tag="houses", content_object=self.house2)
        # Control lookups.
        with self.assertNumQueries(3):
            lst1 = self.traverse_qs(
                TaggedItem.objects.filter(tag='houses').prefetch_related('content_object__rooms'),
                [['content_object', 'rooms']]
            )
        # Test lookups.
        with self.assertNumQueries(3):
            lst2 = self.traverse_qs(
                TaggedItem.objects.prefetch_related(
                    Prefetch('content_object'),
                    Prefetch('content_object__rooms', to_attr='rooms_lst')
                ),
                [['content_object', 'rooms_lst']]
            )
        self.assertEqual(lst1, lst2)
def test_o2m_through_m2m(self):
    """Chained prefetches (M2M then O2M) accept any mix of strings and Prefetch objects."""
    # Control lookups.
    with self.assertNumQueries(3):
        lst1 = self.traverse_qs(
            Person.objects.prefetch_related('houses', 'houses__rooms'),
            [['houses', 'rooms']]
        )
    # Test lookups: Prefetch for the first level, string for the second.
    with self.assertNumQueries(3):
        lst2 = self.traverse_qs(
            Person.objects.prefetch_related(Prefetch('houses'), 'houses__rooms'),
            [['houses', 'rooms']]
        )
    self.assertEqual(lst1, lst2)
    # Both levels as Prefetch objects.
    with self.assertNumQueries(3):
        lst2 = self.traverse_qs(
            Person.objects.prefetch_related(Prefetch('houses'), Prefetch('houses__rooms')),
            [['houses', 'rooms']]
        )
    self.assertEqual(lst1, lst2)
    # to_attr on the first level; the string lookup traverses the new attribute.
    with self.assertNumQueries(3):
        lst2 = self.traverse_qs(
            Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst'), 'houses_lst__rooms'),
            [['houses_lst', 'rooms']]
        )
    self.assertEqual(lst1, lst2)
    # to_attr on both levels.
    with self.assertNumQueries(3):
        lst2 = self.traverse_qs(
            Person.objects.prefetch_related(
                Prefetch('houses', to_attr='houses_lst'),
                Prefetch('houses_lst__rooms', to_attr='rooms_lst')
            ),
            [['houses_lst', 'rooms_lst']]
        )
    self.assertEqual(lst1, lst2)
def test_generic_rel(self):
    """Prefetch objects work over generic relations, including chained to_attr."""
    bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
    TaggedItem.objects.create(content_object=bookmark, tag='django')
    TaggedItem.objects.create(content_object=bookmark, favorite=bookmark, tag='python')
    # Control lookups.
    with self.assertNumQueries(4):
        lst1 = self.traverse_qs(
            Bookmark.objects.prefetch_related('tags', 'tags__content_object', 'favorite_tags'),
            [['tags', 'content_object'], ['favorite_tags']]
        )
    # Test lookups.
    with self.assertNumQueries(4):
        lst2 = self.traverse_qs(
            Bookmark.objects.prefetch_related(
                Prefetch('tags', to_attr='tags_lst'),
                Prefetch('tags_lst__content_object'),
                Prefetch('favorite_tags'),
            ),
            [['tags_lst', 'content_object'], ['favorite_tags']]
        )
    self.assertEqual(lst1, lst2)
def test_traverse_single_item_property(self):
    """Prefetching through a property returning a single object (primary_house) works."""
    # Control lookups.
    with self.assertNumQueries(5):
        lst1 = self.traverse_qs(
            Person.objects.prefetch_related(
                'houses__rooms',
                'primary_house__occupants__houses',
            ),
            [['primary_house', 'occupants', 'houses']]
        )
    # Test lookups: to_attr in the middle of the chain.
    with self.assertNumQueries(5):
        lst2 = self.traverse_qs(
            Person.objects.prefetch_related(
                'houses__rooms',
                Prefetch('primary_house__occupants', to_attr='occupants_lst'),
                'primary_house__occupants_lst__houses',
            ),
            [['primary_house', 'occupants_lst', 'houses']]
        )
    self.assertEqual(lst1, lst2)
def test_traverse_multiple_items_property(self):
    """Prefetching through a property returning multiple objects (all_houses) works."""
    # Control lookups.
    with self.assertNumQueries(4):
        lst1 = self.traverse_qs(
            Person.objects.prefetch_related(
                'houses',
                'all_houses__occupants__houses',
            ),
            [['all_houses', 'occupants', 'houses']]
        )
    # Test lookups: to_attr in the middle of the chain.
    with self.assertNumQueries(4):
        lst2 = self.traverse_qs(
            Person.objects.prefetch_related(
                'houses',
                Prefetch('all_houses__occupants', to_attr='occupants_lst'),
                'all_houses__occupants_lst__houses',
            ),
            [['all_houses', 'occupants_lst', 'houses']]
        )
    self.assertEqual(lst1, lst2)
def test_custom_qs(self):
    """Prefetch with a custom queryset: filtering, select_related, nesting,
    forward/reverse one-to-one descriptors, and deferred fields."""
    # Test basic.
    with self.assertNumQueries(2):
        lst1 = list(Person.objects.prefetch_related('houses'))
    with self.assertNumQueries(2):
        lst2 = list(Person.objects.prefetch_related(
            Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')))
    self.assertEqual(
        self.traverse_qs(lst1, [['houses']]),
        self.traverse_qs(lst2, [['houses_lst']])
    )
    # Test queryset filtering.
    with self.assertNumQueries(2):
        lst2 = list(
            Person.objects.prefetch_related(
                Prefetch(
                    'houses',
                    queryset=House.objects.filter(pk__in=[self.house1.pk, self.house3.pk]),
                    to_attr='houses_lst',
                )
            )
        )
    self.assertEqual(len(lst2[0].houses_lst), 1)
    self.assertEqual(lst2[0].houses_lst[0], self.house1)
    self.assertEqual(len(lst2[1].houses_lst), 1)
    self.assertEqual(lst2[1].houses_lst[0], self.house3)
    # Test flattened.
    with self.assertNumQueries(3):
        lst1 = list(Person.objects.prefetch_related('houses__rooms'))
    with self.assertNumQueries(3):
        lst2 = list(Person.objects.prefetch_related(
            Prefetch('houses__rooms', queryset=Room.objects.all(), to_attr='rooms_lst')))
    self.assertEqual(
        self.traverse_qs(lst1, [['houses', 'rooms']]),
        self.traverse_qs(lst2, [['houses', 'rooms_lst']])
    )
    # Test inner select_related: saves one query versus the string lookup.
    with self.assertNumQueries(3):
        lst1 = list(Person.objects.prefetch_related('houses__owner'))
    with self.assertNumQueries(2):
        lst2 = list(Person.objects.prefetch_related(
            Prefetch('houses', queryset=House.objects.select_related('owner'))))
    self.assertEqual(
        self.traverse_qs(lst1, [['houses', 'owner']]),
        self.traverse_qs(lst2, [['houses', 'owner']])
    )
    # Test inner prefetch.
    inner_rooms_qs = Room.objects.filter(pk__in=[self.room1_1.pk, self.room1_2.pk])
    houses_qs_prf = House.objects.prefetch_related(
        Prefetch('rooms', queryset=inner_rooms_qs, to_attr='rooms_lst'))
    with self.assertNumQueries(4):
        lst2 = list(Person.objects.prefetch_related(
            Prefetch('houses', queryset=houses_qs_prf.filter(pk=self.house1.pk), to_attr='houses_lst'),
            Prefetch('houses_lst__rooms_lst__main_room_of')
        ))
    self.assertEqual(len(lst2[0].houses_lst[0].rooms_lst), 2)
    self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0], self.room1_1)
    self.assertEqual(lst2[0].houses_lst[0].rooms_lst[1], self.room1_2)
    self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0].main_room_of, self.house1)
    self.assertEqual(len(lst2[1].houses_lst), 0)
    # Test ForwardManyToOneDescriptor.
    houses = House.objects.select_related('owner')
    with self.assertNumQueries(6):
        rooms = Room.objects.all().prefetch_related('house')
        lst1 = self.traverse_qs(rooms, [['house', 'owner']])
    with self.assertNumQueries(2):
        rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all()))
        lst2 = self.traverse_qs(rooms, [['house', 'owner']])
    self.assertEqual(lst1, lst2)
    with self.assertNumQueries(2):
        houses = House.objects.select_related('owner')
        rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all(), to_attr='house_attr'))
        lst2 = self.traverse_qs(rooms, [['house_attr', 'owner']])
    self.assertEqual(lst1, lst2)
    # When the custom queryset filters out the related object, the
    # descriptor raises; the to_attr variant yields None instead.
    room = Room.objects.all().prefetch_related(
        Prefetch('house', queryset=houses.filter(address='DoesNotExist'))
    ).first()
    with self.assertRaises(ObjectDoesNotExist):
        getattr(room, 'house')
    room = Room.objects.all().prefetch_related(
        Prefetch('house', queryset=houses.filter(address='DoesNotExist'), to_attr='house_attr')
    ).first()
    self.assertIsNone(room.house_attr)
    # Deferred fields on the prefetched object trigger an extra query on access.
    rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=House.objects.only('name')))
    with self.assertNumQueries(2):
        getattr(rooms.first().house, 'name')
    with self.assertNumQueries(3):
        getattr(rooms.first().house, 'address')
    # Test ReverseOneToOneDescriptor.
    houses = House.objects.select_related('owner')
    with self.assertNumQueries(6):
        rooms = Room.objects.all().prefetch_related('main_room_of')
        lst1 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
    with self.assertNumQueries(2):
        rooms = Room.objects.all().prefetch_related(Prefetch('main_room_of', queryset=houses.all()))
        lst2 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
    self.assertEqual(lst1, lst2)
    with self.assertNumQueries(2):
        rooms = list(
            Room.objects.all().prefetch_related(
                Prefetch('main_room_of', queryset=houses.all(), to_attr='main_room_of_attr')
            )
        )
        lst2 = self.traverse_qs(rooms, [['main_room_of_attr', 'owner']])
    self.assertEqual(lst1, lst2)
    room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
        Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'))
    ).first()
    with self.assertRaises(ObjectDoesNotExist):
        getattr(room, 'main_room_of')
    room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
        Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'), to_attr='main_room_of_attr')
    ).first()
    self.assertIsNone(room.main_room_of_attr)
    # The custom queryset filters should be applied to the queryset
    # instance returned by the manager.
    person = Person.objects.prefetch_related(
        Prefetch('houses', queryset=House.objects.filter(name='House 1')),
    ).get(pk=self.person1.pk)
    self.assertEqual(
        list(person.houses.all()),
        list(person.houses.all().all()),
    )
def test_nested_prefetch_related_are_not_overwritten(self):
    """Nested prefetches attached to an inner queryset survive the outer prefetch."""
    # Regression test for #24873
    houses_2 = House.objects.prefetch_related(Prefetch('rooms'))
    persons = Person.objects.prefetch_related(Prefetch('houses', queryset=houses_2))
    houses = House.objects.prefetch_related(Prefetch('occupants', queryset=persons))
    list(houses)  # queryset must be evaluated once to reproduce the bug.
    self.assertEqual(
        houses.all()[0].occupants.all()[0].houses.all()[1].rooms.all()[0],
        self.room2_1
    )
def test_values_queryset(self):
    """Prefetch rejects values() querysets with an explicit ValueError."""
    msg = 'Prefetch querysets cannot use values().'
    with self.assertRaisesMessage(ValueError, msg):
        Prefetch('houses', House.objects.values('pk'))
def test_to_attr_doesnt_cache_through_attr_as_list(self):
    """Using to_attr must not replace the related manager's result with a list."""
    lookup = Prefetch('rooms', queryset=Room.objects.all(), to_attr='to_rooms')
    house = House.objects.prefetch_related(lookup).get(pk=self.house3.pk)
    # The through attribute still yields a QuerySet, not the cached list.
    self.assertIsInstance(house.rooms.all(), QuerySet)
def test_to_attr_cached_property(self):
    """A to_attr target that shadows a cached_property is populated without extra queries."""
    persons = Person.objects.prefetch_related(
        Prefetch('houses', House.objects.all(), to_attr='cached_all_houses'),
    )
    for person in persons:
        # To bypass caching at the related descriptor level, don't use
        # person.houses.all() here.
        all_houses = list(House.objects.filter(occupants=person))
        with self.assertNumQueries(0):
            self.assertEqual(person.cached_all_houses, all_houses)
class DefaultManagerTests(TestCase):
    """Prefetching must not let a related model's default manager issue
    one extra query per parent object."""
    def setUp(self):
        # Four qualifications shared across three teachers.
        self.qual1 = Qualification.objects.create(name="BA")
        self.qual2 = Qualification.objects.create(name="BSci")
        self.qual3 = Qualification.objects.create(name="MA")
        self.qual4 = Qualification.objects.create(name="PhD")
        self.teacher1 = Teacher.objects.create(name="Mr Cleese")
        self.teacher2 = Teacher.objects.create(name="Mr Idle")
        self.teacher3 = Teacher.objects.create(name="Mr Chapman")
        self.teacher1.qualifications.add(self.qual1, self.qual2, self.qual3, self.qual4)
        self.teacher2.qualifications.add(self.qual1)
        self.teacher3.qualifications.add(self.qual2)
        self.dept1 = Department.objects.create(name="English")
        self.dept2 = Department.objects.create(name="Physics")
        self.dept1.teachers.add(self.teacher1, self.teacher2)
        self.dept2.teachers.add(self.teacher1, self.teacher3)
    def test_m2m_then_m2m(self):
        with self.assertNumQueries(3):
            # When we prefetch the teachers, and force the query, we don't want
            # the default manager on teachers to immediately get all the related
            # qualifications, since this will do one query per teacher.
            qs = Department.objects.prefetch_related('teachers')
            depts = "".join("%s department: %s\n" %
                            (dept.name, ", ".join(str(t) for t in dept.teachers.all()))
                            for dept in qs)
            self.assertEqual(depts,
                             "English department: Mr Cleese (BA, BSci, MA, PhD), Mr Idle (BA)\n"
                             "Physics department: Mr Cleese (BA, BSci, MA, PhD), Mr Chapman (BSci)\n")
class GenericRelationTests(TestCase):
    """prefetch_related() over GenericForeignKey and GenericRelation lookups."""
    @classmethod
    def setUpTestData(cls):
        book1 = Book.objects.create(title="Winnie the Pooh")
        book2 = Book.objects.create(title="Do you like green eggs and spam?")
        book3 = Book.objects.create(title="Three Men In A Boat")
        reader1 = Reader.objects.create(name="me")
        reader2 = Reader.objects.create(name="you")
        reader3 = Reader.objects.create(name="someone")
        book1.read_by.add(reader1, reader2)
        book2.read_by.add(reader2)
        book3.read_by.add(reader3)
        cls.book1, cls.book2, cls.book3 = book1, book2, book3
        cls.reader1, cls.reader2, cls.reader3 = reader1, reader2, reader3
    def test_prefetch_GFK(self):
        # content_object points at two different models here.
        TaggedItem.objects.create(tag="awesome", content_object=self.book1)
        TaggedItem.objects.create(tag="great", content_object=self.reader1)
        TaggedItem.objects.create(tag="outstanding", content_object=self.book2)
        TaggedItem.objects.create(tag="amazing", content_object=self.reader3)
        # 1 for TaggedItem table, 1 for Book table, 1 for Reader table
        with self.assertNumQueries(3):
            qs = TaggedItem.objects.prefetch_related('content_object')
            list(qs)
    def test_prefetch_GFK_nonint_pk(self):
        # GFK target with a non-integer primary key.
        Comment.objects.create(comment="awesome", content_object=self.book1)
        # 1 for Comment table, 1 for Book table
        with self.assertNumQueries(2):
            qs = Comment.objects.prefetch_related('content_object')
            [c.content_object for c in qs]
    def test_traverse_GFK(self):
        """
        A 'content_object' can be traversed with prefetch_related() and
        get to related objects on the other side (assuming it is suitably
        filtered)
        """
        TaggedItem.objects.create(tag="awesome", content_object=self.book1)
        TaggedItem.objects.create(tag="awesome", content_object=self.book2)
        TaggedItem.objects.create(tag="awesome", content_object=self.book3)
        TaggedItem.objects.create(tag="awesome", content_object=self.reader1)
        TaggedItem.objects.create(tag="awesome", content_object=self.reader2)
        ct = ContentType.objects.get_for_model(Book)
        # We get 3 queries - 1 for main query, 1 for content_objects since they
        # all use the same table, and 1 for the 'read_by' relation.
        with self.assertNumQueries(3):
            # If we limit to books, we know that they will have 'read_by'
            # attributes, so the following makes sense:
            qs = TaggedItem.objects.filter(content_type=ct, tag='awesome').prefetch_related('content_object__read_by')
            readers_of_awesome_books = {r.name for tag in qs
                                        for r in tag.content_object.read_by.all()}
            self.assertEqual(readers_of_awesome_books, {"me", "you", "someone"})
    def test_nullable_GFK(self):
        # Some TaggedItems have no created_by; prefetch must handle the nulls.
        TaggedItem.objects.create(tag="awesome", content_object=self.book1,
                                  created_by=self.reader1)
        TaggedItem.objects.create(tag="great", content_object=self.book2)
        TaggedItem.objects.create(tag="rubbish", content_object=self.book3)
        with self.assertNumQueries(2):
            result = [t.created_by for t in TaggedItem.objects.prefetch_related('created_by')]
        self.assertEqual(result,
                         [t.created_by for t in TaggedItem.objects.all()])
    def test_generic_relation(self):
        bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
        TaggedItem.objects.create(content_object=bookmark, tag='django')
        TaggedItem.objects.create(content_object=bookmark, tag='python')
        with self.assertNumQueries(2):
            tags = [t.tag for b in Bookmark.objects.prefetch_related('tags')
                    for t in b.tags.all()]
            self.assertEqual(sorted(tags), ["django", "python"])
    def test_charfield_GFK(self):
        # Two generic relations ('tags' and 'favorite_tags') on the same model.
        b = Bookmark.objects.create(url='http://www.djangoproject.com/')
        TaggedItem.objects.create(content_object=b, tag='django')
        TaggedItem.objects.create(content_object=b, favorite=b, tag='python')
        with self.assertNumQueries(3):
            bookmark = Bookmark.objects.filter(pk=b.pk).prefetch_related('tags', 'favorite_tags')[0]
            self.assertEqual(sorted([i.tag for i in bookmark.tags.all()]), ["django", "python"])
            self.assertEqual([i.tag for i in bookmark.favorite_tags.all()], ["python"])
    def test_custom_queryset(self):
        bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
        django_tag = TaggedItem.objects.create(content_object=bookmark, tag='django')
        TaggedItem.objects.create(content_object=bookmark, tag='python')
        with self.assertNumQueries(2):
            bookmark = Bookmark.objects.prefetch_related(
                Prefetch('tags', TaggedItem.objects.filter(tag='django')),
            ).get()
        with self.assertNumQueries(0):
            self.assertEqual(list(bookmark.tags.all()), [django_tag])
        # The custom queryset filters should be applied to the queryset
        # instance returned by the manager.
        self.assertEqual(list(bookmark.tags.all()), list(bookmark.tags.all().all()))
class MultiTableInheritanceTest(TestCase):
    """prefetch_related() across multi-table inheritance (parent/child links)."""
    @classmethod
    def setUpTestData(cls):
        cls.book1 = BookWithYear.objects.create(title='Poems', published_year=2010)
        cls.book2 = BookWithYear.objects.create(title='More poems', published_year=2011)
        cls.author1 = AuthorWithAge.objects.create(name='Jane', first_book=cls.book1, age=50)
        cls.author2 = AuthorWithAge.objects.create(name='Tom', first_book=cls.book1, age=49)
        cls.author3 = AuthorWithAge.objects.create(name='Robert', first_book=cls.book2, age=48)
        cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1')
        cls.book2.aged_authors.add(cls.author2, cls.author3)
        cls.br1 = BookReview.objects.create(book=cls.book1, notes='review book1')
        cls.br2 = BookReview.objects.create(book=cls.book2, notes='review book2')
    def test_foreignkey(self):
        with self.assertNumQueries(2):
            qs = AuthorWithAge.objects.prefetch_related('addresses')
            addresses = [[str(address) for address in obj.addresses.all()] for obj in qs]
        self.assertEqual(addresses, [[str(self.author_address)], [], []])
    def test_foreignkey_to_inherited(self):
        with self.assertNumQueries(2):
            qs = BookReview.objects.prefetch_related('book')
            titles = [obj.book.title for obj in qs]
        self.assertEqual(titles, ["Poems", "More poems"])
    def test_m2m_to_inheriting_model(self):
        # Forward and reverse M2M where one side inherits from another model.
        qs = AuthorWithAge.objects.prefetch_related('books_with_year')
        with self.assertNumQueries(2):
            lst = [[str(book) for book in author.books_with_year.all()] for author in qs]
        qs = AuthorWithAge.objects.all()
        lst2 = [[str(book) for book in author.books_with_year.all()] for author in qs]
        self.assertEqual(lst, lst2)
        qs = BookWithYear.objects.prefetch_related('aged_authors')
        with self.assertNumQueries(2):
            lst = [[str(author) for author in book.aged_authors.all()] for book in qs]
        qs = BookWithYear.objects.all()
        lst2 = [[str(author) for author in book.aged_authors.all()] for book in qs]
        self.assertEqual(lst, lst2)
    def test_parent_link_prefetch(self):
        with self.assertNumQueries(2):
            [a.author for a in AuthorWithAge.objects.prefetch_related('author')]
    @override_settings(DEBUG=True)
    def test_child_link_prefetch(self):
        with self.assertNumQueries(2):
            authors = [a.authorwithage for a in Author.objects.prefetch_related('authorwithage')]
        # Regression for #18090: the prefetching query must include an IN clause.
        # Note that on Oracle the table name is upper case in the generated SQL,
        # thus the .lower() call.
        self.assertIn('authorwithage', connection.queries[-1]['sql'].lower())
        self.assertIn(' IN ', connection.queries[-1]['sql'])
        self.assertEqual(authors, [a.authorwithage for a in Author.objects.all()])
class ForeignKeyToFieldTest(TestCase):
    """prefetch_related() over relations that use to_field instead of the pk."""
    @classmethod
    def setUpTestData(cls):
        cls.book = Book.objects.create(title='Poems')
        cls.author1 = Author.objects.create(name='Jane', first_book=cls.book)
        cls.author2 = Author.objects.create(name='Tom', first_book=cls.book)
        cls.author3 = Author.objects.create(name='Robert', first_book=cls.book)
        cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1')
        # Circular "favorite" chain: 1 -> 2 -> 3 -> 1.
        FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2)
        FavoriteAuthors.objects.create(author=cls.author2, likes_author=cls.author3)
        FavoriteAuthors.objects.create(author=cls.author3, likes_author=cls.author1)
    def test_foreignkey(self):
        with self.assertNumQueries(2):
            qs = Author.objects.prefetch_related('addresses')
            addresses = [[str(address) for address in obj.addresses.all()]
                         for obj in qs]
        self.assertEqual(addresses, [[str(self.author_address)], [], []])
    def test_m2m(self):
        with self.assertNumQueries(3):
            qs = Author.objects.all().prefetch_related('favorite_authors', 'favors_me')
            favorites = [(
                [str(i_like) for i_like in author.favorite_authors.all()],
                [str(likes_me) for likes_me in author.favors_me.all()]
            ) for author in qs]
        self.assertEqual(
            favorites,
            [
                ([str(self.author2)], [str(self.author3)]),
                ([str(self.author3)], [str(self.author1)]),
                ([str(self.author1)], [str(self.author2)])
            ]
        )
class LookupOrderingTest(TestCase):
    """
    Test cases that demonstrate that ordering of lookups is important, and
    ensure it is preserved.
    """
    def setUp(self):
        self.person1 = Person.objects.create(name="Joe")
        self.person2 = Person.objects.create(name="Mary")
        # Set main_room for each house before creating the next one for
        # databases where supports_nullable_unique_constraints is False.
        self.house1 = House.objects.create(address="123 Main St")
        self.room1_1 = Room.objects.create(name="Dining room", house=self.house1)
        self.room1_2 = Room.objects.create(name="Lounge", house=self.house1)
        self.room1_3 = Room.objects.create(name="Kitchen", house=self.house1)
        self.house1.main_room = self.room1_1
        self.house1.save()
        self.person1.houses.add(self.house1)
        self.house2 = House.objects.create(address="45 Side St")
        self.room2_1 = Room.objects.create(name="Dining room", house=self.house2)
        self.room2_2 = Room.objects.create(name="Lounge", house=self.house2)
        self.house2.main_room = self.room2_1
        self.house2.save()
        self.person1.houses.add(self.house2)
        self.house3 = House.objects.create(address="6 Downing St")
        self.room3_1 = Room.objects.create(name="Dining room", house=self.house3)
        self.room3_2 = Room.objects.create(name="Lounge", house=self.house3)
        self.room3_3 = Room.objects.create(name="Kitchen", house=self.house3)
        self.house3.main_room = self.room3_1
        self.house3.save()
        self.person2.houses.add(self.house3)
        self.house4 = House.objects.create(address="7 Regents St")
        self.room4_1 = Room.objects.create(name="Dining room", house=self.house4)
        self.room4_2 = Room.objects.create(name="Lounge", house=self.house4)
        self.house4.main_room = self.room4_1
        self.house4.save()
        self.person2.houses.add(self.house4)
    def test_order(self):
        with self.assertNumQueries(4):
            # The following two queries must be done in the same order as written,
            # otherwise 'primary_house' will cause non-prefetched lookups
            qs = Person.objects.prefetch_related('houses__rooms',
                                                 'primary_house__occupants')
            [list(p.primary_house.occupants.all()) for p in qs]
class NullableTest(TestCase):
    """prefetch_related() across a nullable FK ('boss' may be None)."""
    @classmethod
    def setUpTestData(cls):
        boss = Employee.objects.create(name="Peter")
        Employee.objects.create(name="Joe", boss=boss)
        Employee.objects.create(name="Angela", boss=boss)
    def test_traverse_nullable(self):
        # Because we use select_related() for 'boss', it doesn't need to be
        # prefetched, but we can still traverse it although it contains some nulls
        with self.assertNumQueries(2):
            qs = Employee.objects.select_related('boss').prefetch_related('boss__serfs')
            co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
                        for e in qs]
        qs2 = Employee.objects.select_related('boss')
        co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
        self.assertEqual(co_serfs, co_serfs2)
    def test_prefetch_nullable(self):
        # One for main employee, one for boss, one for serfs
        with self.assertNumQueries(3):
            qs = Employee.objects.prefetch_related('boss__serfs')
            co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
                        for e in qs]
        qs2 = Employee.objects.all()
        co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
        self.assertEqual(co_serfs, co_serfs2)
    def test_in_bulk(self):
        """
        In-bulk does correctly prefetch objects by not using .iterator()
        directly.
        """
        boss1 = Employee.objects.create(name="Peter")
        boss2 = Employee.objects.create(name="Jack")
        with self.assertNumQueries(2):
            # Prefetch is done and it does not cause any errors.
            bulk = Employee.objects.prefetch_related('serfs').in_bulk([boss1.pk, boss2.pk])
            for b in bulk.values():
                list(b.serfs.all())
class MultiDbTests(TestCase):
    """prefetch_related() honors the database selected with using()."""
    multi_db = True
    def test_using_is_honored_m2m(self):
        B = Book.objects.using('other')
        A = Author.objects.using('other')
        book1 = B.create(title="Poems")
        book2 = B.create(title="Jane Eyre")
        book3 = B.create(title="Wuthering Heights")
        book4 = B.create(title="Sense and Sensibility")
        author1 = A.create(name="Charlotte", first_book=book1)
        author2 = A.create(name="Anne", first_book=book1)
        author3 = A.create(name="Emily", first_book=book1)
        author4 = A.create(name="Jane", first_book=book4)
        book1.authors.add(author1, author2, author3)
        book2.authors.add(author1)
        book3.authors.add(author3)
        book4.authors.add(author4)
        # Forward
        qs1 = B.prefetch_related('authors')
        with self.assertNumQueries(2, using='other'):
            books = "".join("%s (%s)\n" %
                            (book.title, ", ".join(a.name for a in book.authors.all()))
                            for book in qs1)
        self.assertEqual(books,
                         "Poems (Charlotte, Anne, Emily)\n"
                         "Jane Eyre (Charlotte)\n"
                         "Wuthering Heights (Emily)\n"
                         "Sense and Sensibility (Jane)\n")
        # Reverse
        qs2 = A.prefetch_related('books')
        with self.assertNumQueries(2, using='other'):
            authors = "".join("%s: %s\n" %
                              (author.name, ", ".join(b.title for b in author.books.all()))
                              for author in qs2)
        self.assertEqual(authors,
                         "Charlotte: Poems, Jane Eyre\n"
                         "Anne: Poems\n"
                         "Emily: Poems, Wuthering Heights\n"
                         "Jane: Sense and Sensibility\n")
    def test_using_is_honored_fkey(self):
        B = Book.objects.using('other')
        A = Author.objects.using('other')
        book1 = B.create(title="Poems")
        book2 = B.create(title="Sense and Sensibility")
        A.create(name="Charlotte Bronte", first_book=book1)
        A.create(name="Jane Austen", first_book=book2)
        # Forward
        with self.assertNumQueries(2, using='other'):
            books = ", ".join(a.first_book.title for a in A.prefetch_related('first_book'))
        self.assertEqual("Poems, Sense and Sensibility", books)
        # Reverse
        with self.assertNumQueries(2, using='other'):
            books = "".join("%s (%s)\n" %
                            (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
                            for b in B.prefetch_related('first_time_authors'))
        self.assertEqual(books,
                         "Poems (Charlotte Bronte)\n"
                         "Sense and Sensibility (Jane Austen)\n")
    def test_using_is_honored_inheritance(self):
        B = BookWithYear.objects.using('other')
        A = AuthorWithAge.objects.using('other')
        book1 = B.create(title="Poems", published_year=2010)
        B.create(title="More poems", published_year=2011)
        A.create(name='Jane', first_book=book1, age=50)
        A.create(name='Tom', first_book=book1, age=49)
        # parent link
        with self.assertNumQueries(2, using='other'):
            authors = ", ".join(a.author.name for a in A.prefetch_related('author'))
        self.assertEqual(authors, "Jane, Tom")
        # child link
        with self.assertNumQueries(2, using='other'):
            ages = ", ".join(str(a.authorwithage.age) for a in A.prefetch_related('authorwithage'))
        self.assertEqual(ages, "50, 49")
    def test_using_is_honored_custom_qs(self):
        B = Book.objects.using('other')
        A = Author.objects.using('other')
        book1 = B.create(title="Poems")
        book2 = B.create(title="Sense and Sensibility")
        A.create(name="Charlotte Bronte", first_book=book1)
        A.create(name="Jane Austen", first_book=book2)
        # Implicit hinting
        with self.assertNumQueries(2, using='other'):
            prefetch = Prefetch('first_time_authors', queryset=Author.objects.all())
            books = "".join("%s (%s)\n" %
                            (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
                            for b in B.prefetch_related(prefetch))
        self.assertEqual(books,
                         "Poems (Charlotte Bronte)\n"
                         "Sense and Sensibility (Jane Austen)\n")
        # Explicit using on the same db.
        with self.assertNumQueries(2, using='other'):
            prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('other'))
            books = "".join("%s (%s)\n" %
                            (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
                            for b in B.prefetch_related(prefetch))
        self.assertEqual(books,
                         "Poems (Charlotte Bronte)\n"
                         "Sense and Sensibility (Jane Austen)\n")
        # Explicit using on a different db.
        with self.assertNumQueries(1, using='default'), self.assertNumQueries(1, using='other'):
            prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('default'))
            books = "".join("%s (%s)\n" %
                            (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
                            for b in B.prefetch_related(prefetch))
        # No authors exist on 'default', so the related lists are empty.
        self.assertEqual(books,
                         "Poems ()\n"
                         "Sense and Sensibility ()\n")
class Ticket19607Tests(TestCase):
    """Regression for #19607: prefetching a FK and its reverse set together."""
    def setUp(self):
        for id, name1, name2 in [
            (1, 'einfach', 'simple'),
            (2, 'schwierig', 'difficult'),
        ]:
            LessonEntry.objects.create(id=id, name1=name1, name2=name2)
        for id, lesson_entry_id, name in [
            (1, 1, 'einfach'),
            (2, 1, 'simple'),
            (3, 2, 'schwierig'),
            (4, 2, 'difficult'),
        ]:
            WordEntry.objects.create(id=id, lesson_entry_id=lesson_entry_id, name=name)
    def test_bug(self):
        # Must not raise; forcing evaluation is the whole test.
        list(WordEntry.objects.prefetch_related('lesson_entry', 'lesson_entry__wordentry_set'))
class Ticket21410Tests(TestCase):
    """Regression for #21410: prefetching a FK alongside an M2M on the same model."""
    def setUp(self):
        self.book1 = Book.objects.create(title="Poems")
        self.book2 = Book.objects.create(title="Jane Eyre")
        self.book3 = Book.objects.create(title="Wuthering Heights")
        self.book4 = Book.objects.create(title="Sense and Sensibility")
        self.author1 = Author2.objects.create(name="Charlotte", first_book=self.book1)
        self.author2 = Author2.objects.create(name="Anne", first_book=self.book1)
        self.author3 = Author2.objects.create(name="Emily", first_book=self.book1)
        self.author4 = Author2.objects.create(name="Jane", first_book=self.book4)
        self.author1.favorite_books.add(self.book1, self.book2, self.book3)
        self.author2.favorite_books.add(self.book1)
        self.author3.favorite_books.add(self.book2)
        self.author4.favorite_books.add(self.book3)
    def test_bug(self):
        # Must not raise; forcing evaluation is the whole test.
        list(Author2.objects.prefetch_related('first_book', 'favorite_books'))
class Ticket21760Tests(TestCase):
    """Regression for #21760: the prefetch queryset must not contain a JOIN."""
    def setUp(self):
        self.rooms = []
        for _ in range(3):
            house = House.objects.create()
            for _ in range(3):
                self.rooms.append(Room.objects.create(house=house))
            # Set main_room for each house before creating the next one for
            # databases where supports_nullable_unique_constraints is False.
            house.main_room = self.rooms[-3]
            house.save()
    def test_bug(self):
        prefetcher = get_prefetcher(self.rooms[0], 'house', 'house')[0]
        queryset = prefetcher.get_prefetch_queryset(list(Room.objects.all()))[0]
        # The generated SQL should use an IN lookup, never a JOIN.
        self.assertNotIn(' JOIN ', str(queryset.query))
class Ticket25546Tests(TestCase):
"""
Nested prefetch_related() shouldn't trigger duplicate queries for the same
lookup.
Before, prefetch queries were for 'addresses', 'first_time_authors', and
'first_time_authors__addresses'. The last query is the duplicate.
"""
@classmethod
def setUpTestData(cls):
cls.book1, cls.book2 = [
Book.objects.create(title='book1'),
Book.objects.create(title='book2'),
]
cls.author11, cls.author12, cls.author21 = [
Author.objects.create(first_book=cls.book1, name='Author11'),
Author.objects.create(first_book=cls.book1, name='Author12'),
Author.objects.create(first_book=cls.book2, name='Author21'),
]
cls.author1_address1, cls.author1_address2, cls.author2_address1 = [
AuthorAddress.objects.create(author=cls.author11, address='Happy place'),
AuthorAddress.objects.create(author=cls.author12, address='Haunted house'),
AuthorAddress.objects.create(author=cls.author21, address='Happy place'),
]
def test_prefetch(self):
with self.assertNumQueries(3):
books = Book.objects.filter(
title__in=['book1', 'book2'],
).prefetch_related(
Prefetch(
'first_time_authors',
Author.objects.prefetch_related(
Prefetch(
'addresses',
AuthorAddress.objects.filter(address='Happy place'),
)
),
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertListEqual(list(book1.first_time_authors.all()), [self.author11, self.author12])
self.assertListEqual(list(book2.first_time_authors.all()), [self.author21])
self.assertListEqual(list(book1.first_time_authors.all()[0].addresses.all()), [self.author1_address1])
self.assertListEqual(list(book1.first_time_authors.all()[1].addresses.all()), [])
self.assertListEqual(list(book2.first_time_authors.all()[0].addresses.all()), [self.author2_address1])
self.assertEqual(
list(book1.first_time_authors.all()), list(book1.first_time_authors.all().all())
)
self.assertEqual(
list(book2.first_time_authors.all()), list(book2.first_time_authors.all().all())
)
self.assertEqual(
list(book1.first_time_authors.all()[0].addresses.all()),
list(book1.first_time_authors.all()[0].addresses.all().all())
)
self.assertEqual(
list(book1.first_time_authors.all()[1].addresses.all()),
list(book1.first_time_authors.all()[1].addresses.all().all())
)
self.assertEqual(
list(book2.first_time_authors.all()[0].addresses.all()),
list(book2.first_time_authors.all()[0].addresses.all().all())
)
    def test_prefetch_with_to_attr(self):
        """Same nested-Prefetch scenario as ``test_prefetch`` but using
        ``to_attr``: results land in plain list attributes
        (``first_authors`` / ``happy_place``) instead of the related
        managers, still in exactly three queries."""
        with self.assertNumQueries(3):
            books = Book.objects.filter(
                title__in=['book1', 'book2'],
            ).prefetch_related(
                Prefetch(
                    'first_time_authors',
                    Author.objects.prefetch_related(
                        Prefetch(
                            'addresses',
                            AuthorAddress.objects.filter(address='Happy place'),
                            to_attr='happy_place',
                        )
                    ),
                    to_attr='first_authors',
                ),
            )
            book1, book2 = list(books)
        with self.assertNumQueries(0):
            # to_attr stores plain lists, so no manager/queryset round-trips.
            self.assertListEqual(book1.first_authors, [self.author11, self.author12])
            self.assertListEqual(book2.first_authors, [self.author21])
            self.assertListEqual(book1.first_authors[0].happy_place, [self.author1_address1])
            self.assertListEqual(book1.first_authors[1].happy_place, [])
            self.assertListEqual(book2.first_authors[0].happy_place, [self.author2_address1])
|
|
# __init__.py - fsmonitor initialization and overrides
#
# Copyright 2013-2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''Faster status operations with the Watchman file monitor (EXPERIMENTAL)
Integrates the file-watching program Watchman with Mercurial to produce faster
status results.
On a particular Linux system, for a real-world repository with over 400,000
files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same
system, with fsmonitor it takes about 0.3 seconds.
fsmonitor requires no configuration -- it will tell Watchman about your
repository as necessary. You'll need to install Watchman from
https://facebook.github.io/watchman/ and make sure it is in your PATH.
fsmonitor is incompatible with the largefiles and eol extensions, and
will disable itself if any of those are active.
The following configuration options exist:
::
[fsmonitor]
mode = {off, on, paranoid}
When `mode = off`, fsmonitor will disable itself (similar to not loading the
extension at all). When `mode = on`, fsmonitor will be enabled (the default).
When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem,
and ensure that the results are consistent.
::
[fsmonitor]
timeout = (float)
A value, in seconds, that determines how long fsmonitor will wait for Watchman
to return results. Defaults to `2.0`.
::
[fsmonitor]
blacklistusers = (list of userids)
A list of usernames for which fsmonitor will disable itself altogether.
::
[fsmonitor]
walk_on_invalidate = (boolean)
Whether or not to walk the whole repo ourselves when our cached state has been
invalidated, for example when Watchman has been restarted or .hgignore rules
have been changed. Walking the repo in that case can result in competing for
I/O with Watchman. For large repos it is recommended to set this value to
false. You may wish to set this to true if you have a very fast filesystem
that can outpace the IPC overhead of getting the result data for the full repo
from Watchman. Defaults to false.
::
[fsmonitor]
warn_when_unused = (boolean)
Whether to print a warning during certain operations when fsmonitor would be
beneficial to performance but isn't enabled.
::
[fsmonitor]
warn_update_file_count = (integer)
# or when mercurial is built with rust support
warn_update_file_count_rust = (integer)
If ``warn_when_unused`` is set and fsmonitor isn't enabled, a warning will
be printed during working directory updates if this many files will be
created.
'''
# Platforms Supported
# ===================
#
# **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably,
# even under severe loads.
#
# **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor
# turned on, on case-insensitive HFS+. There has been a reasonable amount of
# user testing under normal loads.
#
# **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but
# very little testing has been done.
#
# **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet.
#
# Known Issues
# ============
#
# * fsmonitor will disable itself if any of the following extensions are
# enabled: largefiles, inotify, eol; or if the repository has subrepos.
# * fsmonitor will produce incorrect results if nested repos that are not
# subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`.
#
# The issues related to nested repos and subrepos are probably not fundamental
# ones. Patches to fix them are welcome.
from __future__ import absolute_import
import codecs
import os
import stat
import sys
import tempfile
import weakref
from mercurial.i18n import _
from mercurial.node import hex
from mercurial.pycompat import open
from mercurial import (
context,
encoding,
error,
extensions,
localrepo,
merge,
pathutil,
pycompat,
registrar,
scmutil,
util,
)
from mercurial import match as matchmod
from mercurial.utils import (
hashutil,
stringutil,
)
from . import (
pywatchman,
state,
watchmanclient,
)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'
configtable = {}
configitem = registrar.configitem(configtable)
# Operating mode: 'off', 'on' or 'paranoid' (see the module docstring).
configitem(
    b'fsmonitor',
    b'mode',
    default=b'on',
)
# Prefer walking the repo ourselves instead of trusting Watchman whenever
# the cached state has been invalidated.
configitem(
    b'fsmonitor',
    b'walk_on_invalidate',
    default=False,
)
# Watchman query timeout in seconds (stored as bytes, converted to float
# by the state module).
configitem(
    b'fsmonitor',
    b'timeout',
    default=b'2',
)
# Usernames for which fsmonitor disables itself entirely.
configitem(
    b'fsmonitor',
    b'blacklistusers',
    default=list,
)
# Name or path of the watchman executable to invoke.
configitem(
    b'fsmonitor',
    b'watchman_exe',
    default=b'watchman',
)
# Whether to log/warn about Watchman failures (experimental).
configitem(
    b'fsmonitor',
    b'verbose',
    default=True,
    experimental=True,
)
# Send watchman state-enter/state-leave signals around transactions.
configitem(
    b'experimental',
    b'fsmonitor.transaction_notify',
    default=False,
)
# This extension is incompatible with the following blacklisted extensions
# and will disable itself when encountering one of these:
_blacklist = [b'largefiles', b'eol']
def debuginstall(ui, fm):
    """`hg debuginstall` hook: probe for a working watchman binary.

    Writes the configured watchman path, then its reported version (or an
    error) through the formatter ``fm``.  Returns 1 if watchman is missing
    or broken, 0 otherwise.
    """
    fm.write(
        b"fsmonitor-watchman",
        _(b"fsmonitor checking for watchman binary... (%s)\n"),
        ui.configpath(b"fsmonitor", b"watchman_exe"),
    )
    # Use a throwaway directory as the client root so the probe never
    # touches a real repository.
    root = tempfile.mkdtemp()
    c = watchmanclient.client(ui, root)
    err = None
    try:
        v = c.command(b"version")
        fm.write(
            b"fsmonitor-watchman-version",
            _(b" watchman binary version %s\n"),
            pycompat.bytestr(v["version"]),
        )
    except watchmanclient.Unavailable as e:
        err = stringutil.forcebytestr(e)
    finally:
        # mkdtemp does not clean up after itself; remove the probe
        # directory so debuginstall doesn't leak temp dirs.  Best effort:
        # ignore failures (e.g. something was created inside it).
        try:
            os.rmdir(root)
        except OSError:
            pass
    fm.condwrite(
        err,
        b"fsmonitor-watchman-error",
        _(b" watchman binary missing or broken: %s\n"),
        err,
    )
    return 1 if err else 0
def _handleunavailable(ui, state, ex):
    """Log a Watchman interaction failure.

    ``watchmanclient.Unavailable`` errors may additionally warn the user
    and invalidate the cached fsmonitor ``state``; any other exception is
    simply logged.
    """
    if not isinstance(ex, watchmanclient.Unavailable):
        ui.log(
            b'fsmonitor',
            b'Watchman exception: %s\n',
            stringutil.forcebytestr(ex),
        )
        return
    text = stringutil.forcebytestr(ex)
    # experimental config: fsmonitor.verbose
    if ex.warn and ui.configbool(b'fsmonitor', b'verbose'):
        # Suppress the noisy "illegal_fstypes" warning.
        if b'illegal_fstypes' not in text:
            ui.warn(text + b'\n')
    if ex.invalidate:
        state.invalidate()
    # experimental config: fsmonitor.verbose
    if ui.configbool(b'fsmonitor', b'verbose'):
        ui.log(
            b'fsmonitor',
            b'Watchman unavailable: %s\n',
            stringutil.forcebytestr(ex.msg),
        )
def _hashignore(ignore):
    """Return a stable fingerprint of the ignore matcher.

    If this fingerprint changes between Mercurial invocations, the cached
    Watchman information can no longer be trusted and the working copy
    must be re-scanned.
    """
    digest = hashutil.sha1()
    digest.update(pycompat.byterepr(ignore))
    return pycompat.sysbytes(digest.hexdigest())
# Cache encoding facts once at import time: Watchman's path encoding can
# differ from the local filesystem encoding, in which case paths returned
# by Watchman must be re-encoded (_fixencoding).
_watchmanencoding = pywatchman.encoding.get_local_encoding()
_fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
_fixencoding = codecs.lookup(_watchmanencoding) != codecs.lookup(_fsencoding)
def _watchmantofsencoding(path):
    """Re-encode ``path`` from Watchman's encoding to the filesystem's.

    Watchman's path encoding can differ from the filesystem encoding; for
    example, on Windows it is always utf-8.  Raises ``error.Abort`` when
    the path cannot be represented in either encoding.
    """
    try:
        decoded = path.decode(_watchmanencoding)
    except UnicodeDecodeError as e:
        raise error.Abort(
            stringutil.forcebytestr(e), hint=b'watchman encoding error'
        )
    try:
        return decoded.encode(_fsencoding, 'strict')
    except UnicodeEncodeError as e:
        raise error.Abort(stringutil.forcebytestr(e))
def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
    """Replacement for dirstate.walk, hooking into Watchman.

    Whenever full is False, ignored is False, and the Watchman client is
    available, use Watchman combined with saved state to possibly return
    only a subset of files.

    ``orig`` is the wrapped ``dirstate.walk`` and ``self`` is the
    dirstate instance.  Returns the same ``{filename: stat-or-None}``
    mapping ``orig`` would; any condition that prevents the fast path
    falls back to ``orig`` via ``bail``.
    """

    # Single funnel for every fallback so the reason shows up in --debug.
    def bail(reason):
        self._ui.debug(b'fsmonitor: fallback to core status, %s\n' % reason)
        return orig(match, subrepos, unknown, ignored, full=True)

    if full:
        return bail(b'full rewalk requested')
    if ignored:
        return bail(b'listing ignored files')
    if not self._watchmanclient.available():
        return bail(b'client unavailable')
    state = self._fsmonitorstate
    clock, ignorehash, notefiles = state.get()
    if not clock:
        if state.walk_on_invalidate:
            return bail(b'no clock')
        # Initial NULL clock value, see
        # https://facebook.github.io/watchman/docs/clockspec.html
        clock = b'c:0:0'
        notefiles = []
    ignore = self._ignore
    dirignore = self._dirignore
    if unknown:
        if _hashignore(ignore) != ignorehash and clock != b'c:0:0':
            # ignore list changed -- can't rely on Watchman state any more
            if state.walk_on_invalidate:
                return bail(b'ignore rules changed')
            notefiles = []
            clock = b'c:0:0'
    else:
        # always ignore
        ignore = util.always
        dirignore = util.always
    matchfn = match.matchfn
    matchalways = match.always()
    dmap = self._map
    if util.safehasattr(dmap, b'_map'):
        # for better performance, directly access the inner dirstate map if the
        # standard dirstate implementation is in use.
        dmap = dmap._map
    nonnormalset = self._map.nonnormalset
    copymap = self._map.copymap
    getkind = stat.S_IFMT
    dirkind = stat.S_IFDIR
    regkind = stat.S_IFREG
    lnkkind = stat.S_IFLNK
    join = self._join
    normcase = util.normcase
    fresh_instance = False
    exact = skipstep3 = False
    if match.isexact():  # match.exact
        exact = True
        dirignore = util.always  # skip step 2
    elif match.prefix():  # match.match, no patterns
        skipstep3 = True
    if not exact and self._checkcase:
        # note that even though we could receive directory entries, we're only
        # interested in checking if a file with the same name exists. So only
        # normalize files if possible.
        normalize = self._normalizefile
        skipstep3 = False
    else:
        normalize = None
    # step 1: find all explicit files
    results, work, dirsnotfound = self._walkexplicit(match, subrepos)
    skipstep3 = skipstep3 and not (work or dirsnotfound)
    work = [d for d in work if not dirignore(d[0])]
    if not work and (exact or skipstep3):
        for s in subrepos:
            del results[s]
        del results[b'.hg']
        return results
    # step 2: query Watchman
    try:
        # Use the user-configured timeout for the query.
        # Add a little slack over the top of the user query to allow for
        # overheads while transferring the data
        self._watchmanclient.settimeout(state.timeout + 0.1)
        result = self._watchmanclient.command(
            b'query',
            {
                b'fields': [b'mode', b'mtime', b'size', b'exists', b'name'],
                b'since': clock,
                # Exclude anything under .hg from the result set.
                b'expression': [
                    b'not',
                    [
                        b'anyof',
                        [b'dirname', b'.hg'],
                        [b'name', b'.hg', b'wholename'],
                    ],
                ],
                b'sync_timeout': int(state.timeout * 1000),
                b'empty_on_fresh_instance': state.walk_on_invalidate,
            },
        )
    except Exception as ex:
        _handleunavailable(self._ui, state, ex)
        self._watchmanclient.clearconnection()
        return bail(b'exception during run')
    else:
        # We need to propagate the last observed clock up so that we
        # can use it for our next query
        state.setlastclock(pycompat.sysbytes(result[b'clock']))
        if result[b'is_fresh_instance']:
            if state.walk_on_invalidate:
                state.invalidate()
                return bail(b'fresh instance')
            fresh_instance = True
            # Ignore any prior notable files from the state info
            notefiles = []
    # for file paths which require normalization and we encounter a case
    # collision, we store our own foldmap
    if normalize:
        foldmap = {normcase(k): k for k in results}
    switch_slashes = pycompat.ossep == b'\\'
    # The order of the results is, strictly speaking, undefined.
    # For case changes on a case insensitive filesystem we may receive
    # two entries, one with exists=True and another with exists=False.
    # The exists=True entries in the same response should be interpreted
    # as being happens-after the exists=False entries due to the way that
    # Watchman tracks files. We use this property to reconcile deletes
    # for name case changes.
    for entry in result[b'files']:
        fname = entry[b'name']
        # Watchman always give us a str. Normalize to bytes on Python 3
        # using Watchman's encoding, if needed.
        if not isinstance(fname, bytes):
            fname = fname.encode(_watchmanencoding)
        if _fixencoding:
            fname = _watchmantofsencoding(fname)
        if switch_slashes:
            fname = fname.replace(b'\\', b'/')
        if normalize:
            normed = normcase(fname)
            fname = normalize(fname, True, True)
            foldmap[normed] = fname
        fmode = entry[b'mode']
        fexists = entry[b'exists']
        kind = getkind(fmode)
        if b'/.hg/' in fname or fname.endswith(b'/.hg'):
            return bail(b'nested-repo-detected')
        if not fexists:
            # if marked as deleted and we don't already have a change
            # record, mark it as deleted. If we already have an entry
            # for fname then it was either part of walkexplicit or was
            # an earlier result that was a case change
            if (
                fname not in results
                and fname in dmap
                and (matchalways or matchfn(fname))
            ):
                results[fname] = None
        elif kind == dirkind:
            if fname in dmap and (matchalways or matchfn(fname)):
                results[fname] = None
        elif kind == regkind or kind == lnkkind:
            if fname in dmap:
                if matchalways or matchfn(fname):
                    results[fname] = entry
            elif (matchalways or matchfn(fname)) and not ignore(fname):
                results[fname] = entry
        elif fname in dmap and (matchalways or matchfn(fname)):
            results[fname] = None
    # step 3: query notable files we don't already know about
    # XXX try not to iterate over the entire dmap
    if normalize:
        # any notable files that have changed case will already be handled
        # above, so just check membership in the foldmap
        notefiles = {
            normalize(f, True, True)
            for f in notefiles
            if normcase(f) not in foldmap
        }
    visit = {
        f
        for f in notefiles
        if (f not in results and matchfn(f) and (f in dmap or not ignore(f)))
    }
    if not fresh_instance:
        if matchalways:
            visit.update(f for f in nonnormalset if f not in results)
            visit.update(f for f in copymap if f not in results)
        else:
            visit.update(
                f for f in nonnormalset if f not in results and matchfn(f)
            )
            visit.update(f for f in copymap if f not in results and matchfn(f))
    else:
        if matchalways:
            visit.update(
                f for f, st in pycompat.iteritems(dmap) if f not in results
            )
            visit.update(f for f in copymap if f not in results)
        else:
            visit.update(
                f
                for f, st in pycompat.iteritems(dmap)
                if f not in results and matchfn(f)
            )
            visit.update(f for f in copymap if f not in results and matchfn(f))
    # Paths that fail the auditor (e.g. escape the repo) are reported as
    # missing; the rest are stat'ed in one batch.
    audit = pathutil.pathauditor(self._root, cached=True).check
    auditpass = [f for f in visit if audit(f)]
    auditpass.sort()
    auditfail = visit.difference(auditpass)
    for f in auditfail:
        results[f] = None
    nf = iter(auditpass)
    for st in util.statfiles([join(f) for f in auditpass]):
        f = next(nf)
        if st or f in dmap:
            results[f] = st
    for s in subrepos:
        del results[s]
    del results[b'.hg']
    return results
def overridestatus(
    orig,
    self,
    node1=b'.',
    node2=None,
    match=None,
    ignored=False,
    clean=False,
    unknown=False,
    listsubrepos=False,
):
    """Replacement for ``localrepo.status``.

    ``orig`` is the wrapped status method; the remaining parameters mirror
    its signature.  In addition to delegating to ``orig`` this (a) updates
    the saved fsmonitor state when it is safe to do so, and (b) in
    ``paranoid`` mode re-runs status with the regular walk and compares
    the two result sets.
    """
    listignored = ignored
    listclean = clean
    listunknown = unknown

    # Paranoid-mode helper: dump any mismatch between the Watchman-backed
    # result sets (l1) and the plain-walk result sets (l2) to a log file.
    def _cmpsets(l1, l2):
        try:
            if b'FSMONITOR_LOG_FILE' in encoding.environ:
                fn = encoding.environ[b'FSMONITOR_LOG_FILE']
                f = open(fn, b'wb')
            else:
                fn = b'fsmonitorfail.log'
                f = self.vfs.open(fn, b'wb')
        except (IOError, OSError):
            self.ui.warn(_(b'warning: unable to write to %s\n') % fn)
            return
        try:
            for i, (s1, s2) in enumerate(zip(l1, l2)):
                if set(s1) != set(s2):
                    f.write(b'sets at position %d are unequal\n' % i)
                    f.write(b'watchman returned: %s\n' % s1)
                    f.write(b'stat returned: %s\n' % s2)
        finally:
            f.close()

    if isinstance(node1, context.changectx):
        ctx1 = node1
    else:
        ctx1 = self[node1]
    if isinstance(node2, context.changectx):
        ctx2 = node2
    else:
        ctx2 = self[node2]
    working = ctx2.rev() is None
    parentworking = working and ctx1 == self[b'.']
    match = match or matchmod.always()
    # Maybe we can use this opportunity to update Watchman's state.
    # Mercurial uses workingcommitctx and/or memctx to represent the part of
    # the workingctx that is to be committed. So don't update the state in
    # that case.
    # HG_PENDING is set in the environment when the dirstate is being updated
    # in the middle of a transaction; we must not update our state in that
    # case, or we risk forgetting about changes in the working copy.
    updatestate = (
        parentworking
        and match.always()
        and not isinstance(ctx2, (context.workingcommitctx, context.memctx))
        and b'HG_PENDING' not in encoding.environ
    )
    try:
        if self._fsmonitorstate.walk_on_invalidate:
            # Use a short timeout to query the current clock. If that
            # takes too long then we assume that the service will be slow
            # to answer our query.
            # walk_on_invalidate indicates that we prefer to walk the
            # tree ourselves because we can ignore portions that Watchman
            # cannot and we tend to be faster in the warmer buffer cache
            # cases.
            self._watchmanclient.settimeout(0.1)
        else:
            # Give Watchman more time to potentially complete its walk
            # and return the initial clock. In this mode we assume that
            # the filesystem will be slower than parsing a potentially
            # very large Watchman result set.
            self._watchmanclient.settimeout(self._fsmonitorstate.timeout + 0.1)
        startclock = self._watchmanclient.getcurrentclock()
    except Exception as ex:
        self._watchmanclient.clearconnection()
        _handleunavailable(self.ui, self._fsmonitorstate, ex)
        # boo, Watchman failed. bail
        return orig(
            node1,
            node2,
            match,
            listignored,
            listclean,
            listunknown,
            listsubrepos,
        )
    if updatestate:
        # We need info about unknown files. This may make things slower the
        # first time, but whatever.
        stateunknown = True
    else:
        stateunknown = listunknown
    if updatestate:
        # poststatus persists the observed clock/ignore-hash/notable files
        # once the dirstate status run completes.
        ps = poststatus(startclock)
        self.addpostdsstatus(ps)
    r = orig(
        node1, node2, match, listignored, listclean, stateunknown, listsubrepos
    )
    modified, added, removed, deleted, unknown, ignored, clean = r
    if not listunknown:
        unknown = []
    # don't do paranoid checks if we're not going to query Watchman anyway
    full = listclean or match.traversedir is not None
    if self._fsmonitorstate.mode == b'paranoid' and not full:
        # run status again and fall back to the old walk this time
        self.dirstate._fsmonitordisable = True
        # shut the UI up
        quiet = self.ui.quiet
        self.ui.quiet = True
        fout, ferr = self.ui.fout, self.ui.ferr
        self.ui.fout = self.ui.ferr = open(os.devnull, b'wb')
        try:
            rv2 = orig(
                node1,
                node2,
                match,
                listignored,
                listclean,
                listunknown,
                listsubrepos,
            )
        finally:
            self.dirstate._fsmonitordisable = False
            self.ui.quiet = quiet
            self.ui.fout, self.ui.ferr = fout, ferr
        # clean isn't tested since it's set to True above
        with self.wlock():
            _cmpsets(
                [modified, added, removed, deleted, unknown, ignored, clean],
                rv2,
            )
        modified, added, removed, deleted, unknown, ignored, clean = rv2
    return scmutil.status(
        modified, added, removed, deleted, unknown, ignored, clean
    )
class poststatus(object):
    """Post-dirstate-status callback that persists fsmonitor state.

    Stores the watchman clock observed for the status run together with a
    hash of the current ignore rules and the list of notable (changed or
    unknown) files, so the next run can query Watchman incrementally.
    """

    def __init__(self, startclock):
        # Fallback clock if the walk did not record a more recent one.
        self._startclock = pycompat.sysbytes(startclock)

    def __call__(self, wctx, status):
        repo = wctx.repo()
        clock = repo._fsmonitorstate.getlastclock() or self._startclock
        hashignore = _hashignore(repo.dirstate._ignore)
        notefiles = (
            status.modified
            + status.added
            + status.removed
            + status.deleted
            + status.unknown
        )
        repo._fsmonitorstate.set(clock, hashignore, notefiles)
def makedirstate(repo, dirstate):
    """Mutate ``dirstate``'s class in place into an fsmonitor-aware
    subclass bound to ``repo``.

    The subclass routes ``walk()`` through Watchman (``overridewalk``)
    and invalidates the saved fsmonitor state whenever the dirstate is
    rebuilt or invalidated.
    """

    class fsmonitordirstate(dirstate.__class__):
        def _fsmonitorinit(self, repo):
            # _fsmonitordisable is used in paranoid mode
            self._fsmonitordisable = False
            self._fsmonitorstate = repo._fsmonitorstate
            self._watchmanclient = repo._watchmanclient
            # weakref proxy avoids a repo <-> dirstate reference cycle
            self._repo = weakref.proxy(repo)

        def walk(self, *args, **kwargs):
            orig = super(fsmonitordirstate, self).walk
            if self._fsmonitordisable:
                return orig(*args, **kwargs)
            return overridewalk(orig, self, *args, **kwargs)

        def rebuild(self, *args, **kwargs):
            # Any rebuild makes the cached Watchman view untrustworthy.
            self._fsmonitorstate.invalidate()
            return super(fsmonitordirstate, self).rebuild(*args, **kwargs)

        def invalidate(self, *args, **kwargs):
            self._fsmonitorstate.invalidate()
            return super(fsmonitordirstate, self).invalidate(*args, **kwargs)

    dirstate.__class__ = fsmonitordirstate
    dirstate._fsmonitorinit(repo)
def wrapdirstate(orig, self):
    """Filecache wrapper for ``localrepo.dirstate``.

    Upgrades the freshly created dirstate to the fsmonitor-aware subclass,
    but only when this repo was set up with fsmonitor state (i.e. Watchman
    is usable for it).
    """
    dirstate = orig(self)
    if util.safehasattr(self, b'_fsmonitorstate'):
        # Watchman is available for this repo: swap in the subclass.
        makedirstate(self, dirstate)
    return dirstate
def extsetup(ui):
    """Extension setup: hook dirstate creation, working-copy updates and,
    on macOS, symlink creation."""
    extensions.wrapfilecache(
        localrepo.localrepository, b'dirstate', wrapdirstate
    )
    if pycompat.isdarwin:
        # An assist for avoiding the dangling-symlink fsevents bug
        extensions.wrapfunction(os, b'symlink', wrapsymlink)
    extensions.wrapfunction(merge, b'_update', wrapupdate)
def wrapsymlink(orig, source, link_name):
    """``os.symlink`` wrapper: after creating a (possibly dangling)
    symlink, touch the parent directory to encourage fsevents to deliver
    a notification for it."""
    try:
        return orig(source, link_name)
    finally:
        parent = os.path.dirname(link_name)
        try:
            os.utime(parent, None)
        except OSError:
            # Best effort only; the symlink itself was still created.
            pass
class state_update(object):
    """This context manager is responsible for dispatching the state-enter
    and state-leave signals to the watchman service. The enter and leave
    methods can be invoked manually (for scenarios where context manager
    semantics are not possible). If parameters oldnode and newnode are None,
    they will be populated based on current working copy in enter and
    leave, respectively. Similarly, if the distance is none, it will be
    calculated based on the oldnode and newnode in the leave method."""

    def __init__(
        self,
        repo,
        name,
        oldnode=None,
        newnode=None,
        distance=None,
        partial=False,
    ):
        self.repo = repo.unfiltered()
        self.name = name
        self.oldnode = oldnode
        self.newnode = newnode
        self.distance = distance
        self.partial = partial
        # wlock we acquired ourselves in enter() (None if caller held one)
        self._lock = None
        # True only after a successful state-enter; gates the state-leave
        self.need_leave = False

    def __enter__(self):
        self.enter()

    def enter(self):
        # Make sure we have a wlock prior to sending notifications to watchman.
        # We don't want to race with other actors. In the update case,
        # merge.update is going to take the wlock almost immediately. We are
        # effectively extending the lock around several short sanity checks.
        if self.oldnode is None:
            self.oldnode = self.repo[b'.'].node()
        if self.repo.currentwlock() is None:
            # wlocknostateupdate (when present) avoids recursively sending
            # another state-enter from the lock itself.
            if util.safehasattr(self.repo, b'wlocknostateupdate'):
                self._lock = self.repo.wlocknostateupdate()
            else:
                self._lock = self.repo.wlock()
        self.need_leave = self._state(b'state-enter', hex(self.oldnode))
        return self

    def __exit__(self, type_, value, tb):
        abort = True if type_ else False
        self.exit(abort=abort)

    def exit(self, abort=False):
        try:
            if self.need_leave:
                status = b'failed' if abort else b'ok'
                if self.newnode is None:
                    self.newnode = self.repo[b'.'].node()
                if self.distance is None:
                    self.distance = calcdistance(
                        self.repo, self.oldnode, self.newnode
                    )
                self._state(b'state-leave', hex(self.newnode), status=status)
        finally:
            # Always drop our lock, even if the state-leave signal failed.
            self.need_leave = False
            if self._lock:
                self._lock.release()

    def _state(self, cmd, commithash, status=b'ok'):
        # Send one state-enter/state-leave command; returns True when the
        # signal was delivered, False otherwise.
        if not util.safehasattr(self.repo, b'_watchmanclient'):
            return False
        try:
            self.repo._watchmanclient.command(
                cmd,
                {
                    b'name': self.name,
                    b'metadata': {
                        # the target revision
                        b'rev': commithash,
                        # approximate number of commits between current and target
                        b'distance': self.distance if self.distance else 0,
                        # success/failure (only really meaningful for state-leave)
                        b'status': status,
                        # whether the working copy parent is changing
                        b'partial': self.partial,
                    },
                },
            )
            return True
        except Exception as e:
            # Swallow any errors; fire and forget
            self.repo.ui.log(
                b'watchman', b'Exception %s while running %s\n', e, cmd
            )
            return False
def calcdistance(repo, oldnode, newnode):
    """Estimate the number of commits between two nodes: the sum of the
    hops from each node down to their common ancestor."""
    ancrev = repo[repo.changelog.ancestor(oldnode, newnode)].rev()
    oldrev = repo[oldnode].rev()
    newrev = repo[newnode].rev()
    return abs(oldrev - ancrev) + abs(newrev - ancrev)
# Bracket working copy updates with calls to the watchman state-enter
# and state-leave commands. This allows clients to perform more intelligent
# settling during bulk file change scenarios
# https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling
def wrapupdate(
    orig,
    repo,
    node,
    branchmerge,
    force,
    ancestor=None,
    mergeancestor=False,
    labels=None,
    matcher=None,
    **kwargs
):
    """Wrapper for ``merge._update`` that brackets the update with the
    watchman ``hg.update`` state-enter/state-leave signals."""
    oldnode = repo[b'.'].node()
    newnode = repo[node].node()
    # A partial update (explicit matcher) gets distance 0; otherwise
    # estimate the number of commits being crossed.
    partial = not (matcher is None or matcher.always())
    distance = 0
    if not partial:
        distance = calcdistance(repo.unfiltered(), oldnode, newnode)
    with state_update(
        repo,
        name=b"hg.update",
        oldnode=oldnode,
        newnode=newnode,
        distance=distance,
        partial=partial,
    ):
        return orig(
            repo,
            node,
            branchmerge,
            force,
            ancestor,
            mergeancestor,
            labels,
            matcher,
            **kwargs
        )
def repo_has_depth_one_nested_repo(repo):
    """Return True if any direct child of the working directory is itself
    a Mercurial repository (i.e. contains a ``.hg`` directory)."""
    for entry in repo.wvfs.listdir():
        if not os.path.isdir(os.path.join(repo.root, entry, b'.hg')):
            continue
        repo.ui.debug(
            b'fsmonitor: sub-repository %r detected, fsmonitor disabled\n'
            % entry
        )
        return True
    return False
def reposetup(ui, repo):
    """Per-repo setup: attach fsmonitor state and a Watchman client, and
    wrap the repo class so ``status()`` goes through ``overridestatus``.

    Silently (or with a warning, for blacklisted extensions) disables
    itself for incompatible extensions, subrepos, nested repositories,
    ``mode = off``, or an unavailable Watchman service.
    """
    # We don't work with largefiles or inotify
    exts = extensions.enabled()
    for ext in _blacklist:
        if ext in exts:
            ui.warn(
                _(
                    b'The fsmonitor extension is incompatible with the %s '
                    b'extension and has been disabled.\n'
                )
                % ext
            )
            return
    if repo.local():
        # We don't work with subrepos either.
        #
        # if repo[None].substate can cause a dirstate parse, which is too
        # slow. Instead, look for a file called hgsubstate,
        if repo.wvfs.exists(b'.hgsubstate') or repo.wvfs.exists(b'.hgsub'):
            return
        if repo_has_depth_one_nested_repo(repo):
            return
        fsmonitorstate = state.state(repo)
        if fsmonitorstate.mode == b'off':
            return
        try:
            client = watchmanclient.client(repo.ui, repo.root)
        except Exception as ex:
            _handleunavailable(ui, fsmonitorstate, ex)
            return
        repo._fsmonitorstate = fsmonitorstate
        repo._watchmanclient = client
        dirstate, cached = localrepo.isfilecached(repo, b'dirstate')
        if cached:
            # at this point since fsmonitorstate wasn't present,
            # repo.dirstate is not a fsmonitordirstate
            makedirstate(repo, dirstate)

        class fsmonitorrepo(repo.__class__):
            def status(self, *args, **kwargs):
                orig = super(fsmonitorrepo, self).status
                return overridestatus(orig, self, *args, **kwargs)

            def wlocknostateupdate(self, *args, **kwargs):
                # Plain wlock with no watchman state-enter/leave signals;
                # used by state_update itself to avoid recursion.
                return super(fsmonitorrepo, self).wlock(*args, **kwargs)

            def wlock(self, *args, **kwargs):
                l = super(fsmonitorrepo, self).wlock(*args, **kwargs)
                if not ui.configbool(
                    b"experimental", b"fsmonitor.transaction_notify"
                ):
                    return l
                if l.held != 1:
                    # Only signal on the outermost lock acquisition.
                    return l
                origrelease = l.releasefn

                # Send state-leave when the outermost lock is released.
                def staterelease():
                    if origrelease:
                        origrelease()
                    if l.stateupdate:
                        l.stateupdate.exit()
                        l.stateupdate = None

                try:
                    l.stateupdate = None
                    l.stateupdate = state_update(self, name=b"hg.transaction")
                    l.stateupdate.enter()
                    l.releasefn = staterelease
                except Exception as e:
                    # Swallow any errors; fire and forget
                    self.ui.log(
                        b'watchman', b'Exception in state update %s\n', e
                    )
                return l

        repo.__class__ = fsmonitorrepo
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import sys
import warnings
from optparse import OptionParser, make_option as Option
from .. import __version__, Celery
from ..exceptions import CDeprecationWarning, CPendingDeprecationWarning
# always enable DeprecationWarnings, so our users can see them.
for warning in (CDeprecationWarning, CPendingDeprecationWarning):
    # "once": show each unique warning only the first time it is raised.
    warnings.simplefilter("once", warning, 0)
class Command(object):
"""Base class for command line applications.
:keyword app: The current app.
:keyword get_app: Callable returning the current app if no app provided.
"""
_default_broker_url = r'amqp://guest:guest@localhost:5672//'
#: Arg list used in help.
args = ''
#: Application version.
version = __version__
#: If false the parser will raise an exception if positional
#: args are provided.
supports_args = True
#: List of options (without preload options).
option_list = ()
#: List of options to parse before parsing other options.
preload_options = (
Option("--app",
default=None, action="store", dest="app",
help="Name of the app instance to use. "),
Option("-b", "--broker",
default=None, action="store", dest="broker",
help="Broker URL. Default is %s" % (
_default_broker_url, )),
Option("--loader",
default=None, action="store", dest="loader",
help="Name of the loader class to use. "
"Taken from the environment variable CELERY_LOADER, "
"or 'default' if that is not set."),
Option("--config",
default="celeryconfig", action="store",
dest="config_module",
help="Name of the module to read configuration from."),
)
#: Enable if the application should support config from the cmdline.
enable_config_from_cmdline = False
#: Default configuration namespace.
namespace = "celery"
Parser = OptionParser
def __init__(self, app=None, get_app=None):
self.app = app
self.get_app = get_app or self._get_default_app
def run(self, *args, **options):
"""This is the body of the command called by :meth:`handle_argv`."""
raise NotImplementedError("subclass responsibility")
def execute_from_commandline(self, argv=None):
"""Execute application from command line.
:keyword argv: The list of command line arguments.
Defaults to ``sys.argv``.
"""
if argv is None:
argv = list(sys.argv)
argv = self.setup_app_from_commandline(argv)
prog_name = os.path.basename(argv[0])
return self.handle_argv(prog_name, argv[1:])
def usage(self):
"""Returns the command-line usage string for this app."""
return "%%prog [options] %s" % (self.args, )
def get_options(self):
"""Get supported command line options."""
return self.option_list
def expanduser(self, value):
if isinstance(value, basestring):
return os.path.expanduser(value)
return value
def handle_argv(self, prog_name, argv):
"""Parses command line arguments from ``argv`` and dispatches
to :meth:`run`.
:param prog_name: The program name (``argv[0]``).
:param argv: Command arguments.
Exits with an error message if :attr:`supports_args` is disabled
and ``argv`` contains positional arguments.
"""
options, args = self.parse_options(prog_name, argv)
options = dict((k, self.expanduser(v))
for k, v in vars(options).iteritems()
if not k.startswith('_'))
argv = map(self.expanduser, argv)
if not self.supports_args and args:
sys.stderr.write(
"\nUnrecognized command line arguments: %s\n" % (
", ".join(args), ))
sys.stderr.write("\nTry --help?\n")
sys.exit(1)
return self.run(*args, **options)
def parse_options(self, prog_name, arguments):
"""Parse the available options."""
# Don't want to load configuration to just print the version,
# so we handle --version manually here.
if "--version" in arguments:
print(self.version)
sys.exit(0)
parser = self.create_parser(prog_name)
return parser.parse_args(arguments)
def create_parser(self, prog_name):
return self.Parser(prog=prog_name,
usage=self.usage(),
version=self.version,
option_list=(self.preload_options +
self.get_options()))
def prepare_preload_options(self, options):
"""Optional handler to do additional processing of preload options.
Configuration must not have been initialized
until after this is called.
"""
pass
def setup_app_from_commandline(self, argv):
preload_options = self.parse_preload_options(argv)
self.prepare_preload_options(preload_options)
app = (preload_options.get("app") or
os.environ.get("CELERY_APP") or
self.app)
loader = (preload_options.get("loader") or
os.environ.get("CELERY_LOADER") or
"default")
broker = preload_options.get("broker", None)
if broker:
os.environ["CELERY_BROKER_URL"] = broker
config_module = preload_options.get("config_module")
if config_module:
os.environ["CELERY_CONFIG_MODULE"] = config_module
if app:
self.app = self.get_cls_by_name(app)
else:
self.app = self.get_app(loader=loader)
if self.enable_config_from_cmdline:
argv = self.process_cmdline_config(argv)
return argv
def get_cls_by_name(self, name):
from ..utils import get_cls_by_name, import_from_cwd
return get_cls_by_name(name, imp=import_from_cwd)
def process_cmdline_config(self, argv):
try:
cargs_start = argv.index('--')
except ValueError:
return argv
argv, cargs = argv[:cargs_start], argv[cargs_start + 1:]
self.app.config_from_cmdline(cargs, namespace=self.namespace)
return argv
    def parse_preload_options(self, args):
        """Scan ``args`` for preload options without removing them.

        Recognizes both ``--opt=value`` and ``-o value``/``--opt value``
        spellings for every option in ``self.preload_options`` and returns
        a ``{dest: value}`` mapping; unrelated arguments are ignored.
        """
        acc = {}
        opts = {}
        # Map every spelling (long and short) to the option's dest name.
        for opt in self.preload_options:
            for t in (opt._long_opts, opt._short_opts):
                opts.update(dict(zip(t, [opt.dest] * len(t))))
        index = 0
        length = len(args)
        while index < length:
            arg = args[index]
            if arg.startswith('--') and '=' in arg:
                key, value = arg.split('=', 1)
                dest = opts.get(key)
                if dest:
                    acc[dest] = value
            elif arg.startswith('-'):
                dest = opts.get(arg)
                if dest:
                    # Value is the next argument; skip over it.
                    acc[dest] = args[index + 1]
                    index += 1
            index += 1
        return acc
    def _get_default_app(self, *args, **kwargs):
        # Construct a default Celery app when none was specified on the
        # command line or via the environment.
        return Celery(*args, **kwargs)
def daemon_options(default_pidfile=None, default_logfile=None):
    """Return the optparse options shared by all detaching commands.

    Args:
        default_pidfile: default value for ``--pidfile``.
        default_logfile: default value for ``--logfile``.

    Returns:
        tuple of ``Option`` instances (logfile, pidfile, uid, gid,
        umask, workdir).
    """
    return (
        Option('-f', '--logfile', default=default_logfile,
               action="store", dest="logfile",
               help="Path to the logfile"),
        Option('--pidfile', default=default_pidfile,
               action="store", dest="pidfile",
               help="Path to the pidfile."),
        Option('--uid', default=None,
               action="store", dest="uid",
               help="Effective user id to run as when detached."),
        Option('--gid', default=None,
               action="store", dest="gid",
               help="Effective group id to run as when detached."),
        Option('--umask', default=0,
               action="store", type="int", dest="umask",
               help="Umask of the process when detached."),
        Option('--workdir', default=None,
               action="store", dest="working_directory",
               help="Directory to change to when detached."),
    )
|
|
from __future__ import absolute_import, print_function
import os
import sys
import requests
import json
import numpy as np
import time
import logging
cur_dir = os.path.dirname(os.path.abspath(__file__))
import torch
import torch.utils.data as data
from torch import nn, optim
from torch.autograd import Variable
import torch.nn.functional as F
from test_utils import (create_docker_connection, BenchmarkException, headers,
log_clipper_state)
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath("%s/../clipper_admin" % cur_dir))
from clipper_admin.deployers.onnx import deploy_pytorch_model, create_pytorch_endpoint
logging.basicConfig(
format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%y-%m-%d:%H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
# Names used when registering the Clipper application and model.
app_name = "caffe2-test"
model_name = "caffe2-model"
def normalize(x):
    """Scale raw pixel values (0-255) into [0, 1] as float64."""
    as_double = x.astype(np.double)
    return as_double / 255.0
def objective(y, pos_label):
    """Binarize a label: 1 when it equals *pos_label*, else 0."""
    return 1 if y == pos_label else 0
def parsedata(train_path, pos_label):
    """Load a CSV digits file; return (features, binarized labels).

    The first column of each row is the label; remaining columns are
    pixel features. Labels are binarized against *pos_label* via
    ``objective``.
    """
    raw = np.genfromtxt(train_path, delimiter=',', dtype=int)
    features = raw[:, 1:]
    raw_labels = raw[:, :1]
    binary_labels = [objective(label, pos_label) for label in raw_labels]
    return (features, binary_labels)
def predict(model, inputs):
    """Run the Caffe2/ONNX *model* on *inputs*; return predictions as strings."""
    batch = np.array(inputs).astype(np.float32)
    outputs = model.run(batch)
    return [str(value) for value in outputs[0]]
def deploy_and_test_model(clipper_conn,
                          model,
                          inputs,
                          version,
                          link_model=False,
                          predict_fn=predict):
    """Deploy *model* to Clipper via the ONNX/Caffe2 deployer, then query it.

    Args:
        clipper_conn: admin connection to a running Clipper cluster.
        model: trained PyTorch model to export through ONNX.
        inputs: sample input used to trace/export the model.
        version: version to deploy the model under.
        link_model: if True, link the model to the module-level app first.
        predict_fn: prediction function shipped inside the container.
    """
    deploy_pytorch_model(
        clipper_conn,
        model_name,
        version,
        "integers",
        inputs,
        predict_fn,
        model,
        onnx_backend="caffe2")
    # Give the deployment time to settle before (optionally) linking.
    time.sleep(5)
    if link_model:
        clipper_conn.link_model_to_app(app_name, model_name)
        # Give the link time to propagate before querying.
        time.sleep(5)
    test_model(clipper_conn, app_name, version)
def test_model(clipper_conn, app, version):
    """Send 25 predict requests to *app* and fail on bad responses.

    Raises BenchmarkException when any request returns a non-OK status,
    or when more than half of the predictions come back as the app's
    default response (i.e. the model never answered).
    """
    # Give the freshly deployed container time to register and warm up.
    time.sleep(25)
    num_preds = 25
    num_defaults = 0
    addr = clipper_conn.get_query_addr()
    for i in range(num_preds):
        response = requests.post(
            "http://%s/%s/predict" % (addr, app),
            headers=headers,
            data=json.dumps({
                'input': get_test_point()
            }))
        result = response.json()
        if response.status_code == requests.codes.ok and result["default"]:
            num_defaults += 1
        elif response.status_code != requests.codes.ok:
            logger.error(result)
            raise BenchmarkException(response.text)
    if num_defaults > 0:
        logger.error("Error: %d/%d predictions were default" % (num_defaults,
                                                                num_preds))
    # Tolerate some defaults (cold start); fail only past 50%.
    if num_defaults > num_preds / 2:
        raise BenchmarkException("Error querying APP %s, MODEL %s:%d" %
                                 (app, model_name, version))
# Define a simple NN model
class BasicNN(nn.Module):
    """Minimal MNIST-style classifier: one linear layer over a 28x28 input.

    ``forward`` accepts a raw numpy array or a tensor holding exactly
    784 values and returns class probabilities of shape (1, 2).
    """

    def __init__(self):
        super(BasicNN, self).__init__()
        self.net = nn.Linear(28 * 28, 2)

    def forward(self, x):
        # Accept raw numpy input and convert it to a float tensor.
        if isinstance(x, np.ndarray):
            x = torch.from_numpy(x)
            x = x.float()
        if isinstance(x, type(torch.randn(1))):
            x = Variable(x)
        x = x.view(1, 1, 28, 28)
        x = x / 255.0  # scale raw pixel values into [0, 1]
        batch_size = x.size(0)
        x = x.view(batch_size, -1)
        output = self.net(x.float())
        # Fix: pass dim explicitly -- relying on the implicit softmax
        # dimension is deprecated/ambiguous; dim=1 matches the previous
        # behavior for this 2-D (batch, classes) output.
        return F.softmax(output, dim=1)
def train(model):
    """Train *model* with SGD (lr=0.001) for 10 epochs; return it.

    NOTE(review): relies on the module-level globals ``train_loader`` and
    ``train_y`` being set by the ``__main__`` block; the label for each
    sample is looked up by the 1-based enumerate index.
    """
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=0.001)
    for epoch in range(10):
        for i, d in enumerate(train_loader, 1):
            image, j = d
            optimizer.zero_grad()
            output = model(image)
            loss = F.cross_entropy(output,
                                   Variable(
                                       torch.LongTensor([train_y[i - 1]])))
            loss.backward()
            optimizer.step()
    return model
def get_test_point():
    """Return a random flat 28x28 image (784 values in [0, 255))."""
    return [np.random.randint(255) for _ in range(28 * 28)]
# Define a dataloader to read data
class TrainingDataset(data.Dataset):
    """In-memory dataset pairing flat images with their labels.

    Each item is returned as ``(image_tensor, label_tensor)``.
    """

    def __init__(self, data, label):
        # NOTE: the `data` parameter shadows the torch.utils.data module
        # name inside this method only; kept for interface compatibility.
        self.imgs = data
        self.classes = label

    def __len__(self):
        # Fix: the Dataset protocol (and DataLoader) require __len__;
        # the original class omitted it.
        return len(self.imgs)

    def __getitem__(self, index):
        img = self.imgs[index]
        label = self.classes[index]
        img = torch.Tensor(img)
        return img, torch.Tensor(label)
if __name__ == "__main__":
    pos_label = 3
    import random
    cluster_name = "onnx-{}".format(random.randint(0, 5000))
    try:
        clipper_conn = create_docker_connection(
            cleanup=False, start_clipper=True, new_name=cluster_name)
        train_path = os.path.join(cur_dir, "data/train.data")
        train_x, train_y = parsedata(train_path, pos_label)
        train_x = normalize(train_x)
        train_loader = TrainingDataset(train_x, train_y)
        try:
            clipper_conn.register_application(app_name, "integers",
                                              "default_pred", 100000)
            time.sleep(1)
            addr = clipper_conn.get_query_addr()
            # Sanity-check the app endpoint before deploying any model.
            response = requests.post(
                "http://%s/%s/predict" % (addr, app_name),
                headers=headers,
                data=json.dumps({
                    'input': get_test_point()
                }))
            result = response.json()
            if response.status_code != requests.codes.ok:
                logger.error("Error: %s" % response.text)
                raise BenchmarkException("Error creating app %s" % app_name)
            version = 1
            model = BasicNN()
            nn_model = train(model)
            inputs = Variable(torch.randn(len(get_test_point())))
            deploy_and_test_model(
                clipper_conn, nn_model, inputs, version, link_model=True)
            app_and_model_name = "easy-register-app-model"
            create_pytorch_endpoint(
                clipper_conn,
                app_and_model_name,
                "integers",
                inputs,
                predict,
                nn_model,
                onnx_backend="caffe2")
            test_model(clipper_conn, app_and_model_name, 1)
        except BenchmarkException:
            # BUG FIX: sys.exit(1) used to come first here, which made the
            # logging and cluster-cleanup lines below unreachable. Log and
            # tear the cluster down first, then exit non-zero.
            log_clipper_state(clipper_conn)
            logger.exception("BenchmarkException")
            clipper_conn = create_docker_connection(
                cleanup=True, start_clipper=False, cleanup_name=cluster_name)
            sys.exit(1)
        else:
            # Success: tear down the cluster.
            clipper_conn = create_docker_connection(
                cleanup=True, start_clipper=False, cleanup_name=cluster_name)
    except Exception:
        logger.exception("Exception")
        clipper_conn = create_docker_connection(
            cleanup=True, start_clipper=False, cleanup_name=cluster_name)
        sys.exit(1)
|
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Start and stop Web Page Replay."""
import atexit
import logging
import os
import re
import signal
import subprocess
import sys
import tempfile
import urllib
from telemetry.core import exceptions
from telemetry.core import util
_REPLAY_DIR = os.path.join(
util.GetTelemetryThirdPartyDir(), 'webpagereplay')
class ReplayError(Exception):
  """Base (catch-all) exception for the Web Page Replay module."""
  pass
class ReplayNotFoundError(ReplayError):
  """Raised when a path required by replay (archive/script) is missing."""
  def __init__(self, label, path):
    super(ReplayNotFoundError, self).__init__()
    # Stash both pieces on args so str() can report them together.
    self.args = (label, path)
  def __str__(self):
    return 'Path does not exist for %s: %s' % self.args
class ReplayNotStartedError(ReplayError):
  """Raised when the replay subprocess fails to start and respond in time."""
  pass
class ReplayServer(object):
  """Start and Stop Web Page Replay.
  Web Page Replay is a proxy that can record and "replay" web pages with
  simulated network characteristics -- without having to edit the pages
  by hand. With WPR, tests can use "real" web content, and catch
  performance issues that may result from introducing network delays and
  bandwidth throttling.
  Example:
     with ReplayServer(archive_path):
       self.NavigateToURL(start_url)
       self.WaitUntil(...)
  """
  def __init__(self, archive_path, replay_host, http_port, https_port, dns_port,
               replay_options):
    """Initialize ReplayServer.
    Args:
      archive_path: a path to a specific WPR archive (required).
      replay_host: the hostname to serve traffic.
      http_port: an integer port on which to serve HTTP traffic. May be zero
          to let the OS choose an available port.
      https_port: an integer port on which to serve HTTPS traffic. May be zero
          to let the OS choose an available port.
      dns_port: an integer port on which to serve DNS traffic. May be zero
          to let the OS choose an available port. If None DNS forwarding is
          disabled.
      replay_options: an iterable of options strings to forward to replay.py.
    """
    self.archive_path = archive_path
    self._replay_host = replay_host
    self._use_dns_server = dns_port is not None
    self._started_ports = {}  # a dict such as {'http': 80, 'https': 443}
    # A temporary path for storing stdout & stderr of the webpagereplay
    # subprocess.
    self._temp_log_file_path = None
    replay_py = os.path.join(_REPLAY_DIR, 'replay.py')
    self._cmd_line = self._GetCommandLine(
        replay_py, self._replay_host, http_port, https_port, dns_port,
        replay_options, archive_path)
    # When recording, only the archive's directory must exist; when
    # replaying, the archive file itself is required.
    if '--record' in replay_options:
      self._CheckPath('archive directory', os.path.dirname(self.archive_path))
    elif not os.path.exists(self.archive_path):
      self._CheckPath('archive file', self.archive_path)
    self._CheckPath('replay script', replay_py)
    self.replay_process = None
  @staticmethod
  def _GetCommandLine(replay_py, host_ip, http_port, https_port, dns_port,
                      replay_options, archive_path):
    """Set WPR command-line options. Can be overridden if needed."""
    cmd_line = [sys.executable, replay_py]
    cmd_line.extend([
        '--host=%s' % host_ip,
        '--port=%s' % http_port,
        '--ssl_port=%s' % https_port
        ])
    if dns_port is not None:
      # Note that if --host is not '127.0.0.1', Replay will override the local
      # DNS nameserver settings to point to the replay-started DNS server.
      cmd_line.append('--dns_port=%s' % dns_port)
    else:
      cmd_line.append('--no-dns_forwarding')
    cmd_line.extend([
        '--use_closest_match',
        '--log_level=warning'
        ])
    cmd_line.extend(replay_options)
    cmd_line.append(archive_path)
    return cmd_line
  def _CheckPath(self, label, path):
    # Raise a descriptive error when a required path is missing.
    if not os.path.exists(path):
      raise ReplayNotFoundError(label, path)
  def _OpenLogFile(self):
    """Opens the log file for writing."""
    log_dir = os.path.dirname(self._temp_log_file_path)
    if not os.path.exists(log_dir):
      os.makedirs(log_dir)
    return open(self._temp_log_file_path, 'w')
  def _LogLines(self):
    """Yields the log lines."""
    if not os.path.isfile(self._temp_log_file_path):
      return
    with open(self._temp_log_file_path) as f:
      for line in f:
        yield line
  def _IsStarted(self):
    """Returns true if the server is up and running."""
    if self.replay_process.poll() is not None:
      # The process terminated.
      return False
    # Two-phase check: first make sure replay has reported all expected
    # ports in its log, then probe the HTTP port directly.
    def HasIncompleteStartedPorts():
      return ('http' not in self._started_ports or
              'https' not in self._started_ports or
              (self._use_dns_server and 'dns' not in self._started_ports))
    if HasIncompleteStartedPorts():
      self._started_ports = self._ParseLogFilePorts(self._LogLines())
    if HasIncompleteStartedPorts():
      return False
    try:
      # HTTPS may require SNI (which urllib does not speak), so only check
      # that HTTP responds.
      return 200 == self._UrlOpen('web-page-replay-generate-200').getcode()
    except IOError:
      return False
  @staticmethod
  def _ParseLogFilePorts(log_lines):
    """Returns the ports on which replay listens as reported in its log file.
    Only matches HTTP, HTTPS, and DNS. One call may return only some
    of the ports depending on what has been written to the log file.
    Example log lines:
      2014-09-03 17:04:27,978 WARNING HTTP server started on 127.0.0.1:51673
      2014-09-03 17:04:27,978 WARNING HTTPS server started on 127.0.0.1:35270
    Returns:
      a dict with ports available in log_lines. For example,
      {} # no ports found
      {'http': 1234, 'https': 2345, 'dns': 3456}
    """
    ports = {}
    port_re = re.compile(
        r'.*?(?P<protocol>HTTP|HTTPS|DNS)'
        r' server started on '
        r'(?P<host>[^:]*):'
        r'(?P<port>\d+)')
    for line in log_lines:
      m = port_re.match(line.strip())
      if m:
        protocol = m.group('protocol').lower()
        ports[protocol] = int(m.group('port'))
    return ports
  def StartServer(self):
    """Start Web Page Replay and verify that it started.
    Returns:
      (HTTP_PORT, HTTPS_PORT, DNS_PORT) # DNS_PORT is None if unused.
    Raises:
      ReplayNotStartedError: if Replay start-up fails.
    """
    is_posix = sys.platform.startswith('linux') or sys.platform == 'darwin'
    logging.debug('Starting Web-Page-Replay: %s', self._cmd_line)
    self._CreateTempLogFilePath()
    with self._OpenLogFile() as log_fh:
      # preexec_fn restores default SIGINT handling in the child on posix
      # (see _ResetInterruptHandler) so the fallback SIGINT stop works.
      self.replay_process = subprocess.Popen(
          self._cmd_line, stdout=log_fh, stderr=subprocess.STDOUT,
          preexec_fn=(_ResetInterruptHandler if is_posix else None))
    try:
      util.WaitFor(self._IsStarted, 30)
      atexit.register(self.StopServer)
      return (
          self._started_ports['http'],
          self._started_ports['https'],
          self._started_ports.get('dns'), # None if unused
          )
    except exceptions.TimeoutException:
      raise ReplayNotStartedError(
          'Web Page Replay failed to start. Log output:\n%s' %
          ''.join(self._LogLines()))
  def StopServer(self):
    """Stop Web Page Replay."""
    if self._IsStarted():
      try:
        self._StopReplayProcess()
      finally:
        # TODO(rnephew): Upload logs to google storage. crbug.com/525787
        self._CleanUpTempLogFilePath()
    else:
      logging.warning('Attempting to stop WPR server that is not running.')
  def _StopReplayProcess(self):
    # Graceful stop: HTTP exit command first, then SIGINT, then terminate.
    if not self.replay_process:
      return
    logging.debug('Trying to stop Web-Page-Replay gracefully')
    try:
      if self._started_ports:
        self._UrlOpen('web-page-replay-command-exit').close()
    except IOError:
      # IOError is possible because the server might exit without response.
      pass
    try:
      util.WaitFor(lambda: self.replay_process.poll() is not None, 10)
    except exceptions.TimeoutException:
      try:
        # Use a SIGINT so that it can do graceful cleanup.
        self.replay_process.send_signal(signal.SIGINT)
      except: # pylint: disable=W0702
        # On Windows, we are left with no other option than terminate().
        is_primary_nameserver_changed_by_replay = (
            self._use_dns_server and self._replay_host == '127.0.0.1')
        if is_primary_nameserver_changed_by_replay:
          # Replay changes the DNS nameserver configuration so that DNS
          # requests are resolved by replay's own DNS server. It resolves
          # all DNS requests to it own IP address to it can server the
          # HTTP and HTTPS requests.
          # If the replay host is not '127.0.0.1', then replay skips the
          # nameserver change because it assumes a different mechanism
          # will be used to route DNS requests to replay's DNS server.
          logging.warning(
              'Unable to stop Web-Page-Replay gracefully.\n'
              'Replay changed the DNS nameserver configuration to make replay '
              'the primary nameserver. That might not be restored!')
        try:
          self.replay_process.terminate()
        except: # pylint: disable=W0702
          pass
    self.replay_process.wait()
  def _CreateTempLogFilePath(self):
    assert self._temp_log_file_path is None
    handle, self._temp_log_file_path = tempfile.mkstemp()
    os.close(handle)
  def _CleanUpTempLogFilePath(self):
    assert self._temp_log_file_path
    if logging.getLogger('').isEnabledFor(logging.INFO):
      with open(self._temp_log_file_path, 'r') as f:
        wpr_log_content = '\n'.join([
            '************************** WPR LOG *****************************',
            f.read(),
            '************************** END OF WPR LOG **********************'])
      logging.debug(wpr_log_content)
    os.remove(self._temp_log_file_path)
    self._temp_log_file_path = None
  def __enter__(self):
    """Add support for with-statement."""
    self.StartServer()
    return self
  def __exit__(self, unused_exc_type, unused_exc_val, unused_exc_tb):
    """Add support for with-statement."""
    self.StopServer()
  def _UrlOpen(self, url_path, protocol='http'):
    """Open a Replay URL.
    For matching requests in the archive, Replay relies on the "Host:" header.
    For Replay command URLs, the "Host:" header is not needed.
    Args:
      url_path: WPR server request path.
      protocol: 'http' or 'https'
    Returns:
      a file-like object from urllib.urlopen
    """
    url = '%s://%s:%s/%s' % (
        protocol, self._replay_host, self._started_ports[protocol], url_path)
    return urllib.urlopen(url, proxies={})
def _ResetInterruptHandler():
"""Reset the interrupt handler back to the default.
The replay process is stopped gracefully by making an HTTP request
('web-page-replay-command-exit'). The graceful exit is important for
restoring the DNS configuration. If the HTTP request fails, the fallback
is to send SIGINT to the process.
On posix system, running this function before starting replay fixes a
bug that shows up when Telemetry is run as a background command from a
script. https://crbug.com/254572.
Background: Signal masks on Linux are inherited from parent
processes. If anything invoking us accidentally masks SIGINT
(e.g. by putting a process in the background from a shell script),
sending a SIGINT to the child will fail to terminate it.
"""
signal.signal(signal.SIGINT, signal.SIG_DFL)
|
|
# Django settings for django-photographer - deployment with Heroku.
import os
import sys
import urlparse
import django.conf.global_settings as DEFAULT_SETTINGS
import dj_database_url
# Core deployment flags and site identity come from environment variables
# (Heroku-style 12-factor configuration).
DEBUG = bool(os.environ.get('DJANGO_DEBUG', ''))
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    (os.environ.get('ADMIN_NAME', ''), os.environ.get('ADMIN_EMAIL', '')),
)
MANAGERS = ADMINS
RB_SITE_TITLE = os.environ.get('RB_SITE_TITLE', '')
RB_THEME = os.environ.get('RB_THEME', 'plain')
RB_SITE_URL = os.environ.get('RB_SITE_URL', '/')
RB_BLOG_RSS_URL = os.environ.get('RB_BLOG_RSS_URL', '/blog/feed/')
BLOG_TITLE = os.environ.get('BLOG_TITLE', RB_SITE_TITLE)
BLOG_DESCRIPTION = os.environ.get('BLOG_DESCRIPTION', 'The blog of '+RB_SITE_TITLE)
RB_FACEBOOK = os.environ.get('RB_FACEBOOK', '')
RB_TWITTER_HANDLE = os.environ.get('RB_TWITTER_HANDLE', '')
# Template names:
RB_TEMPLATE_BASE = os.environ.get('RB_TEMPLATE_BASE', 'base.html')
RB_TEMPLATE_HEAD = os.environ.get('RB_TEMPLATE_HEAD', '_head.html')
RB_TEMPLATE_HEADER = os.environ.get('RB_TEMPLATE_HEADER', '_header.html')
RB_TEMPLATE_NAV = os.environ.get('RB_TEMPLATE_NAV', '_nav.html')
RB_TEMPLATE_FOOTER = os.environ.get('RB_TEMPLATE_FOOTER', '_footer.html')
DATABASES = DEFAULT_SETTINGS.DATABASES
try:
    # Load settings from the environment variable DATABASE_URL.
    DATABASES['default'] = dj_database_url.config()
except:
    # NOTE(review): bare except is deliberate best-effort here (Python 2
    # file); failure falls back to the Django default DATABASES.
    print 'Unexpected error:', sys.exc_info()
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
TIME_ZONE = os.environ.get('TIME_ZONE', 'America/Los_Angeles')
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = os.environ.get('LANGUAGE_CODE', 'en-us')
SITE_ID = int(os.environ.get('SITE_ID', 1))
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Additional locations of static files
STATICFILES_DIRS = (
    os.path.abspath(os.path.join(os.path.dirname(__file__), 'static')),
)
# Static and media files are served from S3 by default (s3_folder_storage);
# the block further below falls back to local storage when S3 env vars are
# not configured.
DEFAULT_FILE_STORAGE = 's3_folder_storage.s3.DefaultStorage'
DEFAULT_S3_PATH = 'media'
STATICFILES_STORAGE = 's3_folder_storage.s3.StaticStorage'
STATIC_S3_PATH = 'static'
AWS_QUERYSTRING_AUTH = False # Don't include auth in every url
AWS_S3_SECURE_URLS = False
AWS_STORAGE_BUCKET_NAME = os.environ.get('S3_BUCKET', '')
AWS_ACCESS_KEY_ID = os.environ.get('S3_KEY', '')
AWS_SECRET_ACCESS_KEY = os.environ.get('S3_SECRET', '')
MEDIA_ROOT = '/%s/' % DEFAULT_S3_PATH
MEDIA_URL = '//s3.amazonaws.com/%s/media/' % AWS_STORAGE_BUCKET_NAME
STATIC_ROOT = "/%s/" % STATIC_S3_PATH
STATIC_URL = '//s3.amazonaws.com/%s/static/' % AWS_STORAGE_BUCKET_NAME
ADMIN_MEDIA_PREFIX = STATIC_URL+'admin/' # Deprecated but required for django-admin-tools (temporarily).
if not (AWS_STORAGE_BUCKET_NAME and AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY):
    # For debug when we don't want to have to use S3.
    DEFAULT_FILE_STORAGE = DEFAULT_SETTINGS.DEFAULT_FILE_STORAGE
    STATICFILES_STORAGE = DEFAULT_SETTINGS.STATICFILES_STORAGE
    MEDIA_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              'mediafiles')
    MEDIA_URL = '/media/'
    STATIC_URL = '/static/'
    STATIC_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                               'sitestatic')
ADMIN_TOOLS_THEMING_CSS = 'themes/%s/styles/admin.css' % RB_THEME
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'secretkey!JohnCleese!')
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'photographer.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'photographer.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.abspath(os.path.join(os.path.dirname(__file__), 'static',
                                 'themes', RB_THEME, 'templates')),
    os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates'))
)
TEMPLATE_CONTEXT_PROCESSORS = DEFAULT_SETTINGS.TEMPLATE_CONTEXT_PROCESSORS + (
    'django.core.context_processors.request',
    'photographer.context_processors.defaults',
    'feincms.context_processors.add_page_if_missing',
)
INSTALLED_APPS = (
    'admin_tools',
    'admin_tools.theming',
    'admin_tools.menu',
    'admin_tools.dashboard',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    's3_folder_storage',
    'gunicorn',
    'haystack',
    'mptt',
    'feincms',
    'feincms.module.page',
    'feincms.module.medialibrary',
    'markupmirror',
    'markupmirror.feincms',
    'form_designer',
    'elephantblog',
    'photographer' # To register the feincms content modules, etc.
)
MARKUPMIRROR_DEFAULT_MARKUP_TYPE = 'markdown'
def elephantblog_entry_url_app(self):
    """Reverse the blog-entry detail URL for an elephantblog Entry."""
    from feincms.content.application.models import app_reverse
    published = self.published_on
    url_kwargs = {
        'year': published.strftime('%Y'),
        'month': published.strftime('%m'),
        'day': published.strftime('%d'),
        'slug': self.slug,
    }
    return app_reverse('elephantblog_entry_detail', 'elephantblog.urls',
                       kwargs=url_kwargs)
def elephantblog_categorytranslation_url_app(self):
    """Reverse the category detail URL for an elephantblog category."""
    from feincms.content.application.models import app_reverse
    return app_reverse('elephantblog_category_detail', 'elephantblog.urls',
                       kwargs={'slug': self.slug})
# Route elephantblog model URLs through the feincms application content.
ABSOLUTE_URL_OVERRIDES = {
    'elephantblog.entry': elephantblog_entry_url_app,
    'elephantblog.categorytranslation': elephantblog_categorytranslation_url_app,
}
BLOG_PAGINATE_BY = 10
# Search with Haystack and ElasticSearch.
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
        'URL': os.environ.get('ELASTICSEARCH_URL', ''),
        'INDEX_NAME': 'haystack',
    },
}
# Check for SendGrid for email delivery first:
if os.environ.get('SENDGRID_USERNAME') and os.environ.get('SENDGRID_PASSWORD'):
    EMAIL_HOST = 'smtp.sendgrid.net'
    EMAIL_HOST_USER = os.environ['SENDGRID_USERNAME']
    EMAIL_HOST_PASSWORD = os.environ['SENDGRID_PASSWORD']
    EMAIL_PORT = 587
    EMAIL_USE_TLS = True
else:
    # Standard mail configuration: configure your env vars as required:
    # NOTE(review): EMAIL_HOST reads EMAIL_HOST_USER here -- looks like a
    # copy/paste slip; confirm the intended variable is EMAIL_HOST.
    EMAIL_HOST = os.environ.get('EMAIL_HOST_USER', DEFAULT_SETTINGS.EMAIL_HOST)
    EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
    EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
    EMAIL_PORT = int(os.environ.get('EMAIL_PORT', DEFAULT_SETTINGS.EMAIL_PORT))
    EMAIL_USE_TLS = bool(os.environ.get('EMAIL_USE_TLS',
                                        DEFAULT_SETTINGS.EMAIL_USE_TLS))
DEFAULT_FROM_EMAIL = os.environ.get('ADMIN_EMAIL',
                                    DEFAULT_SETTINGS.DEFAULT_FROM_EMAIL)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
|
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import re
import socket
from errno import ENOTCONN
from gunicorn.http.unreader import SocketUnreader
from gunicorn.http.body import ChunkedReader, LengthReader, EOFReader, Body
from gunicorn.http.errors import (InvalidHeader, InvalidHeaderName, NoMoreData,
InvalidRequestLine, InvalidRequestMethod, InvalidHTTPVersion,
LimitRequestLine, LimitRequestHeaders)
from gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest
from gunicorn.six import BytesIO, urlsplit, bytes_to_str
# Hard upper bounds used to clamp the user-configurable limits.
MAX_REQUEST_LINE = 8190
MAX_HEADERS = 32768
MAX_HEADERFIELD_SIZE = 8190
# Characters that are not allowed in a header *name*.
HEADER_RE = re.compile("[\x00-\x1F\x7F()<>@,;:\[\]={} \t\\\\\"]")
# NOTE(review): '$-_' inside this class is a character *range*
# (0x24-0x5F), so it matches more than the literal $, -, _ characters --
# confirm this is intentional before tightening.
METH_RE = re.compile(r"[A-Z0-9$-_.]{3,20}")
VERSION_RE = re.compile(r"HTTP/(\d+).(\d+)")
class Message(object):
    """Base class for a parsed HTTP message.

    On construction it parses the raw message from ``unreader`` (via the
    subclass's :meth:`parse`), pushes any over-read bytes back onto the
    unreader, and installs the appropriate body reader.
    """

    def __init__(self, cfg, unreader):
        self.cfg = cfg
        self.unreader = unreader
        self.version = None
        self.headers = []
        self.trailers = []
        self.body = None
        # set headers limits -- clamp configured values into sane ranges
        self.limit_request_fields = cfg.limit_request_fields
        if (self.limit_request_fields <= 0
                or self.limit_request_fields > MAX_HEADERS):
            self.limit_request_fields = MAX_HEADERS
        self.limit_request_field_size = cfg.limit_request_field_size
        if (self.limit_request_field_size < 0
                or self.limit_request_field_size > MAX_HEADERFIELD_SIZE):
            self.limit_request_field_size = MAX_HEADERFIELD_SIZE
        # set max header buffer size: worst case is every field at the
        # size limit plus its CRLF, plus the final CRLFCRLF terminator.
        max_header_field_size = self.limit_request_field_size or MAX_HEADERFIELD_SIZE
        self.max_buffer_headers = self.limit_request_fields * \
            (max_header_field_size + 2) + 4
        unused = self.parse(self.unreader)
        self.unreader.unread(unused)
        self.set_body_reader()

    def parse(self, unreader):
        """Parse one message from *unreader*; return any leftover bytes.

        Fix: the signature now matches the call in ``__init__``
        (``self.parse(self.unreader)``) and the subclass overrides -- the
        previous zero-argument form could only ever fail with TypeError
        instead of the intended NotImplementedError.
        """
        raise NotImplementedError()

    def parse_headers(self, data):
        """Parse a raw header block into ``[(NAME, value), ...]`` tuples.

        Header names are upper-cased; continuation lines are folded into
        the preceding value. Raises InvalidHeader / InvalidHeaderName /
        LimitRequestHeaders on malformed or over-long input.
        """
        headers = []
        # Split lines on \r\n keeping the \r\n on each line
        lines = [bytes_to_str(line) + "\r\n" for line in data.split(b"\r\n")]
        # Parse headers into key/value pairs paying attention
        # to continuation lines.
        while len(lines):
            if len(headers) >= self.limit_request_fields:
                raise LimitRequestHeaders("limit request headers fields")
            # Parse initial header name : value pair.
            curr = lines.pop(0)
            header_length = len(curr)
            if curr.find(":") < 0:
                raise InvalidHeader(curr.strip())
            name, value = curr.split(":", 1)
            name = name.rstrip(" \t").upper()
            if HEADER_RE.search(name):
                raise InvalidHeaderName(name)
            name, value = name.strip(), [value.lstrip()]
            # Consume value continuation lines
            while len(lines) and lines[0].startswith((" ", "\t")):
                curr = lines.pop(0)
                header_length += len(curr)
                if header_length > self.limit_request_field_size > 0:
                    raise LimitRequestHeaders("limit request headers "
                                              + "fields size")
                value.append(curr)
            value = ''.join(value).rstrip()
            if header_length > self.limit_request_field_size > 0:
                raise LimitRequestHeaders("limit request headers fields size")
            headers.append((name, value))
        return headers

    def set_body_reader(self):
        """Install ``self.body`` based on the framing headers.

        Precedence: chunked Transfer-Encoding, then Content-Length,
        then read-until-EOF.
        """
        chunked = False
        content_length = None
        for (name, value) in self.headers:
            if name == "CONTENT-LENGTH":
                content_length = value
            elif name == "TRANSFER-ENCODING":
                chunked = value.lower() == "chunked"
            elif name == "SEC-WEBSOCKET-KEY1":
                # old hixie-76 websocket handshake carries an 8-byte body
                content_length = 8
        if chunked:
            self.body = Body(ChunkedReader(self, self.unreader))
        elif content_length is not None:
            try:
                content_length = int(content_length)
            except ValueError:
                raise InvalidHeader("CONTENT-LENGTH", req=self)
            if content_length < 0:
                raise InvalidHeader("CONTENT-LENGTH", req=self)
            self.body = Body(LengthReader(self.unreader, content_length))
        else:
            self.body = Body(EOFReader(self.unreader))

    def should_close(self):
        """Return True when the connection must close after this message."""
        for (h, v) in self.headers:
            if h == "CONNECTION":
                v = v.lower().strip()
                if v == "close":
                    return True
                elif v == "keep-alive":
                    return False
                break
        # No Connection header: keep-alive is the default from HTTP/1.1 on.
        return self.version <= (1, 0)
class Request(Message):
def __init__(self, cfg, unreader, req_number=1):
self.method = None
self.uri = None
self.path = None
self.query = None
self.fragment = None
# get max request line size
self.limit_request_line = cfg.limit_request_line
if (self.limit_request_line < 0
or self.limit_request_line >= MAX_REQUEST_LINE):
self.limit_request_line = MAX_REQUEST_LINE
self.req_number = req_number
self.proxy_protocol_info = None
super(Request, self).__init__(cfg, unreader)
def get_data(self, unreader, buf, stop=False):
data = unreader.read()
if not data:
if stop:
raise StopIteration()
raise NoMoreData(buf.getvalue())
buf.write(data)
def parse(self, unreader):
buf = BytesIO()
self.get_data(unreader, buf, stop=True)
# get request line
line, rbuf = self.read_line(unreader, buf, self.limit_request_line)
# proxy protocol
if self.proxy_protocol(bytes_to_str(line)):
# get next request line
buf = BytesIO()
buf.write(rbuf)
line, rbuf = self.read_line(unreader, buf, self.limit_request_line)
self.parse_request_line(bytes_to_str(line))
buf = BytesIO()
buf.write(rbuf)
# Headers
data = buf.getvalue()
idx = data.find(b"\r\n\r\n")
done = data[:2] == b"\r\n"
while True:
idx = data.find(b"\r\n\r\n")
done = data[:2] == b"\r\n"
if idx < 0 and not done:
self.get_data(unreader, buf)
data = buf.getvalue()
if len(data) > self.max_buffer_headers:
raise LimitRequestHeaders("max buffer headers")
else:
break
if done:
self.unreader.unread(data[2:])
return b""
self.headers = self.parse_headers(data[:idx])
ret = data[idx + 4:]
buf = BytesIO()
return ret
def read_line(self, unreader, buf, limit=0):
data = buf.getvalue()
while True:
idx = data.find(b"\r\n")
if idx >= 0:
# check if the request line is too large
if idx > limit > 0:
raise LimitRequestLine(idx, limit)
break
elif len(data) - 2 > limit > 0:
raise LimitRequestLine(len(data), limit)
self.get_data(unreader, buf)
data = buf.getvalue()
return (data[:idx], # request line,
data[idx + 2:]) # residue in the buffer, skip \r\n
def proxy_protocol(self, line):
"""\
Detect, check and parse proxy protocol.
:raises: ForbiddenProxyRequest, InvalidProxyLine.
:return: True for proxy protocol line else False
"""
if not self.cfg.proxy_protocol:
return False
if self.req_number != 1:
return False
if not line.startswith("PROXY"):
return False
self.proxy_protocol_access_check()
self.parse_proxy_protocol(line)
return True
def proxy_protocol_access_check(self):
# check in allow list
if isinstance(self.unreader, SocketUnreader):
try:
remote_host = self.unreader.sock.getpeername()[0]
except socket.error as e:
if e.args[0] == ENOTCONN:
raise ForbiddenProxyRequest("UNKNOW")
raise
if ("*" not in self.cfg.proxy_allow_ips and
remote_host not in self.cfg.proxy_allow_ips):
raise ForbiddenProxyRequest(remote_host)
def parse_proxy_protocol(self, line):
bits = line.split()
if len(bits) != 6:
raise InvalidProxyLine(line)
# Extract data
proto = bits[1]
s_addr = bits[2]
d_addr = bits[3]
# Validation
if proto not in ["TCP4", "TCP6"]:
raise InvalidProxyLine("protocol '%s' not supported" % proto)
if proto == "TCP4":
try:
socket.inet_pton(socket.AF_INET, s_addr)
socket.inet_pton(socket.AF_INET, d_addr)
except socket.error:
raise InvalidProxyLine(line)
elif proto == "TCP6":
try:
socket.inet_pton(socket.AF_INET6, s_addr)
socket.inet_pton(socket.AF_INET6, d_addr)
except socket.error:
raise InvalidProxyLine(line)
try:
s_port = int(bits[4])
d_port = int(bits[5])
except ValueError:
raise InvalidProxyLine("invalid port %s" % line)
if not ((0 <= s_port <= 65535) and (0 <= d_port <= 65535)):
raise InvalidProxyLine("invalid port %s" % line)
# Set data
self.proxy_protocol_info = {
"proxy_protocol": proto,
"client_addr": s_addr,
"client_port": s_port,
"proxy_addr": d_addr,
"proxy_port": d_port
}
def parse_request_line(self, line):
bits = line.split(None, 2)
if len(bits) != 3:
raise InvalidRequestLine(line)
# Method
if not METH_RE.match(bits[0]):
raise InvalidRequestMethod(bits[0])
self.method = bits[0].upper()
# URI
# When the path starts with //, urlsplit considers it as a
# relative uri while the RDF says it shouldnt
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
# considers it as an absolute url.
# fix issue #297
if bits[1].startswith("//"):
self.uri = bits[1][1:]
else:
self.uri = bits[1]
parts = urlsplit(self.uri)
self.path = parts.path or ""
self.query = parts.query or ""
self.fragment = parts.fragment or ""
# Version
match = VERSION_RE.match(bits[2])
if match is None:
raise InvalidHTTPVersion(bits[2])
self.version = (int(match.group(1)), int(match.group(2)))
    def set_body_reader(self):
        # A request without an explicit body length must not read until
        # EOF, so replace an EOFReader with a zero-length body.
        super(Request, self).set_body_reader()
        if isinstance(self.body.reader, EOFReader):
            self.body = Body(LengthReader(self.unreader, 0))
|
|
# -*- coding: UTF-8 -*-
# YaBlog
# (c) Regis FLORET
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Regis FLORET BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -*- coding: utf-8 -*-
import re
import json
import datetime
from django.template import RequestContext, Template
from django.template.loader import render_to_string
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest, HttpResponsePermanentRedirect
from django.utils.html import strip_tags
from django.core.validators import email_re
from django.shortcuts import render_to_response
from django.db.models import Q
from django.utils.translation import ugettext as _
from django.contrib.sites.models import Site
from django.contrib import messages
from django.conf import settings
from django.core.urlresolvers import reverse
from django.views.decorators.cache import cache_page
from yablog.notification import notification_send, ajax_log
from .models import Post, Comment, Preference, Tag, Categorie, Page
from yablog.appview.decorators import view_count
from yablog.capatcha.templatetags.capatcha_tags import Capatcha
def robot(request):
    """Serve robots.txt, pointing crawlers at the current site's sitemap."""
    domain = Site.objects.get_current().domain
    body = "User-agent: *\nAllow: *\nSitemap: http://%s/sitemap.xml\n" % domain
    return HttpResponse(body, mimetype="text/plain")
def maintenance(request):
    """ When you need to work quietly on your blog. Uncomment urls.py line """
    # Static page: no context data required.
    return render_to_response("maintenance.html")
@view_count
#@cache_page(120)
def index(request):
    """ Blog home page. """
    return render_to_response(settings.BLOG_CONFIG.Templates.index, RequestContext(request))
@view_count
#@cache_page(120)
def article_short(request, article_short):
    """Display the article whose shortcut is *article_short* on the default page."""
    found = Post.objects.get(Shortcut=article_short, Page__Default=True)
    ctx = RequestContext(request)
    ctx['blog_post'] = found
    return render_to_response(settings.BLOG_CONFIG.Templates.post, ctx)
@view_count
#@cache_page(120)
def articles(request):
    """ Show all articles in the blog """
    # NOTE(review): no posts are added to the context here; presumably the
    # template or a context processor supplies the list -- confirm.
    context = RequestContext(request)
    return render_to_response(settings.BLOG_CONFIG.Templates.all, context)
def dynamic_comment(request):
    """Add a comment via an AJAX POST.

    Expects ``article_id``, ``body`` and — for anonymous users — ``name``
    and ``email`` in the POST data.  On success returns JSON containing the
    rendered comment and a fresh captcha path; any failure yields a 400.
    """
    try:
        if request.method == 'POST' and request.is_ajax():
            post = Post.objects.get(pk=request.POST['article_id'])
            if request.user.is_authenticated():
                name = request.user.username
                email = request.user.email
            else:
                name = strip_tags(request.POST['name'].strip())
                email = strip_tags(request.POST['email'].strip())
                # Remember the poster for prefilled future comments.
                request.session['email'] = email
                request.session['nom'] = name
            body = request.POST['body']
            comment = Comment(
                Email = email,
                UserName = name,
                Comment = body,
                IPAddress = request.META['REMOTE_ADDR'],
                post = post,
                Show=True
            )
            comment.save()
            try:
                notification_send(settings.BLOG_CONFIG.EmailTemplates.newcomment)
            except Exception:
                # Best effort: a notification failure must not lose the comment.
                pass
            # Create the new capatcha
            captcha = Capatcha(request)
            # BUG FIX: was ``request.esssion`` (typo) -- the captcha was
            # never stored in the session, so later validation could not work.
            request.session['capatcha'] = captcha
            data = {
                'comment' : render_to_string(settings.BLOG_CONFIG.Templates.comment, RequestContext(request, { 'blog_post' : post, 'comment': comment})),
                'captcha': captcha.path
            }
            # BUG FIX: keyword was misspelled ``ensure_asciii``, raising
            # TypeError and turning every successful post into a 400.
            return HttpResponse(json.dumps(data, ensure_ascii=False), mimetype="application/json")
    except Exception as e:
        ajax_log("blog.views.dynamic_comment: %s " % e)
    return HttpResponseBadRequest('')
def comment(request):
    """Add a comment from a regular (non-AJAX) POST.

    Validates name/email/body/captcha for anonymous users, saves the
    comment (publication depends on blog preferences) and redirects back
    to the article; on validation failure the article page is re-rendered
    with per-field error messages.
    """
    try:
        if request.method == 'POST':
            context = RequestContext(request)
            post = request.POST
            article_id = post['blog']
            context['blog_post'] = Post.objects.get(id=article_id)
            error = False
            if request.user.is_authenticated():
                # Authenticated posters are trusted: no field validation.
                name = request.user.username
                email = request.user.email
                body = post['body']
            else:
                name = strip_tags(post['name'].strip())
                email = strip_tags(post['email'].strip())
                body = strip_tags(post['body'].strip())
                error = False
                if not len(name):
                    context['name_err'] = _("Thanks to leave name")
                    error = True
                if not len(email):
                    context['email_err'] = _("Your email address is empty")
                    error = True
                else:
                    if not email_re.match(email):
                        context['email_err'] = _('Your email address is not a valid one')
                        error = True
                if not len(body):
                    context['body_err'] = _("The message body is empty")
                    error = True
                if not request.session['capatcha'].isValid(post['capatcha']):
                    error = True
                    context['capat_err'] = _("Wrong secure code")
            if error:
                # Re-render the article page with the submitted values kept.
                context['name'] = name
                context['email'] = email
                context['body'] = body
                return render_to_response(settings.BLOG_CONFIG.Templates.post, context)
            else:
                request.session['email'] = email
                request.session['nom'] = name
                Comment.objects.create(
                    Email = email,
                    UserName = name,
                    Comment = body.replace("\n", "<br/>"),
                    IPAddress = request.META['REMOTE_ADDR'],
                    post = context['blog_post'],
                    CreationDateTime = datetime.datetime.now(),
                    Show = Preference.objects.immediate_publishing()
                )
                try:
                    notification_send(settings.BLOG_CONFIG.EmailTemplates.newcomment)
                except Exception:
                    # Best effort: notification failure must not lose the comment.
                    pass
                messages.add_message(request, messages.INFO, "Votre commentaire a été posté avec succés")
                return HttpResponseRedirect(reverse('show_article', args=(context['blog_post'].Shortcut,)))
    except Exception as e:
        # BUG FIX: was the Python-2-only ``except Exception, e`` form; the
        # rest of this module already uses the ``as`` form (dynamic_comment).
        ajax_log("Erreur in comment: %s " % e)
    return HttpResponseRedirect('/')
@view_count
#@cache_page(120)
def tag(request, tag_id):
    """List every published article carrying the tag with primary key *tag_id*.

    TODO: address tags by sanitized name instead of primary key.
    """
    tag_label = Tag.objects.get(pk=tag_id).Nom
    ctx = RequestContext(request)
    ctx.update({
        'blog_posts': Post.objects.filter(Tags__pk=tag_id, Publish=True),
        'blog_tag': tag_label,
    })
    return render_to_response(settings.BLOG_CONFIG.Templates.tags, ctx)
@view_count
#@cache_page(120)
def tag_name(request, tag_name):
    """List every published article carrying the tag named *tag_name*."""
    the_tag = Tag.objects.get(Nom=tag_name)
    ctx = RequestContext(request)
    ctx['blog_posts'] = Post.objects.filter(Tags=the_tag, Publish=True)
    ctx['blog_tag'] = the_tag
    return render_to_response(settings.BLOG_CONFIG.Templates.tags, ctx)
@view_count
#@cache_page(120)
def categories(request, categ_id):
    """ return all posts within a category @todo: replace categ_id with categ name"""
    # Legacy id-based URL: permanently redirect to the canonical name-based URL.
    return HttpResponsePermanentRedirect(Categorie.objects.get(id=categ_id).get_absolute_url())
@view_count
#@cache_page(120)
def categories_name(request, categ_name):
    """Render every published post belonging to the category *categ_name*."""
    posts = list(Post.objects.filter(Categorie__Nom=categ_name, Publish=True))
    label = Categorie.objects.get(Nom=categ_name).Nom
    ctx = RequestContext(request, {
        'blog_posts': posts,
        'blog_category': label,
    })
    return render_to_response(settings.BLOG_CONFIG.Templates.categories, ctx)
@view_count
#@cache_page(120)
def show_by_date(request, year, month=None):
    """Render published posts from *year*, narrowed to *month* when given.

    Shared by the by-year and by-month URL patterns.
    """
    filters = Q(Publish=True) & Q(CreationDateTime__year=year)
    if month is not None:
        filters &= Q(CreationDateTime__month=month)
    ctx = RequestContext(request, {
        'blog_post': Post.objects.filter(filters)
    })
    return render_to_response(settings.BLOG_CONFIG.Templates.year, ctx)
def search(request):
    """Very small search engine over post content.

    Splits the query on spaces and matches any keyword case-insensitively
    against published posts; queries under 3 characters are rejected.
    """
    # BUG FIX: request.POST['q'] raised KeyError (a 500) when the view was
    # reached without a ``q`` field (e.g. a plain GET); default to ''.
    search_str = request.POST.get('q', '').strip()
    if len(search_str) > 2:
        keywords = search_str.split(' ')
        query = Q()
        for key in keywords:
            query |= Q(Content__icontains=key)
        p = Post.objects.filter(query & Q(Publish=True))
    else:
        p = []
        keywords = [_("No keyword set") ]
    context = RequestContext(request, { 'keywords' : keywords, 'post_found': p })
    return render_to_response(settings.BLOG_CONFIG.Templates.search, context)
@view_count
#@cache_page(120)
def page(request, shortcut):
    # Render a static page: either its attached post, or the list of posts
    # that live on it.  The default page is redirected to the home URL so
    # there is a single canonical address for it.
    page = Page.objects.get(Shortcut=shortcut)
    if page.Default == True:
        return HttpResponseRedirect(reverse('home'))
    context = RequestContext(request, { 'page' : page })
    if page.Post is not None:
        context['blog_post'] = page.Post
        # Posts may embed escaped template syntax ({{% %}} / {{{ }}});
        # unescape it and render the content as a Django template.
        if re.search(r'{{%.*?%}}|{{{.*?}}}', context['blog_post'].Content) is not None:
            context['blog_post'].Content = context['blog_post'].Content.replace('{{%', '{%').replace('%}}', '%}').replace('{{{', '{{').replace('}}}', '}}')
            context['blog_post'].Content = Template(context['blog_post'].Content).render(RequestContext(request))
        return render_to_response(settings.BLOG_CONFIG.Templates.post, context)
    context['all_posts'] = list(Post.objects.filter(Page=page, Publish=True))
    return render_to_response(settings.BLOG_CONFIG.Templates.pages, context)
@view_count
#@cache_page(120)
def page_article(request, shortcut, article_short):
    """Display one article that lives on the (non-default) page *shortcut*."""
    found = Post.objects.get(Shortcut=article_short, Page__Shortcut=shortcut)
    ctx = RequestContext(request)
    ctx['blog_post'] = found
    return render_to_response(settings.BLOG_CONFIG.Templates.post, ctx)
|
|
# (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
from __future__ import absolute_import
import os
import boto3
import pytest
from moto import mock_s3
from instana.singletons import tracer
from ...helpers import get_first_span_by_filter
# Test fixture paths resolved relative to this module so the suite works
# regardless of the current working directory.
pwd = os.path.dirname(os.path.abspath(__file__))
upload_filename = os.path.abspath(pwd + '/../../data/boto3/test_upload_file.jpg')
download_target_filename = os.path.abspath(pwd + '/../../data/boto3/download_target_file.asdf')
def setup_method():
    """ Clear all spans before a test run """
    tracer.recorder.clear_spans()
    # BUG FIX: the download target only exists after a download test has
    # run; removing it unconditionally raised OSError/FileNotFoundError on
    # the first test of a session.
    if os.path.exists(download_target_filename):
        os.remove(download_target_filename)
@pytest.fixture(scope='function')
def aws_credentials():
    """Mocked AWS Credentials for moto."""
    # Dummy values keep boto3 from picking up real credentials from the
    # environment or ~/.aws while moto intercepts the API calls.
    os.environ['AWS_ACCESS_KEY_ID'] = 'testing'
    os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'
    os.environ['AWS_SECURITY_TOKEN'] = 'testing'
    os.environ['AWS_SESSION_TOKEN'] = 'testing'
@pytest.fixture(scope='function')
def s3(aws_credentials):
    """Yield a boto3 S3 client wired to an in-process moto mock."""
    with mock_s3():
        yield boto3.client('s3', region_name='us-east-1')
def test_vanilla_create_bucket(s3):
    """Smoke test: bucket creation works under moto, no tracing involved."""
    # s3 is a fixture defined above that yields a boto3 s3 client.
    # Feel free to instantiate another boto3 S3 client -- Keep note of the region though.
    s3.create_bucket(Bucket="aws_bucket_name")
    result = s3.list_buckets()
    assert len(result['Buckets']) == 1
    assert result['Buckets'][0]['Name'] == 'aws_bucket_name'
def test_s3_create_bucket(s3):
    """CreateBucket inside an active span yields a correctly linked boto3 span."""
    result = None
    with tracer.start_active_span('test'):
        result = s3.create_bucket(Bucket="aws_bucket_name")
    result = s3.list_buckets()
    assert len(result['Buckets']) == 1
    assert result['Buckets'][0]['Name'] == 'aws_bucket_name'
    spans = tracer.recorder.queued_spans()
    assert len(spans) == 2
    filter = lambda span: span.n == "sdk"
    test_span = get_first_span_by_filter(spans, filter)
    assert (test_span)
    filter = lambda span: span.n == "boto3"
    boto_span = get_first_span_by_filter(spans, filter)
    assert (boto_span)
    # Same trace, boto3 span parented to the test span, no errors recorded.
    assert (boto_span.t == test_span.t)
    assert (boto_span.p == test_span.s)
    assert (test_span.ec is None)
    assert (boto_span.ec is None)
    assert boto_span.data['boto3']['op'] == 'CreateBucket'
    assert boto_span.data['boto3']['ep'] == 'https://s3.amazonaws.com'
    assert boto_span.data['boto3']['reg'] == 'us-east-1'
    assert boto_span.data['boto3']['payload'] == {'Bucket': 'aws_bucket_name'}
    assert boto_span.data['http']['status'] == 200
    assert boto_span.data['http']['method'] == 'POST'
    assert boto_span.data['http']['url'] == 'https://s3.amazonaws.com:443/CreateBucket'
def test_s3_list_buckets(s3):
    """ListBuckets inside an active span yields a correctly linked boto3 span."""
    result = None
    with tracer.start_active_span('test'):
        result = s3.list_buckets()
    result = s3.list_buckets()
    assert len(result['Buckets']) == 0
    # BUG FIX: was ``is 200`` -- identity comparison against an int literal
    # relies on CPython small-int caching and is not guaranteed; use ``==``.
    assert result['ResponseMetadata']['HTTPStatusCode'] == 200
    spans = tracer.recorder.queued_spans()
    assert len(spans) == 2
    filter = lambda span: span.n == "sdk"
    test_span = get_first_span_by_filter(spans, filter)
    assert (test_span)
    filter = lambda span: span.n == "boto3"
    boto_span = get_first_span_by_filter(spans, filter)
    assert (boto_span)
    # Same trace, boto3 span parented to the test span, no errors recorded.
    assert (boto_span.t == test_span.t)
    assert (boto_span.p == test_span.s)
    assert (test_span.ec is None)
    assert (boto_span.ec is None)
    assert boto_span.data['boto3']['op'] == 'ListBuckets'
    assert boto_span.data['boto3']['ep'] == 'https://s3.amazonaws.com'
    assert boto_span.data['boto3']['reg'] == 'us-east-1'
    assert boto_span.data['boto3']['payload'] == {}
    assert boto_span.data['http']['status'] == 200
    assert boto_span.data['http']['method'] == 'POST'
    assert boto_span.data['http']['url'] == 'https://s3.amazonaws.com:443/ListBuckets'
def test_s3_vanilla_upload_file(s3):
    """Smoke test: upload_file succeeds (returns None) without tracing."""
    object_name = 'aws_key_name'
    bucket_name = 'aws_bucket_name'
    s3.create_bucket(Bucket=bucket_name)
    result = s3.upload_file(upload_filename, bucket_name, object_name)
    assert result is None
def test_s3_upload_file(s3):
    """upload_file inside an active span yields a correctly linked boto3 span."""
    object_name = 'aws_key_name'
    bucket_name = 'aws_bucket_name'
    s3.create_bucket(Bucket=bucket_name)
    result = None
    with tracer.start_active_span('test'):
        s3.upload_file(upload_filename, bucket_name, object_name)
    spans = tracer.recorder.queued_spans()
    assert len(spans) == 2
    filter = lambda span: span.n == "sdk"
    test_span = get_first_span_by_filter(spans, filter)
    assert (test_span)
    filter = lambda span: span.n == "boto3"
    boto_span = get_first_span_by_filter(spans, filter)
    assert (boto_span)
    # Same trace, boto3 span parented to the test span, no errors recorded.
    assert (boto_span.t == test_span.t)
    assert (boto_span.p == test_span.s)
    assert (test_span.ec is None)
    assert (boto_span.ec is None)
    assert boto_span.data['boto3']['op'] == 'upload_file'
    assert boto_span.data['boto3']['ep'] == 'https://s3.amazonaws.com'
    assert boto_span.data['boto3']['reg'] == 'us-east-1'
    payload = {'Filename': upload_filename, 'Bucket': 'aws_bucket_name', 'Key': 'aws_key_name'}
    assert boto_span.data['boto3']['payload'] == payload
    assert boto_span.data['http']['method'] == 'POST'
    assert boto_span.data['http']['url'] == 'https://s3.amazonaws.com:443/upload_file'
def test_s3_upload_file_obj(s3):
    """upload_fileobj inside an active span yields a correctly linked boto3 span."""
    object_name = 'aws_key_name'
    bucket_name = 'aws_bucket_name'
    s3.create_bucket(Bucket=bucket_name)
    result = None
    with tracer.start_active_span('test'):
        with open(upload_filename, "rb") as fd:
            s3.upload_fileobj(fd, bucket_name, object_name)
    spans = tracer.recorder.queued_spans()
    assert len(spans) == 2
    filter = lambda span: span.n == "sdk"
    test_span = get_first_span_by_filter(spans, filter)
    assert (test_span)
    filter = lambda span: span.n == "boto3"
    boto_span = get_first_span_by_filter(spans, filter)
    assert (boto_span)
    # Same trace, boto3 span parented to the test span, no errors recorded.
    assert (boto_span.t == test_span.t)
    assert (boto_span.p == test_span.s)
    assert (test_span.ec is None)
    assert (boto_span.ec is None)
    assert (boto_span.data['boto3']['op'] == 'upload_fileobj')
    assert (boto_span.data['boto3']['ep'] == 'https://s3.amazonaws.com')
    assert (boto_span.data['boto3']['reg'] == 'us-east-1')
    # The file object itself is not captured in the payload, only names.
    payload = {'Bucket': 'aws_bucket_name', 'Key': 'aws_key_name'}
    assert boto_span.data['boto3']['payload'] == payload
    assert boto_span.data['http']['method'] == 'POST'
    assert boto_span.data['http']['url'] == 'https://s3.amazonaws.com:443/upload_fileobj'
def test_s3_download_file(s3):
    """download_file inside an active span yields a correctly linked boto3 span."""
    object_name = 'aws_key_name'
    bucket_name = 'aws_bucket_name'
    s3.create_bucket(Bucket=bucket_name)
    s3.upload_file(upload_filename, bucket_name, object_name)
    result = None
    with tracer.start_active_span('test'):
        s3.download_file(bucket_name, object_name, download_target_filename)
    spans = tracer.recorder.queued_spans()
    assert len(spans) == 2
    filter = lambda span: span.n == "sdk"
    test_span = get_first_span_by_filter(spans, filter)
    assert (test_span)
    filter = lambda span: span.n == "boto3"
    boto_span = get_first_span_by_filter(spans, filter)
    assert (boto_span)
    # Same trace, boto3 span parented to the test span, no errors recorded.
    assert (boto_span.t == test_span.t)
    assert (boto_span.p == test_span.s)
    assert (test_span.ec is None)
    assert (boto_span.ec is None)
    assert (boto_span.data['boto3']['op'] == 'download_file')
    assert (boto_span.data['boto3']['ep'] == 'https://s3.amazonaws.com')
    assert (boto_span.data['boto3']['reg'] == 'us-east-1')
    payload = {'Bucket': 'aws_bucket_name', 'Key': 'aws_key_name', 'Filename': '%s' % download_target_filename}
    assert boto_span.data['boto3']['payload'] == payload
    assert boto_span.data['http']['method'] == 'POST'
    assert boto_span.data['http']['url'] == 'https://s3.amazonaws.com:443/download_file'
def test_s3_download_file_obj(s3):
    """download_fileobj inside an active span yields a correctly linked boto3 span."""
    object_name = 'aws_key_name'
    bucket_name = 'aws_bucket_name'
    s3.create_bucket(Bucket=bucket_name)
    s3.upload_file(upload_filename, bucket_name, object_name)
    result = None
    with tracer.start_active_span('test'):
        with open(download_target_filename, "wb") as fd:
            s3.download_fileobj(bucket_name, object_name, fd)
    spans = tracer.recorder.queued_spans()
    assert len(spans) == 2
    filter = lambda span: span.n == "sdk"
    test_span = get_first_span_by_filter(spans, filter)
    assert (test_span)
    filter = lambda span: span.n == "boto3"
    boto_span = get_first_span_by_filter(spans, filter)
    assert (boto_span)
    # Same trace, boto3 span parented to the test span, no errors recorded.
    assert (boto_span.t == test_span.t)
    assert (boto_span.p == test_span.s)
    assert (test_span.ec is None)
    assert (boto_span.ec is None)
    assert boto_span.data['boto3']['op'] == 'download_fileobj'
    assert boto_span.data['boto3']['ep'] == 'https://s3.amazonaws.com'
    assert boto_span.data['boto3']['reg'] == 'us-east-1'
    assert boto_span.data['http']['method'] == 'POST'
    assert boto_span.data['http']['url'] == 'https://s3.amazonaws.com:443/download_fileobj'
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from future.utils import bytes_to_native_str as n
from past.builtins import long
import argparse
import csv
import locale
import logging
import mmap
import os
import sys
import traceback
import types
from .compat import is_py2#, str, bytes, basestring
# Characters a sanitized column name must not start with (digits, underscore).
BAD_LEADING_CHARS = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '_']
# Case-insensitive field values treated as "no data" by qdf().
EMPTY_COLUMN_VALS = ['na', 'n/a', '(none)', '(null)', 'null']
def _show_error():
"""
show system errors
"""
et, ev, tb = sys.exc_info()
print("Error Type: %s" % et)
print("Error Value: %s" % ev)
while tb :
co = tb.tb_frame.f_code
filename = str(co.co_filename)
#line_no = str(traceback.tb_lineno(tb))
line_no = 1
print(' %s:%s' % (filename, line_no))
traceback.print_tb(tb)
tb = tb.tb_next
class File2DBParseError(Exception):
    """Raised when a field value cannot be converted to a supported type."""
    pass
class Column(object):
    """ Simple class encapsulate meta data for a column
    """
    def __init__(self):
        self.name = None             # sanitized column name (see fix_column_name)
        self.max_length = 0          # widest string form of a value seen
        self.min_length = 0          # narrowest string form of a value seen
        self.max_value = None        # largest parsed value seen
        self.min_value = None        # smallest parsed value seen
        self.min_length_value = 0    # NOTE(review): unused in this chunk
        self.max_length_value = 0    # NOTE(review): unused in this chunk
        self.index = 0               # zero-based position within the row
        self.type = None             # inferred Python type (int/float/str)
        self.empty = 0               # count of rows without data
        self.not_empty = 0           # count of rows with data

    def __str__(self):
        return 'Name: {} Type: {}'.format(self.name, self.type)
def parse_type(value):
    """
    Coerce *value* to int, then float, then text.

    Return converted value or raise File2DBParseError.
    """
    # Numeric conversions first: int wins over float.
    for caster in (int, float):
        try:
            return caster(value)
        except ValueError:
            pass
    # Text handling differs between Python 2 and 3.
    if is_py2:
        if isinstance(value, unicode):
            return value.decode('ascii', 'ignore')
        if isinstance(value, str):
            return value
    else:
        if isinstance(value, bytes):
            return n(value)
        if isinstance(value, str):
            return value
    raise File2DBParseError('Unknown type')
def fix_column_name(column_name):
    """
    Sanitize a raw header field into a DB-friendly column name.

    Replaces ``.`` and spaces with underscores, then strips leading
    characters from BAD_LEADING_CHARS (digits/underscore).

    :param column_name: raw header field
    :return: cleaned name; '' when nothing survives stripping
    """
    column_name = column_name.replace('.', '_')
    column_name = column_name.replace(' ', '_')
    # eliminate columns starting with bad characters.
    # BUG FIX: a name made entirely of bad characters (e.g. "2014") used to
    # hit column_name[0] on the empty string and raise IndexError.
    while column_name and column_name[0] in BAD_LEADING_CHARS:
        column_name = column_name[1:]
    return column_name
def _fixstr(s):
    """Normalize *s* to a native ``str``: decode bytes, stringify everything else."""
    if is_py2:
        # Py2 str is bytes; drop non-ASCII and rewrap as native str.
        if isinstance(s, str):
            return str(s.decode('ascii', 'ignore'))
    else:
        if isinstance(s, bytes):
            return n(s)
        if isinstance(s, str):
            return s
    # Fallback for non-string values (ints, floats, None, ...).
    return str(s)
def qdf(c):
    """
    qdf = quick data fix: normalize a single raw CSV field.

    Strips surrounding whitespace and one layer of quotes; returns None
    for empty values and the common null markers in EMPTY_COLUMN_VALS.
    """
    s = _fixstr(c).strip()
    # Remove one pair of surrounding quote characters (either kind).
    if len(s) > 1 and s[0] in ('"', "'") and s[-1] in ('"', "'"):
        s = s[1:-1]
    lowered = s.lower()
    if len(lowered) == 0 or lowered in EMPTY_COLUMN_VALS:
        return None
    return s
def qdf_row(row):
    """
    qdf = quick data fix

    Apply :func:`qdf` to every field of *row* and return a list.

    BUG FIX: returned ``map(qdf, row)``, which on Python 3 is a lazy
    one-shot iterator; this module targets both 2 and 3, so return a list
    to match the Python 2 behaviour.
    """
    return [qdf(c) for c in row]
def count_lines(filename):
    """
    Return the number of lines in *filename*.

    BUG FIX: the previous implementation leaked both the file handle and
    the mmap, required write access by opening with "r+", and raised
    ValueError on zero-length files (mmap cannot map an empty file) even
    though parse_file expects 0 for an empty input.
    """
    lines = 0
    with open(filename, "rb") as handle:
        for _ in handle:
            lines += 1
    return lines
def parse_file(input_file, delimiter, output_file=None, null_value=None, info_only=True):
    """
    Parse a file and gather statistics about each column.

    Reads *input_file* as CSV with *delimiter*; the first row supplies the
    column names.  For every subsequent row each field is cleaned with
    qdf(), typed with parse_type(), and folded into per-column statistics
    (type, min/max value and length, empty counts).  When *info_only* is
    False the cleaned rows (with empties replaced by *null_value*) are
    also written to *output_file*.

    :return: list of Column objects, or None when any row failed to parse.
    """
    col_info = []
    num_lines = count_lines(input_file)
    if num_lines == 0:
        logging.error("'{0}' contains no lines!!!".format(input_file))
        exit()
    line = 0
    try:
        reader = csv.reader(open(input_file), delimiter=delimiter, quoting=csv.QUOTE_MINIMAL)
        if not info_only:
            writer = csv.writer(open(output_file, "w"), delimiter=delimiter, quoting=csv.QUOTE_MINIMAL)
        first_row = next(reader)
        i = 0
        line += 1
        logging.debug(first_row)
        # parse header or first row
        for h in first_row:
            c = Column()
            c.name = fix_column_name(h)
            c.index = i
            col_info.append(c)
            i += 1
        for row in reader:
            logging.debug(row)
            i = 0
            new_row = []
            for col in row:
                # handle nasty case where someone puts an extra delim at EOL
                if i >= len(col_info):
                    continue
                data = qdf(col)
                if data:
                    new_row.append(data)
                else:
                    new_row.append(null_value)
                c = col_info[i]
                # skip if no data
                if not data:
                    c.empty += 1
                else:
                    c.not_empty += 1
                    v = parse_type(data)
                    t = type(v)
                    # dl: display length of the value's string form
                    dl = len(str(v))
                    if c.type is None:
                        c.type = t
                    elif c.type in (float, long, int):
                        # column is currently numeric
                        if t == str:
                            # new data is str: widen the column to text and
                            # re-key the running min/max as strings too
                            c.type = t
                            # TODO: this isn't the best way to handle this
                            c.max_value = str(c.max_value)
                            c.min_value = str(c.min_value)
                    elif c.type == str:
                        # column already text: compare the string form
                        v = str(v)
                    else:
                        c.type = t
                    if c.max_value:
                        c.max_length = dl if dl > c.max_length else c.max_length
                        c.max_value = v if v > c.max_value else c.max_value
                    else:
                        c.max_value = v
                        c.max_length = dl
                    if c.min_value:
                        c.min_length = dl if dl < c.min_length else c.min_length
                        c.min_value = v if v < c.min_value else c.min_value
                    else:
                        c.min_value = v
                        c.min_length = dl
                col_info[i] = c
                i += 1
            line += 1
            if not info_only:
                writer.writerow(new_row)
    except Exception as inst:
        # Any parse failure aborts the whole run with diagnostics.
        _show_error()
        print(str(inst))
        print("Line number: " + str(line))
        return None
    return col_info
|
|
# Copyright (c) 2014 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import uuid
from oslo_config import cfg
from oslo_utils import importutils
import six
# Mapping from the ovsdb_interface config choice to the import path of the
# OVSDB API implementation that API.get() instantiates.
interface_map = {
    'vsctl': 'neutron.agent.ovsdb.impl_vsctl.OvsdbVsctl',
    'native': 'neutron.agent.ovsdb.impl_idl.OvsdbIdl',
}

# NOTE(review): ``_`` is assumed to be the project's gettext translation
# hook; it is not imported in this chunk -- confirm.
OPTS = [
    cfg.StrOpt('ovsdb_interface',
               choices=interface_map.keys(),
               default='vsctl',
               help=_('The interface for interacting with the OVSDB')),
]
cfg.CONF.register_opts(OPTS, 'OVS')
@six.add_metaclass(abc.ABCMeta)
class Command(object):
    """An OVSDB command that can be executed in a transaction

    :attr result: The result of executing the command in a transaction
    """

    @abc.abstractmethod
    def execute(self, **transaction_options):
        """Immediately execute an OVSDB command

        This implicitly creates a transaction with the passed options and then
        executes it, returning the value of the executed transaction

        :param transaction_options: Options to pass to the transaction
        """
@six.add_metaclass(abc.ABCMeta)
class Transaction(object):
    """A batch of OVSDB operations, usable as a context manager."""

    @abc.abstractmethod
    def commit(self):
        """Commit the transaction to OVSDB"""

    @abc.abstractmethod
    def add(self, command):
        """Append an OVSDB operation to the transaction"""

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, tb):
        # Commit only on a clean exit; an in-flight exception propagates
        # without touching OVSDB.  The commit result is stored so ``with``
        # users can read it afterwards.
        if exc_type is None:
            self.result = self.commit()
@six.add_metaclass(abc.ABCMeta)
class API(object):
def __init__(self, context):
self.context = context
@staticmethod
def get(context, iface_name=None):
"""Return the configured OVSDB API implementation"""
iface = importutils.import_class(
interface_map[iface_name or cfg.CONF.OVS.ovsdb_interface])
return iface(context)
@abc.abstractmethod
def transaction(self, check_error=False, log_errors=True, **kwargs):
"""Create a transaction
:param check_error: Allow the transaction to raise an exception?
:type check_error: bool
:param log_errors: Log an error if the transaction fails?
:type log_errors: bool
:returns: A new transaction
:rtype: :class:`Transaction`
"""
@abc.abstractmethod
def add_br(self, name, may_exist=True, datapath_type=None):
"""Create an command to add an OVS bridge
:param name: The name of the bridge
:type name: string
:param may_exist: Do not fail if bridge already exists
:type may_exist: bool
:param datapath_type: The datapath_type of the bridge
:type datapath_type: string
:returns: :class:`Command` with no result
"""
@abc.abstractmethod
def del_br(self, name, if_exists=True):
"""Create a command to delete an OVS bridge
:param name: The name of the bridge
:type name: string
:param if_exists: Do not fail if the bridge does not exist
:type if_exists: bool
:returns: :class:`Command` with no result
"""
@abc.abstractmethod
def br_exists(self, name):
"""Create a command to check if an OVS bridge exists
:param name: The name of the bridge
:type name: string
:returns: :class:`Command` with bool result
"""
@abc.abstractmethod
def port_to_br(self, name):
"""Create a command to return the name of the bridge with the port
:param name: The name of the OVS port
:type name: string
:returns: :class:`Command` with bridge name result
"""
@abc.abstractmethod
def iface_to_br(self, name):
"""Create a command to return the name of the bridge with the interface
:param name: The name of the OVS interface
:type name: string
:returns: :class:`Command` with bridge name result
"""
@abc.abstractmethod
def list_br(self):
"""Create a command to return the current list of OVS bridge names
:returns: :class:`Command` with list of bridge names result
"""
@abc.abstractmethod
def br_get_external_id(self, name, field):
"""Create a command to return a field from the Bridge's external_ids
:param name: The name of the OVS Bridge
:type name: string
:param field: The external_ids field to return
:type field: string
:returns: :class:`Command` with field value result
"""
@abc.abstractmethod
def db_create(self, table, **col_values):
"""Create a command to create new record
:param table: The OVS table containing the record to be created
:type table: string
:param col_values: The columns and their associated values
to be set after create
:type col_values: Dictionary of columns id's and values
:returns: :class:`Command` with no result
"""
@abc.abstractmethod
def db_destroy(self, table, record):
"""Create a command to destroy a record
:param table: The OVS table containing the record to be destroyed
:type table: string
:param record: The record id (name/uuid) to be destroyed
:type record: uuid/string
:returns: :class:`Command` with no result
"""
@abc.abstractmethod
def db_set(self, table, record, *col_values):
"""Create a command to set fields in a record
:param table: The OVS table containing the record to be modified
:type table: string
:param record: The record id (name/uuid) to be modified
:type table: string
:param col_values: The columns and their associated values
:type col_values: Tuples of (column, value). Values may be atomic
values or unnested sequences/mappings
:returns: :class:`Command` with no result
"""
# TODO(twilson) Consider handling kwargs for arguments where order
# doesn't matter. Though that would break the assert_called_once_with
# unit tests
@abc.abstractmethod
def db_clear(self, table, record, column):
"""Create a command to clear a field's value in a record
:param table: The OVS table containing the record to be modified
:type table: string
:param record: The record id (name/uuid) to be modified
:type record: string
:param column: The column whose value should be cleared
:type column: string
:returns: :class:`Command` with no result
"""
@abc.abstractmethod
def db_get(self, table, record, column):
"""Create a command to return a field's value in a record
:param table: The OVS table containing the record to be queried
:type table: string
:param record: The record id (name/uuid) to be queried
:type record: string
:param column: The column whose value should be returned
:type column: string
:returns: :class:`Command` with the field's value result
"""
@abc.abstractmethod
def db_list(self, table, records=None, columns=None, if_exists=False):
"""Create a command to return a list of OVSDB records
:param table: The OVS table to query
:type table: string
:param records: The records to return values from
:type records: list of record ids (names/uuids)
:param columns: Limit results to only columns, None means all columns
:type columns: list of column names or None
:param if_exists: Do not fail if the record does not exist
:type if_exists: bool
:returns: :class:`Command` with [{'column', value}, ...] result
"""
@abc.abstractmethod
def db_find(self, table, *conditions, **kwargs):
    """Create a command to return find OVSDB records matching conditions

    :param table: The OVS table to query
    :type table: string
    :param conditions: The conditions to satisfy the query
    :type conditions: 3-tuples containing (column, operation, match)
                      Examples:
                          atomic: ('tag', '=', 7)
                          map: ('external_ids' '=', {'iface-id': 'xxx'})
                          field exists?
                              ('external_ids', '!=', {'iface-id', ''})
                          set contains?:
                              ('protocols', '{>=}', 'OpenFlow13')
                      See the ovs-vsctl man page for more operations
    :param columns: Limit results to only columns, None means all columns
    :type columns: list of column names or None
    :returns: :class:`Command` with [{'column', value}, ...] result
    """
@abc.abstractmethod
def set_controller(self, bridge, controllers):
    """Create a command to set an OVS bridge's OpenFlow controllers

    :param bridge: The name of the bridge
    :type bridge: string
    :param controllers: The controller strings
    :type controllers: list of strings, see ovs-vsctl manpage for format
    :returns: :class:`Command` with no result
    """
@abc.abstractmethod
def del_controller(self, bridge):
    """Create a command to clear an OVS bridge's OpenFlow controllers

    :param bridge: The name of the bridge
    :type bridge: string
    :returns: :class:`Command` with no result
    """
@abc.abstractmethod
def get_controller(self, bridge):
    """Create a command to return an OVS bridge's OpenFlow controllers

    :param bridge: The name of the bridge
    :type bridge: string
    :returns: :class:`Command` with list of controller strings result
    """
@abc.abstractmethod
def set_fail_mode(self, bridge, mode):
    """Create a command to set an OVS bridge's failure mode

    :param bridge: The name of the bridge
    :type bridge: string
    :param mode: The failure mode
    :type mode: "secure" or "standalone"
    :returns: :class:`Command` with no result
    """
@abc.abstractmethod
def add_port(self, bridge, port, may_exist=True):
    """Create a command to add a port to an OVS bridge

    :param bridge: The name of the bridge
    :type bridge: string
    :param port: The name of the port
    :type port: string
    :param may_exist: Do not fail if the port already exists
    :type may_exist: bool
    :returns: :class:`Command` with no result
    """
@abc.abstractmethod
def del_port(self, port, bridge=None, if_exists=True):
    """Create a command to delete an OVS port

    :param port: The name of the port
    :type port: string
    :param bridge: Only delete port if it is attached to this bridge
    :type bridge: string
    :param if_exists: Do not fail if the port does not exist
    :type if_exists: bool
    :returns: :class:`Command` with no result
    """
@abc.abstractmethod
def list_ports(self, bridge):
    """Create a command to list the names of ports on a bridge

    :param bridge: The name of the bridge
    :type bridge: string
    :returns: :class:`Command` with list of port names result
    """
@abc.abstractmethod
def list_ifaces(self, bridge):
    """Create a command to list the names of interfaces on a bridge

    :param bridge: The name of the bridge
    :type bridge: string
    :returns: :class:`Command` with list of interfaces names result
    """
def val_to_py(val):
    """Convert a json ovsdb return value to native python object

    OVSDB encodes compound values as 2-element sequences tagged "uuid",
    "set", or "map"; anything that does not match that shape is returned
    unchanged.
    """
    # BUG FIX: collections.Sequence was a deprecated alias removed in
    # Python 3.10; the ABC lives in collections.abc. Imported locally so
    # this function does not depend on the module-level import style.
    import collections.abc
    if isinstance(val, collections.abc.Sequence) and len(val) == 2:
        if val[0] == "uuid":
            return uuid.UUID(val[1])
        elif val[0] == "set":
            # Sets decode to lists, converting each member recursively.
            return [val_to_py(x) for x in val[1]]
        elif val[0] == "map":
            # Maps decode to dicts, converting keys and values recursively.
            return {val_to_py(x): val_to_py(y) for x, y in val[1]}
    return val
def py_to_val(pyval):
    """Convert python value to ovs-vsctl value argument"""
    # bool must be tested first: ovs-vsctl wants the literal words
    # 'true'/'false', and a bare bool would otherwise fall through.
    if isinstance(pyval, bool):
        return 'true' if pyval else 'false'
    if pyval == '':
        # An empty string must be quoted explicitly on the command line.
        return '""'
    return pyval
|
|
import datetime
import json
import os
import traceback
from dateutil.parser import parse
from great_expectations import DataContext
from great_expectations.cli.upgrade_helpers.base_upgrade_helper import BaseUpgradeHelper
from great_expectations.data_context.store import (
DatabaseStoreBackend,
HtmlSiteStore,
InMemoryStoreBackend,
MetricStore,
TupleFilesystemStoreBackend,
TupleGCSStoreBackend,
TupleS3StoreBackend,
ValidationsStore,
)
from great_expectations.data_context.types.resource_identifiers import (
ValidationResultIdentifier,
)
class UpgradeHelperV11(BaseUpgradeHelper):
    """Upgrade a Great Expectations project for compatibility with 0.11.x.

    0.11.x adds a run_time component to validation result identifiers.
    This helper renames stored validation results (and rendered data docs
    pages) into the new key structure, records every action, skip, and
    failure in ``self.upgrade_log``, and reports whether the project's
    config_version can be incremented automatically.
    """

    def __init__(self, data_context=None, context_root_dir=None, **kwargs):
        """Build the upgrade checklist for the given project.

        :param data_context: an already-instantiated DataContext
        :param context_root_dir: project root used to construct a
            DataContext when one is not supplied
        """
        assert (
            data_context or context_root_dir
        ), "Please provide a data_context object or a context_root_dir."
        self.data_context = data_context or DataContext(
            context_root_dir=context_root_dir
        )
        # Everything the upgrade does (or skips) is recorded here and
        # serialized to uncommitted/logs/project_upgrades at the end.
        self.upgrade_log = {
            "skipped_validations_stores": {
                "database_store_backends": [],
                "unsupported": [],
            },
            "skipped_docs_validations_stores": {"unsupported": []},
            "skipped_metrics_stores": {
                "database_store_backends": [],
                "unsupported": [],
            },
            "exceptions": [
                # {
                #   "validation_store_name": store_name
                #   "src": src_url,
                #   "dest": dest_url,
                #   "exception_message": exception_message,
                # },
                # {
                #   "site_name": site_name,
                #   "src": src_url,
                #   "dest": dest_url,
                #   "exception_message": exception_message,
                # }
            ],
            "upgraded_validations_stores": {
                # STORE_NAME: {
                #     "validations_updated": [{
                #         "src": src_url,
                #         "dest": dest_url
                #     }],
                #     "exceptions": BOOL
                # }
            },
            "upgraded_docs_site_validations_stores": {
                # SITE_NAME: {
                #     "validation_result_pages_updated": [{
                #         src: src_url,
                #         dest: dest_url
                #     }],
                #     "exceptions": BOOL
                # }
            },
        }
        self.upgrade_checklist = {
            "validations_store_backends": {},
            "docs_validations_store_backends": {},
        }
        # run_name -> formatted run_time string, cached so each run's
        # timestamp is resolved only once across all stores.
        self.validation_run_times = {}
        # Only these backend types can be upgraded automatically; each
        # maps to the strategy used to recover a run_time for a key.
        self.run_time_setters_by_backend_type = {
            TupleFilesystemStoreBackend: self._get_tuple_filesystem_store_backend_run_time,
            TupleS3StoreBackend: self._get_tuple_s3_store_backend_run_time,
            TupleGCSStoreBackend: self._get_tuple_gcs_store_backend_run_time,
        }
        self._generate_upgrade_checklist()

    def _generate_upgrade_checklist(self):
        """Classify every store and data docs site as upgradeable or skipped."""
        for (store_name, store) in self.data_context.stores.items():
            if not isinstance(store, (ValidationsStore, MetricStore)):
                continue
            elif isinstance(store, ValidationsStore):
                self._process_validations_store_for_checklist(store_name, store)
            elif isinstance(store, MetricStore):
                self._process_metrics_store_for_checklist(store_name, store)
        sites = (
            self.data_context.project_config_with_variables_substituted.data_docs_sites
        )
        if sites:
            for site_name, site_config in sites.items():
                self._process_docs_site_for_checklist(site_name, site_config)

    def _process_docs_site_for_checklist(self, site_name, site_config):
        """Queue a data docs site for upgrade, or log it as unsupported."""
        site_html_store = HtmlSiteStore(
            store_backend=site_config.get("store_backend"),
            runtime_environment={
                "data_context": self.data_context,
                "root_directory": self.data_context.root_directory,
                "site_name": site_name,
            },
        )
        site_validations_store_backend = site_html_store.store_backends[
            ValidationResultIdentifier
        ]
        if isinstance(
            site_validations_store_backend,
            tuple(list(self.run_time_setters_by_backend_type.keys())),
        ):
            self.upgrade_checklist["docs_validations_store_backends"][
                site_name
            ] = site_validations_store_backend
        else:
            self.upgrade_log["skipped_docs_validations_stores"]["unsupported"].append(
                {
                    "site_name": site_name,
                    "validations_store_backend_class": type(
                        site_validations_store_backend
                    ).__name__,
                }
            )

    def _process_validations_store_for_checklist(self, store_name, store):
        """Queue a validations store for upgrade, or log why it was skipped."""
        store_backend = store.store_backend
        if isinstance(store_backend, DatabaseStoreBackend):
            # Database-backed stores must be migrated manually.
            self.upgrade_log["skipped_validations_stores"][
                "database_store_backends"
            ].append(
                {
                    "store_name": store_name,
                    "store_backend_class": type(store_backend).__name__,
                }
            )
        elif isinstance(
            store_backend, tuple(list(self.run_time_setters_by_backend_type.keys()))
        ):
            self.upgrade_checklist["validations_store_backends"][
                store_name
            ] = store_backend
        else:
            self.upgrade_log["skipped_validations_stores"]["unsupported"].append(
                {
                    "store_name": store_name,
                    "store_backend_class": type(store_backend).__name__,
                }
            )

    def _process_metrics_store_for_checklist(self, store_name, store):
        """Log metrics stores that cannot be upgraded automatically.

        Metrics stores are never upgraded here: in-memory backends need no
        work, and everything else is recorded for manual follow-up.
        """
        store_backend = store.store_backend
        if isinstance(store_backend, DatabaseStoreBackend):
            self.upgrade_log["skipped_metrics_stores"][
                "database_store_backends"
            ].append(
                {
                    "store_name": store_name,
                    "store_backend_class": type(store_backend).__name__,
                }
            )
        elif isinstance(store_backend, InMemoryStoreBackend):
            # Nothing persisted, nothing to upgrade.
            pass
        else:
            self.upgrade_log["skipped_metrics_stores"]["unsupported"].append(
                {
                    "store_name": store_name,
                    "store_backend_class": type(store_backend).__name__,
                }
            )

    def _upgrade_store_backend(self, store_backend, store_name=None, site_name=None):
        """Rename every validation key in ``store_backend`` to the new
        run_time-aware structure, logging each success or failure.

        Exactly one of ``store_name`` (validations store) or ``site_name``
        (data docs site) must be provided.
        """
        assert store_name or site_name, "Must pass either store_name or site_name."
        assert not (
            store_name and site_name
        ), "Must pass either store_name or site_name, not both."
        try:
            validation_source_keys = store_backend.list_keys()
        except Exception as e:
            exception_traceback = traceback.format_exc()
            exception_message = (
                f'{type(e).__name__}: "{str(e)}". '
                f'Traceback: "{exception_traceback}".'
            )
            self._update_upgrade_log(
                store_backend=store_backend,
                store_name=store_name,
                site_name=site_name,
                exception_message=exception_message,
            )
            # BUG FIX: previously control fell through to the loop below
            # with ``validation_source_keys`` unbound, raising NameError
            # on top of the original failure.
            return
        for source_key in validation_source_keys:
            try:
                run_name = source_key[-2]
                dest_key = None
                if run_name not in self.validation_run_times:
                    # Resolve (and cache) the run_time via the
                    # backend-appropriate strategy.
                    self.run_time_setters_by_backend_type.get(type(store_backend))(
                        source_key, store_backend
                    )
                dest_key_list = list(source_key)
                dest_key_list.insert(-1, self.validation_run_times[run_name])
                dest_key = tuple(dest_key_list)
            except Exception as e:
                exception_traceback = traceback.format_exc()
                exception_message = (
                    f'{type(e).__name__}: "{str(e)}". '
                    f'Traceback: "{exception_traceback}".'
                )
                self._update_upgrade_log(
                    store_backend=store_backend,
                    source_key=source_key,
                    dest_key=dest_key,
                    store_name=store_name,
                    site_name=site_name,
                    exception_message=exception_message,
                )
                # BUG FIX: skip the move — dest_key is incomplete and the
                # attempt would only add a second, derived exception entry.
                continue
            try:
                if store_name:
                    # Validation stores also need run_id rewritten inside
                    # the stored JSON document.
                    self._update_validation_result_json(
                        source_key=source_key,
                        dest_key=dest_key,
                        run_name=run_name,
                        store_backend=store_backend,
                    )
                else:
                    # Data docs pages are opaque rendered files; just move.
                    store_backend.move(source_key, dest_key)
                self._update_upgrade_log(
                    store_backend=store_backend,
                    source_key=source_key,
                    dest_key=dest_key,
                    store_name=store_name,
                    site_name=site_name,
                )
            except Exception as e:
                exception_traceback = traceback.format_exc()
                exception_message = (
                    f'{type(e).__name__}: "{str(e)}". '
                    f'Traceback: "{exception_traceback}".'
                )
                self._update_upgrade_log(
                    store_backend=store_backend,
                    source_key=source_key,
                    dest_key=dest_key,
                    store_name=store_name,
                    site_name=site_name,
                    exception_message=exception_message,
                )

    def _update_upgrade_log(
        self,
        store_backend,
        source_key=None,
        dest_key=None,
        store_name=None,
        site_name=None,
        exception_message=None,
    ):
        """Record one upgraded (or failed) key move in ``self.upgrade_log``.

        With ``exception_message`` the entry goes to the exceptions list
        and flags the store/site; otherwise it is logged as a success.
        """
        assert store_name or site_name, "Must pass either store_name or site_name."
        assert not (
            store_name and site_name
        ), "Must pass either store_name or site_name, not both."
        # URL generation is best-effort: a failure here must not mask the
        # event being logged.
        try:
            src_url = store_backend.get_url_for_key(source_key) if source_key else "N/A"
        except Exception:
            src_url = f"Unable to generate URL for key: {source_key}"
        try:
            dest_url = store_backend.get_url_for_key(dest_key) if dest_key else "N/A"
        except Exception:
            dest_url = f"Unable to generate URL for key: {dest_key}"
        if not exception_message:
            log_dict = {"src": src_url, "dest": dest_url}
        else:
            key_name = "validation_store_name" if store_name else "site_name"
            log_dict = {
                key_name: store_name if store_name else site_name,
                "src": src_url,
                "dest": dest_url,
                "exception_message": exception_message,
            }
            self.upgrade_log["exceptions"].append(log_dict)
        if store_name:
            if exception_message:
                self.upgrade_log["upgraded_validations_stores"][store_name][
                    "exceptions"
                ] = True
            else:
                self.upgrade_log["upgraded_validations_stores"][store_name][
                    "validations_updated"
                ].append(log_dict)
        else:
            if exception_message:
                self.upgrade_log["upgraded_docs_site_validations_stores"][site_name][
                    "exceptions"
                ] = True
            else:
                self.upgrade_log["upgraded_docs_site_validations_stores"][site_name][
                    "validation_result_pages_updated"
                ].append(log_dict)

    def _update_validation_result_json(
        self, source_key, dest_key, run_name, store_backend
    ):
        """Rewrite a stored validation result under its new key with the
        0.11.x ``run_id`` dict embedded in its metadata."""
        new_run_id_dict = {
            "run_name": run_name,
            "run_time": self.validation_run_times[run_name],
        }
        validation_json_dict = json.loads(store_backend.get(source_key))
        validation_json_dict["meta"]["run_id"] = new_run_id_dict
        store_backend.set(dest_key, json.dumps(validation_json_dict))
        store_backend.remove_key(source_key)

    def _get_tuple_filesystem_store_backend_run_time(self, source_key, store_backend):
        """Cache a run_time for ``run_name``: parse it as a datetime, else
        fall back to the source file's modification time."""
        run_name = source_key[-2]
        try:
            self.validation_run_times[run_name] = parse(run_name).strftime(
                "%Y%m%dT%H%M%S.%fZ"
            )
        except (ValueError, TypeError):
            source_path = os.path.join(
                store_backend.full_base_directory,
                store_backend._convert_key_to_filepath(source_key),
            )
            path_mod_timestamp = os.path.getmtime(source_path)
            path_mod_iso_str = datetime.datetime.fromtimestamp(
                path_mod_timestamp
            ).strftime("%Y%m%dT%H%M%S.%fZ")
            self.validation_run_times[run_name] = path_mod_iso_str

    def _get_tuple_s3_store_backend_run_time(self, source_key, store_backend):
        """Cache a run_time for ``run_name``: parse it as a datetime, else
        fall back to the S3 object's last-modified time."""
        import boto3

        s3 = boto3.resource("s3")
        run_name = source_key[-2]
        try:
            self.validation_run_times[run_name] = parse(run_name).strftime(
                "%Y%m%dT%H%M%S.%fZ"
            )
        except (ValueError, TypeError):
            source_path = store_backend._convert_key_to_filepath(source_key)
            if not source_path.startswith(store_backend.prefix):
                source_path = os.path.join(store_backend.prefix, source_path)
            source_object = s3.Object(store_backend.bucket, source_path)
            source_object_last_mod = source_object.last_modified.strftime(
                "%Y%m%dT%H%M%S.%fZ"
            )
            self.validation_run_times[run_name] = source_object_last_mod

    def _get_tuple_gcs_store_backend_run_time(self, source_key, store_backend):
        """Cache a run_time for ``run_name``: parse it as a datetime, else
        fall back to the GCS blob's creation time."""
        from google.cloud import storage

        gcs = storage.Client(project=store_backend.project)
        bucket = gcs.get_bucket(store_backend.bucket)
        run_name = source_key[-2]
        try:
            self.validation_run_times[run_name] = parse(run_name).strftime(
                "%Y%m%dT%H%M%S.%fZ"
            )
        except (ValueError, TypeError):
            source_path = store_backend._convert_key_to_filepath(source_key)
            if not source_path.startswith(store_backend.prefix):
                source_path = os.path.join(store_backend.prefix, source_path)
            source_blob_created_time = bucket.get_blob(
                source_path
            ).time_created.strftime("%Y%m%dT%H%M%S.%fZ")
            self.validation_run_times[run_name] = source_blob_created_time

    def _get_skipped_store_and_site_names(self):
        """Return (database-backed store names, unsupported store names,
        unsupported docs site names) gathered from the upgrade log."""
        validations_stores_with_database_backends = [
            store_dict.get("store_name")
            for store_dict in self.upgrade_log["skipped_validations_stores"][
                "database_store_backends"
            ]
        ]
        metrics_stores_with_database_backends = [
            store_dict.get("store_name")
            for store_dict in self.upgrade_log["skipped_metrics_stores"][
                "database_store_backends"
            ]
        ]
        unsupported_validations_stores = [
            store_dict.get("store_name")
            for store_dict in self.upgrade_log["skipped_validations_stores"][
                "unsupported"
            ]
        ]
        unsupported_metrics_stores = [
            store_dict.get("store_name")
            for store_dict in self.upgrade_log["skipped_metrics_stores"]["unsupported"]
        ]
        stores_with_database_backends = (
            validations_stores_with_database_backends
            + metrics_stores_with_database_backends
        )
        stores_with_unsupported_backends = (
            unsupported_validations_stores + unsupported_metrics_stores
        )
        doc_sites_with_unsupported_backends = [
            doc_site_dict.get("site_name")
            for doc_site_dict in self.upgrade_log["skipped_docs_validations_stores"][
                "unsupported"
            ]
        ]
        return (
            stores_with_database_backends,
            stores_with_unsupported_backends,
            doc_sites_with_unsupported_backends,
        )

    def manual_steps_required(self):
        """Return True when any store or docs site needs manual migration."""
        (
            skip_with_database_backends,
            skip_with_unsupported_backends,
            skip_doc_sites_with_unsupported_backends,
        ) = self._get_skipped_store_and_site_names()
        return any(
            [
                skip_with_database_backends,
                skip_with_unsupported_backends,
                skip_doc_sites_with_unsupported_backends,
            ]
        )

    def get_upgrade_overview(self):
        """Build the pre-upgrade summary shown to the user.

        :returns: (overview markup string, True) — the second element
            signals that user confirmation is required before upgrading.
        """
        (
            skip_with_database_backends,
            skip_with_unsupported_backends,
            skip_doc_sites_with_unsupported_backends,
        ) = self._get_skipped_store_and_site_names()
        validations_store_name_checklist = [
            store_name
            for store_name in self.upgrade_checklist[
                "validations_store_backends"
            ].keys()
        ]
        site_name_checklist = [
            site_name
            for site_name in self.upgrade_checklist[
                "docs_validations_store_backends"
            ].keys()
        ]
        upgrade_overview = """\
<cyan>\
++====================================++
|| UpgradeHelperV11: Upgrade Overview ||
++====================================++\
</cyan>
UpgradeHelperV11 will upgrade your project to be compatible with Great Expectations 0.11.x.
"""
        if not any(
            [
                validations_store_name_checklist,
                site_name_checklist,
                skip_with_database_backends,
                skip_with_unsupported_backends,
                skip_doc_sites_with_unsupported_backends,
            ]
        ):
            upgrade_overview += """
<green>\
Good news! No special upgrade steps are required to bring your project up to date.
The Upgrade Helper will simply increment the config_version of your great_expectations.yml for you.
</green>
Would you like to proceed?
"""
        else:
            upgrade_overview += """
<red>**WARNING**: Before proceeding, please make sure you have appropriate backups of your project.</red>
"""
            if validations_store_name_checklist or site_name_checklist:
                upgrade_overview += """
<cyan>\
Automated Steps
================
</cyan>
The following Stores and/or Data Docs sites will be upgraded:
"""
                upgrade_overview += (
                    f"""\
- Validation Stores: {", ".join(validations_store_name_checklist)}
"""
                    if validations_store_name_checklist
                    else ""
                )
                upgrade_overview += (
                    f"""\
- Data Docs Sites: {", ".join(site_name_checklist)}
"""
                    if site_name_checklist
                    else ""
                )
            if any(
                [
                    skip_with_database_backends,
                    skip_with_unsupported_backends,
                    skip_doc_sites_with_unsupported_backends,
                ]
            ):
                upgrade_overview += """
<cyan>\
Manual Steps
=============
</cyan>
The following Stores and/or Data Docs sites must be upgraded manually, due to having a database backend, or backend
type that is unsupported or unrecognized:
"""
                upgrade_overview += (
                    f"""\
- Stores with database backends: {", ".join(skip_with_database_backends)}
"""
                    if skip_with_database_backends
                    else ""
                )
                upgrade_overview += (
                    f"""\
- Stores with unsupported/unrecognized backends: {", ".join(skip_with_unsupported_backends)}
"""
                    if skip_with_unsupported_backends
                    else ""
                )
                upgrade_overview += (
                    f"""\
- Data Docs sites with unsupported/unrecognized backends: {", ".join(skip_doc_sites_with_unsupported_backends)}
"""
                    if skip_doc_sites_with_unsupported_backends
                    else ""
                )
            else:
                upgrade_overview += """
<cyan>\
Manual Steps
=============
</cyan>
No manual upgrade steps are required.
"""
            upgrade_overview += """
<cyan>\
Upgrade Confirmation
=====================
</cyan>
Please consult the 0.11.x migration guide for instructions on how to complete any required manual steps or
to learn more about the automated upgrade process:
<cyan>https://docs.greatexpectations.io/docs/guides/miscellaneous/migration_guide#migrating-to-the-batch-request-v3-api</cyan>
Would you like to proceed with the project upgrade?\
"""
        return upgrade_overview, True

    def _save_upgrade_log(self):
        """Write ``self.upgrade_log`` to a timestamped JSON file under
        uncommitted/logs/project_upgrades and return its path."""
        current_time = datetime.datetime.now(datetime.timezone.utc).strftime(
            "%Y%m%dT%H%M%S.%fZ"
        )
        dest_path = os.path.join(
            self.data_context._context_root_directory,
            "uncommitted",
            "logs",
            "project_upgrades",
            f"UpgradeHelperV11_{current_time}.json",
        )
        dest_dir, dest_filename = os.path.split(dest_path)
        os.makedirs(dest_dir, exist_ok=True)
        with open(dest_path, "w") as outfile:
            json.dump(self.upgrade_log, outfile, indent=2)
        return dest_path

    def _generate_upgrade_report(self):
        """Persist the log and summarize the outcome.

        :returns: (report markup string, increment_version, exception_occurred)
        """
        upgrade_log_path = self._save_upgrade_log()
        skipped_stores_or_sites = any(self._get_skipped_store_and_site_names())
        exception_occurred = False
        exceptions = self.upgrade_log.get("exceptions")
        # The version may only be bumped when nothing was skipped and
        # nothing failed.
        increment_version = not (skipped_stores_or_sites or exceptions)
        upgrade_report = """\
<cyan>\
++================++
|| Upgrade Report ||
++================++\
</cyan>
"""
        if increment_version:
            upgrade_report += f"""
<green>\
Your project was successfully upgraded to be compatible with Great Expectations 0.11.x.
The config_version of your great_expectations.yml has been automatically incremented to 2.0.
A log detailing the upgrade can be found here:
- {upgrade_log_path}\
</green>\
"""
        else:
            if exceptions:
                exception_occurred = True
                upgrade_report += f"""
<red>\
The Upgrade Helper encountered some exceptions during the upgrade process.
Please review the exceptions section of the upgrade log and migrate the affected files manually,
as detailed in the 0.11.x migration guide.
The upgrade log can be found here:
- {upgrade_log_path}\
</red>\
"""
            else:
                upgrade_report += f"""
<yellow>\
The Upgrade Helper has completed the automated upgrade steps.
A log detailing the upgrade can be found here:
- {upgrade_log_path}\
</yellow>\
"""
        return upgrade_report, increment_version, exception_occurred

    def upgrade_project(self):
        """Run the automated upgrade for every checklisted store and site.

        :returns: (report markup string, increment_version, exception_occurred)
        """
        try:
            for (store_name, store_backend) in self.upgrade_checklist[
                "validations_store_backends"
            ].items():
                self.upgrade_log["upgraded_validations_stores"][store_name] = {
                    "validations_updated": [],
                    "exceptions": False,
                }
                self._upgrade_store_backend(store_backend, store_name=store_name)
        except Exception:
            # Best-effort: per-key failures are already captured in the
            # upgrade log; an unexpected error here must not abort the
            # rest of the upgrade.
            pass
        try:
            for (site_name, store_backend) in self.upgrade_checklist[
                "docs_validations_store_backends"
            ].items():
                self.upgrade_log["upgraded_docs_site_validations_stores"][site_name] = {
                    "validation_result_pages_updated": [],
                    "exceptions": False,
                }
                self._upgrade_store_backend(store_backend, site_name=site_name)
        except Exception:
            pass
        # return a report of what happened, boolean indicating whether or not version should be incremented,
        # the report should include instructions for steps to be performed manually
        (
            upgrade_report,
            increment_version,
            exception_occurred,
        ) = self._generate_upgrade_report()
        return upgrade_report, increment_version, exception_occurred
|
|
"""
Generate a random sample of rows from a relational database that preserves
referential integrity - so long as constraints are defined, all parent rows
will exist for child rows.
Good for creating test/development databases from production. It's slow,
but how often do you need to generate a test/development database?
Usage::
rdbms-subsetter <source SQLAlchemy connection string> <destination connection string> <fraction of rows to use>
Example::
rdbms-subsetter postgresql://:@/bigdb postgresql://:@/littledb 0.05
Valid SQLAlchemy connection strings are described
`here <https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls>`_.
``rdbms-subsetter`` promises that each child row will have whatever parent rows are
required by its foreign keys. It will also *try* to include most child rows belonging
to each parent row (up to the supplied ``--children`` parameter, default 3 each), but it
can't make any promises. (Demanding all children can lead to infinite propagation in
thoroughly interlinked databases, as every child record demands new parent records,
which demand new child records, which demand new parent records...
so increase ``--children`` with caution.)
When row numbers in your tables vary wildly (tens to billions, for example),
consider using the ``-l`` flag, which reduces row counts by a logarithmic formula. If ``f`` is
the fraction specified, and ``-l`` is set, and the original table has ``n`` rows,
then each new table's row target will be::
math.pow(10, math.log10(n)*f)
A fraction of ``0.5`` seems to produce good results, converting 10 rows to 3,
1,000,000 to 1,000, and 1,000,000,000 to 31,622.
Rows are selected randomly, but for tables with a single primary key column, you
can force rdbms-subsetter to include specific rows (and their dependencies) with
``force=<tablename>:<primary key value>``. The immediate children of these rows
are also exempted from the ``--children`` limit.
rdbms-subsetter only performs the INSERTS; it's your responsibility to set
up the target database first, with its foreign key constraints. The easiest
way to do this is with your RDBMS's dump utility. For example, for PostgreSQL,
::
pg_dump --schema-only -f schemadump.sql bigdb
createdb littledb
psql -f schemadump.sql littledb
You can pull rows from a non-default schema by passing ``--schema=<name>``.
Currently the target database must contain the corresponding tables in its own
schema of the same name (moving between schemas of different names is not yet
supported).
Case-specific table names will probably create bad results in rdbms-subsetter,
and in the rest of your life, for that matter. Don't do it.
"""
import json
import fnmatch
import argparse
import functools
import logging
from collections import OrderedDict, deque
import math
import random
import types
import sqlalchemy as sa
from sqlalchemy.engine.reflection import Inspector
# Python2 has a totally different definition for ``input``; overriding it here
try:
    # On Python 2, rebind ``input`` to ``raw_input`` so it reads a plain
    # string instead of eval'ing the user's response. On Python 3,
    # ``raw_input`` no longer exists, so the NameError is swallowed and
    # the builtin ``input`` is left untouched.
    input = raw_input
except NameError:
    pass
__version__ = '0.2.4'
def _find_n_rows(self, estimate=False):
self.n_rows = 0
if estimate:
try:
if self.db.engine.driver in ('psycopg2', 'pg8000',):
schema = (self.schema + '.') if self.schema else ''
qry = """SELECT reltuples FROM pg_class
WHERE oid = lower('%s%s')::regclass""" % (schema, self.name.lower())
elif 'oracle' in self.db.engine.driver:
qry = """SELECT num_rows FROM all_tables
WHERE LOWER(table_name)='%s'""" % self.name.lower()
else:
raise NotImplementedError("No approximation known for driver %s"
% self.db.engine.driver)
self.n_rows = self.db.conn.execute(qry).fetchone()[0]
except Exception as e:
logging.debug("failed to get approximate rowcount for %s\n%s" %
(self.name, str(e)))
if not self.n_rows:
self.n_rows = self.db.conn.execute(self.count()).fetchone()[0]
def _random_row_func(self):
    """Return the dialect-appropriate SQL random() function expression."""
    dialect = self.bind.engine.dialect.name
    if 'mysql' in dialect:
        return sa.sql.func.rand()
    if 'oracle' in dialect:
        return sa.sql.func.dbms_random.value()
    # Default: standard random() (PostgreSQL, SQLite, ...).
    return sa.sql.func.random()
def _random_row_gen_fn(self):
    """
    Random sample of *approximate* size n

    Infinite generator: each pass pulls a fresh random sample of roughly
    ``self.target.n_rows_desired`` rows from the source table. Yields
    nothing at all when the source table is empty.
    """
    if self.n_rows:
        while True:
            n = self.target.n_rows_desired
            if self.n_rows > 1000:
                # Large table: filter WHERE random() < fraction instead of
                # ORDER BY random(), which would sort the whole table.
                fraction = n / float(self.n_rows)
                qry = sa.sql.select([self,]).where(self.random_row_func() < fraction)
                results = self.db.conn.execute(qry).fetchall()
                # we may stop wanting rows at any point, so shuffle them so as not to
                # skew the sample toward those near the beginning
                random.shuffle(results)
                for row in results:
                    yield row
            else:
                # Small table: a full ORDER BY random() LIMIT n is cheap.
                qry = sa.sql.select([self,]).order_by(self.random_row_func()).limit(n)
                for row in self.db.conn.execute(qry):
                    yield row
def _next_row(self):
if self.target.required:
return self.target.required.popleft()
elif self.target.requested:
return self.target.requested.popleft()
else:
try:
return (next(self.random_rows), False) # not prioritized
except StopIteration:
return None
def _filtered_by(self, **kw):
    """Build a SELECT on this table filtered by column == value pairs."""
    conditions = ((self.c[col] == val) for (col, val) in kw.items())
    query = sa.sql.select([self,])
    return query.where(sa.sql.and_(conditions))
def _pk_val(self, row):
if self.pk:
return row[self.pk[0]]
else:
return None
def _by_pk(self, pk):
pk_name = self.db.inspector.get_primary_keys(self.name,
self.schema)[0]
slct = self.filtered_by(**{pk_name: pk})
return self.db.conn.execute(slct).fetchone()
def _completeness_score(self):
"""Scores how close a target table is to being filled enough to quit"""
result = ( 0
- (len(self.requested) / float(self.n_rows or 1 ))
- len(self.required))
if not self.required: # anything in `required` queue disqualifies
result += (self.n_rows / (self.n_rows_desired or 1))**0.33
return result
class Db(object):
def __init__(self, sqla_conn, args, schema=None):
self.args = args
self.sqla_conn = sqla_conn
self.schema = schema
self.engine = sa.create_engine(sqla_conn)
self.meta = sa.MetaData(bind=self.engine) # excised schema=schema to prevent errors
self.meta.reflect(schema=self.schema)
self.inspector = Inspector(bind=self.engine)
self.conn = self.engine.connect()
self.tables = OrderedDict()
for tbl in self.meta.sorted_tables:
if any(fnmatch.fnmatch(tbl.name, each) for each in args.exclude_tables):
continue
tbl.db = self
# TODO: Replace all these monkeypatches with an instance assigment
tbl.find_n_rows = types.MethodType(_find_n_rows, tbl)
tbl.random_row_func = types.MethodType(_random_row_func, tbl)
tbl.fks = self.inspector.get_foreign_keys(tbl.name, schema=tbl.schema)
tbl.pk = self.inspector.get_primary_keys(tbl.name, schema=tbl.schema)
if not tbl.pk:
tbl.pk = [d['name'] for d in self.inspector.get_columns(tbl.name)]
tbl.filtered_by = types.MethodType(_filtered_by, tbl)
tbl.by_pk = types.MethodType(_by_pk, tbl)
tbl.pk_val = types.MethodType(_pk_val, tbl)
tbl.child_fks = []
estimate_rows = not(any(fnmatch.fnmatch(tbl.name, each)
for each in self.args.full_tables))
tbl.find_n_rows(estimate=estimate_rows)
self.tables[(tbl.schema, tbl.name)] = tbl
for ((tbl_schema, tbl_name), tbl) in self.tables.items():
constraints = args.config.get('constraints', {}).get(tbl_name, [])
for fk in (tbl.fks + constraints):
fk['constrained_schema'] = tbl_schema
fk['constrained_table'] = tbl_name # TODO: check against constrained_table
self.tables[(fk['referred_schema'], fk['referred_table'])].child_fks.append(fk)
def __repr__(self):
return "Db('%s')" % self.sqla_conn
def assign_target(self, target_db):
for ((tbl_schema, tbl_name), tbl) in self.tables.items():
tbl._random_row_gen_fn = types.MethodType(_random_row_gen_fn, tbl)
tbl.random_rows = tbl._random_row_gen_fn()
tbl.next_row = types.MethodType(_next_row, tbl)
target = target_db.tables[(tbl_schema, tbl_name)]
target.requested = deque()
target.required = deque()
target.pending = dict()
target.done = set()
if any(fnmatch.fnmatch(tbl.name, each)
for each in self.args.full_tables):
target.n_rows_desired = tbl.n_rows
else:
if tbl.n_rows:
if self.args.logarithmic:
target.n_rows_desired = int(math.pow(10, math.log10(tbl.n_rows)
* self.args.fraction)) or 1
else:
target.n_rows_desired = int(tbl.n_rows * self.args.fraction) or 1
else:
target.n_rows_desired = 0
target.source = tbl
tbl.target = target
target.completeness_score = types.MethodType(_completeness_score, target)
logging.debug("assigned methods to %s" % target.name)
def confirm(self):
message = []
for (tbl_schema, tbl_name) in sorted(self.tables, key=lambda t: t[1]):
tbl = self.tables[(tbl_schema, tbl_name)]
message.append("Create %d rows from %d in %s.%s" %
(tbl.target.n_rows_desired, tbl.n_rows,
tbl_schema or '', tbl_name))
print("\n".join(sorted(message)))
if self.args.yes:
return True
response = input("Proceed? (Y/n) ").strip().lower()
return (not response) or (response[0] == 'y')
def create_row_in(self, source_row, target_db, target, prioritized=False):
logging.debug('create_row_in %s:%s ' %
(target.name, target.pk_val(source_row)))
pks = tuple((source_row[key] for key in target.pk))
row_exists = pks in target.pending or pks in target.done
logging.debug("Row exists? %s" % str(row_exists))
if row_exists and not prioritized:
return
if not row_exists:
# make sure that all required rows are in parent table(s)
for fk in target.fks:
target_parent = target_db.tables[(fk['referred_schema'], fk['referred_table'])]
slct = sa.sql.select([target_parent,])
any_non_null_key_columns = False
for (parent_col, child_col) in zip(fk['referred_columns'],
fk['constrained_columns']):
slct = slct.where(target_parent.c[parent_col] ==
source_row[child_col])
if source_row[child_col] is not None:
any_non_null_key_columns = True
break
if any_non_null_key_columns:
target_parent_row = target_db.conn.execute(slct).first()
if not target_parent_row:
source_parent_row = self.conn.execute(slct).first()
self.create_row_in(source_parent_row, target_db, target_parent)
pks = tuple((source_row[key] for key in target.pk))
target.pending[pks] = source_row
target.n_rows += 1
for child_fk in target.child_fks:
child = self.tables[(child_fk['constrained_schema'], child_fk['constrained_table'])]
slct = sa.sql.select([child])
for (child_col, this_col) in zip(child_fk['constrained_columns'],
child_fk['referred_columns']):
slct = slct.where(child.c[child_col] == source_row[this_col])
if not prioritized:
slct = slct.limit(self.args.children)
for (n, desired_row) in enumerate(self.conn.execute(slct)):
if prioritized:
child.target.required.append((desired_row, prioritized))
elif (n == 0):
child.target.requested.appendleft((desired_row, prioritized))
else:
child.target.requested.append((desired_row, prioritized))
@property
def pending(self):
return functools.reduce(
lambda count, table: count + len(table.pending),
self.tables.values(),
0
)
def flush(self):
    """Persist every buffered row, then reset each table's pending buffer.

    Tables with nothing pending are skipped.  Flushed primary keys are
    folded into ``table.done`` so they are never re-queued.
    """
    buffered = (tbl for tbl in self.tables.values() if tbl.pending)
    for tbl in buffered:
        rows = list(tbl.pending.values())
        self.conn.execute(tbl.insert(), rows)
        tbl.done = tbl.done.union(tbl.pending.keys())
        tbl.pending = dict()
def create_subset_in(self, target_db):
    """Populate ``target_db`` with a consistent subset of this database.

    First forces in any user-requested rows (``--force``), then repeatedly
    picks the least-complete target table and copies one of its source rows
    (plus everything needed for referential integrity) until every table's
    completeness score passes the cutoff or the sources are exhausted.
    """
    # Seed the queues with explicitly forced rows.
    for (tbl_name, pks) in self.args.force_rows.items():
        if '.' in tbl_name:
            (tbl_schema, tbl_name) = tbl_name.split('.', 1)
        else:
            tbl_schema = None
        source = self.tables[(tbl_schema, tbl_name)]
        for pk in pks:
            source_row = source.by_pk(pk)
            if source_row:
                self.create_row_in(source_row, target_db,
                                   source.target, prioritized=True)
            else:
                # logging.warning: .warn() is a deprecated alias; the two
                # string pieces were also concatenated without a space.
                logging.warning("requested %s:%s not found in source db,"
                                " could not create" % (source.name, pk))
    while True:
        # Work on the least-complete table that still has source rows.
        targets = sorted(target_db.tables.values(),
                         key=lambda t: t.completeness_score())
        try:
            target = targets.pop(0)
            while not target.source.n_rows:
                target = targets.pop(0)
        except IndexError:  # pop failure, no more tables
            break
        logging.debug("total n_rows in target: %d" %
                      sum((t.n_rows for t in target_db.tables.values())))
        logging.debug("target tables with 0 n_rows: %s" %
                      ", ".join(t.name for t in target_db.tables.values()
                                if not t.n_rows))
        logging.info("lowest completeness score (in %s) at %f" %
                     (target.name, target.completeness_score()))
        if target.completeness_score() > 0.97:
            break
        (source_row, prioritized) = target.source.next_row()
        self.create_row_in(source_row, target_db, target,
                           prioritized=prioritized)
        # Flush periodically to bound memory usage.
        if target_db.pending > self.args.buffer:
            target_db.flush()
    target_db.flush()
def update_sequences(source, target):
    """Set database sequence values to match the source db.

    Needed to avoid subsequent unique key violations after DB build.
    Currently only implemented for postgresql -> postgresql.
    """
    if source.engine.name != 'postgresql' or target.engine.name != 'postgresql':
        return
    # For every sequence in the source DB, build (a) a query that reads its
    # current value and (b) its schema-qualified name.  Renamed from ``qry``
    # so the loop variable below no longer shadows the catalog query.
    seq_qry = """SELECT 'SELECT last_value FROM ' || n.nspname ||
                        '.' || c.relname || ';' AS qry,
                        n.nspname || '.' || c.relname AS qual_name
                 FROM pg_namespace n
                 JOIN pg_class c ON (n.oid = c.relnamespace)
                 WHERE c.relkind = 'S'"""
    for (lastval_qry, qual_name) in list(source.conn.execute(seq_qry)):
        (lastval, ) = source.conn.execute(lastval_qry).first()
        nextval = int(lastval) + 1
        updater = "ALTER SEQUENCE %s RESTART WITH %s;" % (qual_name, nextval)
        target.conn.execute(updater)
        target.conn.execute('commit')
def fraction(n):
    """argparse ``type=`` callable: parse *n* as a float in [0, 1].

    Raises argparse.ArgumentTypeError for out-of-range values so argparse
    reports a clean usage error.  (The original raised
    ``argparse.ArgumentError`` with a single argument, which itself fails
    with TypeError because ArgumentError requires an argument object; the
    message also contradicted the inclusive lower bound.)
    """
    n = float(n)
    if 0 <= n <= 1:
        return n
    raise argparse.ArgumentTypeError(
        'Fraction must be between 0 and 1 (inclusive)')
# Human-readable list of accepted level names; also interpolated into the
# --loglevel help text below, so it stays a plain string.
all_loglevels = "CRITICAL, FATAL, ERROR, DEBUG, INFO, WARN, WARNING"

def loglevel(raw):
    """argparse ``type=`` callable: accept a numeric level or a level name.

    Returns the integer level.  Raises NotImplementedError for names not in
    ``all_loglevels``.
    """
    try:
        return int(raw)
    except ValueError:
        upper = raw.upper()
        # Compare against the parsed set of names.  The original substring
        # test (``upper in all_loglevels``) wrongly accepted fragments such
        # as "AL", which then crashed in getattr() with AttributeError.
        if upper in {name.strip() for name in all_loglevels.split(',')}:
            return getattr(logging, upper)
        raise NotImplementedError('log level "%s" not one of %s' % (raw, all_loglevels))
# Command-line interface.  ``fraction`` and ``loglevel`` (defined above) are
# used as argparse type-callables for validation; ``generate()`` below reads
# the parsed namespace.
argparser = argparse.ArgumentParser(description='Generate consistent subset of a database')
argparser.add_argument('source', help='SQLAlchemy connection string for data origin',
                       type=str)
argparser.add_argument('dest', help='SQLAlchemy connection string for data destination',
                       type=str)
argparser.add_argument('fraction', help='Proportion of rows to create in dest (0.0 to 1.0)',
                       type=fraction)
argparser.add_argument('-l', '--logarithmic', help='Cut row numbers logarithmically; try 0.5 for fraction',
                       action='store_true')
# Rows are buffered and bulk-inserted; see Db.flush().
argparser.add_argument('-b', '--buffer', help='Number of records to store in buffer before flush',
                       type=int, default=1000)
argparser.add_argument('--loglevel', type=loglevel, help='log level (%s)' % all_loglevels,
                       default='INFO')
# May be given multiple times; parsed into args.force_rows by generate().
argparser.add_argument('-f', '--force', help='<table name>:<primary_key_val> to force into dest',
                       type=str.lower, action='append')
argparser.add_argument('-c', '--children',
                       help='Max number of child rows to attempt to pull for each parent row',
                       type=int, default=3)
argparser.add_argument('--schema', help='Non-default schema to include',
                       type=str, action='append', default=[])
argparser.add_argument('--config', help='Path to configuration .json file',
                       type=argparse.FileType('r'))
argparser.add_argument('--exclude-table', '-T', dest='exclude_tables', help='Tables to exclude',
                       type=str, action='append', default=[])
argparser.add_argument('--full-table', '-F', dest='full_tables', help='Tables to include every row of',
                       type=str, action='append', default=[])
argparser.add_argument('-y', '--yes', help='Proceed without stopping for confirmation', action='store_true')
def generate():
    """Command-line entry point: build a consistent subset of the source DB."""
    args = argparser.parse_args()

    # Group the "-f table:pk" specs into {table_name: [pk, ...]}.
    args.force_rows = {}
    for spec in (args.force or []):
        (table_name, pk) = spec.split(':')
        args.force_rows.setdefault(table_name, []).append(pk)

    logging.getLogger().setLevel(args.loglevel)

    schemas = args.schema + [None, ]
    args.config = json.load(args.config) if args.config else {}

    for schema in schemas:
        source = Db(args.source, args, schema=schema)
        target = Db(args.dest, args, schema=schema)
        if set(source.tables.keys()) != set(target.tables.keys()):
            raise Exception('Source and target databases have different tables')
        source.assign_target(target)
        if source.confirm():
            source.create_subset_in(target)
            update_sequences(source, target)
# Script entry point: run the subset generation when invoked directly.
if __name__ == '__main__':
    generate()
|
|
'''
An RMI framework for synapse.
'''
import copy
import getpass
import threading
import threading
import traceback
import synapse.link as s_link
import synapse.async as s_async
import synapse.crypto as s_crypto
import synapse.dyndeps as s_dyndeps
import synapse.eventbus as s_eventbus
import synapse.lib.pki as s_pki
import synapse.lib.queue as s_queue
import synapse.lib.sched as s_sched
import synapse.lib.socket as s_socket
import synapse.lib.threads as s_threads
from synapse.common import *
from synapse.compat import queue
def openurl(url, **opts):
    '''
    Construct a telepath proxy from a url.

    Example:

        foo = openurl('tcp://1.2.3.4:90/foo')

        foo.dostuff(30) # call remote method
    '''
    # Any extra keyword options are folded into the link tufo; the optional
    # 'plex' option is passed through to the Proxy instead.
    plex = opts.pop('plex', None)
    link = s_link.chopLinkUrl(url)
    link[1].update(opts)
    relay = s_link.getLinkRelay(link)
    return Proxy(relay, plex=plex)
def openlink(link):
    '''
    Construct a telepath proxy from a link tufo.

    Example:

        foo = openlink(link)
        foo.bar(20)
    '''
    # Build the relay for the link and hand it to a fresh proxy.
    return Proxy(s_link.getLinkRelay(link))
def evalurl(url, **opts):
    '''
    Construct either a local object or a telepath proxy.

    WARNING: this API enables ctor:// proto which uses eval!
    ( trusted inputs only )

    Example:

        item0 = evalurl('tcp://1.2.3.4:90/foo')
        item1 = evalurl('ctor://foo.bar.baz("woot",y=20)')
    '''
    if '://' not in url:
        raise BadUrl(url)

    scheme, therest = url.split('://', 1)
    if scheme != 'ctor':
        # any non-ctor scheme is a normal telepath url
        return openurl(url, **opts)

    # ctor:// constructs a local object via dynamic eval (trusted only)
    return s_dyndeps.runDynEval(therest, locs=opts.get('locs'))
class Method:
    '''
    Callable wrapper for one remote method name on a Proxy.

    Calling the instance transmits the task; with an ``ondone`` callback the
    async job is returned, otherwise the call blocks for the result.
    '''
    def __init__(self, proxy, meth):
        self.meth = meth
        self.proxy = proxy

    def __call__(self, *args, **kwargs):
        ondone = kwargs.pop('ondone', None)
        job = self.proxy._tx_call((self.meth, args, kwargs), ondone=ondone)
        if ondone is not None:
            return job
        return self.proxy.sync(job)
# Event names handled locally on the proxy and never forwarded to the
# remote daemon (checked in Proxy.on() and Proxy.fire()).
telelocal = set(['tele:sock:init'])
class Proxy(s_eventbus.EventBus):
    '''
    The telepath proxy provides "pythonic" access to remote objects.

    ( you most likely want openurl() or openlink() )

    NOTE:

        *all* locals in this class *must* have _tele_ prefixes to prevent
        accidental deref of something with the same name in code using it
        under the assumption it's something else....

    '''
    def __init__(self, relay, plex=None):
        # relay: LinkRelay used to connect (and reconnect) to the daemon.
        # plex: optional shared socket multiplexor; created here if omitted.
        s_eventbus.EventBus.__init__(self)
        self.onfini( self._onProxyFini )

        # NOTE: the _tele_ prefixes are designed to prevent accidental
        # derefs with overlapping names from working correctly

        self._tele_sid = None       # telepath session id (set by _teleSynAck)
        self._tele_pki = None       # pki store, when the link enables pki

        self._tele_q = s_queue.Queue()   # inbound link messages (fed by plex)
        self._tele_pushed = {}           # name -> local object shared via push()

        if plex == None:
            plex = s_socket.Plex()

        self._tele_plex = plex

        self._tele_boss = s_async.Boss()

        # job completion and server->client calls arrive as bus events
        self._raw_on('job:done', self._tele_boss.dist )
        self._raw_on('tele:call', self._onTeleCall )

        poolmax = relay.getLinkProp('poolmax', -1)
        poolsize = relay.getLinkProp('poolsize', 0)

        # consumer thread draining _tele_q into the event bus
        self._tele_cthr = self.consume( self._tele_q )
        # worker pool for servicing inbound tele:call requests
        self._tele_pool = s_threads.Pool(size=poolsize, maxsize=poolmax)

        self._tele_ons = set()      # remotely subscribed event names

        self._tele_sock = None
        self._tele_relay = relay    # LinkRelay()

        # obj name is path minus leading "/"
        self._tele_name = relay.link[1].get('path')[1:]

        if relay.getLinkProp('pki'):

            #TODO pkiurl

            self._tele_pki = relay.getLinkProp('pkistor')
            if self._tele_pki == None:
                self._tele_pki = s_pki.getUserPki()

        self._initTeleSock()

    def _raw_on(self, name, func):
        # bypass the remote-subscription logic in on() below
        return s_eventbus.EventBus.on(self, name, func)

    def _raw_off(self, name, func):
        # bypass the remote-unsubscription logic in off() below
        return s_eventbus.EventBus.off(self, name, func)

    def on(self, name, func):
        # Non-local events are also subscribed on the remote side so the
        # daemon forwards them to this proxy.
        if name not in telelocal:
            self._tele_ons.add(name)
            job = self._txTeleJob('tele:on', events=[name], name=self._tele_name)
            self.sync(job)
        return s_eventbus.EventBus.on(self, name, func)

    def off(self, name, func):
        # Drop the remote subscription, then the local handler.
        self._tele_ons.discard(name)
        job = self._txTeleJob('tele:off', evt=name, name=self._tele_name)
        self.sync(job)
        return s_eventbus.EventBus.off(self, name, func)

    def fire(self, name, **info):
        if name in telelocal:
            return s_eventbus.EventBus.fire(self, name, **info)

        # events fired on a proxy go through the remote first...
        return self.call('fire', name, **info)

    def call(self, name, *args, **kwargs):
        '''
        Call a shared method as a job.

        Example:

            job = proxy.call('getFooByBar',bar)

            # ... do other stuff ...

            ret = proxy.sync(job)

        '''
        ondone = kwargs.pop('ondone',None)
        task = (name, args, kwargs)
        return self._tx_call(task,ondone=ondone)

    def callx(self, name, task, ondone=None):
        '''
        Call a method on a specific shared object as a job.

        Example:

            # task is (<method>,<args>,<kwargs>)
            task = ('getFooByBar', (bar,), {} )

            job = proxy.callx('woot',task)
            ret = proxy.sync(job)

        '''
        return self._txTeleJob('tele:call', name=name, task=task, ondone=ondone)

    def push(self, name, item):
        '''
        Push access to an object to the daemon, allowing other clients access.

        Example:

            prox = s_telepath.openurl('tcp://127.0.0.1/')
            prox.push( 'bar', Bar() )

        '''
        job = self._txTeleJob('tele:push', name=name)
        # remember the local object so inbound tele:call can dispatch to it
        self._tele_pushed[ name ] = item
        return self.sync(job)

    def _tx_call(self, task, ondone=None):
        # transmit a (meth, args, kwargs) task against our named object
        return self._txTeleJob('tele:call', name=self._tele_name, task=task, ondone=ondone)

    def sync(self, job, timeout=None):
        '''
        Wait on a given job and return/raise its result.

        Example:

            job = proxy.call('woot', 10, bar=20)
            ret = proxy.sync(job)

        '''
        self._waitTeleJob(job,timeout=timeout)
        return s_async.jobret(job)

    def _waitTeleJob(self, job, timeout=None):
        # dont block the consumer thread, consume events
        # until the job completes...
        if threading.currentThread() == self._tele_cthr:
            return self._fakeConsWait(job, timeout=timeout)

        if not self._tele_boss.wait(job[0], timeout=timeout):
            raise HitMaxTime()

    def _fakeConsWait(self, job, timeout=None):
        # a wait like function for the consumer thread
        # which continues to consume events until a job
        # has been completed.
        maxtime = None
        if timeout != None:
            maxtime = time.time() + timeout

        while not job[1].get('done'):
            if maxtime != None and time.time() >= maxtime:
                raise HitMaxTime()

            mesg = self._tele_q.get()
            self.dist(mesg)

    def _initTeleSock(self):
        # (Re)connect the underlying socket; returns False when the proxy is
        # finished or the relay cannot connect (caller may retry).
        if self.isfini:
            return False

        if self._tele_sock != None:
            self._tele_sock.fini()

        self._tele_sock = self._tele_relay.connect()
        if self._tele_sock == None:
            return False

        # generated on the socket by the multiplexor ( and queued )
        self._tele_sock.on('link:sock:mesg', self._onLinkSockMesg )

        self._tele_sock.onfini( self._onSockFini )

        self._tele_plex.addPlexSock( self._tele_sock )

        self._teleSynAck()

        # let client code do stuff on reconnect
        self.fire('tele:sock:init', sock=self._tele_sock)

        return True

    def _onLinkSockMesg(self, event):
        # MULTIPLEXOR: DO NOT BLOCK
        mesg = event[1].get('mesg')
        self._tele_q.put( mesg )

    def _onSockFini(self):
        # socket dropped: attempt to reconnect, retrying once per second
        if self.isfini:
            return

        try:
            if not self._initTeleSock():
                sched = s_sched.getGlobSched()
                sched.insec( 1, self._onSockFini )
        except Exception as e:
            sched = s_sched.getGlobSched()
            sched.insec( 1, self._onSockFini )

    def _onTeleCall(self, mesg):
        # dont block consumer thread... task pool
        self._tele_pool.call( self._runTeleCall, mesg )

    def _runTeleCall(self, mesg):
        # service a server-initiated call against an object we push()ed
        jid = mesg[1].get('jid')
        name = mesg[1].get('name')
        task = mesg[1].get('task')
        suid = mesg[1].get('suid')

        retinfo = dict(suid=suid,jid=jid)

        try:
            item = self._tele_pushed.get(name)
            if item == None:
                return self._txTeleSock('tele:retn', err='NoSuchObj', errmsg=name, **retinfo)

            meth,args,kwargs = task
            func = getattr(item,meth,None)
            if func == None:
                return self._txTeleSock('tele:retn', err='NoSuchMeth', errmsg=meth, **retinfo)

            self._txTeleSock('tele:retn', ret=func(*args,**kwargs), **retinfo )

        except Exception as e:
            # ship the exception info back instead of a return value
            retinfo.update( excinfo(e) )
            return self._txTeleSock('tele:retn', **retinfo)

    def _getTeleSock(self):
        # return the live socket, reconnecting first if it has dropped
        if self._tele_sock.isfini:
            self._initTeleSock()

        return self._tele_sock

    def _getUserCert(self):
        '''
        If pki is enabled, return the cert for link username as iden.
        '''
        if self._tele_pki == None:
            return None

        iden = self._tele_relay.getLinkProp('user')
        return self._tele_pki.getTokenCert(iden)

    def _teleSynAck(self):
        '''
        Send a tele:syn to get a telepath session
        '''
        chal = os.urandom(16)
        cert = self._getUserCert()
        host = self._tele_relay.getLinkProp('host')

        # NOTE(review): msginfo is built but never used below -- confirm
        # whether it was meant to be passed to _txTeleJob.
        msginfo = dict(sid=self._tele_sid)
        job = self._txTeleJob('tele:syn', sid=self._tele_sid, chal=chal, cert=cert, host=host )

        synresp = self.sync(job, timeout=4)

        # we require the server to auth...
        if self._tele_pki:

            cert = synresp.get('cert')
            if cert == None:
                raise Exception('NoPkiCert')

            tokn = self._tele_pki.loadCertToken(cert)
            if tokn == None:
                # FIXME pki exceptions...
                raise Exception('BadPkiCert')

            sign = synresp.get('sign')
            if sign == None:
                raise Exception('NoPkiSign')

            # the server must have signed our challenge bytes
            if not self._tele_pki.isValidSign(tokn[0],sign,chal):
                raise Exception('BadPkiSign')

            # negotiate a session key and wrap the socket with rc4
            ckey = os.urandom(16)
            skey = self._tele_pki.encToIden(tokn[0], ckey)

            job = self._txTeleJob('tele:skey', iden=tokn[0], algo='rc4', skey=skey)
            if not self.sync(job):
                raise Exception('BadSetSkey')

            xform = s_crypto.Rc4Skey(ckey)
            self._tele_sock.addSockXform(xform)

        self._tele_sid = synresp.get('sess')

        # re-subscribe any remote event handlers after (re)connect
        events = list(self._tele_ons)
        if events:
            job = self._txTeleJob('tele:on', events=events, name=self._tele_name)
            self.sync( job )

    def _txTeleJob(self, msg, **msginfo):
        '''
        Transmit a message as a job ( add jid to mesg ) and return job.
        '''
        ondone = msginfo.pop('ondone',None)
        job = self._tele_boss.initJob(ondone=ondone)

        msginfo['jid'] = job[0]
        self._txTeleSock(msg,**msginfo)

        return job

    def _txTeleSock(self, msg, **msginfo):
        '''
        Send a mesg over the socket and include our session id.
        '''
        msginfo['sid'] = self._tele_sid
        sock = self._getTeleSock()
        sock.tx( (msg,msginfo) )

    def _onProxyFini(self):
        # best-effort goodbye, then tear down pool/boss/socket
        if not self._tele_sock.isfini:
            self._tele_sock.tx( tufo('tele:fin', sid=self._tele_sid) )

        self._tele_pool.fini()
        self._tele_boss.fini()
        self._tele_sock.fini()

    def __getattr__(self, name):
        # any unknown attribute becomes a remote Method (cached on self)
        meth = Method(self, name)
        setattr(self,name,meth)
        return meth

    # some methods to avoid round trips...
    def __nonzero__(self):
        return True

    def __eq__(self, obj):
        return id(self) == id(obj)

    def __ne__(self, obj):
        return not self.__eq__(obj)
|
|
""" Vanilla RNN
@author Graham Taylor
"""
import numpy as np
import theano
import theano.tensor as T
from sklearn.base import BaseEstimator
import logging
import time
import os
import datetime
import pickle as pickle
import math
import matplotlib.pyplot as plt
# Interactive plotting: figures update without blocking the training code.
plt.ion()

# Theano compilation mode used by every theano.function() call below.
mode = theano.Mode(linker='cvm')
#mode = 'DEBUG_MODE'
class RNN(object):
    """ Recurrent neural network class

    Supported output types:
    real : linear output units, use mean-squared error
    binary : binary output units, use cross-entropy error
    softmax : single softmax out, use cross-entropy error
    """
    def __init__(self, input, n_in, n_hidden, n_out, activation=T.tanh,
                 output_type='real', use_symbolic_softmax=False):
        # input: symbolic matrix, first dimension is time (n_steps x n_in)
        # n_in/n_hidden/n_out: layer sizes
        # activation: elementwise hidden nonlinearity
        # output_type: one of 'real', 'binary', 'softmax' (see class docstring)
        self.input = input
        self.activation = activation
        self.output_type = output_type

        # when using HF, SoftmaxGrad.grad is not implemented
        # use a symbolic softmax which is slightly slower than T.nnet.softmax
        # See: http://groups.google.com/group/theano-dev/browse_thread/
        # thread/3930bd5a6a67d27a
        if use_symbolic_softmax:
            def symbolic_softmax(x):
                e = T.exp(x)
                return e / T.sum(e, axis=1).dimshuffle(0, 'x')
            self.softmax = symbolic_softmax
        else:
            self.softmax = T.nnet.softmax

        # recurrent weights as a shared variable
        W_init = np.asarray(np.random.uniform(size=(n_hidden, n_hidden),
                                              low=-.01, high=.01),
                            dtype=theano.config.floatX)
        self.W = theano.shared(value=W_init, name='W')

        # input to hidden layer weights
        W_in_init = np.asarray(np.random.uniform(size=(n_in, n_hidden),
                                                 low=-.01, high=.01),
                               dtype=theano.config.floatX)
        self.W_in = theano.shared(value=W_in_init, name='W_in')

        # hidden to output layer weights
        W_out_init = np.asarray(np.random.uniform(size=(n_hidden, n_out),
                                                  low=-.01, high=.01),
                                dtype=theano.config.floatX)
        self.W_out = theano.shared(value=W_out_init, name='W_out')

        # initial hidden state and biases start at zero
        h0_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
        self.h0 = theano.shared(value=h0_init, name='h0')

        bh_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
        self.bh = theano.shared(value=bh_init, name='bh')

        by_init = np.zeros((n_out,), dtype=theano.config.floatX)
        self.by = theano.shared(value=by_init, name='by')

        self.params = [self.W, self.W_in, self.W_out, self.h0,
                       self.bh, self.by]

        # for every parameter, we maintain it's last update
        # the idea here is to use "momentum"
        # keep moving mostly in the same direction
        self.updates = {}
        for param in self.params:
            init = np.zeros(param.get_value(borrow=True).shape,
                            dtype=theano.config.floatX)
            self.updates[param] = theano.shared(init)

        # recurrent function (using tanh activation function) and linear output
        # activation function
        def step(x_t, h_tm1):
            h_t = self.activation(T.dot(x_t, self.W_in) + \
                                  T.dot(h_tm1, self.W) + self.bh)
            y_t = T.dot(h_t, self.W_out) + self.by
            return h_t, y_t

        # the hidden state `h` for the entire sequence, and the output for the
        # entire sequence `y` (first dimension is always time)
        [self.h, self.y_pred], _ = theano.scan(step,
                                               sequences=self.input,
                                               outputs_info=[self.h0, None])

        # L1 norm ; one regularization option is to enforce L1 norm to
        # be small
        self.L1 = 0
        self.L1 += abs(self.W.sum())
        self.L1 += abs(self.W_in.sum())
        self.L1 += abs(self.W_out.sum())

        # square of L2 norm ; one regularization option is to enforce
        # square of L2 norm to be small
        self.L2_sqr = 0
        self.L2_sqr += (self.W ** 2).sum()
        self.L2_sqr += (self.W_in ** 2).sum()
        self.L2_sqr += (self.W_out ** 2).sum()

        # choose the loss (and, for classifiers, the output mapping)
        if self.output_type == 'real':
            self.loss = lambda y: self.mse(y)
        elif self.output_type == 'binary':
            # push through sigmoid
            self.p_y_given_x = T.nnet.sigmoid(self.y_pred)  # apply sigmoid
            self.y_out = T.round(self.p_y_given_x)  # round to {0,1}
            self.loss = lambda y: self.nll_binary(y)
        elif self.output_type == 'softmax':
            # push through softmax, computing vector of class-membership
            # probabilities in symbolic form
            self.p_y_given_x = self.softmax(self.y_pred)

            # compute prediction as class whose probability is maximal
            self.y_out = T.argmax(self.p_y_given_x, axis=-1)
            self.loss = lambda y: self.nll_multiclass(y)
        else:
            raise NotImplementedError

    def mse(self, y):
        # error between output and target
        return T.mean((self.y_pred - y) ** 2)

    def nll_binary(self, y):
        # negative log likelihood based on binary cross entropy error
        return T.mean(T.nnet.binary_crossentropy(self.p_y_given_x, y))

    def nll_multiclass(self, y):
        # negative log likelihood based on multiclass cross entropy error
        # y.shape[0] is (symbolically) the number of rows in y, i.e.,
        # number of time steps (call it T) in the sequence
        # T.arange(y.shape[0]) is a symbolic vector which will contain
        # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
        # Log-Probabilities (call it LP) with one row per example and
        # one column per class LP[T.arange(y.shape[0]),y] is a vector
        # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
        # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
        # the mean (across minibatch examples) of the elements in v,
        # i.e., the mean log-likelihood across the minibatch.
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])

    def errors(self, y):
        """Return a float representing the number of errors in the sequence
        over the total number of examples in the sequence ; zero one
        loss over the size of the sequence

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
        correct label
        """
        # check if y has same dimension of y_pred
        if y.ndim != self.y_out.ndim:
            raise TypeError('y should have the same shape as self.y_out',
                            ('y', y.type, 'y_out', self.y_out.type))

        if self.output_type in ('binary', 'softmax'):
            # check if y is of the correct datatype
            if y.dtype.startswith('int'):
                # the T.neq operator returns a vector of 0s and 1s, where 1
                # represents a mistake in prediction
                return T.mean(T.neq(self.y_out, y))
            else:
                raise NotImplementedError()
        # NOTE(review): for output_type 'real' this method implicitly
        # returns None -- confirm callers never use errors() with 'real'.
class MetaRNN(BaseEstimator):
    """sklearn-style estimator wrapping :class:`RNN`.

    Holds the hyperparameters, builds the compiled Theano functions
    (``ready``), supports pickling (``__getstate__``/``__setstate__``,
    ``save``/``load``) and runs the SGD-with-momentum training loop (``fit``).

    NOTE(review): this class is Python-2-era code: ``file()``, ``xrange``
    and ``iterator.next()`` below will not run on Python 3, and
    ``self._get_params()`` matches only very old scikit-learn versions
    (modern BaseEstimator exposes ``get_params()``) -- confirm the intended
    runtime before reuse.
    """
    def __init__(self, n_in=5, n_hidden=50, n_out=5, learning_rate=0.01,
                 n_epochs=100, L1_reg=0.00, L2_reg=0.00005, learning_rate_decay=1,
                 activation='tanh', output_type='real',
                 final_momentum=0.9, initial_momentum=0.5,
                 momentum_switchover=5,
                 use_symbolic_softmax=False):
        # plain-value hyperparameters (coerced so cloning/pickling is stable)
        self.n_in = int(n_in)
        self.n_hidden = int(n_hidden)
        self.n_out = int(n_out)
        self.learning_rate = float(learning_rate)
        self.learning_rate_decay = float(learning_rate_decay)
        self.n_epochs = int(n_epochs)
        self.L1_reg = float(L1_reg)
        self.L2_reg = float(L2_reg)
        self.activation = activation
        self.output_type = output_type
        self.initial_momentum = float(initial_momentum)
        self.final_momentum = float(final_momentum)
        self.momentum_switchover = int(momentum_switchover)
        self.use_symbolic_softmax = use_symbolic_softmax
        self.ready()

    def ready(self):
        """Build symbolic variables, the RNN graph and prediction functions."""
        # input (where first dimension is time)
        self.x = T.matrix()
        # target (where first dimension is time)
        if self.output_type == 'real':
            self.y = T.matrix(name='y', dtype=theano.config.floatX)
        elif self.output_type == 'binary':
            self.y = T.matrix(name='y', dtype='int32')
        elif self.output_type == 'softmax':  # only vector labels supported
            self.y = T.vector(name='y', dtype='int32')
        else:
            raise NotImplementedError
        # initial hidden state of the RNN
        self.h0 = T.vector()
        # learning rate
        self.lr = T.scalar()

        # map the activation name onto an elementwise symbolic function
        if self.activation == 'tanh':
            activation = T.tanh
        elif self.activation == 'sigmoid':
            activation = T.nnet.sigmoid
        elif self.activation == 'relu':
            activation = lambda x: x * (x > 0)
        elif self.activation == 'cappedrelu':
            activation = lambda x: T.minimum(x * (x > 0), 6)
        else:
            raise NotImplementedError

        self.rnn = RNN(input=self.x, n_in=self.n_in,
                       n_hidden=self.n_hidden, n_out=self.n_out,
                       activation=activation, output_type=self.output_type,
                       use_symbolic_softmax=self.use_symbolic_softmax)

        # compiled prediction functions depend on the output type
        if self.output_type == 'real':
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=self.rnn.y_pred,
                                           mode=mode)
        elif self.output_type == 'binary':
            self.predict_proba = theano.function(inputs=[self.x, ],
                                                 outputs=self.rnn.p_y_given_x, mode=mode)
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=T.round(self.rnn.p_y_given_x),
                                           mode=mode)
        elif self.output_type == 'softmax':
            self.predict_proba = theano.function(inputs=[self.x, ],
                                                 outputs=self.rnn.p_y_given_x, mode=mode)
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=self.rnn.y_out, mode=mode)
        else:
            raise NotImplementedError

    def shared_dataset(self, data_xy):
        """ Load the dataset into shared variables """
        data_x, data_y = data_xy
        shared_x = theano.shared(np.asarray(data_x,
                                            dtype=theano.config.floatX))
        shared_y = theano.shared(np.asarray(data_y,
                                            dtype=theano.config.floatX))
        # classifiers need integer targets; cast the float-stored values back
        if self.output_type in ('binary', 'softmax'):
            return shared_x, T.cast(shared_y, 'int32')
        else:
            return shared_x, shared_y

    def __getstate__(self):
        """ Return state sequence."""
        # NOTE(review): _get_params() exists only in very old scikit-learn;
        # modern versions expose get_params() -- verify the pinned version.
        params = self._get_params()  # parameters set in constructor
        weights = [p.get_value() for p in self.rnn.params]
        state = (params, weights)
        return state

    def _set_weights(self, weights):
        """ Set fittable parameters from weights sequence.

        Parameters must be in the order defined by self.params:
            W, W_in, W_out, h0, bh, by
        """
        i = iter(weights)
        for param in self.rnn.params:
            # NOTE(review): i.next() is Python 2 only (py3: next(i))
            param.set_value(i.next())

    def __setstate__(self, state):
        """ Set parameters from state sequence.

        Parameters must be in the order defined by self.params:
            W, W_in, W_out, h0, bh, by
        """
        params, weights = state
        self.set_params(**params)
        # rebuild the compiled functions before loading the weights
        self.ready()
        self._set_weights(weights)

    def save(self, fpath='.', fname=None):
        """ Save a pickled representation of Model state. """
        fpathstart, fpathext = os.path.splitext(fpath)
        if fpathext == '.pkl':
            # User supplied an absolute path to a pickle file
            fpath, fname = os.path.split(fpath)
        elif fname is None:
            # Generate filename based on date
            date_obj = datetime.datetime.now()
            date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')
            class_name = self.__class__.__name__
            fname = '%s.%s.pkl' % (class_name, date_str)

        fabspath = os.path.join(fpath, fname)

        logging.info("Saving to %s ..." % fabspath)
        file = open(fabspath, 'wb')
        state = self.__getstate__()
        pickle.dump(state, file, protocol=pickle.HIGHEST_PROTOCOL)
        file.close()

    def load(self, path):
        """ Load model parameters from path. """
        logging.info("Loading from %s ..." % path)
        file = open(path, 'rb')
        state = pickle.load(file)
        self.__setstate__(state)
        file.close()

    def fit(self, X_train, Y_train, X_test=None, Y_test=None,
            validation_frequency=100):
        """ Fit model

        Pass in X_test, Y_test to compute test error and report during
        training.

        X_train : ndarray (n_seq x n_steps x n_in)
        Y_train : ndarray (n_seq x n_steps x n_out)

        validation_frequency : int
            in terms of number of sequences (or number of weight updates)
        """
        # NOTE(review): py2 file() and a hard-coded relative output path --
        # training progress is appended to this fixed log file.
        f = file('../RNN-data/trainProcess/trainOutput-b15-2220-720-60.txt','a+')
        if X_test is not None:
            assert(Y_test is not None)
            self.interactive = True
            test_set_x, test_set_y = self.shared_dataset((X_test, Y_test))
        else:
            self.interactive = False

        train_set_x, train_set_y = self.shared_dataset((X_train, Y_train))

        n_train = train_set_x.get_value(borrow=True).shape[0]
        if self.interactive:
            n_test = test_set_x.get_value(borrow=True).shape[0]

        ######################
        # BUILD ACTUAL MODEL #
        ######################
        logging.info('... building the model')

        index = T.lscalar('index')    # index to a case
        # learning rate (may change)
        l_r = T.scalar('l_r', dtype=theano.config.floatX)
        mom = T.scalar('mom', dtype=theano.config.floatX)  # momentum

        # full objective: data loss plus L1/L2 regularization penalties
        cost = self.rnn.loss(self.y) \
            + self.L1_reg * self.rnn.L1 \
            + self.L2_reg * self.rnn.L2_sqr

        compute_train_error = theano.function(inputs=[index, ],
                                              outputs=self.rnn.loss(self.y),
                                              givens={
                                                  self.x: train_set_x[index],
                                                  self.y: train_set_y[index]},
                                              mode=mode)

        if self.interactive:
            compute_test_error = theano.function(inputs=[index, ],
                                                 outputs=self.rnn.loss(self.y),
                                                 givens={
                                                     self.x: test_set_x[index],
                                                     self.y: test_set_y[index]},
                                                 mode=mode)

        # compute the gradient of cost with respect to theta = (W, W_in, W_out)
        # gradients on the weights using BPTT
        gparams = []
        for param in self.rnn.params:
            gparam = T.grad(cost, param)
            gparams.append(gparam)

        # momentum SGD: each parameter also updates its stored last step
        updates = {}
        for param, gparam in zip(self.rnn.params, gparams):
            weight_update = self.rnn.updates[param]
            upd = mom * weight_update - l_r * gparam
            updates[weight_update] = upd
            updates[param] = param + upd

        # compiling a Theano function `train_model` that returns the
        # cost, but in the same time updates the parameter of the
        # model based on the rules defined in `updates`
        train_model = theano.function(inputs=[index, l_r, mom],
                                      outputs=cost,
                                      updates=updates,
                                      givens={
                                          self.x: train_set_x[index],
                                          self.y: train_set_y[index]},
                                      mode=mode)

        ###############
        # TRAIN MODEL #
        ###############
        logging.info('... training')
        epoch = 0

        while (epoch < self.n_epochs):
            epoch = epoch + 1
            for idx in xrange(n_train):
                # switch from initial to final momentum after a few epochs
                effective_momentum = self.final_momentum \
                    if epoch > self.momentum_switchover \
                    else self.initial_momentum
                example_cost = train_model(idx, self.learning_rate,
                                           effective_momentum)

                # iteration number (how many weight updates have we made?)
                # epoch is 1-based, index is 0 based
                iter = (epoch - 1) * n_train + idx + 1

                if iter % validation_frequency == 0:
                    # compute loss on training set
                    train_losses = [compute_train_error(i)
                                    for i in xrange(n_train)]
                    this_train_loss = np.mean(train_losses)

                    if self.interactive:
                        test_losses = [compute_test_error(i)
                                       for i in xrange(n_test)]
                        this_test_loss = np.mean(test_losses)

                        f.write('epoch %i, seq %i/%i, tr loss %f '
                                'te loss %f lr: %f \n' % \
                                (epoch, idx + 1, n_train,
                                 this_train_loss, this_test_loss, self.learning_rate))
                        print('epoch %i, seq %i/%i, tr loss %f '
                              'te loss %f lr: %f' % \
                              (epoch, idx + 1, n_train,
                               this_train_loss, this_test_loss, self.learning_rate))
                    else:
                        f.write('epoch %i, seq %i/%i, train loss %f '
                                'lr: %f \n' % \
                                (epoch, idx + 1, n_train, this_train_loss,
                                 self.learning_rate))
                        print('epoch %i, seq %i/%i, train loss %f '
                              'lr: %f' % \
                              (epoch, idx + 1, n_train, this_train_loss,
                               self.learning_rate))

            # anneal the learning rate once per epoch
            self.learning_rate *= self.learning_rate_decay
        f.close()
def test_real():
    """ Test RNN with real-valued outputs.

    Trains on a synthetic delayed-copy task (each target channel echoes an
    input channel 1 or 2 steps later), plots input vs. prediction, and
    prints the per-column RMS error on the first sequence.
    """
    n_hidden = 200
    n_in = 20
    n_out = 5
    n_steps = 10
    n_seq = 100

    np.random.seed(0)
    # simple lag test
    seq = np.random.randn(n_seq, n_steps, n_in)
    targets = np.zeros((n_seq, n_steps, n_out))

    targets[:, 1:, 0] = seq[:, :-1, 3]  # delayed 1
    targets[:, 1:, 1] = seq[:, :-1, 2]  # delayed 1
    targets[:, 2:, 2] = seq[:, :-2, 0]  # delayed 2

    # small observation noise on the targets
    targets += 0.01 * np.random.standard_normal(targets.shape)

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    learning_rate=0.001, learning_rate_decay=0.999,
                    n_epochs=400, activation='tanh')

    model.fit(seq, targets, validation_frequency=1000)

    [seqNum,lineNum,colNum] = targets.shape
    print(seqNum,lineNum,colNum)
    error = [0 for i in range(colNum)]

    # plot the first sequence: input on top, true vs predicted below
    plt.close('all')
    fig = plt.figure()
    ax1 = plt.subplot(211)
    plt.plot(seq[0])
    ax1.set_title('input')

    ax2 = plt.subplot(212)
    true_targets = plt.plot(targets[0])

    guess = model.predict(seq[0])
    guessed_targets = plt.plot(guess, linestyle='--')
    for i, x in enumerate(guessed_targets):
        x.set_color(true_targets[i].get_color())
    ax2.set_title('solid: true output, dashed: model output')

    # per-column RMS error over the first sequence
    dif = abs(guess - targets[0])
    [linedif,coldif] = dif.shape
    print(linedif,coldif)
    errorsum = 0
    for i in range (colNum):
        # NOTE(review): local name shadows the builtin sum()
        sum = 0
        for j in range (lineNum):
            sum += dif[j][i] ** 2
        error[i] = math.sqrt(sum/lineNum)
        errorsum += error[i]
        print(error[i])
    print("average error = ", errorsum/colNum)
def test_binary(multiple_out=False, n_epochs=250):
    """Train an RNN with binary outputs on flip-flop trace data and dump
    per-bit error maps for two test sets.

    Reads whitespace-separated integer rows from the train/test trace files,
    groups every ``n_steps`` physical lines into one sequence, splits each
    row into ``n_in`` input columns and ``n_out`` target columns, trains a
    ``MetaRNN`` and writes:

    * ``freqMatrix-b12.txt`` -- per-cell flip counts between the clean and
      fault-injected training targets, and
    * two ``errorRate-b12-*.txt`` bitmaps where "1 " marks a predicted bit
      more than 0.5 away from the expected bit.

    :param multiple_out: unused; kept for call-site compatibility.
    :param n_epochs: number of training epochs passed to ``MetaRNN``.
    """
    n_hidden = 100
    n_in = 5        # input bits per time step
    n_out = 121     # target bits per time step
    n_steps = 40    # physical lines per sequence
    n_seq = 1500    # NOTE(review): unused -- sequence count comes from the files
    np.random.seed(0)

    base_dir = os.path.dirname(__file__)

    def _read_sequences(path):
        """Parse *path* into an array of sequences.

        Every non-empty line contributes one integer row; after each group of
        ``n_steps`` physical lines the accumulated rows are flushed as one
        sequence (blank lines therefore act as padding, exactly like the
        original triplicated reader this helper replaces).
        """
        seqlist = []
        rows = []
        count = 0
        # 'with' guarantees the handle is closed (the original leaked it).
        with open(path) as fh:
            for line in fh:
                count += 1
                row = [int(x) for x in line.split()]
                if row:
                    rows.append(row)
                if count == n_steps:
                    count = 0
                    if rows:
                        seqlist.append(rows)
                        rows = []
        return np.asarray(seqlist)

    # Training data: clean traces.
    seqarray = _read_sequences(
        os.path.join(base_dir, "../RNN-data/traindata/inputdata-b12-50-40-30-y.txt"))
    seq = seqarray[:, :, :n_in]
    targets = seqarray[:, :, n_in:]

    # Test set 1: clean traces.
    seqarrayTest1 = _read_sequences(
        os.path.join(base_dir, '../RNN-data/testdata/inputdata-b12-20-40-25.txt'))
    seqTest1 = seqarrayTest1[:, :, :n_in]
    targetsTest1 = seqarrayTest1[:, :, n_in:]

    # Test set 2: fault-injected traces.
    seqarrayTest2 = _read_sequences(
        os.path.join(base_dir, '../RNN-data/testdata/inputerror-b12-20-40-25-y.txt'))
    seqTest2 = seqarrayTest2[:, :, :n_in]
    targetsTest2 = seqarrayTest2[:, :, n_in:]

    # Fault-injected training targets, only used for the flip-frequency matrix.
    seqarrayError = _read_sequences(
        os.path.join(base_dir, '../RNN-data/traindata/inputerror-b12-50-40-30-y.txt'))
    targetsError = seqarrayError[:, :, n_in:]

    # ---- Flip-frequency matrix -------------------------------------------
    # NOTE(review): seqNum comes from the *test* target shape but indexes the
    # *training* targets below -- this silently assumes the training files
    # hold at least as many sequences; confirm against the data files.
    [seqNum, lineNum, colNum] = targetsTest1.shape
    freqArrayNP = np.zeros((lineNum, colNum), dtype=int)
    for i in range(seqNum):
        freqArrayNP = freqArrayNP + abs(targets[i] - targetsError[i])
    # Original output format kept: counts are concatenated with no separator.
    with open('../RNN-data/matrix/freqMatrix-b12.txt', 'a+') as fmatrix:
        for i in range(lineNum):
            for j in range(colNum):
                fmatrix.write(str(freqArrayNP[i, j]))
            fmatrix.write("\n")

    # ---- Train -----------------------------------------------------------
    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    learning_rate=0.085, learning_rate_decay=1.005,
                    n_epochs=n_epochs, activation='tanh', output_type='binary')
    model.fit(seq, targets, seqTest1, targetsTest1, validation_frequency=1000)

    def _write_error_bitmap(path, seqsX, targetsX):
        """Append a "0 "/"1 " bitmap of prediction errors (>0.5 off) to *path*.

        The first time step of every sequence is skipped, as in the original
        code. Iterates ``seqNum`` sequences (taken from test set 1) for both
        test sets, matching the original behaviour.
        """
        with open(path, 'a+') as ferr:
            for k in range(seqNum):
                guess = model.predict_proba(seqsX[k])
                dif = abs(guess - targetsX[k])
                lineDif, colDif = dif.shape
                for i in range(1, lineDif):
                    for j in range(colDif):
                        ferr.write("1 " if dif[i][j] > 0.5 else "0 ")
                    ferr.write("\n")

    _write_error_bitmap('errorRate/errorRate-b12-no.txt', seqTest1, targetsTest1)
    _write_error_bitmap('errorRate/errorRate-b12-single.txt', seqTest2, targetsTest2)
def test_softmax(n_epochs=250):
    """Train an RNN with a softmax output on a synthetic 3-class lag task
    and plot the class probabilities it assigns.

    Labeling rule (lag of two steps): class 1 when input dim 3 exceeds
    dim 0 (two steps earlier) by more than ``thresh``, class 2 when it is
    below by more than ``thresh``, else class 0.

    :param n_epochs: number of training epochs passed to ``MetaRNN``.
    """
    n_hidden = 10
    n_in = 5
    n_steps = 10
    n_seq = 100
    n_classes = 3
    n_out = n_classes  # restricted to single softmax per time step
    np.random.seed(0)
    seq = np.random.randn(n_seq, n_steps, n_in)
    # ``np.int`` was removed from NumPy; the builtin is what it aliased.
    targets = np.zeros((n_seq, n_steps), dtype=int)
    thresh = 0.5
    targets[:, 2:][seq[:, 1:-1, 3] > seq[:, :-2, 0] + thresh] = 1
    targets[:, 2:][seq[:, 1:-1, 3] < seq[:, :-2, 0] - thresh] = 2

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    learning_rate=0.001, learning_rate_decay=0.999,
                    n_epochs=n_epochs, activation='tanh',
                    output_type='softmax', use_symbolic_softmax=False)
    model.fit(seq, targets, validation_frequency=1000)

    [seqNum, lineNum, colNum] = seq.shape
    print(seqNum, lineNum, colNum)
    error = [0 for i in range(colNum)]
    plt.close('all')
    # Plot the first 10 sequences: inputs on top, assigned probs below.
    for seq_num in range(10):
        fig = plt.figure()
        ax1 = plt.subplot(211)
        plt.plot(seq[seq_num])
        ax1.set_title('input??')
        ax2 = plt.subplot(212)
        # blue line will represent true classes
        true_targets = plt.step(range(n_steps), targets[seq_num], marker='o')
        # show probabilities (in b/w) output by model
        guess = model.predict_proba(seq[seq_num])
        guessed_probs = plt.imshow(guess.T, interpolation='nearest',
                                   cmap='gray')
        ax2.set_title('blue: true class, grayscale: probs assigned by model')
        # NOTE(review): this subtracts the (n_steps,) class labels from the
        # (n_steps, n_in) *inputs*; the shapes do not broadcast and the
        # intent was probably to compare `guess` against one-hot targets.
        # Kept (and flagged) rather than silently redefined.
        dif = abs(seq[seq_num] - targets[seq_num])
        for i in range(colNum):
            sq_sum = 0  # renamed: the original shadowed the builtin `sum`
            for j in range(lineNum):
                sq_sum += dif[i, j] ** 2
            error[i] = math.sqrt(sq_sum / lineNum)
            print(error[i])
if __name__ == "__main__":
    # Optional file logging, kept commented out for reference:
    ##logging.basicConfig(
    ##    level = logging.INFO,
    ##    format = 'LINE %(lineno)-4d %(levelname)-8s %(message)s',
    ##    datafmt = '%m-%d %H:%M',
    ##    filename = "D:/logresult20160123/one.log",
    ##    filemode = 'w')
    # Wall-clock timing for the whole run.
    t0 = time.time()
    #test_real()
    # problem takes more epochs to solve
    test_binary(multiple_out=True, n_epochs=20)
    #test_softmax(n_epochs=250)
    print ("Elapsed time: %f" % (time.time() - t0))
|
|
'''
Kivy framework
==============
Kivy is an open source library for developing multi-touch applications. It is
completely cross-platform (Linux/OSX/Win) and released under the terms of the
MIT License.
It comes with native support for many multi-touch input devices, a growing
library of multi-touch aware widgets and hardware accelerated OpenGL drawing.
Kivy is designed to let you focus on building custom and highly interactive
applications as quickly and easily as possible.
With Kivy, you can take full advantage of the dynamic nature of Python. There
are thousands of high-quality, free libraries that can be integrated in your
application. At the same time, performance-critical parts are implemented
in the C language.
See http://kivy.org for more information.
'''
# Public API of the kivy package.
__all__ = (
    'require',
    'kivy_configure', 'kivy_register_post_configuration',
    'kivy_options', 'kivy_base_dir',
    'kivy_modules_dir', 'kivy_data_dir', 'kivy_shader_dir',
    'kivy_icons_dir', 'kivy_home_dir', 'kivy_userexts_dir',
    'kivy_config_fn', 'kivy_usermodules_dir',
)

# Framework version, shaped "X.Y.Z[-tag[-tagrevision]]"; parsed by require().
__version__ = '1.8.0-dev'
import sys
import shutil
from getopt import getopt, GetoptError
from os import environ, mkdir
from os.path import dirname, join, basename, exists, expanduser
from kivy.logger import Logger, LOG_LEVELS
from kivy.utils import platform
# internals for post-configuration
# Callbacks registered via kivy_register_post_configuration(); each is
# invoked (no arguments) by kivy_configure().
__kivy_post_configuration = []
# sys.maxsize below 2**63 - 1 identifies a 32-bit interpreter, which Kivy
# does not support on OS X.
if platform == 'macosx' and sys.maxsize < 9223372036854775807:
    r = '''Unsupported Python version detected!:
    Kivy requires a 64 bit version of Python to run on OS X. We strongly advise
    you to use the version of Python that is provided by Apple (don't use ports,
    fink or homebrew unless you know what you're doing).
    See http://kivy.org/docs/installation/installation-macosx.html for details.
    '''
    Logger.critical(r)
def require(version):
    '''Check that the running Kivy is at least *version*.

    Application code calls this early::

        import kivy
        kivy.require('1.0.1')

    Version strings have the shape ``X.Y.Z[-tag[-tagrevision]]`` (major,
    minor, bugfix). Only the ``-dev`` tag may be requested: asking for a
    ``-dev`` version merely logs a warning when the installed Kivy is not a
    development build. Any other tag, any tag revision, or an installed
    version older than the requested one raises an Exception.
    '''
    def parse_version(version):
        # Split off an optional "-tag" / "-tag-tagrev" suffix first.
        tag = None
        tagrev = None
        if '-' in version:
            parts = version.split('-')
            if len(parts) == 2:
                version, tag = parts
            elif len(parts) == 3:
                version, tag, tagrev = parts
            else:
                raise Exception('Revision format must be X.Y.Z[-tag]')
        # The numeric part must be exactly three dot-separated integers.
        parts = version.split('.')
        if len(parts) != 3:
            raise Exception('Revision format must be X.Y.Z[-tag]')
        return [int(x) for x in parts], tag, tagrev

    # Requested version vs. the version of this very module.
    revision, tag, tagrev = parse_version(version)
    sysrevision, systag, systagrev = parse_version(__version__)

    # Only the "dev" tag may be requested.
    if tag not in (None, 'dev'):
        raise Exception('Revision format must not have any tag except "dev"')
    if tag == 'dev' and systag != 'dev':
        Logger.warning('Application requested a -dev version of Kivy. '
                       '(You have %s, but the application requires %s)' % (
                           __version__, version))
    # No tag revision (-alpha-1, -beta-x, ...) is ever allowed.
    if tagrev is not None:
        raise Exception('Revision format must not contain any tagrevision')
    # List comparison orders major, then minor, then bugfix.
    if sysrevision < revision:
        raise Exception('The version of Kivy installed on this system '
                        'is too old. '
                        '(You have %s, but the application requires %s)' % (
                            __version__, version))
def kivy_configure():
    '''Run every registered post-configuration callback.

    Call this yourself if you create the window manually instead of letting
    Kivy do it.
    '''
    for cb in __kivy_post_configuration:
        cb()
def kivy_register_post_configuration(callback):
    '''Schedule *callback* to be run when kivy_configure() is invoked.

    .. warning::
        Internal use only.
    '''
    __kivy_post_configuration.append(callback)
def kivy_usage():
    '''Kivy Usage: %s [OPTION...]::
        -h, --help
            Prints this help message.
        -d, --debug
            Shows debug log
        -a, --auto-fullscreen
            Force 'auto' fullscreen mode (no resolution change).
            Uses your display's resolution. This is most likely what you want.
        -c, --config section:key[:value]
            Set a custom [section] key=value in the configuration object
        -f, --fullscreen
            Force running in fullscreen mode.
        -k, --fake-fullscreen
            Force 'fake' fullscreen mode (no window border/decoration).
            Uses the resolution specified by width and height in your config.
        -w, --windowed
            Force running in a window.
        -p, --provider id:provider[,options]
            Add an input provider (eg: ccvtable1:tuio,192.168.0.1:3333).
        -m mod, --module=mod
            Activate a module (use "list" to get a list of available modules).
        -r, --rotation
            Rotate the window's contents (0, 90, 180, 270).
        -s, --save
            Save current Kivy configuration.
        --size=640x480
            Size of window geometry.
        --dpi=96
            Manually overload the Window DPI (for testing only.)
    '''
    # The docstring doubles as the help text: it is runtime data, interpolated
    # with the program name (the %s placeholder) and printed verbatim.
    print(kivy_usage.__doc__ % (basename(sys.argv[0])))
# Start !
# NOTE(review): presumably quiets logging when kivy is imported from inside
# a vim plugin ('vim' appears in globals there) -- confirm.
if 'vim' in globals():
    Logger.setLevel(level=LOG_LEVELS.get('critical'))
else:
    Logger.setLevel(level=LOG_LEVELS.get('info'))
Logger.info('Kivy v%s' % (__version__))

#: Global settings options for kivy: candidate backend providers per subsystem.
kivy_options = {
    'window': ('egl_rpi', 'pygame', 'sdl', 'x11'),
    'text': ('pil', 'pygame', 'sdlttf'),
    'video': ('ffmpeg', 'gstreamer', 'pyglet', 'null'),
    'audio': ('pygame', 'gstreamer', 'sdl'),
    'image': ('tex', 'imageio', 'dds', 'gif', 'pil', 'pygame'),
    'camera': ('opencv', 'gstreamer', 'videocapture'),
    'spelling': ('enchant', 'osxappkit', ),
    'clipboard': ('pygame', 'dummy'), }

# Read environment
# KIVY_<OPTION> environment variables override the defaults above:
# comma-separated values for sequence options, truthy strings otherwise.
for option in kivy_options:
    key = 'KIVY_%s' % option.upper()
    if key in environ:
        try:
            if type(kivy_options[option]) in (list, tuple):
                kivy_options[option] = environ[key].split(',')
            else:
                kivy_options[option] = environ[key].lower() in \
                    ('true', '1', 'yes', 'yup')
        except Exception:
            Logger.warning('Core: Wrong value for %s environment key' % key)
            Logger.exception('')
# Extract all needed path in kivy
#: Kivy directory (where this module lives)
kivy_base_dir = dirname(sys.modules[__name__].__file__)
#: Kivy modules directory
kivy_modules_dir = environ.get('KIVY_MODULES_DIR',
                               join(kivy_base_dir, 'modules'))
#: Kivy extension directory
kivy_exts_dir = environ.get('KIVY_EXTS_DIR',
                            join(kivy_base_dir, 'extensions'))
#: Kivy data directory
kivy_data_dir = environ.get('KIVY_DATA_DIR',
                            join(kivy_base_dir, 'data'))
#: Kivy glsl shader directory
kivy_shader_dir = join(kivy_data_dir, 'glsl')
#: Kivy icons config path (don't remove the last '')
kivy_icons_dir = join(kivy_data_dir, 'icons', '')
# The per-user paths below stay empty unless configuration management runs
# (see the KIVY_DOC_INCLUDE guard further down).
#: Kivy user-home storage directory
kivy_home_dir = ''
#: Kivy configuration filename
kivy_config_fn = ''
#: Kivy user modules directory
kivy_usermodules_dir = ''
#: Kivy user extensions directory
kivy_userexts_dir = ''
# Don't go further if we generate documentation
# Detect documentation / test / packaging runs from argv and record them in
# the environment so the rest of the framework can adapt.
if any(name in sys.argv[0] for name in ('sphinx-build', 'autobuild.py')):
    environ['KIVY_DOC'] = '1'
if 'sphinx-build' in sys.argv[0]:
    environ['KIVY_DOC_INCLUDE'] = '1'
if any('nosetests' in arg for arg in sys.argv):
    environ['KIVY_UNITTEST'] = '1'
if any('pyinstaller' in arg for arg in sys.argv):
    environ['KIVY_PACKAGING'] = '1'
if not environ.get('KIVY_DOC_INCLUDE'):
    # Configuration management
    # Resolve the per-user storage root; mobile platforms sandbox it.
    user_home_dir = expanduser('~')
    if platform == 'android':
        user_home_dir = environ['ANDROID_APP_PATH']
    elif platform == 'ios':
        user_home_dir = join(expanduser('~'), 'Documents')
    kivy_home_dir = join(user_home_dir, '.kivy')
    kivy_config_fn = join(kivy_home_dir, 'config.ini')
    kivy_usermodules_dir = join(kivy_home_dir, 'mods')
    kivy_userexts_dir = join(kivy_home_dir, 'extensions')
    icon_dir = join(kivy_home_dir, 'icon')
    # Create the ~/.kivy tree on first run (opt-out via KIVY_NO_CONFIG).
    if 'KIVY_NO_CONFIG' not in environ:
        if not exists(kivy_home_dir):
            mkdir(kivy_home_dir)
        if not exists(kivy_usermodules_dir):
            mkdir(kivy_usermodules_dir)
        if not exists(kivy_userexts_dir):
            mkdir(kivy_userexts_dir)
        if not exists(icon_dir):
            try:
                shutil.copytree(join(kivy_data_dir, 'logo'), icon_dir)
            except:
                # Best-effort: a missing logo dir must not abort the import.
                Logger.exception('Error when copying logo directory')
# configuration
from kivy.config import Config
# Set level of logger
level = LOG_LEVELS.get(Config.get('kivy', 'log_level'))
Logger.setLevel(level=level)
Logger.setLevel(level=LOG_LEVELS.get('debug'))
    # Can be overrided in command line
    if 'KIVY_UNITTEST' not in environ and 'KIVY_PACKAGING' not in environ:
        # save sys argv, otherwize, gstreamer use it and display help..
        sys_argv = sys.argv
        sys.argv = sys.argv[:1]
        try:
            opts, args = getopt(sys_argv[1:], 'hp:fkawFem:sr:dc:',
                                ['help', 'fullscreen', 'windowed', 'fps', 'event',
                                 'module=', 'save', 'fake-fullscreen', 'auto-fullscreen',
                                 'display=', 'size=', 'rotate=', 'config=', 'debug',
                                 'dpi='])
        except GetoptError as err:
            # Unknown option: show usage and abort the process.
            Logger.error('Core: %s' % str(err))
            kivy_usage()
            sys.exit(2)
        # set argv to the non-read args
        sys.argv = sys_argv[0:1] + args
    else:
        # Test/packaging runs parse no command-line options.
        opts = []
        args = []
    need_save = False
    # Apply every parsed command-line option to the Config object.
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            kivy_usage()
            sys.exit(0)
        elif opt in ('-p', '--provider'):
            try:
                pid, args = arg.split(':', 1)
                Config.set('input', pid, args)
            except ValueError:
                # when we are doing an executable on macosx with pyinstaller,
                # they are passing information with -p. so it will conflict with
                # our current -p option. since the format is not the same, just
                # avoid it.
                pass
        elif opt in ('-a', '--auto-fullscreen'):
            Config.set('graphics', 'fullscreen', 'auto')
        elif opt in ('-c', '--config'):
            # -c section:key[:value] -- at most one value, colons allowed in it.
            l = arg.split(':', 2)
            if len(l) == 2:
                Config.set(l[0], l[1], '')
            elif len(l) == 3:
                Config.set(l[0], l[1], l[2])
            else:
                raise Exception('Invalid --config value')
            if l[0] == 'kivy' and l[1] == 'log_level':
                # Re-apply immediately so later startup logging honours it.
                level = LOG_LEVELS.get(Config.get('kivy', 'log_level'))
                Logger.setLevel(level=level)
        elif opt in ('-k', '--fake-fullscreen'):
            Config.set('graphics', 'fullscreen', 'fake')
        elif opt in ('-f', '--fullscreen'):
            Config.set('graphics', 'fullscreen', '1')
        elif opt in ('-w', '--windowed'):
            Config.set('graphics', 'fullscreen', '0')
        elif opt in ('--size', ):
            w, h = str(arg).split('x')
            Config.set('graphics', 'width', w)
            Config.set('graphics', 'height', h)
        elif opt in ('--display', ):
            Config.set('graphics', 'display', str(arg))
        elif opt in ('-m', '--module'):
            if str(arg) == 'list':
                from kivy.modules import Modules
                Modules.usage_list()
                sys.exit(0)
            # NOTE(review): rebinds the outer `args` list from getopt; harmless
            # today because `args` is not used afterwards, but fragile.
            args = arg.split(':', 1)
            if len(args) == 1:
                args += ['']
            Config.set('modules', args[0], args[1])
        elif opt in ('-s', '--save'):
            need_save = True
        elif opt in ('-r', '--rotation'):
            Config.set('graphics', 'rotation', arg)
        elif opt in ('-d', '--debug'):
            level = LOG_LEVELS.get('debug')
            Logger.setLevel(level=level)
        elif opt == '--dpi':
            environ['KIVY_DPI'] = arg
if need_save and 'KIVY_NO_CONFIG' not in environ:
try:
with open(kivy_config_fn, 'w') as fd:
Config.write(fd)
except Exception as e:
Logger.exception('Core: error while saving default'
'configuration file:', str(e))
Logger.info('Core: Kivy configuration saved.')
sys.exit(0)
# configure all activated modules
from kivy.modules import Modules
Modules.configure()
# android hooks: force fullscreen and add android touch input provider
# if platform in ('android', 'ios'):
# from kivy.config import Config
# Config.set('graphics', 'fullscreen', 'auto')
# Config.remove_section('input')
# Config.add_section('input')
#
# if platform == 'android':
# Config.set('input', 'androidtouch', 'android')
|
|
# Skytraq Venus module
import serial
import time
import datetime
import math
from ftplib import FTP
import struct
class Venus6:
    """Skytraq Venus 6 GPS receiver driver.

    Speaks the Skytraq binary protocol over a serial port: every message is
    framed as ``A0 A1 <len:2,BE> <msg-id> <payload> <xor-checksum> 0D 0A``.
    Also decodes the receiver's on-board data-logger entries and can refresh
    the ephemeris from Skytraq's FTP server.
    """

    # --- binary message ids: queries / commands ----------------------------
    MSG_TYPE_SOFT_VERSION_Q = 0x02
    MSG_TYPE_SOFT_CRC_Q = 0x03
    MSG_TYPE_CONF_SERIAL = 0x05
    MSG_TYPE_LOG_STATUS_Q = 0x17
    MSG_TYPE_LOG_CLEAR = 0x19
    MSG_TYPE_LOG_READ_BATCH = 0x1D
    MSG_TYPE_EPHEMERIS_GET = 0x30
    MSG_TYPE_WAAS_SET = 0x37
    MSG_TYPE_WAAS_GET = 0x38
    MSG_TYPE_NAV_MODE_SET = 0x3C
    MSG_TYPE_NAV_MODE_GET = 0x3D
    # --- binary message ids: responses -------------------------------------
    MSG_TYPE_SOFT_VERSION_R = 0x80
    MSG_TYPE_SOFT_CRC_R = 0x81
    MSG_TYPE_ACK = 0x83
    MSG_TYPE_NACK = 0x84
    MSG_TYPE_LOG_STATUS_R = 0x94
    MSG_TYPE_EPHEMERIS_GET_R = 0xB1
    MSG_TYPE_WAAS_GET_R = 0xB3
    MSG_TYPE_NAV_MODE_GET_R = 0xB5

    # Baud rate -> protocol speed index for MSG_TYPE_CONF_SERIAL.
    _SPEED_INDEX = {4800: 0, 9600: 1, 19200: 2, 38400: 3, 57600: 4, 115200: 5}

    def __init__(self, serialport, baudrate, debug=False):
        """Open *serialport* at *baudrate* (9600 when None), 5 s read timeout."""
        if baudrate is None:
            baudrate = 9600
        self.serial = serial.Serial(serialport, baudrate, timeout=5)
        self.debug = debug

    def __del__(self):
        # hasattr guard: __init__ may have failed before self.serial existed.
        if hasattr(self, 'serial'):
            self.serial.close()

    def readline(self):
        "read a response line from gps host (terminated by \r\n)"
        # NOTE(review): on a silent link serial.read() keeps returning b''
        # after each 5 s timeout and this loops forever -- confirm whether a
        # timeout should raise instead.
        prev = 0
        line = bytearray()
        while True:
            c = self.serial.read()
            line += c
            if c == b'\n' and prev == b'\r':
                return line
            prev = c

    @staticmethod
    def isNMEA(buf):
        "return if line content is NMEA cf: http://en.wikipedia.org/wiki/NMEA_0183"
        # Fixed: this was declared without @staticmethod *and* without self,
        # so calling it through an instance passed the instance itself as buf.
        return buf[0] == ord('$')

    def setSerialSpeed(self, speed):
        "Set the host serial port speed"
        speedIdx = self._SPEED_INDEX.get(speed, -1)
        if speedIdx < 0:
            raise Exception("invalid speed", speed)
        self.sendCmd(self.MSG_TYPE_CONF_SERIAL,
                     bytearray([
                         0x00,  # COM1
                         speedIdx,
                         0x00  # only update sram
                     ]))
        print("speed changed to", speed)
        self.serial.flush()
        # re-open our own side at the new rate
        self.serial.baudrate = speed
        print(self.readline())

    def guessSerialSpeed(self):
        "Attempt to guess host serial port speed"
        # Try the most common rates first; a successful software-version
        # query means we are talking at the right speed.
        for speed in [9600, 115200, 4800, 19200, 38400, 57600]:
            self.serial.baudrate = speed
            try:
                rep = self.getSoftwareVersion(0)
                if self.debug:
                    print("got software version", rep, "at speed", speed)
                return speed
            except Exception as e:
                print("failed to get soft version at speed", speed, e)
        raise Exception("failed to guess serial speed")

    def readResponse(self, expectedRespId=None, expectedLen=0, maxAttempts=256):
        """Read one binary response, skipping NMEA chatter.

        Returns ``(msgId, payload)``; raises on checksum/frame errors or when
        the response id/length does not match the expectation.
        """
        attempt = 0
        prev = 0
        tmp = bytearray()
        # Scan for the A0 A1 start-of-sequence marker.
        while attempt < maxAttempts:
            c = self.serial.read()
            if c[0] == 0xA1 and prev == 0xA0:
                break
            tmp += c
            prev = c[0]
            attempt += 1
        if attempt >= maxAttempts:
            raise Exception("failed to get response after reading %d bytes" % attempt, tmp)
        # 16-bit big-endian payload length (includes the message-id byte).
        l = self.serial.read(2)
        payloadLen = l[0] << 8 | l[1]
        msgId = self.serial.read()[0]
        payload = self.serial.read(payloadLen - 1)
        # XOR checksum covers message id + payload.
        checksum = self.serial.read()[0]
        check = 0 ^ msgId
        for b in payload:
            check ^= b
        if check != checksum:
            raise Exception("received msg with invalid checksum", check, checksum)
        # Frame must end with CR LF.
        eos = self.serial.read(2)
        if eos != b"\r\n":
            raise Exception("invalid end of sequence", eos)
        if self.debug:
            print("RX <-", payloadLen, msgId, payload)
        if expectedRespId and msgId != expectedRespId:
            raise Exception("unexpected reponse from gps", msgId,
                            "expected", expectedRespId)
        if expectedLen and len(payload) != expectedLen:
            raise Exception("unexpected reponse length",
                            len(payload), "expected", expectedLen)
        return msgId, payload

    def sendCmd(self, msgId, payload, maxAttempts=5, expectAck=True):
        "sends a binary message to venus gps"
        msg = bytearray([0xA0, 0xA1])  # start of sequence
        payloadLen = 1 + len(payload)  # message id counts as payload
        checksum = 0
        # payload length, big endian
        msg.append(payloadLen >> 8)
        msg.append(payloadLen & 0xFF)
        # msg_id
        msg.append(msgId)
        checksum ^= msgId
        # payload, folded into the XOR checksum
        msg += payload
        for b in payload:
            checksum ^= b
        msg.append(checksum)
        # end of sequence
        msg += bytearray([0x0D, 0x0A])
        if self.debug:
            print("TX ->", msg)
        self.serial.write(msg)
        # wait for the matching ACK/NACK
        if expectAck:
            i = 0
            while i < maxAttempts:
                repId, repPayload = self.readResponse()
                if repId == self.MSG_TYPE_NACK:
                    if repPayload[0] == msgId:
                        raise Exception("got NACK from gps")
                elif repId == self.MSG_TYPE_ACK:
                    if repPayload[0] == msgId:
                        # ok got ack
                        break
                elif self.debug:
                    print("received unexpected", repId, repPayload)
                # Fixed: the attempt counter was never incremented, so a
                # missing ACK spun here forever and the failure branch
                # below was unreachable.
                i += 1
            if i >= maxAttempts:
                raise Exception("failed to get ack for query")

    def getSoftwareVersion(self, versionType):
        """Query the software version; returns
        (versionType, kernelVersion, odmVersion, revision) strings."""
        self.sendCmd(self.MSG_TYPE_SOFT_VERSION_Q, bytearray([versionType]))
        msgId, payload = self.readResponse(self.MSG_TYPE_SOFT_VERSION_R, 13)
        versionType = payload[0]
        kernelVersion = "%d.%d.%d" % (payload[1] << 8 | payload[2],
                                      payload[3], payload[4])
        odmVersion = "%d.%d.%d" % (payload[5] << 8 | payload[6],
                                   payload[7], payload[8])
        revision = "%d/%d/%d" % (payload[9] << 8 | payload[10],
                                 payload[11], payload[12])
        return (versionType, kernelVersion, odmVersion, revision)

    def getSoftwareCRC(self):
        "Get CRC of software running on host (two raw bytes)"
        self.sendCmd(self.MSG_TYPE_SOFT_CRC_Q, bytearray([0x01]))
        msgId, payload = self.readResponse(self.MSG_TYPE_SOFT_CRC_R, 3)
        return payload[1:3]

    def getLogStatus(self):
        "Get log buffer status / NOTE: number transmitted in little endian"
        self.sendCmd(self.MSG_TYPE_LOG_STATUS_Q, bytearray())
        msgId, payload = self.readResponse(self.MSG_TYPE_LOG_STATUS_R)
        if len(payload) < 34:
            raise Exception("unexpected reponse length for status log",
                            len(payload))
        # All fields little-endian, decoded in on-wire order.
        offset = 0
        log_wr_ptr = (payload[offset + 3] << 24 | payload[offset + 2] << 16
                      | payload[offset + 1] << 8 | payload[offset])
        offset += 4
        sector_left = payload[offset + 1] << 8 | payload[offset]
        offset += 2
        total_sector = payload[offset + 1] << 8 | payload[offset]
        offset += 2
        max_time = (payload[offset + 3] << 24 | payload[offset + 2] << 16
                    | payload[offset + 1] << 8 | payload[offset])
        offset += 4
        min_time = (payload[offset + 3] << 24 | payload[offset + 2] << 16
                    | payload[offset + 1] << 8 | payload[offset])
        offset += 4
        max_distance = (payload[offset + 3] << 24 | payload[offset + 2] << 16
                        | payload[offset + 1] << 8 | payload[offset])
        offset += 4
        min_distance = (payload[offset + 3] << 24 | payload[offset + 2] << 16
                        | payload[offset + 1] << 8 | payload[offset])
        offset += 4
        max_speed = (payload[offset + 3] << 24 | payload[offset + 2] << 16
                     | payload[offset + 1] << 8 | payload[offset])
        offset += 4
        min_speed = (payload[offset + 3] << 24 | payload[offset + 2] << 16
                     | payload[offset + 1] << 8 | payload[offset])
        offset += 4
        data_log_enable = payload[offset]
        offset += 1
        log_fifo_mode = payload[offset]
        offset += 1
        return (log_wr_ptr, sector_left, total_sector,
                min_time, max_time, min_distance, max_distance,
                min_speed, max_speed, data_log_enable, log_fifo_mode)

    def readLogResponse(self, sector, length):
        """Read *length* bytes of log data plus the END/CHECKSUM trailer.

        Verifies the XOR checksum and the echoed sector number.
        """
        response = self.serial.read(length)
        # XOR checksum over the whole data block.
        mysum = 0
        for i in range(length):
            mysum ^= response[i]
        end = self.serial.read(13)
        if end != b"END\x00CHECKSUM=":
            print("unexpected end", end, response)
            # Fixed: a leftover `while True: print(self.readline())` debug
            # loop here made this raise unreachable (and hung the caller).
            raise Exception("unexpected response, check device page size", response, end)
        checksum = self.serial.read(1)[0]
        if checksum != mysum:
            raise Exception("checksum mismatch: 0x%x vs 0x%x" % (checksum, mysum))
        # Echoed sector number, little endian.
        tmp = self.serial.read(2)
        sector_verif = tmp[1] << 8 | tmp[0]
        if sector_verif != sector:
            raise Exception("sector mismatch %d vs %d" % (sector_verif, sector))
        # discard trailing crap
        self.serial.read(3)
        # wait for next line before attempting to issue another command
        self.readline()
        return response

    def readLog(self, sector, nb_sector=1, maxAttempts=3):
        "Read a log sector form gps, retrying up to maxAttempts times"
        attempt = 0
        while True:
            try:
                self.sendCmd(self.MSG_TYPE_LOG_READ_BATCH,
                             bytearray([
                                 # 16-bit big-endian start sector.
                                 # Fixed: the original shifted by 16 (always
                                 # 0) and truncated sector numbers >= 256.
                                 (sector >> 8) & 0xFF,
                                 sector & 0xFF,
                                 # 16-bit big-endian sector count (same fix).
                                 (nb_sector >> 8) & 0xFF,
                                 nb_sector & 0xFF
                             ]))
                # sector length is 4096 bytes
                data = self.readLogResponse(sector, nb_sector * 4096)
                break
            except Exception as e:
                print("FAILED TO READ SECTOR", e)
                attempt += 1
                if attempt > maxAttempts:
                    raise Exception("failed to read sector")
        return data

    @staticmethod
    def __ecef_to_geo(x, y, z):
        """Convert ECEF coordinates (metres) to WGS-84 (lon, lat, alt) degrees.

        Closed-form conversion; see http://fr.wikipedia.org/wiki/WGS_84 and
        http://microem.ru/files/2012/08/GPS.G1-X-00006.pdf
        """
        a = 6378137.0  # earth semimajor axis in meters
        e_2 = 0.0066943799901414  # first eccentricity squared
        b = 6356752.314245179497563967  # earth semi minor axis
        ep_2 = 0.006739496742276433  # second eccentricity squared
        p = math.sqrt(math.pow(x, 2) + math.pow(y, 2))
        th = math.atan2(a * z, b * p)
        lon = math.atan2(y, x)
        lat = math.atan2(
            z + ep_2 * b * math.pow(math.sin(th), 3),
            p - e_2 * a * math.pow(math.cos(th), 3)
        )
        N = a / math.sqrt(1 - e_2 * math.pow(math.sin(lat), 2))
        alt = p / math.cos(lat) - N
        return (lon * 180 / math.pi, lat * 180 / math.pi, alt)

    @staticmethod
    def __gps_time_to_timestamp(week_number, time_of_week):
        """Convert GPS week + time-of-week (s) to an aware UTC datetime."""
        # GPS epoch: 1980-01-06 00:00:00 UTC.
        refDate = datetime.datetime(1980, 1, 6, tzinfo=datetime.timezone.utc)
        date = refDate + datetime.timedelta(weeks=week_number, seconds=time_of_week)
        return date

    @staticmethod
    def __decodeFull(data, offset):
        """Decode an 18-byte "full fix" log entry at *offset*.

        Returns (speed km/h, gps week, time-of-week, ecef_x, ecef_y, ecef_z).
        """
        speed = (data[offset] & 0x03) << 8 | data[offset + 1]
        wn = data[offset + 3] | ((data[offset + 2] & 0x03) << 8)
        wn += 1024  # counter wrapped 1 time
        tow = (((data[offset + 2] >> 4) & 0x0F)
               | (data[offset + 5] << 4)
               | (data[offset + 4] << 12))
        offset += 6
        # Each ECEF axis: unsigned low word | signed high word (big endian).
        (d1, d2,) = struct.unpack_from(">Hh", data, offset)
        ecef_x = d2 << 16 | d1
        offset += 4
        (d1, d2,) = struct.unpack_from(">Hh", data, offset)
        ecef_y = d2 << 16 | d1
        offset += 4
        (d1, d2,) = struct.unpack_from(">Hh", data, offset)
        ecef_z = d2 << 16 | d1
        offset += 4
        return (speed, wn, tow, ecef_x, ecef_y, ecef_z)

    @staticmethod
    def __decodeCompact(data, offset):
        """Decode an 8-byte "compact" log entry (deltas from the last fix)."""
        speed = (data[offset] & 0x03) << 8 | data[offset + 1]
        d_tow = data[offset + 2] << 8 | data[offset + 3]
        d_x = data[offset + 4] << 2 | (data[offset + 5] >> 6) & 0x03
        d_y = (data[offset + 5] & 0x3F) | (((data[offset + 6] >> 4) & 0x0F) << 6)
        d_z = ((data[offset + 6] & 0x03) << 8) | data[offset + 7]
        # Map the 10-bit magnitudes to signed deltas.
        # NOTE(review): `511 - d` is an unusual sign convention (plain
        # two's-complement would be `d - 1024`); kept as-is -- confirm
        # against the Skytraq log format spec.
        if d_x >= 512:
            d_x = 511 - d_x
        if d_y >= 512:
            d_y = 511 - d_y
        if d_z >= 512:
            d_z = 511 - d_z
        return (speed, d_tow, d_x, d_y, d_z)

    @staticmethod
    def decodeLog(data):
        """Decode a raw log buffer into a list of entries.

        Each entry is ``[datetime, lat, lon, alt, speed]``. Full fixes set
        the absolute position/time; compact entries apply deltas to the most
        recent full fix.

        NOTE(review): a compact entry appearing before any full fix would use
        wn/tow/ecef_* before assignment (NameError); this assumes the logger
        always starts with a full fix -- confirm on device.
        """
        entries = []
        offset = 0
        length = len(data)
        while offset < length:
            b = data[offset]
            entryType = b >> 5
            if entryType == 2 or entryType == 3:
                # full fix: absolute position
                (speed, wn, tow, ecef_x, ecef_y, ecef_z) = Venus6.__decodeFull(data, offset)
                (lon, lat, alt) = Venus6.__ecef_to_geo(ecef_x, ecef_y, ecef_z)
                date = Venus6.__gps_time_to_timestamp(wn, tow)
                entries.append([date, lat, lon, alt, speed])
                offset += 18
            elif entryType == 4:
                # compact fix: deltas relative to the previous entry
                (speed, d_tow, d_x, d_y, d_z) = Venus6.__decodeCompact(data, offset)
                tow += d_tow
                ecef_x += d_x
                ecef_y += d_y
                ecef_z += d_z
                (lon, lat, alt) = Venus6.__ecef_to_geo(ecef_x, ecef_y, ecef_z)
                date = Venus6.__gps_time_to_timestamp(wn, tow)
                entries.append([date, lat, lon, alt, speed])
                offset += 8
            elif entryType == 7:
                # empty/erased flash, skip
                offset += 2
            else:
                # Fixed: the %d placeholder was passed as a second print
                # argument instead of being interpolated.
                print("WARN: unknown entry type %d" % entryType)
                offset += 1
        return entries

    def clearLogs(self):
        "clear all gps logs"
        self.sendCmd(self.MSG_TYPE_LOG_CLEAR, bytearray())

    def getWaasStatus(self):
        "get waas status from host (1 = enabled)"
        self.sendCmd(self.MSG_TYPE_WAAS_GET, bytearray())
        msgId, payload = self.readResponse(self.MSG_TYPE_WAAS_GET_R, 1)
        return payload[0]

    def setWaasStatus(self, enabled, persist=True):
        "enable/disable waas"
        self.sendCmd(self.MSG_TYPE_WAAS_SET, bytearray([
            1 if enabled else 0,
            1 if persist else 0,
        ]))
        # give time to reboot, then drop whatever arrived meanwhile
        time.sleep(1)
        self.serial.flushInput()

    def getNavigationMode(self):
        "get navigation from host"
        self.sendCmd(self.MSG_TYPE_NAV_MODE_GET, bytearray())
        msgId, payload = self.readResponse(self.MSG_TYPE_NAV_MODE_GET_R, 1)
        return "pedestrian" if payload[0] else "car"

    def setNavigationMode(self, pedestrian, persist=True):
        "set navigation mode"
        self.sendCmd(self.MSG_TYPE_NAV_MODE_SET, bytearray([
            1 if pedestrian else 0,
            1 if persist else 0,
        ]))
        # give time to reboot, then drop whatever arrived meanwhile
        time.sleep(1)
        self.serial.flushInput()

    def getEphemeris(self, sv):
        "get ephemeris data from host for satellite *sv*"
        self.sendCmd(self.MSG_TYPE_EPHEMERIS_GET, bytearray([sv]))
        msgId, payload = self.readResponse(self.MSG_TYPE_EPHEMERIS_GET_R, 86)
        return payload

    def updateEphemeris(self):
        """Download the current ephemeris file from Skytraq's FTP server and
        stream it straight to the receiver."""
        if self.debug:
            print("Start downloading ephemeris file")
        with FTP('60.250.205.31') as ftp:
            ftp.login('skytraq', 'skytraq')
            ftp.cwd('ephemeris')
            eph_data = bytearray()
            ftp.retrbinary('RETR Eph_4.dat', eph_data.extend)
        if self.debug:
            print('got data', len(eph_data), eph_data)
        if self.debug:
            print("Start uploading data to host")
        self.serial.write(eph_data)
        if self.debug:
            print("Done")
        # give the receiver time to process, then drop its output
        time.sleep(1)
        self.serial.flushInput()
|
|
import mock
from django import test
from django.conf import settings
from django.http import Http404
from cradmin_legacy import cradmin_testhelpers
from cradmin_legacy.crinstance import reverse_cradmin_url
from model_bakery import baker
from devilry.apps.core.models import Assignment
from devilry.devilry_admin.views.assignment.examiners import overview
class TestOverview(test.TestCase, cradmin_testhelpers.TestCaseMixin):
    """Tests for the admin examiners overview page of an assignment.

    Each test renders :class:`overview.Overview` for a baked assignment with a
    mocked cradmin instance whose ``get_devilryrole_for_requestuser`` returns a
    fixed role, then asserts on the rendered HTML via the CSS selector helpers.
    """
    viewclass = overview.Overview

    def __mockinstance_with_devilryrole(self, devilryrole):
        # Build a cradmin-instance mock that reports the given admin role.
        mockinstance = mock.MagicMock()
        mockinstance.get_devilryrole_for_requestuser.return_value = devilryrole
        return mockinstance

    def test_title(self):
        # The <title> should include the assignment's long name.
        testassignment = baker.make('core.Assignment', long_name='Test Assignment')
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self.__mockinstance_with_devilryrole('departmentadmin'))
        self.assertIn(
            'Examiners on Test Assignment',
            mockresponse.selector.one('title').alltext_normalized)

    def test_h1(self):
        # The page heading mirrors the title text exactly.
        testassignment = baker.make('core.Assignment', long_name='Test Assignment')
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self.__mockinstance_with_devilryrole('departmentadmin'))
        self.assertEqual(
            'Examiners on Test Assignment',
            mockresponse.selector.one('h1').alltext_normalized)

    def test_buttonbar_sanity(self):
        # Exactly one button is rendered in the button bar.
        testassignment = baker.make('core.Assignment')
        baker.make('core.RelatedExaminer', period=testassignment.period)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self.__mockinstance_with_devilryrole('departmentadmin'))
        self.assertEqual(
            1,
            mockresponse.selector.count(
                '#devilry_admin_assignment_examiners_overview_buttonbar .btn'))

    def test_buttonbar_organize_examiners_link(self):
        # The bulk-organize button links through the cradmin instance's
        # reverse_url; we stub it to make the produced href predictable.
        testassignment = baker.make('core.Assignment')
        baker.make('core.RelatedExaminer', period=testassignment.period)
        mock_cradmin_instance = self.__mockinstance_with_devilryrole('departmentadmin')

        def mock_reverse_url(appname, viewname, **kwargs):
            # Fake URL reverser: '/<appname>/<viewname>'.
            return '/{}/{}'.format(appname, viewname)
        mock_cradmin_instance.reverse_url = mock_reverse_url
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=mock_cradmin_instance)
        self.assertEqual(
            '/bulk_organize_examiners/INDEX',
            mockresponse.selector
            .one('#devilry_admin_assignment_examiners_overview_button_bulk_organize_examiners')['href'])

    def test_buttonbar_organize_examiners_text(self):
        testassignment = baker.make('core.Assignment')
        baker.make('core.RelatedExaminer', period=testassignment.period)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self.__mockinstance_with_devilryrole('departmentadmin'))
        self.assertEqual(
            'Bulk-organize examiners',
            mockresponse.selector
            .one('#devilry_admin_assignment_examiners_overview_button_bulk_organize_examiners')
            .alltext_normalized)

    def test_examinerlist_no_relatedexaminers_sanity(self):
        # Without any RelatedExaminer on the period, the empty-state message
        # is shown instead of the listbuilder list.
        testuser = baker.make(settings.AUTH_USER_MODEL)
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self.__mockinstance_with_devilryrole('departmentadmin'),
            requestuser=testuser)
        self.assertTrue(mockresponse.selector.exists(
            '#devilry_admin_assignment_examiners_overview_no_relatedexaminers'))
        self.assertFalse(mockresponse.selector.exists('#cradmin_legacy_listbuilderview_listwrapper'))

    def test_examinerlist_no_relatedexaminers_text(self):
        # The empty-state message names the subject.period and links to
        # the page where examiners can be added.
        testuser = baker.make(settings.AUTH_USER_MODEL)
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                           parentnode__parentnode__short_name='testsubject',
                                           parentnode__short_name='testperiod')
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self.__mockinstance_with_devilryrole('departmentadmin'),
            requestuser=testuser)
        self.assertEqual(
            'You have no users registered as examiner for testsubject.testperiod. You need to '
            'add users as examiners on the semester page for the course before you can use '
            'them as examiners for assignments.',
            mockresponse.selector.one(
                '#devilry_admin_assignment_examiners_overview_no_relatedexaminers p').alltext_normalized)
        self.assertEqual(
            'Add examiners',
            mockresponse.selector.one(
                '#devilry_admin_assignment_examiners_overview_no_relatedexaminers a').alltext_normalized)

    def test_examinerlist_no_relatedexaminers_url(self):
        # The "Add examiners" link points to the periodadmin examiners app.
        testuser = baker.make(settings.AUTH_USER_MODEL)
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self.__mockinstance_with_devilryrole('departmentadmin'),
            requestuser=testuser)
        self.assertEqual(
            reverse_cradmin_url(
                instanceid='devilry_admin_periodadmin',
                appname='examiners',
                roleid=testassignment.period.id),
            mockresponse.selector.one(
                '#devilry_admin_assignment_examiners_overview_no_relatedexaminers a')['href'])

    def test_exclude_inactive_relatedexaminers(self):
        # Inactive related examiners must not produce a list.
        testuser = baker.make(settings.AUTH_USER_MODEL)
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        baker.make('core.RelatedExaminer', period=testassignment.period, active=False)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self.__mockinstance_with_devilryrole('departmentadmin'),
            requestuser=testuser)
        self.assertFalse(mockresponse.selector.exists('#cradmin_legacy_listbuilderview_listwrapper'))

    def test_has_relatedexaminers_sanity(self):
        # Five active related examiners -> five list items, no empty-state.
        testuser = baker.make(settings.AUTH_USER_MODEL)
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        baker.make('core.RelatedExaminer', period=testassignment.period, _quantity=5)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self.__mockinstance_with_devilryrole('departmentadmin'),
            requestuser=testuser)
        self.assertTrue(mockresponse.selector.exists('#cradmin_legacy_listbuilderview_listwrapper'))
        self.assertEqual(5, mockresponse.selector.count('.cradmin-legacy-listbuilder-itemvalue'))
        self.assertFalse(mockresponse.selector.exists(
            '#devilry_admin_assignment_examiners_overview_no_relatedexaminers'))

    def test_listbuilderlist_footer_text(self):
        testuser = baker.make(settings.AUTH_USER_MODEL)
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
                                           parentnode__parentnode__short_name='testsubject',
                                           parentnode__short_name='testperiod')
        baker.make('core.RelatedExaminer', period=testassignment.period)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self.__mockinstance_with_devilryrole('departmentadmin'),
            requestuser=testuser)
        self.assertEqual(
            'Only users registered as examiner for testsubject.testperiod is available '
            'as examiners for assignments. Add more examiners.',
            mockresponse.selector.one(
                '.devilry-listbuilderlist-footer').alltext_normalized)

    def test_listbuilderlist_footer_url(self):
        # The footer link also targets the periodadmin examiners app.
        testuser = baker.make(settings.AUTH_USER_MODEL)
        testassignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
        baker.make('core.RelatedExaminer', period=testassignment.period)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testassignment,
            cradmin_instance=self.__mockinstance_with_devilryrole('departmentadmin'),
            requestuser=testuser)
        self.assertEqual(
            reverse_cradmin_url(
                instanceid='devilry_admin_periodadmin',
                appname='examiners',
                roleid=testassignment.period.id),
            mockresponse.selector.one(
                '.devilry-listbuilderlist-footer a')['href'])

    def test_students_without_examiners_warning(self):
        # A candidate with no examiner triggers the warning banner.
        testassignment = baker.make('core.Assignment')
        baker.make('core.Candidate', assignment_group__parentnode=testassignment)
        baker.make('core.RelatedExaminer', period=testassignment.period)
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testassignment)
        self.assertTrue(mockresponse.selector.exists('#id_devilry_admin_assignment_examineroverview'))
        self.assertEqual(
            mockresponse.selector.one('#id_devilry_admin_assignment_examineroverview').alltext_normalized,
            'warning: There are still students on the assignment with no examiners assigned to them')

    def test_students_all_students_are_assigned_examiners_warning_not_rendered(self):
        # Once every group has an examiner the warning banner disappears.
        testassignment = baker.make('core.Assignment')
        assignment_group = baker.make('core.AssignmentGroup', parentnode=testassignment)
        baker.make('core.Candidate', assignment_group=assignment_group)
        baker.make('core.Examiner', related_examiner__period=testassignment.parentnode,
                   assignmentgroup=assignment_group)
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testassignment)
        self.assertFalse(mockresponse.selector.exists('#id_devilry_admin_assignment_examineroverview'))

    #
    #
    # Anonymization tests
    #
    #

    def test_anonymizationmode_fully_anonymous_subjectadmin_404(self):
        # Subject admins are locked out of fully anonymous assignments.
        testuser = baker.make(settings.AUTH_USER_MODEL)
        testgroup = baker.make('core.AssignmentGroup',
                               parentnode=baker.make_recipe(
                                   'devilry.apps.core.assignment_activeperiod_start',
                                   anonymizationmode=Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS))
        with self.assertRaisesMessage(Http404, 'Only department admins have permission to edit examiners '
                                               'for fully anonymous assignments.'):
            self.mock_getrequest(
                cradmin_role=testgroup.assignment,
                cradmin_instance=self.__mockinstance_with_devilryrole('subjectadmin'),
                requestuser=testuser)

    def test_anonymizationmode_fully_anonymous_departmentadmin_no_404(self):
        # Department admins may still access fully anonymous assignments.
        testuser = baker.make(settings.AUTH_USER_MODEL)
        testgroup = baker.make('core.AssignmentGroup',
                               parentnode=baker.make_recipe(
                                   'devilry.apps.core.assignment_activeperiod_start',
                                   anonymizationmode=Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS))
        self.mock_http200_getrequest_htmls(
            cradmin_role=testgroup.assignment,
            cradmin_instance=self.__mockinstance_with_devilryrole('departmentadmin'),
            requestuser=testuser)  # No Http404 exception raised!

    def test_anonymizationmode_semi_anonymous_subjectadmin_no_404(self):
        # Semi-anonymous assignments remain open to subject admins.
        testuser = baker.make(settings.AUTH_USER_MODEL)
        testgroup = baker.make('core.AssignmentGroup',
                               parentnode=baker.make_recipe(
                                   'devilry.apps.core.assignment_activeperiod_start',
                                   anonymizationmode=Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS))
        self.mock_http200_getrequest_htmls(
            cradmin_role=testgroup.assignment,
            cradmin_instance=self.__mockinstance_with_devilryrole('subjectadmin'),
            requestuser=testuser)  # No Http404 exception raised!
|
|
import ray
import os
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
from torch.autograd import Variable
from torch.nn import functional as F
from scipy.stats import entropy
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# Training parameters
# Root directory for the MNIST download (expanded by torchvision).
dataroot = "~/data"
# DataLoader worker processes.
workers = 2
batch_size = 64
# Images are resized to image_size x image_size before training.
image_size = 32
# Number of channels in the training images. For color images this is 3
# (MNIST is grayscale, hence 1).
nc = 1
# Size of z latent vector (i.e. size of generator input)
nz = 100
# Size of feature maps in generator
ngf = 32
# Size of feature maps in discriminator
ndf = 32
# Beta1 hyperparam for Adam optimizers
beta1 = 0.5
# iterations of actual training in each Trainable _train
# (presumably one Ray Tune training step -- TODO confirm against the caller)
train_iterations_per_step = 5
# Location of the pre-trained MNIST classifier used for the inception score.
MODEL_PATH = os.path.expanduser("~/.ray/models/mnist_cnn.pt")
def get_data_loader():
    """Return a shuffled DataLoader over the resized, normalized MNIST data."""
    preprocessing = transforms.Compose([
        transforms.Resize(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, ), (0.5, )),
    ])
    dataset = dset.MNIST(root=dataroot, download=True, transform=preprocessing)
    # Create the dataloader
    return torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=True, num_workers=workers)
# __GANmodel_begin__
# custom weights initialization called on netG and netD
def weights_init(m):
    """DCGAN-style init: conv weights ~ N(0, 0.02); batch-norm scale ~ N(1, 0.02), bias 0."""
    kind = m.__class__.__name__
    if "Conv" in kind:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif "BatchNorm" in kind:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
# Generator Code
class Generator(nn.Module):
    """DCGAN generator: upsamples an (nz, 1, 1) latent vector to an nc-channel image."""

    def __init__(self):
        super(Generator, self).__init__()
        # Three stride-2 transposed convs double the spatial size each time;
        # Tanh squashes the output into [-1, 1] to match the normalized data.
        layers = [
            nn.ConvTranspose2d(nz, ngf * 4, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh(),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        return self.main(input)
class Discriminator(nn.Module):
    """DCGAN discriminator: maps an nc-channel image to a real/fake probability."""

    def __init__(self):
        super(Discriminator, self).__init__()
        # Mirror of the generator: stride-2 convs halve the spatial size,
        # and a final Sigmoid yields the probability of "real".
        layers = [
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf * 4, 1, 4, 1, 0, bias=False),
            nn.Sigmoid(),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        return self.main(input)
# __GANmodel_end__
# __INCEPTION_SCORE_begin__
class Net(nn.Module):
    """
    LeNet for MNist classification, used for inception_score
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of 1x28x28 images."""
        pooled1 = F.relu(F.max_pool2d(self.conv1(x), 2))
        pooled2 = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(pooled1)), 2))
        flat = pooled2.view(-1, 320)
        hidden = F.dropout(F.relu(self.fc1(flat)), training=self.training)
        return F.log_softmax(self.fc2(hidden), dim=1)
def inception_score(imgs, mnist_model_ref, batch_size=32, splits=1):
    """Compute the inception score of *imgs* using the MNIST classifier.

    Args:
        imgs: an indexable collection of generated image tensors (len() must
            work; each item is upsampled to 28x28 before classification).
        mnist_model_ref: Ray object ref holding the trained MNIST model.
        batch_size: minibatch size used when running the classifier.
        splits: number of chunks the prediction matrix is divided into.

    Returns:
        Tuple ``(mean, std)`` of ``exp(mean KL(p(y|x) || p(y)))`` across
        the splits.
    """
    N = len(imgs)
    dtype = torch.FloatTensor
    dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)
    cm = ray.get(mnist_model_ref)  # Get the mnist model from Ray object store.
    up = nn.Upsample(size=(28, 28), mode="bilinear").type(dtype)

    def get_pred(x):
        # Classify an upsampled batch and return class probabilities.
        x = up(x)
        x = cm(x)
        # BUG FIX: F.softmax without an explicit ``dim`` is deprecated and
        # relies on an implicit-dimension heuristic; the class axis is dim 1.
        return F.softmax(x, dim=1).data.cpu().numpy()

    # Collect p(y|x) for every image; the classifier has 10 classes.
    preds = np.zeros((N, 10))
    for i, batch in enumerate(dataloader, 0):
        batch = batch.type(dtype)
        batchv = Variable(batch)
        batch_size_i = batch.size()[0]
        preds[i * batch_size:i * batch_size + batch_size_i] = get_pred(batchv)

    # Now compute the mean kl-div between each p(y|x) and the split marginal.
    split_scores = []
    for k in range(splits):
        part = preds[k * (N // splits):(k + 1) * (N // splits), :]
        py = np.mean(part, axis=0)
        scores = [entropy(part[i, :], py) for i in range(part.shape[0])]
        split_scores.append(np.exp(np.mean(scores)))
    return np.mean(split_scores), np.std(split_scores)
# __INCEPTION_SCORE_end__
def train(netD, netG, optimG, optimD, criterion, dataloader, iteration, device,
          mnist_model_ref):
    """Run up to ``train_iterations_per_step`` GAN minibatch updates.

    Standard DCGAN step: train the discriminator on a real batch and a fake
    batch, then train the generator to fool the updated discriminator.
    Returns ``(generator_loss, discriminator_loss, inception_score)`` from
    the last processed minibatch.

    NOTE(review): if ``dataloader`` yields no batches, ``errG``/``errD``/
    ``is_score`` are never bound and the final ``return`` raises
    ``NameError`` -- confirm callers always pass a non-empty loader.
    """
    real_label = 1
    fake_label = 0
    for i, data in enumerate(dataloader, 0):
        # Cap the amount of work done per call.
        if i >= train_iterations_per_step:
            break
        # --- Discriminator update: maximize log(D(x)) + log(1 - D(G(z))) ---
        netD.zero_grad()
        real_cpu = data[0].to(device)
        b_size = real_cpu.size(0)
        label = torch.full(
            (b_size, ), real_label, dtype=torch.float, device=device)
        output = netD(real_cpu).view(-1)
        errD_real = criterion(output, label)
        errD_real.backward()
        D_x = output.mean().item()
        # Fake batch: detach so generator gradients are not computed here.
        noise = torch.randn(b_size, nz, 1, 1, device=device)
        fake = netG(noise)
        label.fill_(fake_label)
        output = netD(fake.detach()).view(-1)
        errD_fake = criterion(output, label)
        errD_fake.backward()
        D_G_z1 = output.mean().item()
        errD = errD_real + errD_fake
        optimD.step()
        # --- Generator update: maximize log(D(G(z))) via the real label ---
        netG.zero_grad()
        label.fill_(real_label)
        output = netD(fake).view(-1)
        errG = criterion(output, label)
        errG.backward()
        D_G_z2 = output.mean().item()
        optimG.step()
        # Score the freshly generated batch (runs the MNIST classifier).
        is_score, is_std = inception_score(fake, mnist_model_ref)
        # Output training stats
        if iteration % 10 == 0:
            print("[%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z))"
                  ": %.4f / %.4f \tInception score: %.4f" %
                  (iteration, len(dataloader), errD.item(), errG.item(), D_x,
                   D_G_z1, D_G_z2, is_score))
    return errG.item(), errD.item(), is_score
def plot_images(dataloader):
    """Display a grid of up to 64 images from the first batch of *dataloader*."""
    first_batch = next(iter(dataloader))
    grid = vutils.make_grid(first_batch[0][:64], padding=2,
                            normalize=True).cpu()
    plt.figure(figsize=(8, 8))
    plt.axis("off")
    plt.title("Original Images")
    # Tensors are CHW; matplotlib wants HWC.
    plt.imshow(np.transpose(grid, (1, 2, 0)))
    plt.show()
def demo_gan(checkpoint_paths):
    """Animate generator outputs across checkpoints and save them as a GIF."""
    fixed_noise = torch.randn(64, nz, 1, 1)
    img_list = []
    # Render the same latent batch through each saved generator.
    for netG_path in checkpoint_paths:
        loadedG = Generator()
        loadedG.load_state_dict(torch.load(netG_path)["netGmodel"])
        with torch.no_grad():
            fake = loadedG(fixed_noise).detach().cpu()
        img_list.append(vutils.make_grid(fake, padding=2, normalize=True))
    fig = plt.figure(figsize=(8, 8))
    plt.axis("off")
    frames = [[plt.imshow(np.transpose(grid, (1, 2, 0)), animated=True)]
              for grid in img_list]
    ani = animation.ArtistAnimation(
        fig, frames, interval=1000, repeat_delay=1000, blit=True)
    ani.save("./generated.gif", writer="imagemagick", dpi=72)
    plt.show()
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import sys
import core.bonds
import core.control
from gui.tabs.file_tab import FileTabDock
from gui.tabs.view_tab import ViewTabDock
from gui.tabs.image_video_tab import ImageVideoTabDock
from gui.tabs.statistics_tab import StatisticsTabDock
from gui.tabs.log_tab import LogTabDock
from PyQt5 import QtCore, QtGui, QtWidgets
from gui.dialogs.settings_dialog import SettingsDialog
from gui.dialogs.about_dialog import AboutDialog
from gui.gl_widget import UpdateGLEvent
from util import message
from gui.gl_stack import GLStack
from _version import __version__
import functools
import os.path
from config.configuration import config
WEBSITE_URL = 'https://pgi-jcns.fz-juelich.de/portal/pages/pymoldyn-doc.html'
class MainWindow(QtWidgets.QMainWindow):
    """Main application window: central GL view plus tabbed dock widgets."""

    def __init__(self, control):
        """Build the window, docks, menus and show everything.

        :param control: the application controller (owns visualization state).
        """
        QtWidgets.QMainWindow.__init__(self, None)
        self.control = control
        self.center = CentralWidget(self)
        self.file_dock = FileTabDock(self)
        self.view_dock = ViewTabDock(self)
        self.view_dock.setVisible(False)  # shown once a dataset is loaded
        self.image_video_dock = ImageVideoTabDock(self)
        self.statistics_dock = StatisticsTabDock(self)
        self.log_dock = LogTabDock(self)
        self._error_wrapper = None
        # Route calculation progress/errors/log output into the GUI.
        p = self.file_dock.file_tab.progress_dialog
        self.set_output_callbacks(p.progress, p.print_step, p.calculation_finished,
                                  self.show_error, self.log_dock.append_log)
        self.docks = []
        self.shown_dataset = None
        self.setTabPosition(QtCore.Qt.RightDockWidgetArea, QtWidgets.QTabWidget.North)
        self.setCentralWidget(self.center)
        for dock in (self.file_dock, self.view_dock, self.image_video_dock, self.statistics_dock, self.log_dock):
            self.addDockWidget(QtCore.Qt.RightDockWidgetArea,
                               dock, QtCore.Qt.Vertical)
            self.docks.append(dock)
        for dock in (self.view_dock, self.statistics_dock):
            self.tabifyDockWidget(self.file_dock, dock)
        # this variable is used to open the FileDialog in the propper path
        self.file_dock.file_tab.most_recent_path = "~"
        self.menubar = None
        self.file_menu = None
        self.recent_files_submenu = None
        self.init_menu()
        self.setWindowTitle('pyMolDyn v%s' % __version__)
        self.setWindowIcon(QtGui.QIcon('icon.png'))
        self.show()
        # get Dock Widgets TabBar and set the first one to current
        self.file_dock.show()
        self.file_dock.raise_()
        # another workaround to do the same
        # tabbars = self.findChildren(QtWidgets.QTabBar)
        # tabbars[0].setCurrentIndex(0)

    def init_menu(self):
        """Create the File/Export/Recent-files/Help menus and their actions."""
        open_action = QtWidgets.QAction('&Open dataset', self)
        open_action.setShortcut('Ctrl+O')
        open_action.triggered.connect(self.file_dock.file_tab.open_file_dialog)
        settings_action = QtWidgets.QAction('&Settings', self)
        settings_action.setShortcut('Ctrl+I')
        settings_action.triggered.connect(self.show_settings)
        export_submenu = QtWidgets.QMenu("&Export", self)
        export_bonds_action = QtWidgets.QAction('Export &Bonds', self)
        export_bonds_action.setShortcut('Ctrl+1')
        export_bonds_action.triggered.connect(self.wrapper_export_bonds)
        export_bond_angles_action = QtWidgets.QAction('Export Bond &Angles', self)
        export_bond_angles_action.setShortcut('Ctrl+2')
        export_bond_angles_action.triggered.connect(self.wrapper_export_bond_angles)
        export_bond_dihedral_angles_action = QtWidgets.QAction('Export Bond &Dihedral Angles', self)
        export_bond_dihedral_angles_action.setShortcut('Ctrl+3')
        export_bond_dihedral_angles_action.triggered.connect(self.wrapper_export_bond_dihedral_angles)
        export_domains_action = QtWidgets.QAction('Export Cavity Information (domains)', self)
        export_domains_action.setShortcut('Ctrl+4')
        export_domains_action.triggered.connect(self.wrapper_export_domains)
        export_surface_cavities_action = QtWidgets.QAction('Export Cavity Information (surface method)', self)
        export_surface_cavities_action.setShortcut('Ctrl+5')
        export_surface_cavities_action.triggered.connect(self.wrapper_export_surface_cavities)
        export_center_cavities_action = QtWidgets.QAction('Export Cavity Information (center method)', self)
        export_center_cavities_action.setShortcut('Ctrl+6')
        export_center_cavities_action.triggered.connect(self.wrapper_export_center_cavities)
        website_action = QtWidgets.QAction('&pyMolDyn website', self)
        website_action.setShortcut('F1')
        website_action.triggered.connect(self.show_website)
        about_action = QtWidgets.QAction('&About', self)
        about_action.triggered.connect(self.show_about_box)
        self.menubar = self.menuBar()
        self.file_menu = self.menubar.addMenu('&File')
        self.file_menu.addAction(open_action)
        self.file_menu.addAction(settings_action)
        self.file_menu.addMenu(export_submenu)
        self.init_submenu_recent_files()
        self.file_menu.addMenu(self.recent_files_submenu)
        export_submenu.addAction(export_bonds_action)
        export_submenu.addAction(export_bond_angles_action)
        export_submenu.addAction(export_bond_dihedral_angles_action)
        export_submenu.addAction(export_domains_action)
        export_submenu.addAction(export_surface_cavities_action)
        export_submenu.addAction(export_center_cavities_action)
        help_menu = self.menubar.addMenu('&Help')
        help_menu.addAction(website_action)
        help_menu.addSeparator()
        help_menu.addAction(about_action)

    def show_error(self, error_message):
        """Thread-safe error display: queue _show_error onto the GUI thread."""
        QtCore.QMetaObject.invokeMethod(self, '_show_error', QtCore.Qt.QueuedConnection,
                                        QtCore.Q_ARG(str, error_message))

    @QtCore.pyqtSlot(str)
    def _show_error(self, error_message):
        """Slot that actually pops up the message box (GUI thread only)."""
        QtWidgets.QMessageBox.information(self, 'Information', error_message)

    def show_settings(self):
        """Open the settings dialog, then refresh views with the new config."""
        SettingsDialog()
        self.control.update()
        self.statistics_dock.update_results(self.control.visualization.results)

    def show_website(self):
        """Open the pyMolDyn documentation page in the default browser."""
        url = QtCore.QUrl(WEBSITE_URL)
        QtGui.QDesktopServices.openUrl(url)

    def show_about_box(self):
        """Show the about dialog with the author list."""
        AboutDialog(self, 'pyMolDyn is a molecule viewer which is capable of computing molecular cavities.',
                    (('Florian Rhiem', 'f.rhiem@fz-juelich.de'),
                     ('Fabian Beule', 'f.beule@fz-juelich.de'),
                     ('David Knodt', 'd.knodt@fz-juelich.de'),
                     ('Ingo Heimbach', 'i.heimbach@fz-juelich.de'),
                     ('Florian Macherey', 'f.macherey@fz-juelich.de'))).show()

    def init_submenu_recent_files(self):
        """Build the 'Recent files' submenu from the persisted config list."""
        self.recent_files_submenu = QtWidgets.QMenu("&Recent files", self)
        if (not config.recent_files) or (config.recent_files == ['']):
            self.recent_files_submenu.setDisabled(True)
        else:
            self.recent_files_submenu.setEnabled(True)
            for f in config.recent_files:
                f_action = QtWidgets.QAction(f, self)
                f_action.triggered.connect(functools.partial(self.wrapper_recent_files, f))
                self.recent_files_submenu.addAction(f_action)
            self.file_dock.file_tab.most_recent_path = os.path.dirname(config.recent_files[0])
            self._submenu_add_shortcut_for_first_item()

    def update_submenu_recent_files(self):
        """Move (or insert) the most recently opened file to the submenu top.

        Keeps at most 5 entries and reassigns the Alt+1 shortcut to the
        first one.
        """
        if not config.recent_files:
            return
        most_recent_file = config.recent_files[0]
        if not most_recent_file:
            return
        actions_in_menu = self.recent_files_submenu.actions()
        actions_in_menu_str = [s.text() for s in actions_in_menu]
        if most_recent_file in actions_in_menu_str:
            # Already listed: move the entry to the front if necessary.
            index = actions_in_menu_str.index(most_recent_file)
            if index == 0:
                return
            self.recent_files_submenu.removeAction(actions_in_menu[index])
            self.recent_files_submenu.insertAction(actions_in_menu[0], actions_in_menu[index])
        else:
            # New file: insert at the front, evicting the 5th entry if full.
            new_action = QtWidgets.QAction(most_recent_file, self)
            new_action.triggered.connect(functools.partial(self.wrapper_recent_files, most_recent_file))
            if not actions_in_menu:
                self.recent_files_submenu.setEnabled(True)
                self.recent_files_submenu.addAction(new_action)
            else:
                self.recent_files_submenu.insertAction(actions_in_menu[0], new_action)
            # The first entry is the currently open file; disable it.
            self.recent_files_submenu.actions()[0].setDisabled(True)
            if len(actions_in_menu) == 5:
                self.recent_files_submenu.removeAction(actions_in_menu[4])
        self.file_dock.file_tab.most_recent_path = os.path.dirname(most_recent_file)
        self._submenu_add_shortcut_for_first_item()
        self.recent_files_submenu.update()

    def _submenu_add_shortcut_for_first_item(self):
        """Give Alt+1 to the first recent-file entry and clear the others."""
        actions_in_menu = self.recent_files_submenu.actions()
        actions_in_menu[0].setShortcut('Alt+1')
        for action in actions_in_menu[1:]:
            action.setShortcut('')
        self.recent_files_submenu.update()

    def wrapper_recent_files(self, f):
        """Open recent file *f* and refresh the submenu ordering."""
        if f:
            self.file_dock.file_tab.disable_files_in_menu_and_open(f)
            self.update_submenu_recent_files()

    def wrapper_export_bonds(self):
        """Ask for a target file and export the bond list."""
        filename = QtWidgets.QFileDialog.getSaveFileName(self, "Export Bonds", "bonds.txt")[0]
        if filename:
            core.bonds.export_bonds(filename, self.control.visualization.results.atoms)
            QtWidgets.QMessageBox.information(self,
                                              'Export Bonds',
                                              "Saved to filename: %s" % (filename))

    def wrapper_export_bond_angles(self):
        """Ask for a target file and export the bond angles."""
        filename = QtWidgets.QFileDialog.getSaveFileName(self, "Export Bond Angles", "bond_angles.txt")[0]
        if filename:
            core.bonds.export_bond_angles(filename, self.control.visualization.results.atoms)
            QtWidgets.QMessageBox.information(self,
                                              'Export Bond Angles',
                                              "Saved to filename: %s" % (filename))

    def wrapper_export_bond_dihedral_angles(self):
        """Ask for a target file and export the bond dihedral angles."""
        filename = QtWidgets.QFileDialog.getSaveFileName(self, "Export Bond Dihedral Angles",
                                                         "bond_dihedral_angles.txt")[0]
        if filename:
            core.bonds.export_bond_dihedral_angles(filename, self.control.visualization.results.atoms)
            QtWidgets.QMessageBox.information(self,
                                              'Export Bond Dihedral Angles',
                                              "Saved to filename: %s" % (filename))

    def wrapper_export_domains(self):
        """Export cavity domain information; one file per property."""
        filename = QtWidgets.QFileDialog.getSaveFileName(self, "Export Cavity Information (domains)", "domains")[0]
        if filename:
            filenames = self.control.visualization.results.domains.totxt(filename + '_{property}.txt')
            QtWidgets.QMessageBox.information(self,
                                              'Export Cavity Information (domains)',
                                              "Saved to filenames: %s" % (', '.join(filenames)))

    def wrapper_export_surface_cavities(self):
        """Export surface-method cavity information; one file per property."""
        filename = QtWidgets.QFileDialog.getSaveFileName(self, "Export Cavity Information (surface method)",
                                                         "surface_cavities")[0]
        if filename:
            filenames = self.control.visualization.results.surface_cavities.totxt(filename + '_{property}.txt')
            QtWidgets.QMessageBox.information(self,
                                              'Export Cavity Information (surface method)',
                                              "Saved to filenames: %s" % (', '.join(filenames)))

    def wrapper_export_center_cavities(self):
        """Export center-method cavity information; one file per property."""
        filename = QtWidgets.QFileDialog.getSaveFileName(self, "Export Cavity Information (center method)",
                                                         "center_cavities")[0]
        if filename:
            filenames = self.control.visualization.results.center_cavities.totxt(filename + '_{property}.txt')
            QtWidgets.QMessageBox.information(self,
                                              'Export Cavity Information (center method)',
                                              "Saved to filenames: %s" % (', '.join(filenames)))

    def keyPressEvent(self, e):
        """'M' toggles a fullscreen mode in which all docks are hidden."""
        if e.key() == QtCore.Qt.Key_M:
            if not self.isFullScreen():
                for dock in self.docks:
                    dock.hide()
                self.showFullScreen()
            else:
                for dock in self.docks:
                    dock.show()
                self.showNormal()

    def set_output_callbacks(self, progress_func, print_func, finish_func, error_func, log_func):
        """Register GUI callbacks for progress, step output, completion, errors and logging."""
        message.set_output_callbacks(progress_func, print_func, finish_func, error_func, log_func)

    def updatestatus(self, was_successful=lambda: True):
        """Refresh status bar, statistics and GL view after a calculation.

        :param was_successful: either a bool or a zero-argument callable
            returning one; the update is skipped when it is falsy.
        """
        # BUG FIX: the previous code used ``if was_successful and ...`` which
        # tested the truthiness of the callable object itself -- always True
        # for the default lambda. Call it when it is callable so a callable
        # reporting failure is actually honored; plain bools still work.
        successful = was_successful() if callable(was_successful) else was_successful
        if successful and self.control.results is not None:
            results = self.control.results[-1][-1]
            self.shown_dataset = results
            visualization_settings = self.control.visualization.settings
            status = results.description(
                domain_volume=visualization_settings.show_domains,
                surface_cavity_volume=visualization_settings.show_surface_cavities,
                center_cavity_volume=visualization_settings.show_center_cavities)
            self.statusBar().showMessage(status)
            self.statistics_dock.update_results(self.control.visualization.results)
            self.view_dock.setVisible(True)
            self.view_dock.view_tab.update_cavity_buttons(self.control.visualization.results, None)
            self.center.gl_stack.updatestatus()
            QtWidgets.QApplication.postEvent(self.center.gl_stack.gl_widget, UpdateGLEvent())

    # def closeEvent(self, event):
    #     reply = QtWidgets.QMessageBox.question(self, 'Message',
    #             "Are you sure to quit?", QtWidgets.QMessageBox.Yes |
    #             QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)
    #     if reply == QtWidgets.QMessageBox.Yes:
    #         event.accept()
    #     else:
    #         event.ignore()
class CentralWidget(QtWidgets.QWidget):
    """Central widget hosting the GL stack and a combo box to switch views."""

    def __init__(self, parent):
        QtWidgets.QWidget.__init__(self, parent)
        self.control = parent.control
        self.setWindowTitle('pyMolDyn 2')
        self.widget_titles = (
            "3D View",
            "Pair Distribution Functions",
            "Cavity Histograms")
        self.init_gui()

    def init_gui(self):
        """Create the GL stack, populate the view selector and lay them out."""
        self.gl_stack = GLStack(self, self.parent())
        self.gl_widget = self.gl_stack.gl_widget
        self.combo = QtWidgets.QComboBox()
        for caption in self.widget_titles:
            self.combo.addItem(caption)
        self.combo.activated[str].connect(self.on_combo)
        vbox = QtWidgets.QVBoxLayout()
        vbox.addWidget(self.gl_stack)
        vbox.addWidget(self.combo)
        self.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.setLayout(vbox)

    def on_combo(self, string):
        """Activate the stack page whose title was chosen in the combo box."""
        self.gl_stack.activate(self.widget_titles.index(string))
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from heat.common import grouputils
from heat.common import identifier
from heat.common import template_format
from heat.rpc import client as rpc_client
from heat.tests import common
from heat.tests import utils
nested_stack = '''
heat_template_version: 2013-05-23
resources:
r0:
type: OverwrittenFnGetRefIdType
r1:
type: OverwrittenFnGetRefIdType
'''
class GroupUtilsTest(common.HeatTestCase):
    """Tests for the plain grouputils helper functions."""

    def test_non_nested_resource(self):
        """A group without a nested stack reports zero size and no members."""
        group = mock.Mock()
        group.nested_identifier.return_value = None
        group.nested.return_value = None

        self.assertEqual(0, grouputils.get_size(group))
        self.assertEqual([], grouputils.get_members(group))
        self.assertEqual([], grouputils.get_member_refids(group))
        self.assertEqual([], grouputils.get_member_names(group))

    def test_normal_group(self):
        """Members are returned sorted by (created_time, name), with refids."""
        group = mock.Mock()
        stack = utils.parse_stack(template_format.parse(nested_stack))
        group.nested.return_value = stack

        # member list (sorted)
        expected = sorted(six.itervalues(stack),
                          key=lambda r: (r.created_time, r.name))
        self.assertEqual(expected, grouputils.get_members(group))

        # refids
        self.assertEqual(['ID-r0', 'ID-r1'],
                         grouputils.get_member_refids(group))

    def test_group_with_failed_members(self):
        """FAILED members are excluded from member and refid listings."""
        group = mock.Mock()
        t = template_format.parse(nested_stack)
        stack = utils.parse_stack(t)
        self.patchobject(group, 'nested', return_value=stack)

        # Just failed for whatever reason; only r1 should remain visible.
        failed = stack.resources['r0']
        failed.status = failed.FAILED
        healthy = stack.resources['r1']

        self.assertEqual([healthy], grouputils.get_members(group))
        self.assertEqual(['ID-r1'], grouputils.get_member_refids(group))
class GroupInspectorTest(common.HeatTestCase):
    """Tests for grouputils.GroupInspector driven by a mocked RPC client.

    The fixture below contains four healthy members (A, C, D, F) and two
    FAILED ones (B, E), with distinct update/creation timestamps so the
    tests can assert on ordering.
    """

    # Fake list_stack_resources() payload: one dict per member resource.
    resources = [
        {
            'updated_time': '2018-01-01T12:00',
            'creation_time': '2018-01-01T02:00',
            'resource_name': 'A',
            'physical_resource_id': 'a',
            'resource_action': 'UPDATE',
            'resource_status': 'COMPLETE',
            'resource_status_reason': 'resource changed',
            'resource_type': 'OS::Heat::Test',
            'resource_id': 'aaaaaaaa',
            'stack_identity': 'bar',
            'stack_name': 'nested_test',
            'required_by': [],
            'parent_resource': 'stack_resource',
        },
        {
            'updated_time': '2018-01-01T10:00',
            'creation_time': '2018-01-01T03:00',
            'resource_name': 'E',
            'physical_resource_id': 'e',
            'resource_action': 'UPDATE',
            'resource_status': 'FAILED',
            'resource_status_reason': 'reasons',
            'resource_type': 'OS::Heat::Test',
            'resource_id': 'eeeeeeee',
            'stack_identity': 'bar',
            'stack_name': 'nested_test',
            'required_by': [],
            'parent_resource': 'stack_resource',
        },
        {
            'updated_time': '2018-01-01T11:00',
            'creation_time': '2018-01-01T03:00',
            'resource_name': 'B',
            'physical_resource_id': 'b',
            'resource_action': 'UPDATE',
            'resource_status': 'FAILED',
            'resource_status_reason': 'reasons',
            'resource_type': 'OS::Heat::Test',
            'resource_id': 'bbbbbbbb',
            'stack_identity': 'bar',
            'stack_name': 'nested_test',
            'required_by': [],
            'parent_resource': 'stack_resource',
        },
        {
            'updated_time': '2018-01-01T13:00',
            'creation_time': '2018-01-01T01:00',
            'resource_name': 'C',
            'physical_resource_id': 'c',
            'resource_action': 'UPDATE',
            'resource_status': 'COMPLETE',
            'resource_status_reason': 'resource changed',
            'resource_type': 'OS::Heat::Test',
            'resource_id': 'cccccccc',
            'stack_identity': 'bar',
            'stack_name': 'nested_test',
            'required_by': [],
            'parent_resource': 'stack_resource',
        },
        {
            'updated_time': '2018-01-01T04:00',
            'creation_time': '2018-01-01T04:00',
            'resource_name': 'F',
            'physical_resource_id': 'f',
            'resource_action': 'CREATE',
            'resource_status': 'COMPLETE',
            'resource_status_reason': 'resource changed',
            'resource_type': 'OS::Heat::Test',
            'resource_id': 'ffffffff',
            'stack_identity': 'bar',
            'stack_name': 'nested_test',
            'required_by': [],
            'parent_resource': 'stack_resource',
        },
        {
            'updated_time': '2018-01-01T04:00',
            'creation_time': '2018-01-01T04:00',
            'resource_name': 'D',
            'physical_resource_id': 'd',
            'resource_action': 'CREATE',
            'resource_status': 'COMPLETE',
            'resource_status_reason': 'resource changed',
            'resource_type': 'OS::Heat::Test',
            'resource_id': 'dddddddd',
            'stack_identity': 'bar',
            'stack_name': 'nested_test',
            'required_by': [],
            'parent_resource': 'stack_resource',
        },
    ]

    # Fake get_template() payload for the nested stack.
    template = {
        'heat_template_version': 'newton',
        'resources': {
            'A': {
                'type': 'OS::Heat::TestResource',
            },
        },
    }
    def setUp(self):
        """Wire a GroupInspector to fully mocked context and RPC client."""
        super(GroupInspectorTest, self).setUp()
        self.ctx = mock.Mock()
        self.rpc_client = mock.Mock(spec=rpc_client.EngineClient)
        self.identity = identifier.HeatIdentifier('foo', 'nested_test', 'bar')
        # Shortcuts to the two RPC methods the inspector is expected to call.
        self.list_rsrcs = self.rpc_client.list_stack_resources
        self.get_tmpl = self.rpc_client.get_template
        self.insp = grouputils.GroupInspector(self.ctx, self.rpc_client,
                                              self.identity)
    def test_no_identity(self):
        """With no stack identity the inspector reports an empty group."""
        self.insp = grouputils.GroupInspector(self.ctx, self.rpc_client, None)
        self.assertEqual(0, self.insp.size(include_failed=True))
        self.assertEqual([], list(self.insp.member_names(include_failed=True)))
        self.assertIsNone(self.insp.template())
        # No identity means no RPC traffic at all.
        self.list_rsrcs.assert_not_called()
        self.get_tmpl.assert_not_called()
    def test_size_include_failed(self):
        """size() counts every member when FAILED resources are included."""
        self.list_rsrcs.return_value = self.resources
        self.assertEqual(6, self.insp.size(include_failed=True))
        self.list_rsrcs.assert_called_once_with(self.ctx, dict(self.identity))
    def test_size_exclude_failed(self):
        """size() omits the two UPDATE_FAILED members (B and E)."""
        self.list_rsrcs.return_value = self.resources
        self.assertEqual(4, self.insp.size(include_failed=False))
        self.list_rsrcs.assert_called_once_with(self.ctx, dict(self.identity))
    def test_member_names_include_failed(self):
        """member_names() yields failed members (B, E) ahead of the rest."""
        self.list_rsrcs.return_value = self.resources
        # NOTE(review): ordering within each group appears time-based;
        # exact sort key lives in GroupInspector, not visible here.
        self.assertEqual(['B', 'E', 'C', 'A', 'D', 'F'],
                         list(self.insp.member_names(include_failed=True)))
        self.list_rsrcs.assert_called_once_with(self.ctx, dict(self.identity))
    def test_member_names_exclude_failed(self):
        """member_names() drops the FAILED members when asked to."""
        self.list_rsrcs.return_value = self.resources
        self.assertEqual(['C', 'A', 'D', 'F'],
                         list(self.insp.member_names(include_failed=False)))
        self.list_rsrcs.assert_called_once_with(self.ctx, dict(self.identity))
    def test_list_rsrc_caching(self):
        """Repeated size()/member_names() calls reuse one RPC result."""
        self.list_rsrcs.return_value = self.resources
        self.insp.size(include_failed=False)
        list(self.insp.member_names(include_failed=True))
        self.insp.size(include_failed=True)
        list(self.insp.member_names(include_failed=False))
        # Four queries, exactly one list_stack_resources round trip.
        self.list_rsrcs.assert_called_once_with(self.ctx, dict(self.identity))
        self.get_tmpl.assert_not_called()
    def test_get_template(self):
        """template() wraps the RPC result in a Template object (.t)."""
        self.get_tmpl.return_value = self.template
        tmpl = self.insp.template()
        self.assertEqual(self.template, tmpl.t)
        self.get_tmpl.assert_called_once_with(self.ctx, dict(self.identity))
    def test_get_tmpl_caching(self):
        """A second template() call does not trigger another RPC fetch."""
        self.get_tmpl.return_value = self.template
        self.insp.template()
        self.insp.template()
        self.get_tmpl.assert_called_once_with(self.ctx, dict(self.identity))
        self.list_rsrcs.assert_not_called()
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model based on combination of 2D depthwise and 1x1 convolutions."""
from kws_streaming.layers import modes
from kws_streaming.layers import speech_features
from kws_streaming.layers import stream
from kws_streaming.layers.compat import tf
import kws_streaming.models.model_utils as utils
def model_parameters(parser_nn):
  """Depthwise Convolutional(DS CNN) model parameters.
  In more details parameters are described at:
  https://www.tensorflow.org/api_docs/python/tf/keras/layers/BatchNormalization
  https://www.tensorflow.org/api_docs/python/tf/keras/layers/DepthwiseConv2D
  https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D
  Args:
    parser_nn: global command line args parser
  Returns: parser with updated arguments
  """
  # Each entry is (flag, value type, default, help text).  Registering them
  # in one loop has exactly the same effect as one add_argument call per flag
  # but keeps name, default and help side by side.
  flag_specs = (
      ('--cnn1_kernel_size', str, '3,3',
       'Heights and widths of the first 2D convolution'),
      ('--cnn1_dilation_rate', str, '2,1',
       'Dilation rate of the first 2D convolution'),
      ('--cnn1_strides', str, '1,1',
       'Strides of the first 2D convolution along the height and width'),
      ('--cnn1_padding', str, 'valid',
       "One of 'valid' or 'same'"),
      ('--cnn1_filters', int, 300,
       'Number of output filters in the first 2D convolution layers'),
      ('--cnn1_act', str, 'relu',
       'Activation function in the first 2D convolution layers'),
      ('--bn_momentum', float, 0.98,
       'Momentum for the moving average'),
      ('--bn_center', int, 1,
       'If True, add offset of beta to normalized tensor.'
       'If False, beta is ignored'),
      ('--bn_scale', int, 0,
       'If True, multiply by gamma. If False, gamma is not used. '
       'When the next layer is linear (also e.g. nn.relu), this can be disabled'
       'since the scaling will be done by the next layer.'),
      ('--bn_renorm', int, 0,
       'Whether to use Batch Renormalization'),
      ('--dw2_kernel_size', str, '(3,3),(3,3),(10,3),(5,3),(10,3)',
       'Height and width of the 2D Depthwise convolutions'),
      ('--dw2_dilation_rate', str, '(1,1),(2,2),(1,1),(2,2),(1,1)',
       'Dilation rate of the 2D Depthwise convolutions'),
      ('--dw2_strides', str, '(1,1),(1,1),(1,1),(1,1),(1,1)',
       'Strides of the 2D Depthwise convolutions'),
      ('--dw2_padding', str, 'valid',
       "One of 'valid' or 'same'"),
      ('--dw2_act', str, "'relu','relu','relu','relu','relu'",
       'Activation functions in the Depthwise convolution layers'),
      ('--cnn2_filters', str, '300,300,300,300,300',
       'Number of output filters in 1x1 convolution layers'),
      ('--cnn2_act', str, "'relu','relu','relu','relu','relu'",
       'Activation functions in 1x1 convolution layers'),
      ('--dropout1', float, 0.2,
       'Percentage of data dropped'),
  )
  for flag, value_type, default, help_text in flag_specs:
    parser_nn.add_argument(
        flag,
        type=value_type,
        default=default,
        help=help_text,
    )
def model(flags):
  """Depthwise convolutional model.
  It is based on paper:
  MobileNets: Efficient Convolutional Neural Networks for
  Mobile Vision Applications https://arxiv.org/abs/1704.04861
  Model topology is similar with "Hello Edge: Keyword Spotting on
  Microcontrollers" https://arxiv.org/pdf/1711.07128.pdf
  Args:
    flags: data/model parameters
  Returns:
    Keras model for training
  """
  input_audio = tf.keras.layers.Input(
      shape=modes.get_input_data_shape(flags, modes.Modes.TRAINING),
      batch_size=flags.batch_size)
  net = input_audio
  if flags.preprocess == 'raw':
    # it is a self contained model, user need to feed raw audio only
    net = speech_features.SpeechFeatures(
        speech_features.SpeechFeatures.get_params(flags))(
            net)
  # Add a trailing channel dimension so Conv2D sees (batch, time, freq, 1).
  net = tf.keras.backend.expand_dims(net)
  # First full 2D convolution; stream.Stream wraps the cell so the same graph
  # can run in streaming inference modes (see kws_streaming.layers.stream).
  net = stream.Stream(
      cell=tf.keras.layers.Conv2D(
          kernel_size=utils.parse(flags.cnn1_kernel_size),
          dilation_rate=utils.parse(flags.cnn1_dilation_rate),
          filters=flags.cnn1_filters,
          padding=flags.cnn1_padding,
          strides=utils.parse(flags.cnn1_strides)))(
              net)
  net = tf.keras.layers.BatchNormalization(
      momentum=flags.bn_momentum,
      center=flags.bn_center,
      scale=flags.bn_scale,
      renorm=flags.bn_renorm)(
          net)
  net = tf.keras.layers.Activation('relu')(net)
  # Stack of depthwise-separable blocks: a streamed DepthwiseConv2D followed
  # by a pointwise 1x1 Conv2D, each with its own BatchNorm + activation.
  # The per-block hyperparameters come from comma-separated flag lists.
  for kernel_size, dw2_act, dilation_rate, strides, filters, cnn2_act in zip(
      utils.parse(flags.dw2_kernel_size), utils.parse(flags.dw2_act),
      utils.parse(flags.dw2_dilation_rate), utils.parse(flags.dw2_strides),
      utils.parse(flags.cnn2_filters), utils.parse(flags.cnn2_act)):
    net = stream.Stream(
        cell=tf.keras.layers.DepthwiseConv2D(
            kernel_size=kernel_size,
            dilation_rate=dilation_rate,
            padding=flags.dw2_padding,
            strides=strides))(
                net)
    net = tf.keras.layers.BatchNormalization(
        momentum=flags.bn_momentum,
        center=flags.bn_center,
        scale=flags.bn_scale,
        renorm=flags.bn_renorm)(
            net)
    net = tf.keras.layers.Activation(dw2_act)(net)
    net = tf.keras.layers.Conv2D(kernel_size=(1, 1), filters=filters)(net)
    net = tf.keras.layers.BatchNormalization(
        momentum=flags.bn_momentum,
        center=flags.bn_center,
        scale=flags.bn_scale,
        renorm=flags.bn_renorm)(
            net)
    net = tf.keras.layers.Activation(cnn2_act)(net)
  # Global average pool over the remaining time x frequency map, then a
  # dense classifier head.
  net = stream.Stream(
      cell=tf.keras.layers.AveragePooling2D(
          pool_size=(int(net.shape[1]), int(net.shape[2]))))(
              net)
  net = stream.Stream(cell=tf.keras.layers.Flatten())(net)
  net = tf.keras.layers.Dropout(rate=flags.dropout1)(net)
  net = tf.keras.layers.Dense(units=flags.label_count)(net)
  if flags.return_softmax:
    net = tf.keras.layers.Activation('softmax')(net)
  return tf.keras.Model(input_audio, net)
|
|
""" Performance test for pika
"""
import logging
from optparse import OptionParser
import sys
import pika
# Module-level logger and the routing key used for every published message.
g_log = logging.getLogger("pika_perf")
ROUTING_KEY = "test"
#logging.getLogger("pika").setLevel(logging.DEBUG)
def main():
  """Entry point: parse the top-level COMMAND and dispatch to the test.

  Exits via OptionParser.error() (SystemExit) on a missing or unknown
  COMMAND.
  """
  # BUG FIX: the original passed disable_existing_loggers=False, which is a
  # logging.config (fileConfig/dictConfig) option, not a basicConfig() one.
  # Python 2 silently ignored unknown basicConfig kwargs and Python 3 raises
  # ValueError, so the bogus kwarg is removed.
  logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)-15s %(name)s(%(process)s) - %(levelname)s - %(message)s')

  topHelpString = (
    "\n"
    "\t%prog COMMAND OPTIONS\n"
    "\t%prog --help\n"
    "\t%prog COMMAND --help\n"
    "\n"
    "Supported COMMANDs:\n"
    "\tpublish - publish messages using one of several pika connection classes")

  topParser = OptionParser(topHelpString)

  if len(sys.argv) < 2:
    topParser.error("Missing COMMAND")

  command = sys.argv[1]

  if command == "publish":
    _handlePublishTest(sys.argv[2:])
  elif command.startswith("-"):
    # A top-level option such as --help: let OptionParser handle it (it
    # exits on --help); anything that survives parsing is still unknown.
    # (Replaces the original no-op ``try/except: raise`` wrapper.)
    topParser.parse_args()
    topParser.error("Unknown command=%s" % command)
  else:
    topParser.error("Unexpected action: %s" % (command,))
def _handlePublishTest(args):
  """ Parse args and invoke the publish test using the requested connection
  class

  :param args: sequence of commandline args passed after the "publish" keyword
  """
  helpString = (
    "\n"
    "\t%%prog publish OPTIONS\n"
    "\t%%prog publish --help\n"
    "\t%%prog --help\n"
    "\n"
    "Publishes the given number of messages of the\n"
    "given size to the given exchange and routing_key=%s using the specified\n"
    "pika connection class") % (ROUTING_KEY,)

  parser = OptionParser(helpString)

  implChoices = ["BlockingConnection",
                 "SynchronousConnection",
                 "SelectConnection"]

  parser.add_option(
    "--impl",
    action="store",
    type="choice",
    dest="impl",
    choices=implChoices,
    help=("Selection of pika connection class "
          "[REQUIRED; must be one of: %s]" % ", ".join(implChoices)))

  parser.add_option(
    "--exg",
    action="store",
    type="string",
    dest="exchange",
    help="Destination exchange [REQUIRED]")

  parser.add_option(
    "--msgs",
    action="store",
    type="int",
    dest="numMessages",
    default=1000,
    help="Number of messages to send [default: %default]")

  parser.add_option(
    "--size",
    action="store",
    type="int",
    dest="messageSize",
    default=1024,
    help="Size of each message in bytes [default: %default]")

  parser.add_option(
    "--pubacks",
    action="store_true",
    dest="deliveryConfirmation",
    default=False,
    help="Publish in delivery confirmation mode [defaults to OFF]")

  # BUG FIX: parse the `args` sequence that was passed in; the original
  # parsed sys.argv[2:], silently ignoring its own parameter.  main() passes
  # sys.argv[2:] here, so the script's own behavior is unchanged.
  options, positionalArgs = parser.parse_args(args)

  if positionalArgs:
    # BUG FIX: parser.error() raises SystemExit itself and returns None, so
    # the original ``raise parser.error(...)`` was wrong; just call it.
    parser.error("Unexpected to have any positional args, but got: %r"
                 % positionalArgs)

  if not options.impl:
    parser.error("--impl is required")

  if options.exchange is None:
    parser.error("--exg must be specified with a valid destination exchange name")

  if options.impl in ["BlockingConnection", "SynchronousConnection"]:
    runBlockingPublishTest(implClassName=options.impl,
                           exchange=options.exchange,
                           numMessages=options.numMessages,
                           messageSize=options.messageSize,
                           deliveryConfirmation=options.deliveryConfirmation)
  else:
    assert options.impl == "SelectConnection", options.impl
    runSelectPublishTest(implClassName=options.impl,
                         exchange=options.exchange,
                         numMessages=options.numMessages,
                         messageSize=options.messageSize,
                         deliveryConfirmation=options.deliveryConfirmation)
def runBlockingPublishTest(implClassName,
                           exchange,
                           numMessages,
                           messageSize,
                           deliveryConfirmation):
  """Publish messages synchronously via a blocking-style pika connection.

  :param implClassName: pika attribute name ("BlockingConnection" or
    "SynchronousConnection")
  :param exchange: destination exchange name
  :param numMessages: number of messages to publish
  :param messageSize: body size of each message, in bytes
  :param deliveryConfirmation: True to publish in publisher-confirms mode
  """
  g_log.info("runBlockingPublishTest: impl=%s; exchange=%s; numMessages=%d; "
             "messageSize=%s; deliveryConfirmation=%s", implClassName, exchange,
             numMessages, messageSize, deliveryConfirmation)
  # Resolve the connection class by name from the pika package.
  connectionClass = getattr(pika, implClassName)
  connection = connectionClass(getPikaConnectionParameters())
  g_log.info("%s: opened connection", implClassName)
  message = "a" * messageSize
  channel = connection.channel()
  g_log.info("%s: opened channel", implClassName)
  if deliveryConfirmation:
    channel.confirm_delivery()
    g_log.info("%s: enabled message delivery confirmation", implClassName)
  # Python 2 script (xrange).  In confirms mode basic_publish returns True on
  # broker ack; without confirms it returns None.
  for i in xrange(numMessages):
    res = channel.basic_publish(exchange=exchange, routing_key=ROUTING_KEY,
                                immediate=False, mandatory=False, body=message)
    if deliveryConfirmation:
      assert res is True, repr(res)
    else:
      assert res is None, repr(res)
  else:
    # for/else: this always runs after normal loop completion (no break).
    # NOTE(review): if numMessages == 0, `i` is unbound here -> NameError.
    g_log.info("Published %d messages of size=%d via=%s",
               i+1, messageSize, connectionClass)
  g_log.info("%s: closing channel", implClassName)
  channel.close()
  g_log.info("%s: closing connection", implClassName)
  connection.close()
  g_log.info("%s: DONE", implClassName)
def runSelectPublishTest(implClassName,
                         exchange,
                         numMessages,
                         messageSize,
                         deliveryConfirmation):
  """Publish messages via the asynchronous SelectConnection.

  The whole workflow is driven by callbacks: connection open -> channel
  open -> publish all messages -> (optionally wait for confirms) -> close
  channel -> close connection -> ioloop exits.

  :param implClassName: pika attribute name; must be "SelectConnection"
  :param exchange: destination exchange name
  :param numMessages: number of messages to publish
  :param messageSize: body size of each message, in bytes
  :param deliveryConfirmation: True to publish in publisher-confirms mode
  """
  g_log.info("runSelectPublishTest: impl=%s; exchange=%s; numMessages=%d; "
             "messageSize=%s; deliveryConfirmation=%s", implClassName, exchange,
             numMessages, messageSize, deliveryConfirmation)
  message = "a" * messageSize
  # Mutable state shared by the nested callbacks (class attrs, no instance).
  class Counter(object):
    numPublishConfirms = 0
    lastConfirmedDeliveryTag = 0
  def onDeliveryConfirmation(ch, methodFrame):
    # Got Basic.Ack or Basic.Nack
    if isinstance(methodFrame.method, pika.spec.Basic.Ack):
      Counter.numPublishConfirms += 1
      Counter.lastConfirmedDeliveryTag = methodFrame.method.delivery_tag
      # Delivery tags are sequential, so seeing the last tag means every
      # message has been confirmed.
      if Counter.lastConfirmedDeliveryTag == numMessages:
        g_log.info("All messages confirmed, closing Select channel...")
        ch.close()
    else:
      msg = "Failed: message was not Ack'ed; got instead %r" % (methodFrame,)
      g_log.error(msg)
      raise Exception(msg)
  def onMessageReturn(*args):
    # Broker returned an unroutable message; treat as a test failure.
    msg = "Failed: message was returned: %s" % (args,)
    g_log.error(msg)
    raise Exception(msg)
  def onChannelOpen(ch):
    if deliveryConfirmation:
      ch.confirm_delivery(
          callback=lambda *args: onDeliveryConfirmation(ch, *args))
      g_log.info("%s: enabled message delivery confirmation", implClassName)
    g_log.info("Select publishing...")
    for i in xrange(numMessages):
      ch.basic_publish(exchange=exchange, routing_key=ROUTING_KEY,
                       immediate=False, mandatory=False, body=message)
    else:
      # NOTE(review): closes over `connectionClass`, which is assigned below
      # but before the ioloop (and thus this callback) runs.  `i` is unbound
      # here if numMessages == 0.
      g_log.info("Published %d messages of size=%d via=%s",
                 i+1, messageSize, connectionClass)
    if not deliveryConfirmation:
      g_log.info("Closing Select channel...")
      ch.close()
  def onChannelClosed(ch, reasonCode, reasonText):
    g_log.info("Select channel closed (%s): %s", reasonCode, reasonText)
    g_log.info("Closing Select connection...")
    ch.connection.close()
  def onConnectionOpen(connection):
    g_log.info("Select opening channel...")
    ch = connection.channel(on_open_callback=onChannelOpen)
    ch.add_on_close_callback(onChannelClosed)
    ch.add_on_return_callback(onMessageReturn)
  def onConnectionClosed(connection, reasonCode, reasonText):
    g_log.info("Select connection closed (%s): %s", reasonCode, reasonText)
  connectionClass = getattr(pika, implClassName)
  connection = connectionClass(
    getPikaConnectionParameters(),
    on_open_callback=onConnectionOpen,
    on_close_callback=onConnectionClosed)
  # Blocks until the connection is closed by the callbacks above.
  connection.ioloop.start()
  if deliveryConfirmation:
    assert Counter.lastConfirmedDeliveryTag == numMessages, (
      "lastConfirmedDeliveryTag=%s, numPublishConfirms=%s" % (
      Counter.lastConfirmedDeliveryTag, Counter.numPublishConfirms))
  else:
    assert Counter.numPublishConfirms == 0, Counter.numPublishConfirms
  g_log.info("%s: DONE", implClassName)
def getPikaConnectionParameters():
  """
  :returns: instance of pika.ConnectionParameters for the AMQP broker (RabbitMQ
      most likely)
  """
  # Default local broker with the stock guest account.
  return pika.ConnectionParameters(
    host="localhost",
    virtual_host="/",
    credentials=pika.PlainCredentials("guest", "guest"))
# Script entry point.
if __name__ == '__main__':
  main()
|
|
"""Unit tests for qrscp.py verification service."""
import logging
import os
import subprocess
import sys
import tempfile
import time
import pytest
from pydicom import dcmread
from pydicom.uid import (
ExplicitVRLittleEndian, ImplicitVRLittleEndian,
DeflatedExplicitVRLittleEndian, ExplicitVRBigEndian
)
from pynetdicom import AE, evt, debug_logger, DEFAULT_TRANSFER_SYNTAXES
from pynetdicom.sop_class import VerificationSOPClass, CTImageStorage
#debug_logger()
# Paths to the qrscp application under test and to the DICOM test data.
APP_DIR = os.path.join(os.path.dirname(__file__), '../')
APP_FILE = os.path.join(APP_DIR, 'qrscp', 'qrscp.py')
DATA_DIR = os.path.join(APP_DIR, '../', 'tests', 'dicom_files')
DATASET_FILE = os.path.join(DATA_DIR, 'CTImageStorage.dcm')
def which(program):
    """Return the full path to *program*, or None when it is not executable.

    A *program* containing a directory component is checked directly;
    a bare name is searched for on every entry of ``PATH``.
    """
    def _runnable(candidate):
        # Executable means: a regular file with the execute bit set for us.
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    head, _ = os.path.split(program)
    if head:
        return program if _runnable(program) else None
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, program)
        if _runnable(candidate):
            return candidate
    return None
def start_qrscp(args):
    """Start the qrscp.py app and return the process."""
    command = [which('python'), APP_FILE]
    command.extend(args)
    return subprocess.Popen(command)
def start_qrscp_cli(args):
    """Start the qrscp app using CLI and return the process."""
    command = [which('python'), '-m', 'pynetdicom', 'qrscp']
    command.extend(args)
    return subprocess.Popen(command)
class EchoSCPBase(object):
    """Shared C-ECHO (verification service) tests for the qrscp app.

    Subclasses set ``self.func`` to the launcher to use (direct script or
    ``python -m pynetdicom`` CLI).
    """
    def setup(self):
        """Run prior to each test"""
        self.ae = None      # requester AE, shut down in teardown
        self.p = None       # spawned qrscp subprocess
        self.func = None    # launcher; assigned by subclasses
        # Temp file backs the app's database; temp dir holds stored instances.
        self.tfile = tempfile.NamedTemporaryFile()
        self.db_location = self.tfile.name
        self.instance_location = tempfile.TemporaryDirectory()
    def teardown(self):
        """Clear any active threads"""
        if self.ae:
            self.ae.shutdown()
        if self.p:
            self.p.kill()
            self.p.wait(timeout=5)
    def test_default(self):
        """Test default settings."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_requested_context(VerificationSOPClass)
        self.p = p = self.func([
            '--database-location', self.db_location,
            '--instance-location', self.instance_location.name
        ])
        # Give the subprocess time to start listening on its default port.
        time.sleep(1)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assoc.release()
        p.terminate()
        p.wait()
        # Terminated by signal, so the exit status is non-zero.
        assert p.returncode != 0
        # App's default maximum PDU size.
        assert 16382 == assoc.acceptor.maximum_length
        cxs = assoc.accepted_contexts
        assert len(cxs) == 1
        cxs = {cx.abstract_syntax: cx for cx in cxs}
        assert VerificationSOPClass in cxs
    def test_flag_version(self, capfd):
        """Test --version flag."""
        self.p = p = self.func([
            '--database-location', self.db_location,
            '--instance-location', self.instance_location.name,
            '--version'
        ])
        p.wait()
        # --version prints and exits cleanly.
        assert p.returncode == 0
        out, err = capfd.readouterr()
        assert 'qrscp.py v' in out
    def test_flag_quiet(self, capfd):
        """Test --quiet flag."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_requested_context(VerificationSOPClass)
        self.p = p = self.func([
            '--database-location', self.db_location,
            '--instance-location', self.instance_location.name,
            '-q'
        ])
        time.sleep(0.5)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        status = assoc.send_c_echo()
        assert status.Status == 0x0000
        assoc.release()
        p.terminate()
        p.wait()
        # Quiet mode must produce no output at all.
        out, err = capfd.readouterr()
        assert out == err == ''
    def test_flag_verbose(self, capfd):
        """Test --verbose flag."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_requested_context(VerificationSOPClass)
        out, err = [], []
        self.p = p = self.func([
            '--database-location', self.db_location,
            '--instance-location', self.instance_location.name,
            '-v'
        ])
        time.sleep(0.5)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        status = assoc.send_c_echo()
        assert status.Status == 0x0000
        assoc.release()
        p.terminate()
        p.wait()
        # INFO-level association lifecycle messages go to stderr.
        out, err = capfd.readouterr()
        assert "Accepting Association" in err
        assert "Received Echo Request" in err
        assert "Association Released" in err
    def test_flag_debug(self, capfd):
        """Test --debug flag."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_requested_context(VerificationSOPClass)
        self.p = p = self.func([
            '--database-location', self.db_location,
            '--instance-location', self.instance_location.name,
            '-d'
        ])
        time.sleep(0.5)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        status = assoc.send_c_echo()
        assert status.Status == 0x0000
        assoc.release()
        p.terminate()
        p.wait()
        # DEBUG output includes low-level PDU/dataset details.
        out, err = capfd.readouterr()
        assert "pydicom.read_dataset()" in err
        assert "Accept Parameters" in err
        assert 'Received C-ECHO request from' in err
    def test_flag_log_collision(self):
        """Test error with -q -v and -d flag."""
        # Mutually exclusive logging flags must make the app exit non-zero.
        self.p = p = self.func([
            '--database-location', self.db_location,
            '--instance-location', self.instance_location.name,
            '-v', '-d'
        ])
        p.wait()
        assert p.returncode != 0
class TestEchoSCP(EchoSCPBase):
    """Runs the shared echo tests by launching qrscp.py directly."""

    def setup(self):
        """Run prior to each test"""
        EchoSCPBase.setup(self)
        self.func = start_qrscp
class TestEchoSCPCLI(EchoSCPBase):
    """Runs the shared echo tests via the ``python -m pynetdicom`` CLI."""

    def setup(self):
        """Run prior to each test"""
        EchoSCPBase.setup(self)
        self.func = start_qrscp_cli
|
|
#!/usr/bin/env python
"""
Created by stevertaylor
Copyright (c) 2014 Stephen R. Taylor
Code contributions by Rutger van Haasteren (piccard) and Justin Ellis (PAL/PAL2).
"""
import numpy as np
from numpy import *
import os
import math
from scipy import integrate
from scipy import optimize
from scipy import constants
from numpy import random
from scipy import special as ss
from scipy import linalg as sl
import numexpr as ne
import optparse
import cProfile
import ephem
from ephem import *
import PALInferencePTMCMC as PAL
import libstempo as T2
import time
from time import gmtime, strftime
import NX01_AnisCoefficients as anis
import NX01_utils as utils
import NX01_psr
# Command-line options for the anisotropic GWB search.
parser = optparse.OptionParser(description = 'NX01 - Precursor to the PANTHER Group ENTERPRISE project')
############################
############################
parser.add_option('--nmodes', dest='nmodes', action='store', type=int, default=50,
                  help='Number of modes in low-rank time-frequency approximation (default = 50 modes)')
parser.add_option('--num_gwfreq_wins', dest='num_gwfreq_wins', action='store', type=int, default=1,
                  help='Number windows to split the band into (useful for evolving anisotropic searches (default = 1 windows)')
parser.add_option('--lmax', dest='LMAX', action='store', type=int, default=0,
                  help='Maximum multipole in anisotropic search (default = 0, i.e. isotropic-search)')
parser.add_option('--use-gpu', dest='use_gpu', action='store_true', default=False,
                  help='Do you want to use the GPU for accelerated linear algebra? (default = False)')
parser.add_option('--fix-slope', dest='fix_slope', action='store_true', default=False,
                  help='Do you want to fix the slope of the GWB spectrum? (default = False)')
parser.add_option('--snr-tag', dest='snr_tag', action='store', type=float, default=0.9,
                  help='Do you want the 90%, 95% or 100% SNR dataset? [6, 11, and 41 pulsars respectively] (default=0.90)')
parser.add_option('--limit-or-detect', dest='limit_or_detect', action='store', type=str, default='limit',
                  help='Do you want to use a uniform prior on log_10(Agwb) [detect] or Agwb itself [upper-limit] (default=\'limit\')?')
# `x` holds any positional leftovers (unused below).
(args, x) = parser.parse_args()
# Optional GPU linear-algebra stack (PyCUDA + scikits.cuda), imported only on
# demand so CPU-only runs need none of these packages installed.
if args.use_gpu:
    import pycuda.autoinit
    from pycuda.compiler import SourceModule
    import pycuda.gpuarray as gpuarray
    import pycuda.driver as drv
    import pycuda.elementwise as el
    import pycuda.tools as tools
    import scikits.cuda.cublas as cublas
    import scikits.cuda.cula as cula
    import scikits.cuda.linalg as culinalg
    import scikits.cuda.misc as cumisc
    culinalg.init()
# Select the pulsar subset by the requested fraction of total SNR^2.
# NOTE(review): `path` is a hard-coded, machine-specific dataset location.
master_path = os.getcwd()
path = '/Users/staylor/Research/EPTAv2/UniEQ'
if args.snr_tag == 0.9:
    dir = ['J1909-3744', 'J1713+0747', 'J1744-1134', 'J0613-0200', 'J1600-3053', 'J1012+5307'] #gives 90% of total SNR^2
    snr_tag_ext = '90pct'
elif args.snr_tag == 0.95:
    dir = ['J1909-3744', 'J1713+0747', 'J1744-1134', 'J0613-0200', 'J1600-3053', 'J1012+5307', \
           'J1640+2224', 'J2145-0750', 'J1857+0943', 'J1022+1001', 'J0030+0451'] # gives 95% of total SNR^2
    snr_tag_ext = '95pct'
elif args.snr_tag == 1.0:
    # Take every pulsar directory on disk except J1939+2134.
    # (Python 2: generator .next(); element [1] is the subdirectory list.)
    os.chdir(path)
    dir = os.walk('.').next()[1]
    dir.remove('J1939+2134')
    os.chdir(master_path)
    snr_tag_ext = '100pct'
if not os.path.exists('chains_Analysis'):
    os.makedirs('chains_Analysis')
# Keep only J-named pulsar directories, in sorted order.
pulsars = [s for s in dir if "J" in s]
pulsars.sort()
print pulsars
################################################################################################################################
# PASSING THROUGH TEMPO2 VIA libstempo
################################################################################################################################
par_ext = 'ML'  # Running fixed-noise search with ML parameters. Need ML EFACs and EQUADs to represent covariance of params
t2psr=[]
for ii in range(len(pulsars)):
    os.chdir(path+'/'+pulsars[ii])
    # Prefer the timing data with the AFB backend removed, when available.
    if os.path.isfile('{0}_NoAFB.par'.format(pulsars[ii])):
        t2psr.append(T2.tempopulsar(parfile=path+'/'+pulsars[ii]+'/'+pulsars[ii]+'_TD.{0}.par'.format(par_ext),\
                                    timfile=path+'/'+pulsars[ii]+'/'+pulsars[ii]+'_NoAFB.tim'))
    else:
        t2psr.append(T2.tempopulsar(parfile=path+'/'+pulsars[ii]+'/'+pulsars[ii]+'_TD.{0}.par'.format(par_ext),\
                                    timfile=path+'/'+pulsars[ii]+'/'+pulsars[ii]+'_all.tim'))
    os.chdir(path)
    t2psr[ii].fit(iters=10)
    # If the fit left any non-finite residuals, re-load the pulsar from disk
    # (unfitted) rather than keep a numerically broken object.
    if np.any(np.isfinite(t2psr[ii].residuals())==False)==True:
        os.chdir(path+'/'+pulsars[ii])
        if os.path.isfile('{0}_NoAFB.par'.format(pulsars[ii])):
            t2psr[ii] = T2.tempopulsar(parfile=path+'/'+pulsars[ii]+'/'+pulsars[ii]+'_TD.{0}.par'.format(par_ext),\
                                       timfile=path+'/'+pulsars[ii]+'/'+pulsars[ii]+'_NoAFB.tim')
        else:
            t2psr[ii] = T2.tempopulsar(parfile=path+'/'+pulsars[ii]+'/'+pulsars[ii]+'_TD.{0}.par'.format(par_ext),\
                                       timfile=path+'/'+pulsars[ii]+'/'+pulsars[ii]+'_all.tim')
        os.chdir(path)
os.chdir(master_path)
################################################################################################################################
# MAKING A PULSAR OBJECT, THEN GRABBING ALL THE VARIABLES, e.g. toas, residuals, error-bars, designmatrices etc.
################################################################################################################################
psr = [NX01_psr.PsrObj(t2psr[ii]) for ii in range(len(t2psr))]
[psr[ii].grab_all_vars() for ii in range(len(psr))]
# Sky positions as (longitude, colatitude); psr_locs[1] is latitude.
psr_positions = [np.array([psr[ii].psr_locs[0], np.pi/2. - psr[ii].psr_locs[1]]) for ii in range(len(psr))]
positions = np.array(psr_positions).copy()
CorrCoeff = np.array(anis.CorrBasis(positions,args.LMAX))  # computing all the correlation basis-functions for the array
harm_sky_vals = utils.SetupPriorSkyGrid(args.LMAX)  # computing the values of the spherical-harmonics up to order
                                                    # LMAX on a pre-specified grid
gwfreqs_per_win = int(1.*args.nmodes/(1.*args.num_gwfreq_wins))  # getting the number of GW frequencies per window
################################################################################################################################
# GETTING MAXIMUM TIME, COMPUTING FOURIER DESIGN MATRICES, AND GETTING MODES
################################################################################################################################
# Longest single-pulsar observation span sets the base frequency 1/Tmax.
Tmax = np.max([psr[p].toas.max() - psr[p].toas.min() for p in range(len(psr))])
# initialize fourier design matrices
[psr[ii].makeFtot(args.nmodes, Tmax) for ii in range(len(psr))]
F = [psr[ii].Ftot for ii in range(len(psr))]
# get GW frequencies
fqs = np.linspace(1/Tmax, args.nmodes/Tmax, args.nmodes)
################################################################################################################################
# FORM A LIST COMPOSED OF NP ARRAYS CONTAINING THE INDEX POSITIONS WHERE EACH UNIQUE 'sys' BACKEND IS APPLIED
################################################################################################################################
backends = []
[psr[ii].get_backends() for ii in range(len(psr))]
for ii in range(len(psr)):
    backends.append(psr[ii].bkends)
################################################################################################################################
# GETTING MAXIMUM-LIKELIHOOD VALUES OF SINGLE-PULSAR ANALYSIS FOR OUR STARTING POINT
################################################################################################################################
Adm_ML=[]
gam_dm_ML=[]
Ared_ML=[]
gam_red_ML=[]
EFAC_ML = [[0.0]*len(backends[jj]) for jj in range(len(backends))]
EQUAD_ML = [[0.0]*len(backends[jj]) for jj in range(len(backends))]
for ii in range(len(pulsars)):
    # Fixed file layout: one parameter per line, column 4 (index 3) holds the
    # ML value, in the order Adm, gam_dm, Ared, gam_red, then the per-backend
    # EFACs followed by the per-backend EQUADs.
    with open(path+'/{0}/{0}_Taylor_TimeDomain_model1.txt'.format(psr[ii].name), 'r') as f:
        Adm_ML.append(float(f.readline().split()[3]))
        gam_dm_ML.append(float(f.readline().split()[3]))
        Ared_ML.append(float(f.readline().split()[3]))
        gam_red_ML.append(float(f.readline().split()[3]))
        for jj in range(len(backends[ii])):
            EFAC_ML[ii][jj] = float(f.readline().split()[3])
        for jj in range(len(backends[ii])):
            EQUAD_ML[ii][jj] = float(f.readline().split()[3])
################################################################################################################################
# GETTING MEAN AND ERROR-BARS VALUES OF SINGLE-PULSAR ANALYSIS FOR OUR INITIAL PARAMETER COVARIANCE ESTIMATE
################################################################################################################################
Adm_mean=[]
Adm_err=[]
gam_dm_mean=[]
gam_dm_err=[]
Ared_mean=[]
Ared_err=[]
gam_red_mean=[]
gam_red_err=[]
EFAC_mean = [[0.0]*len(backends[jj]) for jj in range(len(backends))]
EFAC_err = [[0.0]*len(backends[jj]) for jj in range(len(backends))]
EQUAD_mean = [[0.0]*len(backends[jj]) for jj in range(len(backends))]
EQUAD_err = [[0.0]*len(backends[jj]) for jj in range(len(backends))]
for ii in range(len(pulsars)):
    # Same file as above; columns 5/6 (indices 4/5) hold lower/upper bounds.
    # Mean/half-width are computed in log10 space from those bounds.
    with open(path+'/{0}/{0}_Taylor_TimeDomain_model1.txt'.format(psr[ii].name), 'r') as f:
        line = f.readline().split()
        Adm_mean.append( 0.5 * (np.log10(float(line[5])) + np.log10(float(line[4]))) ) # the means and error bars will be in log10
        Adm_err.append( 0.5 * (np.log10(float(line[5])) - np.log10(float(line[4]))) )
        line = f.readline().split()
        gam_dm_mean.append( 0.5 * (np.log10(float(line[5])) + np.log10(float(line[4]))) )
        gam_dm_err.append( 0.5 * (np.log10(float(line[5])) - np.log10(float(line[4]))) )
        line = f.readline().split()
        Ared_mean.append( 0.5 * (np.log10(float(line[5])) + np.log10(float(line[4]))) )
        Ared_err.append( 0.5 * (np.log10(float(line[5])) - np.log10(float(line[4]))) )
        line = f.readline().split()
        gam_red_mean.append( 0.5 * (np.log10(float(line[5])) + np.log10(float(line[4]))) )
        gam_red_err.append( 0.5 * (np.log10(float(line[5])) - np.log10(float(line[4]))) )
        for jj in range(len(backends[ii])):
            line = f.readline().split()
            EFAC_mean[ii][jj] = 0.5 * (np.log10(float(line[5])) + np.log10(float(line[4])))
            EFAC_err[ii][jj] = 0.5 * (np.log10(float(line[5])) - np.log10(float(line[4])))
        for jj in range(len(backends[ii])):
            line = f.readline().split()
            EQUAD_mean[ii][jj] = 0.5 * (np.log10(float(line[5])) + np.log10(float(line[4])))
            EQUAD_err[ii][jj] = 0.5 * (np.log10(float(line[5])) - np.log10(float(line[4])))
    # NOTE(review): redundant -- the `with` block has already closed f.
    f.close()
################################################################################################################################
# MAKE FIXED NOISE MATRICES FROM MAXIMUM-LIKELIHOOD VALUES OF SINGLE-PULSAR ANALYSIS
################################################################################################################################
Diag=[]
res_prime=[]
F_prime=[]
for ii in range(len(psr)):
    # Whiten each pulsar's residuals and Fourier design matrix with its
    # fixed (ML) white-noise model.
    psr[ii].two_comp_noise(MLerrors=psr[ii].toaerrs)
    Diag.append( psr[ii].diag_white )
    res_prime.append( psr[ii].res_prime )
    F_prime.append( psr[ii].Ftot_prime )
################################################################################################################################
# SETTING UP PRIOR RANGES
################################################################################################################################
# Parameter vector: [log10(Agwb), (gam_gwb unless fixed), anisotropy clm's].
pmin = np.array([-20.0])
if args.fix_slope is False:
    pmin = np.append(pmin,[0.0])
pmin = np.append(pmin,-10.0*np.ones( args.num_gwfreq_wins*(((args.LMAX+1)**2)-1) ))
pmax = np.array([-10.0])
if args.fix_slope is False:
    pmax = np.append(pmax,[7.0])
pmax = np.append(pmax,10.0*np.ones( args.num_gwfreq_wins*(((args.LMAX+1)**2)-1) ))
##################################################################################################################################
# Precompute the signal-independent pieces of the likelihood.
loglike1 = 0
FtNF = []
for p in range(len(psr)):
    # compute d
    if p == 0:
        d = np.dot(F_prime[p].T, res_prime[p]/Diag[p] )
    else:
        d = np.append(d, np.dot(F_prime[p].T, res_prime[p]/Diag[p] ))
    # compute FT N F
    N = 1./Diag[p]
    right = (N*F_prime[p].T).T
    FtNF.append(np.dot(F_prime[p].T, right))
    # log determinant of N
    logdet_N = np.sum(np.log( Diag[p] ))
    # triple product in likelihood function
    dtNdt = np.sum(res_prime[p]**2.0/( Diag[p] ))
    loglike1 += -0.5 * (logdet_N + dtNdt)
def my_prior(x):
    """Flat (uniform) log-prior over the hyper-rectangle [pmin, pmax].

    Returns the constant log prior density when every component of x lies
    within its bounds, and -inf otherwise. pmin/pmax are module globals.
    """
    in_bounds = np.all(x <= pmax) and np.all(x >= pmin)
    if not in_bounds:
        return -np.inf
    return np.sum(np.log(1/(pmax-pmin)))
def modelIndependentFullPTANoisePL(x):
    """
    Model Independent stochastic background likelihood function

    x packs [log10(Agwb), (gam_gwb unless args.fix_slope), clm coefficients].
    Relies on many module globals (args, psr, fqs, gwfreqs_per_win, CorrCoeff,
    Ared_ML/gam_red_ML/Adm_ML/gam_dm_ML, FtNF, d, loglike1, sl, utils, ...)
    that are set up at script level before sampling starts.
    """
    # GW background amplitude (sampled in log10)
    Agwb = 10.0**x[0]
    if args.fix_slope:
        gam_gwb = 13./3.
        ct = 1
    else:
        gam_gwb = x[1]
        ct = 2
    # Anisotropy (clm) coefficients: one set per GW frequency window;
    # the monopole c00 is fixed to 2*sqrt(pi) (isotropic normalisation).
    orf_coeffs = x[ct:]
    orf_coeffs = orf_coeffs.reshape((args.num_gwfreq_wins,((args.LMAX+1)**2)-1))
    clm = np.array([[0.0]*((args.LMAX+1)**2) for ii in range(args.num_gwfreq_wins)])
    clm[:,0] = 2.0*np.sqrt(np.pi)
    physicality = 0.
    if args.LMAX!=0:
        for kk in range(args.num_gwfreq_wins):
            for ii in range(1,((args.LMAX+1)**2)):
                clm[kk,ii] = orf_coeffs[kk,ii-1]
            # Soft rejection: heavily penalise (rather than hard-reject)
            # unphysical sky maps.
            if (utils.PhysPrior(clm[kk],harm_sky_vals) == 'Unphysical'):
                physicality += -10.0**7.0
            else:
                physicality += 0.
    npsr = len(psr)
    # Overlap-reduction-function matrix per GW frequency; zero blocks are
    # appended for the DM-variation frequencies (no GW correlation there).
    ORF=[]
    for ii in range(args.num_gwfreq_wins): # number of frequency windows
        for jj in range(gwfreqs_per_win): # number of frequencies in this window
            ORF.append( sum(clm[ii,kk]*CorrCoeff[kk] for kk in range(len(CorrCoeff))) )
    for ii in range(args.num_gwfreq_wins): # number of frequency windows
        for jj in range(gwfreqs_per_win): # number of frequencies in this window
            ORF.append( np.zeros((npsr,npsr)) )
    ORF = np.array(ORF)
    # shouldn't be applying ORF to dmfreqs, but the projection of GW spec onto
    # dmfreqs is defined as zero below; [0::2]/[1::2] interleave sine/cosine modes.
    ORFtot = np.zeros((4*args.nmodes,npsr,npsr))
    ORFtot[0::2] = ORF
    ORFtot[1::2] = ORF
    # parameterize GW background as power law (rho = log10 PSD per mode)
    Tspan = (1/fqs[0])*86400.0
    f1yr = 1/3.16e7
    rho = np.log10(Agwb**2/12/np.pi**2 * f1yr**(gam_gwb-3) * (fqs/86400.0)**(-gam_gwb)/Tspan)
    # parameterize intrinsic red-noise and DM-variations as power law,
    # with per-pulsar ML amplitudes/slopes held fixed
    kappa = []
    for ii in range(npsr):
        kappa.append(np.log10( np.append( Ared_ML[ii]**2/12/np.pi**2 * f1yr**(gam_red_ML[ii]-3) * (fqs/86400.0)**(-gam_red_ML[ii])/Tspan,\
                                          Adm_ML[ii]**2/12/np.pi**2 * f1yr**(gam_dm_ML[ii]-3) * (fqs/86400.0)**(-gam_dm_ML[ii])/Tspan ) ))
    # construct elements of sigma array
    sigdiag = []
    sigoffdiag = []
    for ii in range(npsr):
        tot = np.zeros(4*args.nmodes)
        offdiag = np.zeros(4*args.nmodes)
        # off diagonal terms
        offdiag[0::2] = np.append( 10**rho, np.zeros(len(rho)) )
        offdiag[1::2] = np.append( 10**rho, np.zeros(len(rho)) )
        # diagonal terms
        tot[0::2] = ORF[:,ii,ii]*np.append( 10**rho, np.zeros(len(rho)) ) + 10**kappa[ii]
        tot[1::2] = ORF[:,ii,ii]*np.append( 10**rho, np.zeros(len(rho)) ) + 10**kappa[ii]
        # fill in lists of arrays
        sigdiag.append(tot)
        sigoffdiag.append(offdiag)
    # compute Phi inverse from Lindley's code: Phi is block-diagonal in
    # frequency, so invert one small npsr x npsr matrix per mode.
    smallMatrix = np.zeros((4*args.nmodes, npsr, npsr))
    for ii in range(npsr):
        for jj in range(ii,npsr):
            if ii == jj:
                smallMatrix[:,ii,jj] = sigdiag[jj]
            else:
                smallMatrix[:,ii,jj] = ORFtot[:,ii,jj] * sigoffdiag[jj]
                smallMatrix[:,jj,ii] = smallMatrix[:,ii,jj]
    # invert them
    logdet_Phi = 0
    non_pos_def = 0
    for ii in range(4*args.nmodes):
        try:
            L = sl.cho_factor(smallMatrix[ii,:,:])
            smallMatrix[ii,:,:] = sl.cho_solve(L, np.eye(npsr))
            logdet_Phi += np.sum(2*np.log(np.diag(L[0])))
        except np.linalg.LinAlgError:
            print 'Cholesky Decomposition Failed!! Rejecting...'
            non_pos_def += 1
    if non_pos_def > 0:
        # Reject samples whose per-mode covariance is not positive definite.
        return -np.inf
    else:
        nftot = 4*args.nmodes
        Phi = np.zeros((npsr*nftot, npsr*nftot))
        # now fill in real covariance matrix; fancy indexing with equal-length
        # index arrays writes only the (frequency-)diagonal of each block.
        ind = [np.arange(kk*nftot, kk*nftot+nftot) for kk in range(npsr)]
        for ii in range(npsr):
            for jj in range(npsr):
                Phi[ind[ii],ind[jj]] = smallMatrix[:,ii,jj]
        # compute sigma
        Sigma = sl.block_diag(*FtNF) + Phi
        # cholesky decomp for second term in exponential
        if args.use_gpu:
            try:
                Sigma_gpu = gpuarray.to_gpu( Sigma.astype(np.float64).copy() )
                expval2_gpu = gpuarray.to_gpu( d.astype(np.float64).copy() )
                culinalg.cho_solve( Sigma_gpu, expval2_gpu ) # in-place linear-algebra: Sigma and expval2 overwritten
                logdet_Sigma = np.sum(2.0*np.log(np.diag(Sigma_gpu.get())))
            except cula.culaDataError:
                print 'Cholesky Decomposition Failed (GPU error!!!!!!!!!!)'
                return -np.inf
            logLike = -0.5 * (logdet_Phi + logdet_Sigma) + 0.5 * (np.dot(d, expval2_gpu.get() )) + loglike1
        else:
            try:
                cf = sl.cho_factor(Sigma)
                expval2 = sl.cho_solve(cf, d)
                logdet_Sigma = np.sum(2*np.log(np.diag(cf[0])))
            except np.linalg.LinAlgError:
                # Fall back to SVD pseudo-inverse if Sigma is ill-conditioned.
                print 'Cholesky Decomposition Failed second time!! Using SVD instead'
                u,s,v = sl.svd(Sigma)
                expval2 = np.dot(v.T, 1/s*np.dot(u.T, d))
                logdet_Sigma = np.sum(np.log(s))
            logLike = -0.5 * (logdet_Phi + logdet_Sigma) + 0.5 * (np.dot(d, expval2)) + loglike1
        # 'limit' mode samples uniformly in Agwb rather than log10(Agwb);
        # the Jacobian of that reparameterisation is Agwb*ln(10).
        if args.limit_or_detect == 'limit':
            prior_factor = np.log(Agwb * np.log(10.0))
        else:
            prior_factor = 0.0
        return logLike + prior_factor + physicality
#########################
# SAMPLER CONFIGURATION
#########################
# Human-readable parameter names, in the same order the likelihood unpacks x.
parameters = ["Agwb"]
if args.fix_slope is False:
    parameters.append("gam_gwb")
    gamma_ext = 'GamVary'
else:
    gamma_ext = 'Gam4p33'
for ii in range( args.num_gwfreq_wins*(((args.LMAX+1)**2)-1) ):
    parameters.append('clm_{0}'.format(ii+1))
print "\n You are searching for the following parameters: {0}\n".format(parameters)
n_params = len(parameters)
print "\n The total number of parameters is {0}\n".format(n_params)
# Starting point: A = 1e-15, slope at the SMBHB value 13/3, isotropic sky.
x0 = np.array([-15.0])
if args.fix_slope is False:
    x0 = np.append(x0,[13./3.])
x0 = np.append(x0,np.zeros( args.num_gwfreq_wins*(((args.LMAX+1)**2)-1) ))
print "\n Your initial parameters are {0}\n".format(x0)
# Initial (diagonal) jump-proposal covariance for the sampler.
cov_diag = np.array([0.5])
if args.fix_slope is False:
    cov_diag = np.append(cov_diag,[0.5])
cov_diag = np.append(cov_diag,0.05*np.ones( args.num_gwfreq_wins*(((args.LMAX+1)**2)-1) ))
print "\n Running a quick profile on the likelihood to estimate evaluation speed...\n"
cProfile.run('modelIndependentFullPTANoisePL(x0)')
#####################
# Now, we sample.....
#####################
print "\n Now, we sample... \n"
sampler = PAL.PTSampler(ndim=n_params,logl=modelIndependentFullPTANoisePL,logp=my_prior,cov=np.diag(cov_diag),\
                        outDir='./chains_Analysis/EPTAv2_{0}_{1}mode_MLnoise_nmodes{2}_Lmax{3}_{4}'.format(snr_tag_ext,args.limit_or_detect,args.nmodes,args.LMAX,gamma_ext),resume=False)
sampler.sample(p0=x0,Niter=500000,thin=10)
|
|
"""Compressed Sparse Row matrix format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['csr_matrix', 'isspmatrix_csr']
import numpy as np
from scipy.lib.six import xrange
from ._sparsetools import csr_tocsc, csr_tobsr, csr_count_blocks, \
get_csr_submatrix, csr_sample_values
from .sputils import upcast, isintlike, IndexMixin, issequence, get_index_dtype
from .compressed import _cs_matrix
class csr_matrix(_cs_matrix, IndexMixin):
    """
    Compressed Sparse Row matrix

    This can be instantiated in several ways:
        csr_matrix(D)
            with a dense matrix or rank-2 ndarray D

        csr_matrix(S)
            with another sparse matrix S (equivalent to S.tocsr())

        csr_matrix((M, N), [dtype])
            to construct an empty matrix with shape (M, N)
            dtype is optional, defaulting to dtype='d'.

        csr_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
            where ``data``, ``row_ind`` and ``col_ind`` satisfy the
            relationship ``a[row_ind[k], col_ind[k]] = data[k]``.

        csr_matrix((data, indices, indptr), [shape=(M, N)])
            is the standard CSR representation where the column indices for
            row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their
            corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``.
            If the shape parameter is not supplied, the matrix dimensions
            are inferred from the index arrays.

    Attributes
    ----------
    dtype : dtype
        Data type of the matrix
    shape : 2-tuple
        Shape of the matrix
    ndim : int
        Number of dimensions (this is always 2)
    nnz
        Number of nonzero elements
    data
        CSR format data array of the matrix
    indices
        CSR format index array of the matrix
    indptr
        CSR format index pointer array of the matrix
    has_sorted_indices
        Whether indices are sorted

    Notes
    -----
    Sparse matrices can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    Advantages of the CSR format
      - efficient arithmetic operations CSR + CSR, CSR * CSR, etc.
      - efficient row slicing
      - fast matrix vector products

    Disadvantages of the CSR format
      - slow column slicing operations (consider CSC)
      - changes to the sparsity structure are expensive (consider LIL or DOK)

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csr_matrix
    >>> csr_matrix((3, 4), dtype=np.int8).toarray()
    array([[0, 0, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 0]], dtype=int8)

    >>> row = np.array([0, 0, 1, 2, 2, 2])
    >>> col = np.array([0, 2, 2, 0, 1, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6])
    >>> csr_matrix((data, (row, col)), shape=(3, 3)).toarray()
    array([[1, 0, 2],
           [0, 0, 3],
           [4, 5, 6]])

    >>> indptr = np.array([0, 2, 3, 6])
    >>> indices = np.array([0, 2, 2, 0, 1, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6])
    >>> csr_matrix((data, indices, indptr), shape=(3, 3)).toarray()
    array([[1, 0, 2],
           [0, 0, 3],
           [4, 5, 6]])

    As an example of how to construct a CSR matrix incrementally,
    the following snippet builds a term-document matrix from texts:

    >>> docs = [["hello", "world", "hello"], ["goodbye", "cruel", "world"]]
    >>> indptr = [0]
    >>> indices = []
    >>> data = []
    >>> vocabulary = {}
    >>> for d in docs:
    ...     for term in d:
    ...         index = vocabulary.setdefault(term, len(vocabulary))
    ...         indices.append(index)
    ...         data.append(1)
    ...     indptr.append(len(indices))
    ...
    >>> csr_matrix((data, indices, indptr), dtype=int).toarray()
    array([[2, 1, 0, 0],
           [0, 1, 1, 1]])
    """

    def transpose(self, copy=False):
        """Return the transpose as a csc_matrix.

        The CSR arrays of an M x N matrix are exactly the CSC arrays of
        its N x M transpose, so no data movement is required.
        """
        from .csc import csc_matrix
        M,N = self.shape
        return csc_matrix((self.data,self.indices,self.indptr), shape=(N,M), copy=copy)

    def tolil(self):
        """Convert this matrix to LIL (list-of-lists) format."""
        from .lil import lil_matrix
        lil = lil_matrix(self.shape,dtype=self.dtype)

        self.sort_indices()  # lil_matrix needs sorted column indices

        ptr,ind,dat = self.indptr,self.indices,self.data
        rows, data = lil.rows, lil.data

        for n in xrange(self.shape[0]):
            start = ptr[n]
            end = ptr[n+1]
            rows[n] = ind[start:end].tolist()
            data[n] = dat[start:end].tolist()

        return lil

    def tocsr(self, copy=False):
        """Return self (already CSR); a copy only when requested."""
        if copy:
            return self.copy()
        else:
            return self

    def tocsc(self):
        """Convert this matrix to CSC format using the csr_tocsc C routine."""
        idx_dtype = get_index_dtype((self.indptr, self.indices),
                                    maxval=max(self.nnz, self.shape[0]))
        indptr = np.empty(self.shape[1] + 1, dtype=idx_dtype)
        indices = np.empty(self.nnz, dtype=idx_dtype)
        data = np.empty(self.nnz, dtype=upcast(self.dtype))

        csr_tocsc(self.shape[0], self.shape[1],
                  self.indptr.astype(idx_dtype),
                  self.indices.astype(idx_dtype),
                  self.data,
                  indptr,
                  indices,
                  data)

        from .csc import csc_matrix
        A = csc_matrix((data, indices, indptr), shape=self.shape)
        # csr_tocsc emits sorted row indices, so record that to skip a
        # later sort.
        A.has_sorted_indices = True
        return A

    def tobsr(self, blocksize=None, copy=True):
        """Convert this matrix to BSR format with the given (R, C) blocksize.

        When blocksize is None a suitable one is estimated; M and N must be
        divisible by R and C respectively.
        """
        from .bsr import bsr_matrix

        if blocksize is None:
            from .spfuncs import estimate_blocksize
            return self.tobsr(blocksize=estimate_blocksize(self))

        elif blocksize == (1,1):
            # Trivial blocking: reuse the CSR arrays directly.
            arg1 = (self.data.reshape(-1,1,1),self.indices,self.indptr)
            return bsr_matrix(arg1, shape=self.shape, copy=copy)

        else:
            R,C = blocksize
            M,N = self.shape

            if R < 1 or C < 1 or M % R != 0 or N % C != 0:
                raise ValueError('invalid blocksize %s' % blocksize)

            blks = csr_count_blocks(M,N,R,C,self.indptr,self.indices)

            idx_dtype = get_index_dtype((self.indptr, self.indices),
                                        maxval=max(N//C, blks))
            indptr = np.empty(M//R+1, dtype=idx_dtype)
            indices = np.empty(blks, dtype=idx_dtype)
            data = np.zeros((blks,R,C), dtype=self.dtype)

            csr_tobsr(M, N, R, C,
                      self.indptr.astype(idx_dtype),
                      self.indices.astype(idx_dtype),
                      self.data,
                      indptr, indices, data.ravel())

            return bsr_matrix((data,indices,indptr), shape=self.shape)

    # these functions are used by the parent class (_cs_matrix)
    # to remove redundancy between csc_matrix and csr_matrix
    def _swap(self,x):
        """swap the members of x if this is a column-oriented matrix
        """
        # CSR is row-oriented, so the pair is returned unchanged.
        return (x[0],x[1])

    def __getitem__(self, key):
        """Index/slice the matrix: supports int, slice and sequence indices
        in either axis, dispatching to the fastest available extraction path.
        """
        def asindices(x):
            # Coerce x to an index array with the smallest safe dtype.
            try:
                x = np.asarray(x)

                # Check index contents, to avoid creating 64-bit arrays needlessly
                idx_dtype = get_index_dtype((x,), check_contents=True)
                if idx_dtype != x.dtype:
                    x = x.astype(idx_dtype)
            except Exception:
                # Narrowed from a bare `except:` so that SystemExit and
                # KeyboardInterrupt are not converted into IndexError.
                raise IndexError('invalid index')
            else:
                return x

        def check_bounds(indices, N):
            # Validate that all (possibly negative) indices fall in [-N, N).
            if indices.size == 0:
                return (0, 0)

            max_indx = indices.max()
            if max_indx >= N:
                raise IndexError('index (%d) out of range' % max_indx)

            min_indx = indices.min()
            if min_indx < -N:
                raise IndexError('index (%d) out of range' % (N + min_indx))

            return (min_indx,max_indx)

        def extractor(indices,N):
            """Return a sparse matrix P so that P*self implements
            slicing of the form self[[1,2,3],:]
            """
            indices = asindices(indices)

            (min_indx,max_indx) = check_bounds(indices,N)

            if min_indx < 0:
                indices = indices.copy()
                indices[indices < 0] += N

            indptr = np.arange(len(indices)+1, dtype=indices.dtype)
            data = np.ones(len(indices), dtype=self.dtype)
            shape = (len(indices),N)

            return csr_matrix((data,indices,indptr), shape=shape)

        row, col = self._unpack_index(key)

        # First attempt to use original row optimized methods
        # [1, ?]
        if isintlike(row):
            # [i, j]
            if isintlike(col):
                return self._get_single_element(row, col)
            # [i, 1:2]
            elif isinstance(col, slice):
                return self._get_row_slice(row, col)
            # [i, [1, 2]]
            elif issequence(col):
                P = extractor(col,self.shape[1]).T
                return self[row, :] * P
        elif isinstance(row, slice):
            # [1:2,??]
            if ((isintlike(col) and row.step in (1, None)) or
                    (isinstance(col, slice) and
                     col.step in (1, None) and
                     row.step in (1, None))):
                # col is int or slice with step 1, row is slice with step 1.
                return self._get_submatrix(row, col)
            elif issequence(col):
                # row is slice, col is sequence.
                P = extractor(col,self.shape[1]).T  # [1:2,[1,2]]
                return self[row,:]*P
        elif issequence(row):
            # [[1,2],??]
            if isintlike(col) or isinstance(col,slice):
                P = extractor(row, self.shape[0])  # [[1,2],j] or [[1,2],1:2]
                return (P*self)[:,col]

        if not (issequence(col) and issequence(row)):
            # Sample elementwise
            row, col = self._index_to_arrays(row, col)

        row = asindices(row)
        col = asindices(col)
        if row.shape != col.shape:
            raise IndexError('number of row and column indices differ')
        assert row.ndim <= 2

        num_samples = np.size(row)
        if num_samples == 0:
            return csr_matrix(np.atleast_2d(row).shape, dtype=self.dtype)
        check_bounds(row, self.shape[0])
        check_bounds(col, self.shape[1])

        val = np.empty(num_samples, dtype=self.dtype)
        csr_sample_values(self.shape[0], self.shape[1],
                          self.indptr, self.indices, self.data,
                          num_samples, row.ravel(), col.ravel(), val)
        if row.ndim == 1:
            # row and col are 1d
            return np.asmatrix(val)
        return self.__class__(val.reshape(row.shape))

    def getrow(self, i):
        """Returns a copy of row i of the matrix, as a (1 x n)
        CSR matrix (row vector).
        """
        return self._get_submatrix(i, slice(None))

    def getcol(self, i):
        """Returns a copy of column i of the matrix, as a (m x 1)
        CSR matrix (column vector).
        """
        return self._get_submatrix(slice(None), i)

    def _get_row_slice(self, i, cslice):
        """Returns a copy of row self[i, cslice]
        """
        if i < 0:
            i += self.shape[0]

        if i < 0 or i >= self.shape[0]:
            raise IndexError('index (%d) out of range' % i)

        start, stop, stride = cslice.indices(self.shape[1])

        if stride == 1:
            # for stride == 1, _get_submatrix is ~30% faster than below
            row_slice = self._get_submatrix(i, cslice)

        else:
            # other strides need new code
            row_indices = self.indices[self.indptr[i]:self.indptr[i + 1]]
            row_data = self.data[self.indptr[i]:self.indptr[i + 1]]

            if stride > 0:
                ind = (row_indices >= start) & (row_indices < stop)
            elif stride < 0:
                ind = (row_indices <= start) & (row_indices > stop)

            if abs(stride) > 1:
                ind = ind & ((row_indices - start) % stride == 0)

            row_indices = (row_indices[ind] - start) // stride
            row_data = row_data[ind]
            row_indptr = np.array([0, len(row_indices)])

            if stride < 0:
                # Reverse so column indices come out sorted ascending.
                row_data = row_data[::-1]
                row_indices = abs(row_indices[::-1])

            shape = (1, int(np.ceil(float(stop - start) / stride)))

            row_slice = csr_matrix((row_data, row_indices, row_indptr),
                                   shape=shape)

        return row_slice

    def _get_submatrix(self, row_slice, col_slice):
        """Return a submatrix of this matrix (new matrix is created)."""

        M,N = self.shape

        def process_slice(sl, num):
            # Normalise an int or step-1 slice to a half-open (i0, i1) pair.
            if isinstance(sl, slice):
                if sl.step not in (1, None):
                    raise ValueError('slicing with step != 1 not supported')
                i0, i1 = sl.start, sl.stop
                if i0 is None:
                    i0 = 0
                elif i0 < 0:
                    i0 = num + i0

                if i1 is None:
                    i1 = num
                elif i1 < 0:
                    i1 = num + i1

                return i0, i1

            elif isintlike(sl):
                if sl < 0:
                    sl += num

                return sl, sl + 1

            else:
                raise TypeError('expected slice or scalar')

        def check_bounds(i0, i1, num):
            if not (0 <= i0 <= num) or not (0 <= i1 <= num) or not (i0 <= i1):
                raise IndexError(
                    "index out of bounds: 0 <= %d <= %d, 0 <= %d <= %d,"
                    " %d <= %d" % (i0, num, i1, num, i0, i1))

        i0, i1 = process_slice(row_slice, M)
        j0, j1 = process_slice(col_slice, N)
        check_bounds(i0, i1, M)
        check_bounds(j0, j1, N)

        indptr, indices, data = get_csr_submatrix(M, N,
            self.indptr, self.indices, self.data,
            int(i0), int(i1), int(j0), int(j1))

        shape = (i1 - i0, j1 - j0)

        return self.__class__((data,indices,indptr), shape=shape)
def isspmatrix_csr(x):
    """Return True when *x* is a csr_matrix instance (subclasses included)."""
    result = isinstance(x, csr_matrix)
    return result
|
|
# Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import errno
import httplib
import os
import re
import socket
import time
import boto
from boto import config, storage_uri_for_key
from boto.connection import AWSAuthConnection
from boto.exception import ResumableDownloadException
from boto.exception import ResumableTransferDisposition
"""
Resumable download handler.
Resumable downloads will retry failed downloads, resuming at the byte count
completed by the last download attempt. If too many retries happen with no
progress (per configurable num_retries param), the download will be aborted.
The caller can optionally specify a tracker_file_name param in the
ResumableDownloadHandler constructor. If you do this, that file will
save the state needed to allow retrying later, in a separate process
(e.g., in a later run of gsutil).
Note that resumable downloads work across providers (they depend only
on support for Range GETs), but this code is in the boto.s3 package
because it is the wrong abstraction level to go in the top-level boto
package.
TODO: At some point we should refactor the code to have a storage_service
package where all these provider-independent files go.
"""
class ByteTranslatingCallbackHandler(object):
    """
    Adapter for progress callbacks made by boto.s3.Key.get_file() during a
    resumed download: it shifts the reported byte count by the number of
    bytes that were already on disk before the resume started.
    """
    def __init__(self, proxied_cb, download_start_point):
        self.proxied_cb = proxied_cb
        self.download_start_point = download_start_point

    def call(self, total_bytes_uploaded, total_size):
        # Report overall progress, not just this attempt's progress.
        bytes_so_far = self.download_start_point + total_bytes_uploaded
        self.proxied_cb(bytes_so_far, total_size)
def get_cur_file_size(fp, position_to_eof=False):
    """
    Return the size of the file behind fp.

    Unless position_to_eof is True the original file position is restored
    after measuring; otherwise fp is left positioned at EOF.
    """
    saved_pos = None if position_to_eof else fp.tell()
    fp.seek(0, os.SEEK_END)
    size = fp.tell()
    if saved_pos is not None:
        fp.seek(saved_pos, os.SEEK_SET)
    return size
class ResumableDownloadHandler(object):
    """
    Handler for resumable downloads.
    """

    # Single line stored in the tracker file: the 32-char hex etag of the
    # object being downloaded, followed by a newline.
    ETAG_REGEX = '([a-z0-9]{32})\n'

    # Transient network/HTTP failures that warrant another attempt.
    RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error,
                            socket.gaierror)

    def __init__(self, tracker_file_name=None, num_retries=None):
        """
        Constructor. Instantiate once for each downloaded file.

        :type tracker_file_name: string
        :param tracker_file_name: optional file name to save tracking info
            about this download. If supplied and the current process fails
            the download, it can be retried in a new process. If called
            with an existing file containing an unexpired timestamp,
            we'll resume the transfer for this file; else we'll start a
            new resumable download.

        :type num_retries: int
        :param num_retries: the number of times we'll re-try a resumable
            download making no progress. (Count resets every time we get
            progress, so download can span many more than this number of
            retries.)
        """
        self.tracker_file_name = tracker_file_name
        self.num_retries = num_retries
        self.etag_value_for_current_download = None
        if tracker_file_name:
            self._load_tracker_file_etag()
        # Save download_start_point in instance state so caller can
        # find how much was transferred by this ResumableDownloadHandler
        # (across retries).
        self.download_start_point = None

    def _load_tracker_file_etag(self):
        # Read the etag recorded by a previous (interrupted) attempt; leaves
        # etag_value_for_current_download as None (forcing a fresh download)
        # when the tracker file is missing or unparsable.
        f = None
        try:
            f = open(self.tracker_file_name, 'r')
            etag_line = f.readline()
            m = re.search(self.ETAG_REGEX, etag_line)
            if m:
                self.etag_value_for_current_download = m.group(1)
            else:
                print('Couldn\'t read etag in tracker file (%s). Restarting '
                      'download from scratch.' % self.tracker_file_name)
        except IOError, e:
            # Ignore non-existent file (happens first time a download
            # is attempted on an object), but warn user for other errors.
            if e.errno != errno.ENOENT:
                # Will restart because
                # self.etag_value_for_current_download == None.
                print('Couldn\'t read URI tracker file (%s): %s. Restarting '
                      'download from scratch.' %
                      (self.tracker_file_name, e.strerror))
        finally:
            if f:
                f.close()

    def _save_tracker_info(self, key):
        # Record the object's etag (locally and, when configured, in the
        # tracker file) so a later process can validate a resume attempt.
        self.etag_value_for_current_download = key.etag.strip('"\'')
        if not self.tracker_file_name:
            return
        f = None
        try:
            f = open(self.tracker_file_name, 'w')
            f.write('%s\n' % self.etag_value_for_current_download)
        except IOError, e:
            raise ResumableDownloadException(
                'Couldn\'t write tracker file (%s): %s.\nThis can happen'
                'if you\'re using an incorrectly configured download tool\n'
                '(e.g., gsutil configured to save tracker files to an '
                'unwritable directory)' %
                (self.tracker_file_name, e.strerror),
                ResumableTransferDisposition.ABORT)
        finally:
            if f:
                f.close()

    def _remove_tracker_file(self):
        # Best-effort cleanup once the download has completed (or must be
        # restarted from scratch).
        if (self.tracker_file_name and
                os.path.exists(self.tracker_file_name)):
            os.unlink(self.tracker_file_name)

    def _attempt_resumable_download(self, key, fp, headers, cb, num_cb,
                                    torrent, version_id):
        """
        Attempts a resumable download.

        Raises ResumableDownloadException if any problems occur.
        """
        cur_file_size = get_cur_file_size(fp, position_to_eof=True)

        # Resume only when we already have bytes on disk AND the tracked
        # etag still matches the object (i.e. it hasn't changed remotely).
        if (cur_file_size and
                self.etag_value_for_current_download and
                self.etag_value_for_current_download == key.etag.strip('"\'')):
            # Try to resume existing transfer.
            if cur_file_size > key.size:
                raise ResumableDownloadException(
                    '%s is larger (%d) than %s (%d).\nDeleting tracker file, so '
                    'if you re-try this download it will start from scratch' %
                    (fp.name, cur_file_size, str(storage_uri_for_key(key)),
                     key.size), ResumableTransferDisposition.ABORT)
            elif cur_file_size == key.size:
                if key.bucket.connection.debug >= 1:
                    print 'Download complete.'
                return
            if key.bucket.connection.debug >= 1:
                print 'Resuming download.'
            headers = headers.copy()
            # Request only the remaining bytes via an HTTP Range GET.
            headers['Range'] = 'bytes=%d-%d' % (cur_file_size, key.size - 1)
            cb = ByteTranslatingCallbackHandler(cb, cur_file_size).call
            self.download_start_point = cur_file_size
        else:
            if key.bucket.connection.debug >= 1:
                print 'Starting new resumable download.'
            self._save_tracker_info(key)
            self.download_start_point = 0
            # Truncate the file, in case a new resumable download is being
            # started atop an existing file.
            fp.truncate(0)

        # Disable AWSAuthConnection-level retry behavior, since that would
        # cause downloads to restart from scratch.
        key.get_file(fp, headers, cb, num_cb, torrent, version_id,
                     override_num_retries=0)
        fp.flush()

    def _check_final_md5(self, key, file_name):
        """
        Checks that etag from server agrees with md5 computed after the
        download completes. This is important, since the download could
        have spanned a number of hours and multiple processes (e.g.,
        gsutil runs), and the user could change some of the file and not
        realize they have inconsistent data.
        """
        fp = open(file_name, 'r')
        if key.bucket.connection.debug >= 1:
            print 'Checking md5 against etag.'
        hex_md5 = key.compute_md5(fp)[0]
        if hex_md5 != key.etag.strip('"\''):
            # Mismatch: delete the corrupt file so a later attempt starts clean.
            file_name = fp.name
            fp.close()
            os.unlink(file_name)
            raise ResumableDownloadException(
                'File changed during download: md5 signature doesn\'t match '
                'etag (incorrect downloaded file deleted)',
                ResumableTransferDisposition.ABORT)

    def get_file(self, key, fp, headers, cb=None, num_cb=10, torrent=False,
                 version_id=None):
        """
        Retrieves a file from a Key

        :type key: :class:`boto.s3.key.Key` or subclass
        :param key: The Key object from which upload is to be downloaded

        :type fp: file
        :param fp: File pointer into which data should be downloaded

        :type headers: string
        :param: headers to send when retrieving the files

        :type cb: function
        :param cb: (optional) a callback function that will be called to report
            progress on the download. The callback should accept two integer
            parameters, the first representing the number of bytes that have
            been successfully transmitted from the storage service and
            the second representing the total number of bytes that need
            to be transmitted.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the cb
            parameter this parameter determines the granularity of the callback
            by defining the maximum number of times the callback will be
            called during the file transfer.

        :type torrent: bool
        :param torrent: Flag for whether to get a torrent for the file

        :type version_id: string
        :param version_id: The version ID (optional)

        Raises ResumableDownloadException if a problem occurs during
        the transfer.
        """
        debug = key.bucket.connection.debug
        if not headers:
            headers = {}

        # Use num-retries from constructor if one was provided; else check
        # for a value specified in the boto config file; else default to 5.
        if self.num_retries is None:
            self.num_retries = config.getint('Boto', 'num_retries', 5)
        progress_less_iterations = 0

        while True:  # Retry as long as we're making progress.
            had_file_bytes_before_attempt = get_cur_file_size(fp)
            try:
                self._attempt_resumable_download(key, fp, headers, cb, num_cb,
                                                 torrent, version_id)
                # Download succeeded, so remove the tracker file (if have one).
                self._remove_tracker_file()
                self._check_final_md5(key, fp.name)
                if debug >= 1:
                    print 'Resumable download complete.'
                return
            except self.RETRYABLE_EXCEPTIONS, e:
                if debug >= 1:
                    print('Caught exception (%s)' % e.__repr__())
                if isinstance(e, IOError) and e.errno == errno.EPIPE:
                    # Broken pipe error causes httplib to immediately
                    # close the socket (http://bugs.python.org/issue5542),
                    # so we need to close and reopen the key before resuming
                    # the download.
                    key.get_file(fp, headers, cb, num_cb, torrent, version_id,
                                 override_num_retries=0)
            except ResumableDownloadException, e:
                if (e.disposition ==
                        ResumableTransferDisposition.ABORT_CUR_PROCESS):
                    if debug >= 1:
                        print('Caught non-retryable ResumableDownloadException '
                              '(%s)' % e.message)
                    raise
                elif (e.disposition ==
                        ResumableTransferDisposition.ABORT):
                    if debug >= 1:
                        print('Caught non-retryable ResumableDownloadException '
                              '(%s); aborting and removing tracker file' %
                              e.message)
                    self._remove_tracker_file()
                    raise
                else:
                    if debug >= 1:
                        print('Caught ResumableDownloadException (%s) - will '
                              'retry' % e.message)

            # At this point we had a re-tryable failure; see if made progress.
            if get_cur_file_size(fp) > had_file_bytes_before_attempt:
                progress_less_iterations = 0
            else:
                progress_less_iterations += 1

            if progress_less_iterations > self.num_retries:
                # Don't retry any longer in the current process.
                raise ResumableDownloadException(
                    'Too many resumable download attempts failed without '
                    'progress. You might try this download again later',
                    ResumableTransferDisposition.ABORT_CUR_PROCESS)

            # Close the key, in case a previous download died partway
            # through and left data in the underlying key HTTP buffer.
            # Do this within a try/except block in case the connection is
            # closed (since key.close() attempts to do a final read, in which
            # case this read attempt would get an IncompleteRead exception,
            # which we can safely ignore).
            try:
                key.close()
            except httplib.IncompleteRead:
                pass

            # Exponential back-off between progress-less attempts.
            sleep_time_secs = 2**progress_less_iterations
            if debug >= 1:
                print('Got retryable failure (%d progress-less in a row).\n'
                      'Sleeping %d seconds before re-trying' %
                      (progress_less_iterations, sleep_time_secs))
            time.sleep(sleep_time_secs)
|
|
import logging
log = logging.getLogger(__name__)

import lmfit
import numpy as np
from scipy import optimize
from collections import OrderedDict
from pycqed.analysis import fitting_models as fit_mods
from pycqed.analysis_v3 import fitting as fit_mod
from pycqed.analysis_v3 import plotting as plot_mod
from pycqed.analysis_v3 import helper_functions as hlp_mod
from pycqed.analysis_v3 import processing_pipeline as pp_mod
from pycqed.measurement import sweep_points as sp_mod
from pycqed.measurement.calibration import calibration_points as cp_mod
from copy import deepcopy
import sys

# Register this module with the processing-pipeline framework so node
# functions defined here can be resolved by name.
pp_mod.search_modules.add(sys.modules[__name__])
# Create pipelines
def pipeline_single_qubit_rb_ssro(meas_obj_names, mospm, sweep_points,
                                  n_shots, dim_hilbert, cal_points=None,
                                  ro_thresholds=None, nreps=1,
                                  plot_all_shots=False, sweep_type=None,
                                  processing_pipeline=None):
    """
    Wrapper to create the standard processing pipeline for a single qubit RB
    measurement, measured in SSRO.
    WARNING: if you use plot_all_shots=True, disable data saving. It will try
    to save a huge string of the large numpy array this node will generate.
    :param meas_obj_names: list of measured object names
    :param mospm: meas_obj_sweep_points_map
    :param sweep_points: SweepPoints object (of one file if the measurement
        was split into several files)
    :param n_shots: number of shots
    :param dim_hilbert: dimension of Hilbert space. 4 for 2QB RB, 2 for 1QB RB
    :param cal_points: CalibrationPoints object (or its string representation)
    :param ro_thresholds: optional (the threshold_data node can also extract
        them from the data_dict. See docstring there).
        Dict with meas_obj_names as keys and their readout thresholds as values.
    :param nreps: int specifying the number of files to combine into one
        measurement. IMPORTANT! This feature only works if the measurement was
        split by seeds, not by cliffords. Meaning that each measurement file
        contains data for all the Cliffords in sweep_points, but for a subset
        of the total seeds.
    :param plot_all_shots: bool specifying whether to produce a raw plot of
        all the shots vs cliffords. SEE WARNING ABOVE.
    :param sweep_type: dict of the form
        {'cliffords': sweep_dim, 'seeds': sweep_dim} where sweep_dim is either
        0 or 1 and specifies whether the measurement was run with seeds in the
        fast dimension (0) and cliffords in the slow dimension (1), or the
        other way around.
    :param processing_pipeline: ProcessingPipeline instance to which this
        function will append.
    :return: the unresolved ProcessingPipeline
    """
    if sweep_type is None:
        # default: cliffords in the fast dimension, seeds in the slow one
        sweep_type = {'cliffords': 0, 'seeds': 1}
    slow_cliffords = sweep_type['cliffords'] == 1
    sweep_points = sp_mod.SweepPoints(sweep_points)
    if cal_points is None:
        num_cal_states = 0
    else:
        if isinstance(cal_points, str):
            cal_points = cp_mod.CalibrationPoints.from_string(cal_points)
        num_cal_states = len(cal_points.states)
    if slow_cliffords:
        # n_segments = nr_seeds + nr_cal_segments
        n_segments = nreps*(sweep_points.length(sweep_type['seeds']) +
                            num_cal_states)
        # n_sequences = nr_cliffords
        n_sequences = sweep_points.length(sweep_type['cliffords'])
    else:
        # n_segments = nr_cliffords + nr_cal_segments
        n_segments = nreps*(sweep_points.length(sweep_type['cliffords']) +
                            num_cal_states)
        # n_sequences = nr_seeds
        n_sequences = sweep_points.length(sweep_type['seeds'])
    if processing_pipeline is None:
        processing_pipeline = pp_mod.ProcessingPipeline()
    if nreps > 1:
        # merge the per-seed data files into one data set first
        processing_pipeline.add_node('combine_datafiles_split_by_seeds',
                                     keys_in='raw',
                                     n_shots=n_shots,
                                     meas_obj_names=meas_obj_names)
    keys_in = 'previous combine_datafiles_split_by_seeds' if nreps > 1 else 'raw'
    processing_pipeline.add_node('threshold_data',
                                 keys_in=keys_in,
                                 ro_thresholds=ro_thresholds,
                                 meas_obj_names=meas_obj_names)
    # average over the shots axis (axis 1 of the reshaped data)
    processing_pipeline.add_node('average_data',
                                 # shape=(n_shots, n_segments*n_sequences),
                                 # averaging_axis=0,
                                 shape=(n_sequences, n_shots, n_segments),
                                 averaging_axis=1,
                                 keys_in='previous threshold_data',
                                 meas_obj_names=meas_obj_names)
    for label in ['rb']:
        pp = pp_mod.ProcessingPipeline(keys_out_container=label)
        # average over the seeds axis to get one point per nr of cliffords
        pp.add_node('average_data',
                    shape=(n_sequences, n_segments),
                    averaging_axis=-1 if slow_cliffords else 0,
                    keys_in='previous average_data',
                    meas_obj_names=meas_obj_names)
        pp.add_node('get_std_deviation',
                    shape=(n_sequences, n_segments) ,
                    averaging_axis=-1 if slow_cliffords else 0,
                    keys_in='previous average_data',
                    meas_obj_names=meas_obj_names)
        pp.add_node('rb_analysis',
                    d=dim_hilbert,
                    sweep_type=sweep_type,
                    keys_in=f'previous {label}.average_data',
                    keys_in_std=f'previous {label}.get_std_deviation',
                    keys_in_all_seeds_data='previous average_data',
                    do_plotting=False,
                    keys_out=None,
                    meas_obj_names=meas_obj_names)
        for mobjn in meas_obj_names:
            cliffords = sweep_points.get_sweep_params_property(
                'values', sweep_type['cliffords'], mospm[mobjn][
                    sweep_type['cliffords']])
            if plot_all_shots:
                # raw scatter of every shot vs nr of cliffords (huge!)
                pp.add_node('prepare_1d_raw_data_plot_dicts',
                            sp_name=mospm[mobjn][sweep_type['cliffords']],
                            xvals=np.repeat(cliffords, n_segments*n_shots
                                if slow_cliffords else n_sequences*n_shots),
                            do_plotting=False,
                            figname_suffix=f'shots_{label}',
                            title_suffix=' - All shots',
                            plot_params={'linestyle': 'none'},
                            keys_in=keys_in,
                            keys_out=None,
                            meas_obj_names=mobjn)
            # x-axis for the per-seed (shot-averaged) scatter plot
            if slow_cliffords:
                xvals = np.repeat(cliffords, n_segments)
            else:
                xvals = np.tile(cliffords, n_sequences)
            pp.add_node('prepare_1d_raw_data_plot_dicts',
                        sp_name=mospm[mobjn][sweep_type['cliffords']],
                        xvals=xvals,
                        do_plotting=False,
                        figname_suffix=f'{label}',
                        title_suffix=' - All seeds',
                        plot_params={'linestyle': 'none'},
                        ylabel='Probability, $P(|e\\rangle)$',
                        yunit='',
                        keys_in='previous average_data',
                        keys_out=None,
                        meas_obj_names=mobjn)
        processing_pipeline += pp
    # do plotting of all plot_dicts in the data_dict
    processing_pipeline.add_node('plot')
    return processing_pipeline
def pipeline_interleaved_rb_irb_classif(meas_obj_names, mospm, sweep_points,
                                        dim_hilbert, cal_points=None, nreps=1,
                                        sweep_type=None,
                                        processing_pipeline=None):
    """
    Wrapper to create the standard processing pipeline for an interleaved
    RB/IRB measurement, measured with the classifier detector with qutrit
    readout.
    :param meas_obj_names: list of measured object names
    :param mospm: meas_obj_sweep_points_map
    :param sweep_points: SweepPoints object (of one file if the measurement
        was split into several files)
    :param dim_hilbert: dimension of Hilbert space. 4 for 2QB RB, 2 for 1QB RB
    :param cal_points: CalibrationPoints object (or its string representation)
    :param nreps: int specifying the number of files to combine into one
        measurement. IMPORTANT! This feature only works if the measurement was
        split by seeds, not by cliffords. Meaning that each measurement file
        contains data for all the Cliffords in sweep_points, but for a subset
        of the total seeds.
    :param sweep_type: dict of the form
        {'cliffords': sweep_dim, 'seeds': sweep_dim} where sweep_dim is either
        0 or 1 and specifies whether the measurement was run with seeds in the
        fast dimension (0) and cliffords in the slow dimension (1), or the
        other way around.
    :param processing_pipeline: ProcessingPipeline instance to which this
        function will append.
    :return: the unresolved ProcessingPipeline
    """
    if sweep_type is None:
        # default: cliffords in the fast dimension, seeds in the slow one
        sweep_type = {'cliffords': 0, 'seeds': 1}
    slow_cliffords = sweep_type['cliffords'] == 1
    sweep_points = sp_mod.SweepPoints(sweep_points)
    if cal_points is None:
        num_cal_states = 0
    else:
        if isinstance(cal_points, str):
            cal_points = cp_mod.CalibrationPoints.from_string(cal_points)
        num_cal_states = len(cal_points.states)
    if slow_cliffords:
        # n_segments = nr_seeds + nr_cal_segments
        n_segments = nreps*(sweep_points.length(sweep_type['seeds'])
                            + num_cal_states)
        # n_sequences = nr_cliffords
        n_sequences = sweep_points.length(sweep_type['cliffords'])
    else:
        # n_segments = nr_cliffords + nr_cal_segments
        n_segments = nreps*(sweep_points.length(sweep_type['cliffords'])
                            + num_cal_states)
        # n_sequences = nr_seeds
        n_sequences = sweep_points.length(sweep_type['seeds'])
    if processing_pipeline is None:
        processing_pipeline = pp_mod.ProcessingPipeline()
    if nreps > 1:
        # merge the per-seed data files into one data set first
        processing_pipeline.add_node('combine_datafiles_split_by_seeds',
                                     keys_in='raw',
                                     interleaved_irb=True,
                                     sweep_type=sweep_type,
                                     meas_obj_names=meas_obj_names)
    # process the RB and the IRB sub-measurements separately
    for label in ['rb', 'irb']:
        pp = pp_mod.ProcessingPipeline(global_keys_out_container=label)
        keys_in = 'previous combine_datafiles_split_by_seeds' if \
            nreps > 1 else 'raw'
        # pull out the data of this sub-measurement from the interleaved set
        pp.add_node('submsmt_data_from_interleaved_msmt', msmt_name=label,
                    keys_in=keys_in, meas_obj_names=meas_obj_names)
        pp.add_node('average_data',
                    shape=(n_sequences, n_segments),
                    averaging_axis=-1 if slow_cliffords else 0,
                    keys_in=f'previous {label}.submsmt_'
                            f'data_from_interleaved_msmt',
                    meas_obj_names=meas_obj_names)
        pp.add_node('get_std_deviation',
                    shape=(n_sequences, n_segments),
                    averaging_axis=-1 if slow_cliffords else 0,
                    keys_in=f'previous {label}.submsmt_'
                            f'data_from_interleaved_msmt',
                    meas_obj_names=meas_obj_names)
        pp.add_node('rb_analysis',
                    d=dim_hilbert,
                    keys_in=f'previous {label}.average_data',
                    keys_in_std=f'previous {label}.get_std_deviation',
                    keys_in_all_seeds_data=f'previous {label}.submsmt_'
                                           f'data_from_interleaved_msmt',
                    do_plotting=False,
                    keys_out=None,
                    meas_obj_names=meas_obj_names)
        for mobjn in meas_obj_names:
            cliffords = sweep_points.get_sweep_params_property(
                'values', sweep_type['cliffords'], mospm[mobjn][
                    sweep_type['cliffords']])
            pp.add_node('prepare_1d_raw_data_plot_dicts',
                        sp_name=mospm[mobjn][-1],
                        xvals=np.repeat(cliffords, n_segments),
                        do_plotting=False,
                        figname_suffix=f'{label}',
                        title_suffix=' - All seeds',
                        plot_params={'linestyle': 'none'},
                        ylabel='Probability, $P(|ee\\rangle)$' if
                            mobjn=='correlation_object' else None,
                        yunit='',
                        keys_in=f'previous {label}.submsmt_'
                                f'data_from_interleaved_msmt',
                        keys_out=None,
                        meas_obj_names=mobjn)
        processing_pipeline += pp
    # calculate interleaved gate error
    processing_pipeline.add_node('irb_gate_error',
                                 meas_obj_names='correlation_object',
                                 d=dim_hilbert)
    # do plotting of all plot_dicts in the data_dict
    processing_pipeline.add_node('plot')
    return processing_pipeline
def pipeline_ssro_measurement(meas_obj_names, mospm, sweep_points, n_shots,
                              dim_hilbert, ro_thresholds=None,
                              nreps=1, interleaved_irb=False, sweep_type=None,
                              plot_all_shots=False, processing_pipeline=None,
                              compression_factor=1, **params):
    """
    Wrapper to create the standard processing pipeline for an (interleaved)
    RB/IRB measurement, measured in SSRO.
    WARNING: if you use plot_all_shots=True, disable data saving. It will try
    to save a huge string of the large numpy array this node will generate.
    :param meas_obj_names: list of measured object names
    :param mospm: meas_obj_sweep_points_map
    :param sweep_points: SweepPoints object (of one file if the measurement
        was split into several files)
    :param n_shots: number of shots
    :param dim_hilbert: dimension of Hilbert space. 4 for 2QB RB, 2 for 1QB RB
    :param ro_thresholds: optional (the threshold_data node can also extract
        them from the data_dict. See docstring there).
        Dict with meas_obj_names as keys and their readout thresholds as values.
    :param nreps: int specifying the number of files to combine into one
        measurement. IMPORTANT! This feature only works if the measurement was
        split by seeds, not by cliffords. Meaning that each measurement file
        contains data for all the Cliffords in sweep_points, but for a subset
        of the total seeds.
    :param interleaved_irb: bool specifying whether the measurement was
        IRB with RB and IRB interleaved.
    :param plot_all_shots: bool specifying whether to produce a raw plot of
        all the shots vs cliffords. SEE WARNING ABOVE.
    :param sweep_type: dict of the form
        {'cliffords': sweep_dim, 'seeds': sweep_dim} where sweep_dim is either
        0 or 1 and specifies whether the measurement was run with seeds in the
        fast dimension (0) and cliffords in the slow dimension (1), or the
        other way around.
    :param compression_factor: sequence compression factor
    :param processing_pipeline: ProcessingPipeline instance to which this
        function will append.
    :param params: other keyword arguments; 'do_plotting' (default True)
        controls whether the final 'plot' node is appended
    :return: the unresolved ProcessingPipeline
    """
    if sweep_type is None:
        # default: cliffords in the fast dimension, seeds in the slow one
        sweep_type = {'cliffords': 0, 'seeds': 1}
    slow_cliffords = sweep_type['cliffords'] == 1
    nr_swpts0 = sweep_points.length(0)
    nr_swpts1 = sweep_points.length(1)
    # account for sequence compression: each upload holds compression_factor
    # hard-sweep blocks
    n_segments = nr_swpts0 * compression_factor
    n_sequences = (nr_swpts1 * (interleaved_irb + 1)) // compression_factor
    if processing_pipeline is None:
        processing_pipeline = pp_mod.ProcessingPipeline()
    if nreps > 1:
        # merge the per-seed data files into one data set first
        processing_pipeline.add_node('combine_datafiles_split_by_seeds',
                                     keys_in='raw',
                                     n_shots=n_shots,
                                     sweep_type=sweep_type,
                                     interleaved_irb=interleaved_irb,
                                     meas_obj_names=meas_obj_names)
    keys_in = 'previous combine_datafiles_split_by_seeds' if nreps > 1 else 'raw'
    processing_pipeline.add_node('threshold_data',
                                 keys_in=keys_in,
                                 ro_thresholds=ro_thresholds,
                                 meas_obj_names=meas_obj_names)
    # average over the shots axis (axis 1 of the reshaped data)
    processing_pipeline.add_node('average_data',
                                 shape=(n_sequences, n_shots, n_segments),
                                 final_shape=(n_sequences*n_segments),
                                 averaging_axis=1,
                                 keys_in='previous threshold_data',
                                 meas_obj_names=meas_obj_names)
    if plot_all_shots:
        for mobjn in meas_obj_names:
            cliffords = sweep_points.get_sweep_params_property(
                'values', sweep_type['cliffords'], mospm[mobjn])[0]
            keys_in = 'previous combine_datafiles_split_by_seeds' \
                if nreps > 1 else 'raw'
            if slow_cliffords:
                xvals = np.repeat(cliffords, 2*n_segments*n_shots if
                                  interleaved_irb else n_segments*n_shots)
            else:
                xvals = np.repeat(cliffords, n_sequences*n_shots)
            processing_pipeline.add_node('prepare_1d_raw_data_plot_dicts',
                                         sp_name=mospm[mobjn][-1],
                                         xvals=xvals,
                                         do_plotting=False,
                                         figname_suffix=f'shots',
                                         title_suffix=' - All shots',
                                         plot_params={'linestyle': 'none'},
                                         keys_in=keys_in,
                                         keys_out=None,
                                         meas_obj_names=mobjn)
    if dim_hilbert == 4:
        # two-qubit RB: additionally analyse the two-qubit correlator
        processing_pipeline.add_node('correlate_qubits',
                                     keys_in='previous threshold_data',
                                     meas_obj_names=meas_obj_names,
                                     joint_processing=True, num_keys_out=1,
                                     keys_out_container='correlation_object',
                                     add_mobjn_container=False)
        processing_pipeline.add_node('average_data',
                                     shape=(n_sequences, n_shots, n_segments),
                                     final_shape=(n_sequences*n_segments),
                                     averaging_axis=1,
                                     keys_in='previous correlate_qubits',
                                     meas_obj_names=['correlation_object'])
        # copy to avoid mutating the caller's list
        meas_obj_names = deepcopy(meas_obj_names)
        meas_obj_names += ['correlation_object']
        mospm['correlation_object'] = list(mospm.values())[0]
    labels = ['rb', 'irb'] if interleaved_irb else ['rb']
    for label in labels:
        pp = pp_mod.ProcessingPipeline(keys_out_container=label)
        keys_in_0 = 'previous average_data'
        if interleaved_irb:
            # pull out this sub-measurement from the interleaved data
            pp.add_node('submsmt_data_from_interleaved_msmt',
                        msmt_name=label,
                        keys_in='previous average_data',
                        meas_obj_names=meas_obj_names)
            keys_in_0 = f'previous {label}.submsmt_data_from_interleaved_msmt'
        pp.add_node('average_data',
                    shape=(nr_swpts1, nr_swpts0),
                    averaging_axis=-1 if slow_cliffords else 0,
                    keys_in=keys_in_0,
                    meas_obj_names=meas_obj_names)
        pp.add_node('get_std_deviation',
                    shape=(nr_swpts1, nr_swpts0),
                    averaging_axis=-1 if slow_cliffords else 0,
                    keys_in=keys_in_0,
                    meas_obj_names=meas_obj_names)
        pp.add_node('rb_analysis',
                    d=dim_hilbert,
                    sweep_type=sweep_type,
                    msmt_type=label,
                    state_prob_name='e' if dim_hilbert==2 else None,
                    keys_in=f'previous {label}.average_data',
                    keys_in_std=f'previous {label}.get_std_deviation',
                    keys_in_all_seeds_data=keys_in_0,
                    do_plotting=False,
                    keys_out=None,
                    meas_obj_names=meas_obj_names)
        for mobjn in meas_obj_names:
            cliffords = sweep_points.get_sweep_params_property(
                'values', sweep_type['cliffords'], mospm[mobjn])[0]
            xvals = np.repeat(cliffords, nr_swpts0) if slow_cliffords else \
                np.tile(cliffords, nr_swpts1)
            pp.add_node('prepare_1d_raw_data_plot_dicts',
                        sp_name=mospm[mobjn][-1],
                        xvals=xvals,
                        do_plotting=False,
                        figname_suffix=f'{label}',
                        title_suffix=' - All seeds',
                        plot_params={'linestyle': 'none'},
                        ylabel='Probability, ' + ('$P(|ee\\rangle)$' if
                            mobjn=='correlation_object' else '$P(|e\\rangle)$'),
                        yunit='',
                        keys_in=keys_in_0,
                        keys_out=None,
                        meas_obj_names=mobjn)
        processing_pipeline += pp
    if interleaved_irb:
        # calculate interleaved gate error
        processing_pipeline.add_node(
            'irb_gate_error', meas_obj_names='correlation_object' if
            dim_hilbert == 4 else meas_obj_names, d=dim_hilbert)
    # do plotting of all plot_dicts in the data_dict
    if params.get('do_plotting', True):
        processing_pipeline.add_node('plot')
    return processing_pipeline
# nodes related to extracting data
def combine_datafiles_split_by_seeds(data_dict, keys_in, keys_out,
                                     interleaved_irb=False, **params):
    """
    NOT FULLY IMPLEMENTED FOR slow_cliffords == True!!!
    Combines the data from an (interleaved) RB/IRB measurement that was saved in
    multiple files into one data set that would look as if it had all been
    taken in one measurement (one file).
    :param data_dict: OrderedDict containing data to be processed and where
        processed data is to be stored
    :param keys_in: list of key names or dictionary keys paths in
        data_dict for the data to be processed
    :param keys_out: list of key names or dictionary keys paths in
        data_dict for the processed data to be saved into
    :param interleaved_irb: bool specifying whether the measurement was
        IRB with RB and IRB interleaved.
    :param params: keyword arguments:
        Should contain 'exp_metadata_list', 'n_shots', 'mospm', 'rev_movnm',
        'cp' if they are not in data_dict
    ToDo: put n_shots info in the metadata (27.07.2020)
    :return: nothing; adds the combined arrays under keys_out and replaces
        'exp_metadata.sweep_points' with the merged SweepPoints

    Assumptions:
        - ASSUMES MEASUREMENT WAS SPLIT BY SEEDS NOT BY CLIFFORDS. Meaning that
        each measurement file contains data for all the Cliffords in
        sweep_points, but for a subset of the total seeds.
    """
    assert len(keys_in) == len(keys_out)
    n_shots = hlp_mod.get_param('n_shots', data_dict, default_value=1, **params)
    mospm, rev_movnm, cp, mobjn = hlp_mod.get_measurement_properties(
        data_dict, props_to_extract=['mospm', 'rev_movnm', 'cp', 'mobjn'],
        **params)
    metadata_list = hlp_mod.get_param('exp_metadata_list', data_dict,
                                      raise_error=True, **params)
    # one SweepPoints per data file; all files are assumed to share the
    # dimension-1 (uploads) sweep points and differ only in the seeds
    sp_list = [hlp_mod.get_param('sweep_points', mdl, raise_error=True)
               for mdl in metadata_list]
    sp0 = sp_mod.SweepPoints(sp_list[0])
    # segments per upload: hard sweep points + calibration segments
    nr_segments = sp0.length(0) + len(cp.states)
    nr_uploads = sp0.length(1)
    chunk = nr_segments*n_shots
    data_to_proc_dict = hlp_mod.get_data_to_process(data_dict, keys_in)
    for keyi, keyo in zip(keys_in, keys_out):
        # one row per data file is expected
        data = data_to_proc_dict[keyi]
        if np.ndim(data) != 2:
            raise ValueError(f'Data corresponding to {keyi} is not 2D.')
        # take the segment_chunk * n_shots for each clifford from each row
        # (corresponding to data from one data file) in data and concatenate
        # them. Put all the nr_cliffords concatenations in the
        # list data_combined
        data_combined = [np.concatenate(
            [d[m * chunk + j * nr_segments: m * chunk + (j + 1) * nr_segments]
             for d in data])
            for m in np.arange((interleaved_irb + 1)*nr_uploads)
            for j in np.arange(n_shots)]
        # concatenate all the lists in data_combined to get one complete
        # array of data
        data_combined = np.concatenate(data_combined)
        hlp_mod.add_param(keyo, data_combined, data_dict, **params)
    # update the sweep_points if they were a list
    nr_sp0 = sp0.length(0)
    nr_exp = len(sp_list)
    sp_all_vals_list = [np.zeros(nr_exp*nr_sp0, dtype=int) for _
                        in range(len(sp0.get_sweep_dimension(0)))]
    for i, sp in enumerate(sp_list):
        sp = sp_mod.SweepPoints(sp)
        sp_vals_list = sp.get_sweep_params_property('values', 0, 'all')
        for j, sp_vals in enumerate(sp_vals_list):
            # interleave the hard sweep values of file i at stride nr_exp
            sp_all_vals_list[j][i::nr_exp] = sp_vals
    # rebuild a single SweepPoints object with the merged dimension-0 values
    # and the unchanged dimension-1 sweep
    sweep_points = sp_mod.SweepPoints()
    for i, sp_name in enumerate(sp0.get_sweep_dimension(0)):
        sweep_points.add_sweep_parameter(
            sp_name, sp_all_vals_list[i],
            sp0.get_sweep_params_property('unit', 0, sp_name),
            sp0.get_sweep_params_property('label', 0, sp_name))
    sweep_points += [sp0.get_sweep_dimension(1)]
    hlp_mod.add_param('exp_metadata.sweep_points', sweep_points,
                      data_dict, add_param_method='replace')
def submsmt_data_from_interleaved_msmt(data_dict, keys_in, msmt_name,
                                       keys_out=None, sweep_type=None,
                                       **params):
    """
    Extracts the data of one sub-measurement from an interleaved RB/IRB
    measurement, in which the two sub-measurements alternate between
    sequence uploads (RB at even upload indices, IRB at odd ones).
    :param data_dict: OrderedDict containing data to be processed and where
        processed data is to be stored
    :param keys_in: list of key names or dictionary keys paths in
        data_dict for the data to be processed
    :param msmt_name: 'rb' selects the even upload chunks; anything else
        selects the odd ones (i.e. the IRB sub-measurement)
    :param keys_out: list of key names for the extracted data; auto-generated
        from keys_in if None
    :param sweep_type: dict of the form {'cliffords': dim, 'seeds': dim}
        (dim is 0 or 1); defaults to cliffords in the fast dimension
    :param params: keyword arguments passed to the hlp_mod getters
    """
    # 0 for 'rb' (even uploads), 1 otherwise (odd uploads)
    start_index = (msmt_name.lower() != 'rb')
    if sweep_type is None:
        sweep_type = {'cliffords': 0, 'seeds': 1}
    slow_cliffords = sweep_type['cliffords'] == 1
    n_shots = hlp_mod.get_param('n_shots', data_dict, default_value=1, **params)
    data_to_proc_dict = hlp_mod.get_data_to_process(data_dict, keys_in)
    sp, cp = hlp_mod.get_measurement_properties(
        data_dict, props_to_extract=['sp', 'cp'], **params)
    # NOTE(review): cal states are added to nr_seeds here AND again to
    # nr_segments below when slow_cliffords is True, i.e. they appear to be
    # counted twice in that branch; the pipeline wrappers above add
    # num_cal_states only once. Confirm intended behavior.
    nr_seeds = sp.length(sweep_type['seeds']) + len(cp.states)
    nr_cliffords = sp.length(sweep_type['cliffords'])
    nr_segments = (nr_seeds if slow_cliffords else nr_cliffords) + len(cp.states)
    nr_uploads = (nr_cliffords if slow_cliffords else nr_seeds)
    if keys_out is None:
        keys_out = [f'{msmt_name}_data_from_interleaved_msmt.{s}'
                    for s in keys_in]
    for keyi, keyo in zip(keys_in, keys_out):
        data = data_to_proc_dict[keyi]
        # NOTE(review): this size check does not include n_shots, while the
        # slicing below does (chunks of nr_segments*n_shots); for n_shots > 1
        # correctly-sized data would be rejected here. Confirm intended.
        if len(data) != nr_segments * (2 * nr_uploads):
            raise ValueError(f'The data has the wrong size of {len(data)}, '
                             f'which is not expected for {nr_segments} '
                             f'segments and {nr_uploads} uploads.')
        # keep every second upload chunk, starting at start_index
        selected_data = np.concatenate([
            data[j*nr_segments*n_shots:(j+1)*nr_segments*n_shots]
            for j in np.arange(2*nr_uploads)[start_index::2]])
        hlp_mod.add_param(
            keyo, selected_data, data_dict, **params)
def rb_analysis(data_dict, keys_in, sweep_type=None, **params):
    """
    Runs the single qubit RB analysis: prepares the fit dicts, runs the
    fits, extracts the errors per Clifford, and prepares/executes the plots.
    :param data_dict: OrderedDict containing data to be processed and where
        processed data is to be stored
    :param keys_in: list of key names or dictionary keys paths in
        data_dict for the data to be processed
    :param sweep_type: dict of the form {'cliffords': dim, 'seeds': dim}
        (dim is 0 or 1); defaults to cliffords in the fast dimension
    :param params: keyword arguments; the stage flags 'prep_fit_dicts',
        'do_fitting', 'prepare_plotting', 'do_plotting' (all default True)
        are popped from here / data_dict
    Assumptions:
        - cal_points, sweep_points, qb_sweep_points_map, qb_name exist in
        metadata or params
        - expects a 2d sweep with nr_seeds on innermost sweep and cliffords
        on outermost
        - if active reset was used, 'filter' must be in the key names of the
        filtered data if you want the filtered raw data to be plotted
    """
    proc_data = hlp_mod.get_data_to_process(data_dict, keys_in)
    keys_in = list(proc_data)
    # pop the stage-enable flags in a fixed order (all default to True)
    stage_flags = {}
    for flag_name in ('prep_fit_dicts', 'do_fitting',
                      'prepare_plotting', 'do_plotting'):
        stage_flags[flag_name] = hlp_mod.pop_param(
            flag_name, data_dict, default_value=True, node_params=params)
    sp, mospm, mobjn = hlp_mod.get_measurement_properties(
        data_dict, props_to_extract=['sp', 'mospm', 'mobjn'], **params)
    if sweep_type is None:
        sweep_type = {'cliffords': 0, 'seeds': 1}
    nr_seeds = sp.length(sweep_type['seeds'])
    nr_files = len(data_dict['timestamps'])
    if nr_files > 1:
        # seeds accumulated over several measurement files
        nr_seeds *= nr_files
    cliffords = sp.get_sweep_params_property(
        'values', sweep_type['cliffords'], mospm[mobjn])[0]
    # prepare fitting
    if stage_flags['prep_fit_dicts']:
        prepare_rb_fitting(data_dict, proc_data, cliffords, nr_seeds,
                           **params)
    if stage_flags['do_fitting']:
        fit_mod.run_fitting(data_dict, keys_in=list(data_dict['fit_dicts']),
                            **params)
        # extract EPC, leakage, and seepage from fits and save to
        # data_dict[meas_obj_name]
        analyze_rb_fit_results(data_dict, keys_in, **params)
    # prepare plots
    if stage_flags['prepare_plotting']:
        prepare_rb_plots(data_dict, keys_in, sweep_type, **params)
    if stage_flags['do_plotting']:
        plot_mod.plot(data_dict, keys_in=list(data_dict['plot_dicts']),
                      **params)
def prepare_rb_fitting(data_dict, data_to_proc_dict, cliffords, nr_seeds,
                       **params):
    """
    Prepares the lmfit fit dictionaries for the RB decay fit A*p**m + B to
    each data set in data_to_proc_dict and stores them under
    data_dict['fit_dicts']. For |f>-state data ('pf' in the key name) an
    additional Google-style leakage fit dict is prepared.
    :param data_dict: OrderedDict containing data to be processed and where
        the fit dicts are to be stored
    :param data_to_proc_dict: dict with the data arrays to fit, keyed by the
        keys_in names
    :param cliffords: array of nr-of-cliffords sweep values (fit x-values)
    :param nr_seeds: total number of seeds (used for the confidence-interval
        based weighting)
    :param params: keyword arguments passed to the hlp_mod getters; may
        contain 'conf_level', 'do_simple_fit', 'd', 'fit_guess_params',
        'gate_decomp', 'keys_in_std', 'epsilon_guess'
    """
    cp, mobjn = hlp_mod.get_measurement_properties(
        data_dict, props_to_extract=['cp', 'mobjn'], **params)
    conf_level = hlp_mod.get_param('conf_level', data_dict,
                                   default_value=0.68, **params)
    do_simple_fit = hlp_mod.get_param(
        'do_simple_fit', data_dict, default_value=True, **params)
    d = hlp_mod.get_param('d', data_dict, raise_error=True, **params)
    print('d: ', d)  # NOTE(review): leftover debug print; consider removing
    guess_pars = {'A': {'value': 0.5, 'min': -1, 'max': 1},
                  'p': {'value': 0.99, 'min': 0, 'max': 1},
                  'B': {'value': 0.05, 'min': -1, 'max': 1}}
    fit_guess_params = hlp_mod.get_param('fit_guess_params', data_dict,
                                         default_value={}, **params)
    guess_pars.update(fit_guess_params)
    fit_dicts = OrderedDict()
    rb_mod = lmfit.Model(fit_mods.RandomizedBenchmarkingDecay)
    rb_mod.set_param_hint('Amplitude', **guess_pars['A'])
    rb_mod.set_param_hint('p', **guess_pars['p'])
    rb_mod.set_param_hint('offset', **guess_pars['B'])
    # average fidelity per Clifford from the depolarization parameter p
    rb_mod.set_param_hint('fidelity_per_Clifford',
                          expr=f'1-(({d}-1)*(1-p)/{d})')
    rb_mod.set_param_hint('error_per_Clifford',
                          expr='1-fidelity_per_Clifford')
    gate_decomp = hlp_mod.get_param('gate_decomp', data_dict,
                                    default_value='HZ', **params)
    # 1.875 / 1.125: average number of physical gates per Clifford for the
    # XY / HZ decompositions respectively
    if gate_decomp == 'XY':
        rb_mod.set_param_hint('fidelity_per_gate',
                              expr='fidelity_per_Clifford**(1./1.875)')
    elif gate_decomp == 'HZ':
        rb_mod.set_param_hint('fidelity_per_gate',
                              expr='fidelity_per_Clifford**(1./1.125)')
    else:
        raise ValueError('Gate decomposition not recognized.')
    rb_mod.set_param_hint('error_per_gate', expr='1-fidelity_per_gate')
    guess_pars = rb_mod.make_params()
    keys_in_std = hlp_mod.get_param('keys_in_std', data_dict, raise_error=False,
                                    **params)
    if keys_in_std is None:
        keys_in_std = [''] * len(data_to_proc_dict)
    if len(keys_in_std) != len(data_to_proc_dict):
        raise ValueError('keys_in_std and keys_in do not have '
                         'the same length.')
    for keyi, keys in zip(data_to_proc_dict, keys_in_std):
        if 'pf' in keyi:
            # if this is the |f> state population data, then do an additional
            # fit based on the Google style
            fit_mod.prepare_rbleakage_fit_dict(
                data_dict, [keyi], indep_var_array=cliffords,
                fit_name='rbleak_fit', **params)
        # do standard fit to A*p**m + B
        key = 'rb_fit' + keyi
        data_fit = hlp_mod.get_msmt_data(data_to_proc_dict[keyi], cp, mobjn)
        model = deepcopy(rb_mod)
        fit_dicts[key] = {
            'fit_fn': fit_mods.RandomizedBenchmarkingDecay,
            'fit_xvals': {'numCliff': cliffords},
            'fit_yvals': {'data': np.array(data_fit).flatten()},
            'guess_pars': guess_pars}
        if do_simple_fit:
            # unweighted fit
            fit_kwargs = {}
        elif keys is not None:
            # weight by the measured standard deviations
            stds = np.array(hlp_mod.get_param(keys, data_dict)).flatten()
            fit_kwargs = {'scale_covar': False,
                          'weights': 1/stds}
        else:
            # Run once to get an estimate for the error per Clifford
            fit_res = model.fit(data_fit, numCliff=cliffords,
                                params=guess_pars)
            # Use the found error per Clifford to compute standard errors for
            # the data points from Helsen et al. (2017)
            epsilon_guess = hlp_mod.get_param('epsilon_guess', data_dict,
                                              default_value=0.01, **params)
            # NOTE(review): d=2 is hard-coded here although d was read from
            # data_dict above — confirm this is intended for d != 2
            epsilon = calculate_rb_confidence_intervals(
                nr_seeds=nr_seeds,
                nr_cliffords=cliffords,
                depolariz_param=fit_res.best_values['p'],
                conf_level=conf_level,
                epsilon_guess=epsilon_guess, d=2)
            hlp_mod.add_param(
                keys, epsilon, data_dict,
                add_param_method=params.get('add_param_method', None))
            # Run fit again with scale_covar=False, and
            # weights = 1/epsilon if an entry in epsilon_sqrd is 0,
            # replace it with half the minimum value in the epsilon_sqrd
            # array
            idxs = np.where(epsilon == 0)[0]
            epsilon[idxs] = min([eps for eps in epsilon if eps != 0])/2
            fit_kwargs = {'scale_covar': False, 'weights': 1/epsilon}
        fit_dicts[key]['fit_kwargs'] = fit_kwargs
    hlp_mod.add_param('fit_dicts', fit_dicts, data_dict,
                      add_param_method='update')
def analyze_rb_fit_results(data_dict, keys_in, **params):
    """
    Extracts the RB figures of merit from the fit results stored in
    data_dict['fit_dicts'] and saves them into data_dict: error per Clifford
    (EPC) and depolarization parameter for every data set, plus IBM- and
    Google-style leakage/seepage for |f>-state data ('pf' in the key name),
    and optionally the coherence-limited EPC.
    :param data_dict: OrderedDict containing the fit results and where the
        extracted quantities are to be stored
    :param keys_in: list of key names used to index the fit dicts
        ('rb_fit' + keyi, and 'rbleak_fit' + keyi for |f>-state data)
    :param params: keyword arguments passed to the hlp_mod getters; may
        contain 'msmt_type', 'keys_out_container', 'plot_T1_lim',
        'gate_decomp'
    """
    mobjn = hlp_mod.get_measurement_properties(
        data_dict, props_to_extract=['mobjn'], **params)
    msmt_type = hlp_mod.get_param('msmt_type', data_dict, **params)
    keys_out_container = hlp_mod.get_param('keys_out_container', data_dict,
                                           default_value='', **params)
    # Bug fix: check for None BEFORE calling len(); the original order
    # (not len(x) or x is None) raised TypeError whenever x was None.
    if keys_out_container is None or not len(keys_out_container):
        keys_out_container = f'{mobjn}.{msmt_type}'
    fit_dicts = hlp_mod.get_param('fit_dicts', data_dict, raise_error=True)
    for keyi in keys_in:
        fit_res = fit_dicts['rb_fit' + keyi]['fit_res']
        hlp_mod.add_param(f'{keys_out_container}.EPC value',
                          fit_res.params['error_per_Clifford'].value,
                          data_dict, add_param_method='replace')
        # stderr of fidelity_per_Clifford equals that of
        # error_per_Clifford = 1 - fidelity_per_Clifford
        hlp_mod.add_param(f'{keys_out_container}.EPC stderr',
                          fit_res.params['fidelity_per_Clifford'].stderr,
                          data_dict, add_param_method='replace')
        hlp_mod.add_param(
            f'{keys_out_container}.depolarization parameter value',
            fit_res.params['p'].value, data_dict,
            add_param_method='replace')
        hlp_mod.add_param(
            f'{keys_out_container}.depolarization parameter stderr',
            fit_res.params['p'].stderr, data_dict,
            add_param_method='replace')
        if 'pf' in keyi:
            # |f>-state population data: also extract leakage and seepage
            A = fit_res.best_values['Amplitude']
            Aerr = fit_res.params['Amplitude'].stderr
            p = fit_res.best_values['p']
            perr = fit_res.params['p'].stderr
            # IBM-style leakage and seepage:
            # https://journals.aps.org/pra/abstract/10.1103/PhysRevA.97.032306
            hlp_mod.add_param(f'{keys_out_container}.IBM-style leakage value',
                              A*(1-p),
                              data_dict,
                              add_param_method='replace')
            # error propagation of A*(1-p)
            hlp_mod.add_param(f'{keys_out_container}.IBM-style leakage stderr',
                              np.sqrt((A*perr)**2 + (Aerr*(p-1))**2),
                              data_dict,
                              add_param_method='replace')
            hlp_mod.add_param(f'{keys_out_container}.IBM-style seepage value',
                              (1-A)*(1-p),
                              data_dict,
                              add_param_method='replace')
            # error propagation of (1-A)*(1-p)
            hlp_mod.add_param(f'{keys_out_container}.IBM-style seepage stderr',
                              np.sqrt((Aerr*(p-1))**2 + ((A-1)*perr)**2),
                              data_dict,
                              add_param_method='replace')
            # Google-style leakage and seepage:
            # https://journals.aps.org/prl/pdf/10.1103/PhysRevLett.116.020501
            fit_res = fit_dicts['rbleak_fit' + keyi]['fit_res']
            hlp_mod.add_param(f'{keys_out_container}.Google-style leakage value',
                              fit_res.best_values['pu'],
                              data_dict,
                              add_param_method='replace')
            hlp_mod.add_param(f'{keys_out_container}.Google-style leakage stderr',
                              fit_res.params['pu'].stderr,
                              data_dict,
                              add_param_method='replace')
            hlp_mod.add_param(f'{keys_out_container}.Google-style seepage value',
                              fit_res.best_values['pd'],
                              data_dict,
                              add_param_method='replace')
            hlp_mod.add_param(f'{keys_out_container}.Google-style seepage stderr',
                              fit_res.params['pd'].stderr,
                              data_dict,
                              add_param_method='replace')
        if hlp_mod.get_param('plot_T1_lim', data_dict, default_value=False,
                             **params):
            # get T1, T2, gate length from HDF file
            get_meas_obj_coh_times(data_dict, **params)
            F_T1, p_T1 = calc_rb_coherence_limited_fidelity(
                hlp_mod.get_param(f'{mobjn}.T1', data_dict, **params),
                hlp_mod.get_param(f'{mobjn}.T2', data_dict, **params),
                hlp_mod.get_param(f'{mobjn}.ge_sigma', data_dict, **params) *
                hlp_mod.get_param(f'{mobjn}.ge_nr_sigma', data_dict, **params),
                hlp_mod.get_param('gate_decomp', data_dict,
                                  default_value='HZ', **params))
            hlp_mod.add_param(f'{keys_out_container}.EPC coh_lim', 1-F_T1,
                              data_dict, add_param_method='replace')
            hlp_mod.add_param(
                f'{keys_out_container}.depolarization parameter coh_lim', p_T1,
                data_dict, add_param_method='replace')
def prepare_rb_plots(data_dict, keys_in, sweep_type, **params):
    """
    Prepares the plot dicts for an RB measurement (averaged data with error
    bars, per-seed scatter, calibration states, fit traces, coherence limit,
    and the results textbox) and stores them under data_dict['plot_dicts'].
    :param data_dict: OrderedDict containing data to be processed and where
        the plot dicts are to be stored
    :param keys_in: list of key names or dictionary keys paths in
        data_dict for the data to be plotted
    :param sweep_type: dict of the form {'cliffords': dim, 'seeds': dim}
        (dim is 0 or 1)
    :param params: keyword arguments passed to the hlp_mod/plot_mod helpers;
        may contain 'keys_in_std', 'state_prob_name', 'ylabel',
        'figure_name_suffix', 'keys_in_all_seeds_data', 'plot_T1_lim',
        'va_text', 'yrange'
    """
    sp, cp, mospm, mobjn, movnm = hlp_mod.get_measurement_properties(
        data_dict, props_to_extract=['sp', 'cp', 'mospm', 'mobjn', 'movnm'],
        **params)
    plot_dicts = OrderedDict()
    # NOTE(review): if keys_in_std is absent this returns None and the
    # zip(keys_in, keys_in_std) below raises TypeError — confirm callers
    # always provide it
    keys_in_std = hlp_mod.get_param('keys_in_std', data_dict, raise_error=False,
                                    **params)
    stpn = hlp_mod.get_param(
        'state_prob_name', data_dict,
        default_value='gg' if 'corr' in mobjn else 'e', **params)
    # NOTE(review): classified_msmt is computed but unused in this function
    classified_msmt = any([v == 3 for v in [len(chs) for chs in movnm.values()]])
    # pull default rc-params for styling the plots below
    lw = plot_mod.get_default_plot_params(
        set_params=False, return_full_rc_params=True)['lines.linewidth']
    ms = plot_mod.get_default_plot_params(
        set_params=False, return_full_rc_params=True)['lines.markersize']
    llsp = plot_mod.get_default_plot_params(
        set_params=False, return_full_rc_params=True)['legend.labelspacing']
    lcsp = plot_mod.get_default_plot_params(
        set_params=False, return_full_rc_params=True)['legend.columnspacing']
    ylabel = hlp_mod.pop_param('ylabel', data_dict, node_params=params)
    if ylabel is None:
        if isinstance(stpn, (tuple, list)):
            # assumed of the form ('gg', '+', 'ee', '-', 'ge', '-', 'eg');
            # build e.g. '$P(|gg\\rangle)$+$P(|ee\\rangle)$-...'
            prob_states = stpn[0::2]
            prob_labels = [f'$P(|{{{p}}}\\rangle)$' for p in prob_states]
            ylabel = (2*len(prob_states)-1)*['']
            ylabel[0::2] = prob_labels
            ylabel[1::2] = list(stpn[1::2])
            ylabel = ''.join(ylabel)
        else:
            ylabel = f'Probability, $P(|{{{stpn}}}\\rangle)$'
    figure_name_suffix = hlp_mod.get_param('figure_name_suffix', data_dict,
                                           default_value='', **params)
    for keyi, keys in zip(keys_in, keys_in_std):
        figure_name = f'RB_{keyi}_{mobjn}{figure_name_suffix}'
        sp_name = [p for p in mospm[mobjn] if 'clifford' in p][0]
        # plot data
        pd = \
            plot_mod.prepare_1d_plot_dicts(data_dict=data_dict, keys_in=[keyi],
                                           figure_name=figure_name,
                                           ylabel=ylabel,
                                           sp_name=sp_name,
                                           yerr_key=keys,
                                           data_labels=['avg.'],
                                           plot_params={
                                               'zorder': 2, 'marker': 'o',
                                               'legend_ncol': 3,
                                               'line_kws': {
                                                   'elinewidth': lw+3,
                                                   'markersize': ms+1,
                                                   'alpha_errorbars': 0.25}},
                                           do_plotting=False, **params)
        plot_dicts.update(pd)
        # plot all seeds
        keys_in_all_seeds_data = hlp_mod.get_param('keys_in_all_seeds_data',
                                                   data_dict, **params)
        clf_dim = sweep_type['cliffords']
        seeds_dim = sweep_type['seeds']
        cliffords = sp.get_sweep_params_property('values', clf_dim, sp_name)
        # x-axis ordering depends on which dimension holds the cliffords
        xvals = np.repeat(cliffords, sp.length(seeds_dim)) if clf_dim == 1 \
            else np.tile(cliffords, sp.length(seeds_dim))
        if keys_in_all_seeds_data is not None:
            pd = \
                plot_mod.prepare_1d_plot_dicts(data_dict=data_dict,
                                               keys_in=keys_in_all_seeds_data,
                                               figure_name=figure_name,
                                               xvals=xvals,
                                               ylabel=ylabel,
                                               sp_name=sp_name,
                                               data_labels=['seeds'],
                                               plot_params={
                                                   'linestyle': 'none',
                                                   'marker': '.',
                                                   'color': 'gray',
                                                   'line_kws': {'alpha': 0.5},
                                                   'zorder': 1},
                                               do_plotting=False, **params)
            plot_dicts.update(pd)
        if len(cp.states) != 0:
            # plot cal states
            plot_dicts.update(
                plot_mod.prepare_cal_states_plot_dicts(data_dict=data_dict,
                                                       keys_in=[keyi],
                                                       figure_name=figure_name,
                                                       sp_name=sp_name,
                                                       do_plotting=False,
                                                       **params))
        if 'fit_dicts' in data_dict:
            # plot fits
            fit_dicts = data_dict['fit_dicts']
            textstr = ''
            if 'pf' in keyi:
                # plot Google-style leakage fit + textbox
                plot_dicts.update(plot_mod.prepare_fit_plot_dicts(
                    data_dict=data_dict,
                    figure_name=figure_name,
                    fit_names=['rbleak_fit' + keyi],
                    plot_params={'color': 'C1',
                                 'setlabel': 'fit - Google',
                                 'legend_ncol': 3},
                    do_plotting=False, **params))
                textstr += get_rb_textbox_properties(
                    data_dict, fit_dicts['rbleak_fit' + keyi]['fit_res'],
                    textstr_style=['leakage_google'],
                    **params)[0]
            # plot fit trace
            pd = plot_mod.prepare_fit_plot_dicts(
                data_dict=data_dict,
                figure_name=figure_name,
                fit_names=['rb_fit' + keyi],
                plot_params={'color': 'C0',
                             'setlabel': 'fit - IBM' if 'pf' in keyi else 'fit',
                             'legend_ncol': 3},
                do_plotting=False, **params)
            plot_dicts.update(pd)
            # plot coherence-limit
            fit_res = fit_dicts['rb_fit' + keyi]['fit_res']
            if hlp_mod.get_param('plot_T1_lim', data_dict,
                                 default_value=False, **params) and 'pf' not in keyi:
                keys_out_container = hlp_mod.get_param('keys_out_container',
                                                       data_dict,
                                                       default_value=mobjn,
                                                       **params)
                epc_T1 = hlp_mod.get_param(f'{keys_out_container}.EPC coh_lim',
                                           data_dict, **params)
                p_T1 = hlp_mod.get_param(
                    f'{keys_out_container}.depolarization parameter coh_lim',
                    data_dict, **params)
                # evaluate the fitted model with the coherence-limited p
                clfs_fine = np.linspace(cliffords[0], cliffords[-1], 1000)
                T1_limited_curve = fit_res.model.func(
                    clfs_fine, fit_res.best_values['Amplitude'], p_T1,
                    fit_res.best_values['offset'])
                plot_dicts['t1Lim_' + keyi] = {
                    'fig_id': figure_name,
                    'plotfn': 'plot_line',
                    'xvals': clfs_fine,
                    'yvals': T1_limited_curve,
                    'setlabel': 'coh-lim',
                    'do_legend': True,
                    'linestyle': '--',
                    'line_kws': {'linewidth': lw-0.5},
                    'zorder': 0,
                    'marker': ''}
            else:
                epc_T1 = None
            # add textbox
            va_text = hlp_mod.get_param('va_text', data_dict, **params)
            if va_text is None:
                va_text = 'top' if 'g' in stpn else 'bottom'
            textstr, ha, hp, va, vp = get_rb_textbox_properties(
                data_dict, fit_res, epc_T1=None if 'pf' in keyi else epc_T1,
                va=va_text,
                textstr_style='leakage_ibm' if 'pf' in keyi else 'regular',
                textstr=textstr if 'pf' in keyi else '', **params)
            plot_dicts['text_msg_' + keyi] = {
                'fig_id': figure_name,
                'plotfn': 'plot_text',
                'ypos': vp,
                'xpos': hp,
                'horizontalalignment': ha,
                'verticalalignment': va,
                'box_props': None,
                'text_string': textstr}
        # NOTE(review): relies on the second-to-last inserted plot dict being
        # the one that should carry the legend styling — fragile ordering
        plot_dicts[list(plot_dicts)[-2]].update({
            'legend_bbox_to_anchor': (1.025, -0.15),
            'legend_pos': 'upper right',
            'legend_labelspacing': llsp-0.25,
            'legend_columnspacing': lcsp-1,
            'legend_ncol': 1 if 'pf' in keyi else 2,
            'yrange': hlp_mod.get_param('yrange', data_dict, **params)
        })
    hlp_mod.add_param('plot_dicts', plot_dicts, data_dict,
                      add_param_method='update')
def prepare_irb_plot(data_dict, plot_dict_names_irb_plot=None,
                     figure_name=None, **params):
    """Prepare the combined RB vs. IRB comparison figure.

    Overlays the reference RB and interleaved RB (IRB) curves in one
    figure, adds legend stubs carrying the errors per Clifford (EPC) and,
    when available, a textbox with the interleaved-gate error. The
    resulting plot dicts are merged into ``data_dict['plot_dicts']``.

    :param data_dict: OrderedDict containing analysis results/parameters.
    :param plot_dict_names_irb_plot: dict with keys 'rb' and 'irb' that
        map plot types to names of already-prepared plot dicts; taken
        from data_dict/params if None.
    :param figure_name: name of the output figure (defaults to 'IRB').
    :param params: passed to the hlp_mod getters; ``do_plotting`` (bool)
        is popped and triggers plotting at the end.
    """
    plot_dicts_updated = OrderedDict()
    do_plotting = params.pop('do_plotting', False)
    if figure_name is None:
        figure_name = 'IRB'
    mobjn = hlp_mod.get_measurement_properties(
        data_dict, props_to_extract=['mobjn'],
        **params)
    if plot_dict_names_irb_plot is None:
        plot_dict_names_irb_plot = hlp_mod.get_param(
            'plot_dict_names_irb_plot', data_dict, **params)
    plot_dicts = hlp_mod.get_param('plot_dicts', data_dict, **params)

    # Empty line plots that only contribute the EPC legend entries.
    # (Debug print() calls from the original implementation removed.)
    for label in ['rb', 'irb']:
        epc_value = hlp_mod.get_param(f'{mobjn}.{label}.EPC value',
                                      data_dict, **params)
        leg_label = ''
        if epc_value is not None:
            epc_stderr = hlp_mod.get_param(f'{mobjn}.{label}.EPC stderr',
                                           data_dict, **params)
            leg_label = f'{label.upper()}:\t' \
                        f'{100*epc_value:.2f}%$\\pm${100*epc_stderr:.2f}% EPC'
        plot_dicts_updated[f'legend_data_IRB_{label}'] = {
            'fig_id': figure_name,
            'plotfn': 'plot_line',
            'xvals': [],
            'yvals': [],
            'color': 'C0' if label == 'rb' else 'C1',
            'marker': 'o',
            'linestyle': '-',
            'setlabel': leg_label,
        }

    # Legend stub for the individual-seed scatter, if such a plot exists.
    pd_plot_type = [pdn for pdn in plot_dict_names_irb_plot['rb']
                    if 'seeds' in pdn]
    if pd_plot_type:
        pd_name = plot_dict_names_irb_plot['rb'][pd_plot_type[0]]
        plot_dicts_updated['legend_seeds_IRB'] = \
            deepcopy(plot_dicts[f'{pd_name}'])
        plot_dicts_updated['legend_seeds_IRB'].update({
            'xvals': [], 'yvals': [], 'yerr': None,
            'setlabel': 'all seeds'})

    # Textbox with the interleaved-gate (CZ) error, if it was computed.
    cz_err = hlp_mod.get_param('cz_err_value', data_dict, **params)
    if cz_err is None:
        cz_err = hlp_mod.get_param(
            'correlation_object.average_gate_error_CZ value', data_dict)
    cz_err_stderr = hlp_mod.get_param('cz_err_stderr', data_dict, **params)
    if cz_err_stderr is None:
        cz_err_stderr = hlp_mod.get_param(
            'correlation_object.average_gate_error_CZ stderr', data_dict)
    if cz_err is not None:
        textstr = \
            f'Gate error:\n{100*cz_err:.2f}%$\\pm${100*cz_err_stderr:.2f}%'
        plot_dicts_updated['text_msg_IRB'] = {
            'fig_id': figure_name,
            'plotfn': 'plot_text',
            'ypos': 0.05,
            'xpos': 0.4,
            'horizontalalignment': 'left',
            'verticalalignment': 'bottom',
            'box_props': None,
            'text_string': textstr}

    # Re-target copies of the existing RB/IRB plot dicts onto this figure.
    for label in ['rb', 'irb']:
        for plot_type in list(plot_dict_names_irb_plot[label])[::-1]:
            pd_name = plot_dict_names_irb_plot[label][plot_type]
            plot_dicts_updated[f'{pd_name} IRB'] = deepcopy(plot_dicts[pd_name])
            updated_vals = {'fig_id': figure_name,
                            'color': 'C0' if label == 'rb' else 'C1',
                            'setlabel': '', 'legend_ncol': 1}
            plot_dicts_updated[f'{pd_name} IRB'].update(updated_vals)

    # Triple-height figure and legend placement set on the last plot dict.
    plotsize = plot_mod.get_default_plot_params(
        set_params=False, return_full_rc_params=True)['figure.figsize']
    plotsize = (plotsize[0], 3*plotsize[1])
    last_pd = plot_dicts_updated[list(plot_dicts_updated)[-1]]
    last_pd.update({'legend_bbox_to_anchor': (0.35, 0.08),
                    'legend_ncol': 1,
                    'legend_pos': 'lower left',
                    'plotsize': plotsize})
    hlp_mod.add_param('plot_dicts', plot_dicts_updated, data_dict,
                      add_param_method='update')
    if do_plotting:
        # NOTE(review): keys_in passes the pre-existing plot dict names,
        # not the updated ones — confirm this is intended.
        plot_mod.plot(data_dict, keys_in=list(plot_dicts), **params)
def get_rb_leakage_ibm_textstr(data_dict, fit_res=None, **params):
    """Return the IBM-style leakage textbox string.

    Reads the depolarization parameter, leakage and seepage (value and
    stderr) from ``data_dict`` under ``keys_out_container`` and formats
    them as a LaTeX multi-line string.

    :param data_dict: OrderedDict with the stored RB analysis results.
    :param fit_res: unused; accepted for call-signature compatibility
        with the other textstr builders.
    :param params: passed to the hlp_mod getters; may contain
        'keys_out_container' and 'msmt_type'.
    :return: LaTeX-formatted multi-line string.
    :raises: whatever hlp_mod.get_param raises when a value is missing
        (raise_error=True for the three values).
    """
    mobjn = hlp_mod.get_measurement_properties(
        data_dict, props_to_extract=['mobjn'], **params)
    msmt_type = hlp_mod.get_param('msmt_type', data_dict, **params)
    keys_out_container = hlp_mod.get_param('keys_out_container', data_dict,
                                           default_value='', **params)
    # Check for None before calling len(): the original order raised
    # TypeError whenever the container was explicitly None.
    if keys_out_container is None or not len(keys_out_container):
        keys_out_container = f'{mobjn}.{msmt_type}'
    textstr = 'IBM style:'
    # One line per quantity; stderr is appended only when available.
    # Backslashes are escaped ('\\pm'): '\p' was an invalid escape.
    for line_label, param_name in [
            ('p', 'depolarization parameter'),
            ('L', 'IBM-style leakage'),
            ('S', 'IBM-style seepage')]:
        value = hlp_mod.get_param(
            f'{keys_out_container}.{param_name} value', data_dict,
            raise_error=True)
        textstr += f'\n{line_label} = {100*value:.4f}%'
        stderr = hlp_mod.get_param(
            f'{keys_out_container}.{param_name} stderr', data_dict)
        if stderr is not None:
            textstr += f'$\\pm$ {100*stderr:.3f}%'
    return textstr
def get_rb_leakage_google_textstr(fit_res, **params):
    """Return the Google-style leakage textbox string.

    :param fit_res: lmfit result whose ``params`` contains 'pu', 'pd'
        and 'p0' entries with .value and .stderr attributes.
    :param params: unused; accepted for call-signature compatibility.
    :return: LaTeX-formatted multi-line string.
    """
    # Backslashes are escaped ('\\pm'): the original used the invalid
    # escape sequence '\p', which only works by accident and emits
    # DeprecationWarnings on modern Python.
    textstr = 'Google style:'
    textstr += ('\n$p_{\\uparrow}$' +
                ' = {:.4f}% $\\pm$ {:.3f}%'.format(
                    fit_res.params['pu'].value*100,
                    fit_res.params['pu'].stderr*100) +
                '\n$p_{\\downarrow}$' +
                ' = {:.4f}% $\\pm$ {:.3f}%'.format(
                    fit_res.params['pd'].value*100,
                    fit_res.params['pd'].stderr*100) +
                '\n$p_0$' + ' = {:.2f}% $\\pm$ {:.2f}%\n'.format(
                    fit_res.params['p0'].value,
                    fit_res.params['p0'].stderr))
    return textstr
def get_rb_regular_textstr(fit_res, epc_T1=None, **params):
    """Return the standard RB textbox string.

    :param fit_res: lmfit result whose ``params`` contains
        'fidelity_per_Clifford', 'p', 'Amplitude' and 'offset' entries
        with .value and .stderr attributes.
    :param epc_T1: optional coherence-limited error per Clifford; adds
        the coh-lim line when given.
    :param params: unused; accepted for call-signature compatibility.
    :return: LaTeX-formatted multi-line string.
    """
    # Backslashes are escaped ('\\mathrm', '\\pm'): the original relied
    # on invalid escape sequences like '\m', which emit
    # DeprecationWarnings on modern Python.
    textstr = ('$r_{\\mathrm{Cl}}$' + ' = {:.4f}% $\\pm$ {:.3f}%'.format(
        (1-fit_res.params['fidelity_per_Clifford'].value)*100,
        fit_res.params['fidelity_per_Clifford'].stderr*100))
    if epc_T1 is not None:
        textstr += ('\n$r_{\\mathrm{coh-lim}}$ = ' +
                    '{:.3f}%'.format(epc_T1*100))
    textstr += ('\n' + 'p = {:.4f}% $\\pm$ {:.3f}%'.format(
        fit_res.params['p'].value*100, fit_res.params['p'].stderr*100))
    # Initial <sigma_z> (m=0): Amplitude + offset, uncertainties added
    # in quadrature.
    textstr += ('\n' + r'$\langle \sigma_z \rangle _{m=0}$ = ' +
                '{:.2f} $\\pm$ {:.2f}'.format(
                    fit_res.params['Amplitude'].value +
                    fit_res.params['offset'].value,
                    np.sqrt(fit_res.params['offset'].stderr**2 +
                            fit_res.params['Amplitude'].stderr**2)))
    return textstr
def get_cz_irb_textstr(fit_res, epc_T1=None, **params):
    """Return the CZ interleaved-RB textbox string.

    :param fit_res: lmfit result whose ``params`` contains
        'fidelity_per_Clifford', 'pu' and 'pd' entries with .value and
        .stderr attributes.
    :param epc_T1: optional coherence-limited error per Clifford; adds
        the coh-lim line when given.
    :param params: may contain 'suffix' (default 'RB'), appended to the
        LaTeX subscripts to distinguish RB from IRB entries.
    :return: LaTeX-formatted multi-line string.
    """
    suffix = params.get('suffix', 'RB')
    # Backslashes are escaped ('\\mathrm'): the original relied on
    # invalid escape sequences, which emit DeprecationWarnings.
    textstr = (f'$r_{{\\mathrm{{Cl}}, {{{suffix}}}}}$' +
               ' = {:.4f}% $\\pm$ {:.3f}%'.format(
                   (1-fit_res.params['fidelity_per_Clifford'].value)*100,
                   fit_res.params['fidelity_per_Clifford'].stderr*100))
    if epc_T1 is not None:
        textstr += ('\n$r_{\\mathrm{coh-lim}}$ = ' +
                    '{:.3f}%'.format(epc_T1*100))
    textstr += (f'\n$p_{{\\uparrow, {suffix}}}$' +
                ' = {:.4f}% $\\pm$ {:.3f}%'.format(
                    fit_res.params['pu'].value*100,
                    fit_res.params['pu'].stderr*100) +
                f'\n$p_{{\\downarrow, {suffix}}}$' +
                ' = {:.4f}% $\\pm$ {:.3f}%'.format(
                    fit_res.params['pd'].value*100,
                    fit_res.params['pd'].stderr*100))
    return textstr
def get_rb_textbox_properties(data_dict, fit_res, epc_T1=None,
                              textstr_style=(), textstr='', **params):
    """Compose the RB textbox string and its placement properties.

    Depending on the entries of ``textstr_style`` ('regular',
    'leakage_google', 'leakage_ibm', 'irb'), the corresponding text
    builders are appended to ``textstr``.

    :return: tuple (textstr, ha, hp, va, vp) — the text plus horizontal/
        vertical alignment and position for the textbox.
    :raises NotImplementedError: if the resulting string is empty.
    """
    if textstr_style:
        textstr += '\n'
    # Ordered (style name, builder) pairs; each matching style appends
    # its section to the textbox string.
    style_builders = [
        ('regular', lambda: get_rb_regular_textstr(fit_res, epc_T1,
                                                   **params)),
        ('leakage_google', lambda: get_rb_leakage_google_textstr(fit_res,
                                                                 **params)),
        ('leakage_ibm', lambda: get_rb_leakage_ibm_textstr(data_dict,
                                                           **params)),
        ('irb', lambda: get_cz_irb_textstr(fit_res, **params)),
    ]
    for style_name, build in style_builders:
        if style_name in textstr_style:
            textstr += build()
    if not textstr:
        raise NotImplementedError(f'The textstring style {textstr_style} '
                                  f'has not been implemented yet.')
    # Fixed placement: top-left, slightly outside the axes.
    va = 'top'
    vp = -0.15
    ha = 'left'
    hp = -0.12
    return textstr, ha, hp, va, vp
def irb_gate_error(data_dict, keys_container_rb, keys_container_irb, **params):
    """
    Calculates the average gate error from a set of RB-IRB measurements and
    saves it in data_dict.
    :param data_dict: OrderedDict containing the results of running rb_analysis
        node.
    :param keys_container_rb: key prefix under which the reference-RB
        depolarization parameter is stored in data_dict.
    :param keys_container_irb: key prefix under which the interleaved-RB
        depolarization parameter is stored in data_dict.
    :param params: keyword arguments:
        meas_obj_names (str): name of the measurement object
            for which to calculate average gate error.
            Should be correlation_object for a two-qubit RB.
        d (int): dimension of the Hilbert space
        interleaved_gate (str or int): the interleaved gate for which to
            calculate average gate error.

    Assumptions:
        - meas_obj_names, d, interleaved_gate must exist either in data_dict,
        metadata, or params
    """
    mobjn = hlp_mod.get_measurement_properties(
        data_dict, props_to_extract=['mobjn'], **params)
    d = hlp_mod.get_param('d', data_dict, raise_error=True, **params)
    interleaved_gate = hlp_mod.get_param(
        'interleaved_gate', data_dict, raise_error=True, **params)
    if interleaved_gate == 4368:
        # 4368 presumably identifies the CZ gate in the Clifford-group
        # indexing used upstream — TODO confirm.
        interleaved_gate = 'CZ'
    keys_out_container = hlp_mod.get_param('keys_out_container', data_dict,
                                           default_value='', **params)
    # Depolarization parameters of the reference and interleaved fits.
    prb = hlp_mod.get_param(
        f'{keys_container_rb}.depolarization parameter value', data_dict,
        raise_error=True, **params)
    prb_err = hlp_mod.get_param(
        f'{keys_container_rb}.depolarization parameter stderr', data_dict,
        raise_error=True, **params)
    pirb = hlp_mod.get_param(
        f'{keys_container_irb}.depolarization parameter value', data_dict,
        raise_error=True, **params)
    pirb_err = hlp_mod.get_param(
        f'{keys_container_irb}.depolarization parameter stderr', data_dict,
        raise_error=True, **params)
    # Check for None before calling len(): the original order raised
    # TypeError whenever the container was explicitly None.
    if keys_out_container is None or not len(keys_out_container):
        keys_out_container = f'{mobjn}.average_gate_error_{interleaved_gate}'
    if mobjn not in keys_out_container:
        keys_out_container = f'{mobjn}.{keys_out_container}'
    # Average gate error r = (d-1)/d * (1 - p_irb/p_rb).
    hlp_mod.add_param(f'{keys_out_container}.value',
                      ((d-1)/d)*(1 - pirb/prb),
                      data_dict, **params)
    # Gaussian error propagation of the ratio p_irb/p_rb.
    hlp_mod.add_param(f'{keys_out_container}.stderr',
                      ((d-1)/d)*np.sqrt((pirb_err*prb)**2 +
                                        (prb_err*pirb)**2)/(prb**2),
                      data_dict, **params)
def calc_rb_coherence_limited_fidelity(T1, T2, pulse_length, gate_decomp='HZ'):
    """Decoherence-limited RB fidelity and depolarization parameter.

    Formula from Asaad et al. (2016):
    https://www.nature.com/articles/npjqi201629

    :param T1: relaxation time
    :param T2: dephasing time (same units as T1 and pulse_length)
    :param pulse_length: duration of one physical gate
    :param gate_decomp: Clifford decomposition, 'HZ' or 'XY'
    Returns:
        F_cl (float): decoherence limited fildelity
        p (float): decoherence limited depolarization parameter
    """
    # Average number of physical gates per Clifford for each supported
    # decomposition.
    gates_per_clifford = {'HZ': 1.125, 'XY': 1.875}
    if gate_decomp not in gates_per_clifford:
        raise ValueError('Gate decomposition not recognized.')
    Np = gates_per_clifford[gate_decomp]
    F_cl = (1/6*(3 + 2*np.exp(-1*pulse_length/(T2)) +
                 np.exp(-pulse_length/T1)))**Np
    return F_cl, 2*F_cl - 1
def get_meas_obj_coh_times(data_dict, extract_T2s=True, **params):
    """Ensure the measurement object's coherence times are in data_dict.

    Checks whether T1/T2 (ge and ef transitions) and the pulse sigma
    parameters are already stored for the measurement object and, for any
    that are missing, extracts them from the instrument settings in the
    HDF5 file.

    :param data_dict: OrderedDict with analysis results and parameters.
    :param extract_T2s: if True, extract T2_star instead of T2.
    :param params: passed through to the hlp_mod getters/extractor.
    """
    mobjn = hlp_mod.get_measurement_properties(
        data_dict, props_to_extract=['mobjn'], **params)
    # Get from the hdf5 file any parameters specified in
    # params_dict and numeric_params.
    params_dict = {}
    s = 'Instrument settings.' + mobjn
    # Coherence times: '' -> ge transition, '_ef' -> ef transition.
    for trans_name in ['', '_ef']:
        if hlp_mod.get_param(f'{mobjn}.T1{trans_name}', data_dict) is None:
            params_dict[f'{mobjn}.T1{trans_name}'] = s + f'.T1{trans_name}'
        if hlp_mod.get_param(f'{mobjn}.T2{trans_name}', data_dict) is None:
            params_dict[f'{mobjn}.T2{trans_name}'] = s + (
                f'.T2_star{trans_name}' if extract_T2s else f'.T2{trans_name}')
    # Pulse-shape parameters (sigma, nr_sigma) per transition.
    for trans_name in ['ge', 'ef']:
        # NOTE(review): both operands of this `and` test the same
        # T1{trans_name} key; the second was presumably meant to check
        # T2{trans_name} — confirm before changing behavior.
        if hlp_mod.get_param(f'{mobjn}.T1{trans_name}', data_dict) is None and \
                hlp_mod.get_param(f'{mobjn}.T1{trans_name}', data_dict) is None:
            params_dict[f'{mobjn}.{trans_name}_sigma'] = \
                s + f'.{trans_name}_sigma'
            params_dict[f'{mobjn}.{trans_name}_nr_sigma'] = \
                s + f'.{trans_name}_nr_sigma'
    if len(params_dict) > 0:
        hlp_mod.get_params_from_hdf_file(data_dict, params_dict=params_dict,
                                         numeric_params=list(params_dict),
                                         **params)
def calculate_rb_confidence_intervals(
        nr_seeds, nr_cliffords, conf_level=0.68, depolariz_param=1,
        epsilon_guess=0.01, d=2):
    """Confidence-interval half-widths for RB survival probabilities.

    From Helsen et al. (2017). For each m in nr_cliffords, solves for the
    epsilon such that, with probability greater than conf_level, the true
    survival probability p_N_m for N=nr_seeds seeds lies in
    [p_N_m_measured - epsilon, p_N_m_measured + epsilon].

    :param nr_seeds: number of random seeds N.
    :param nr_cliffords: iterable of sequence lengths m.
    :param conf_level: desired confidence level.
    :param depolariz_param: fitted depolarization parameter.
    :param epsilon_guess: initial guess for the root finder.
    :param d: Hilbert-space dimension.
    :return: numpy array of epsilon values, one per entry of nr_cliffords.
    """
    delta = 1-conf_level
    infidelity = (d-1)*(1-depolariz_param)/d
    epsilon = []
    for n_cl in nr_cliffords:
        # Zero-length sequences carry no statistical uncertainty.
        if n_cl == 0:
            epsilon.append(0)
            continue
        # Variance bound V (eta prefactor of Helsen et al. 2017); the
        # tighter of the short- and long-sequence bounds is used.
        if d == 2:
            V = min((13*n_cl*infidelity**2)/2, 7*infidelity/2)
        else:
            V_short_n_cl = \
                (0.25*(-2+d**2)/((d-1)**2)) * (infidelity**2) + \
                (0.5*n_cl*(n_cl-1)*(d**2)/((d-1)**2)) * (infidelity**2)
            V1 = 0.25*((-2+d**2)/((d-1)**2))*n_cl*(infidelity**2) * \
                depolariz_param**(n_cl-1) + ((d/(d-1))**2) * \
                (infidelity**2)*(
                        (1+(n_cl-1)*(depolariz_param**(2*n_cl)) -
                         n_cl*(depolariz_param**(2*n_cl-2))) /
                        (1-depolariz_param**2)**2)
            V = min(V1, V_short_n_cl)

        # Root of H gives the epsilon for this sequence length.
        def H(eps, V=V):
            return (1/(1-eps))**((1-eps)/(V+1)) * \
                   (V/(V+eps))**((V+eps)/(V+1)) - \
                   (delta/2)**(1/nr_seeds)

        epsilon.append(optimize.fsolve(H, epsilon_guess)[0])
    return np.asarray(epsilon)
|
|
from core.clients import ProximityClient
from core.models import Interface, Method, Trigger, MethodParameter
from django.conf import settings
from django.test.testcases import TestCase
from mock import patch
import json
# Patch the MirriClient used by forms with a mock object in every test
# Patch the MirriClient used by forms with a mock object in every test
@patch('core.forms.MirriClient', spec_set=True)
class InterfaceFileAPITestCase(TestCase):
    """Tests for the interface-file upload API endpoint.

    Every ``test_*`` method receives the patched MirriClient class as an
    extra argument, injected by the class-level @patch decorator (helper
    methods are not patched because they do not start with 'test').
    """

    fixtures = ['interface_file_api_testdata']

    def setUp(self):
        # Point the proximity client at the test server and start clean.
        self.old_setting = settings.PROXIMITY_SERVER['default']
        settings.PROXIMITY_SERVER['default'] = settings.PROXIMITY_SERVER['test']
        self.proximity_client = ProximityClient()
        self.proximity_client.flush()

    def tearDown(self):
        settings.PROXIMITY_SERVER['default'] = self.old_setting

    def _post_interface_file(self, path):
        """POST the file at *path* to the API; return (response, contents).

        Uses a context manager so the file handle is closed even when the
        POST raises (the original code leaked handles on failure).
        """
        with open(path) as f:
            response = self.client.post('/api/interface_file/', {'file': f})
            f.seek(0)
            contents = f.read()
        return response, contents

    def _assert_counts(self, interfaces, methods, parameters, triggers):
        """Assert the row counts of the four interface-related tables."""
        self.assertEqual(Interface.objects.count(), interfaces)
        self.assertEqual(Method.objects.count(), methods)
        self.assertEqual(MethodParameter.objects.count(), parameters)
        self.assertEqual(Trigger.objects.count(), triggers)

    def _assert_uploaded_to_mirri(self, mirri_mock, name, code):
        """Assert exactly one Mirri upload with the given name and code."""
        self.assertEqual(mirri_mock.upload_interface_file.call_count, 1)
        self.assertEqual(len(mirri_mock.upload_interface_file.call_args[0]), 2)
        self.assertEqual(mirri_mock.upload_interface_file.call_args[0][0], name)
        mirri_mock.upload_interface_file.call_args[0][1].seek(0)
        self.assertEqual(mirri_mock.upload_interface_file.call_args[0][1].read(), code)

    def _assert_rejected(self, mirri_mock, path, message):
        """Upload *path*; expect HTTP 400 with *message* and no Mirri call."""
        mirri_mock.reset_mock()
        response, _ = self._post_interface_file(path)
        self.assertEqual(response.status_code, 400)
        content = json.loads(response.content)
        self.assertEqual(content['file'], [message])
        self.assertEqual(mirri_mock.upload_interface_file.call_count, 0)

    def test_good_upload(self, MirriMockClass):
        # Initial database contents from the fixture.
        self._assert_counts(1, 2, 0, 0)
        mirri_mock = MirriMockClass.return_value

        # TalkingDevice
        response, interface_code = self._post_interface_file(
            'core/fixtures/talkingDevices/talkingDevice.py')
        self.assertEqual(response.status_code, 201)
        self.assertTrue(Interface.objects.filter(name='TalkingDevice').exists())
        self.assertTrue(Method.objects.filter(interface__name='TalkingDevice', name='isWilling').exists())
        self.assertTrue(Method.objects.filter(interface__name='TalkingDevice', name='isSilent').exists())
        self.assertTrue(Trigger.objects.filter(method__interface__name='TalkingDevice', method__name='isSilent').exists())
        # Check that the file was uploaded to Mirri
        self._assert_uploaded_to_mirri(mirri_mock, 'TalkingDevice',
                                       interface_code)

        # CalendarSource
        mirri_mock.reset_mock()
        response, interface_code = self._post_interface_file(
            'core/fixtures/calendarReminders/calendarSource.py')
        self.assertEqual(response.status_code, 201)
        self.assertTrue(Interface.objects.filter(name='CalendarSource').exists())
        self.assertTrue(Method.objects.filter(interface__name='CalendarSource', name='eventApproaching').exists())
        self.assertTrue(MethodParameter.objects.filter(method__interface__name='CalendarSource', method__name='eventApproaching', name='eid').exists())
        self.assertTrue(Trigger.objects.filter(method__interface__name='CalendarSource', method__name='eventApproaching').exists())
        # Check that the file was uploaded to Mirri
        self._assert_uploaded_to_mirri(mirri_mock, 'CalendarSource',
                                       interface_code)

        # Final database contents: both interfaces were added.
        self._assert_counts(3, 5, 1, 2)

    def test_bad_upload(self, MirriMockClass):
        # Initial database contents from the fixture.
        self._assert_counts(1, 2, 0, 0)
        mirri_mock = MirriMockClass.return_value
        invalid = 'core/fixtures/talkingDevices/invalid_files/'

        # A syntax error in the interface file
        self._assert_rejected(
            mirri_mock, invalid + 'talkingDevice_syntax_error.py',
            'File talkingDevice_syntax_error.py contains syntax errors (line 15, col 22): class TalkingDevice()\n')
        # The interface file does not contain any interfaces
        self._assert_rejected(
            mirri_mock, invalid + 'talkingDevice_no_interface.py',
            'Interface parse error in file talkingDevice_no_interface.py: Interface decorator @deviceInterface is missing.')
        # The interface file does not contain any precondition methods
        self._assert_rejected(
            mirri_mock, invalid + 'talkingDevice_no_precondition_methods.py',
            'Interface parse error in file talkingDevice_no_precondition_methods.py: Interface TalkingDevice does not have any precondition methods defined with decorator @precondition.')
        # The interface file does not contain any triggers
        self._assert_rejected(
            mirri_mock, invalid + 'talkingDevice_no_triggers.py',
            'Interface parse error in file talkingDevice_no_triggers.py: No trigger method class with base class TriggeringEvent has been defined.')
        # A trigger in the interface file does not match any of the
        # interface precondition methods
        self._assert_rejected(
            mirri_mock, invalid + 'talkingDevice_no_trigger_match.py',
            'Interface parse error in file talkingDevice_no_trigger_match.py: The name of the trigger method class IsFree does not match any of the precondition methods defined in interface TalkingDevice.')
        # The interface file contains multiple interfaces
        self._assert_rejected(
            mirri_mock, invalid + 'talkingDevice_multiple_interfaces.py',
            'Interface parse error in file talkingDevice_multiple_interfaces.py: Multiple interface classes with decorator @deviceInterface have been defined.')
        # The interface file contains a duplicate precondition method
        self._assert_rejected(
            mirri_mock, invalid + 'talkingDevice_duplicate_precondition_method.py',
            'Interface parse error in file talkingDevice_duplicate_precondition_method.py: Duplicate precondition method isWilling.')
        # The interface file contains a duplicate trigger
        self._assert_rejected(
            mirri_mock, invalid + 'talkingDevice_duplicate_trigger.py',
            'Interface parse error in file talkingDevice_duplicate_trigger.py: Duplicate trigger method class IsSilent.')
        # The interface file contains a duplicate method parameter
        self._assert_rejected(
            mirri_mock,
            'core/fixtures/calendarReminders/invalid_files/calendarSource_duplicate_method_parameter.py',
            'Interface parse error in file calendarSource_duplicate_method_parameter.py: Duplicate parameter eid for precondition method eventApproaching in interface CalendarSource.')
        # The interface in the interface file already exists
        self._assert_rejected(
            mirri_mock, invalid + 'talkingDevice_interface_exists.py',
            'Interface TalkingDeviceTest already exists.')

        # Final database contents: nothing was added.
        self._assert_counts(1, 2, 0, 0)
|
|
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Classify protein backbone structure according to Kolodny et al's fragment
libraries.
It can be regarded as a form of objective secondary structure classification.
Only fragments of length 5 or 7 are supported (ie. there is a 'central'
residue).
Full reference:
Kolodny R, Koehl P, Guibas L, Levitt M.
Small libraries of protein fragments model native protein structures accurately.
J Mol Biol. 2002 323(2):297-307.
The definition files of the fragments can be obtained from:
U{http://csb.stanford.edu/~rachel/fragments/}
You need these files to use this module.
The following example uses the library with 10 fragments of length 5.
The library files can be found in directory 'fragment_data'.
>>> model = structure[0]
>>> fm = FragmentMapper(model, lsize=10, flength=5, dir="fragment_data")
>>> fragment = fm[residue]
"""
from __future__ import print_function
import numpy
from Bio.SVDSuperimposer import SVDSuperimposer
from Bio.PDB import Selection
from Bio.PDB.PDBExceptions import PDBException
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB.Polypeptide import PPBuilder
__docformat__ = "restructuredtext en"
# fragment file (lib_SIZE_z_LENGTH.txt)
# SIZE=number of fragments
# LENGTH=length of fragment (4,5,6,7)
_FRAGMENT_FILE = "lib_%s_z_%s.txt"
def _read_fragments(size, length, dir="."):
    """
    Read a fragment spec file (available from
    U{http://csb.stanford.edu/rachel/fragments/}
    and return a list of Fragment objects.

    @param size: number of fragments in the library
    @type size: int

    @param length: length of the fragments
    @type length: int

    @param dir: directory where the fragment spec files can be found
    @type dir: string

    @return: the fragments, in file order (id = rank in the file)
    @rtype: [L{Fragment}, L{Fragment}, ...]
    """
    filename = (dir + "/" + _FRAGMENT_FILE) % (size, length)
    flist = []
    # ID of fragment = rank in the spec file
    fid = 0
    with open(filename, "r") as fp:
        # Iterate the file lazily instead of materializing it with
        # readlines().
        for line in fp:
            # skip comment and blank lines
            if line[0] == "*" or line[0] == "\n":
                continue
            fields = line.split()
            if fields[1] == "------":
                # Start of a new fragment definition
                f = Fragment(length, fid)
                flist.append(f)
                # increase fragment id (rank)
                fid += 1
                continue
            # Coordinate line: add a CA position to the current fragment.
            # XXX = dummy residue name
            coord = numpy.array([float(x) for x in fields[0:3]])
            f.add_residue("XXX", coord)
    return flist
class Fragment(object):
    """
    Represent a polypeptide C-alpha fragment.

    A fragment stores the residue names and CA coordinates of a stretch
    of consecutive residues, plus an integer id (its rank in the
    fragment library).
    """

    def __init__(self, length, fid):
        """
        @param length: length of the fragment
        @type length: int

        @param fid: id for the fragment
        @type fid: int
        """
        # number of residues the fragment will hold
        self.length = length
        # number of residues added so far
        self.counter = 0
        self.resname_list = []
        # CA coordinates, one row per residue (double precision)
        self.coords_ca = numpy.zeros((length, 3), "d")
        self.fid = fid

    def get_resname_list(self):
        """
        @return: the residue names
        @rtype: [string, string,...]
        """
        return self.resname_list

    def get_id(self):
        """
        @return: id for the fragment
        @rtype: int
        """
        return self.fid

    def get_coords(self):
        """
        @return: the CA coords in the fragment
        @rtype: Numeric (Nx3) array
        """
        return self.coords_ca

    def add_residue(self, resname, ca_coord):
        """
        Append one residue's name and CA position to the fragment.

        @param resname: residue name (eg. GLY).
        @type resname: string

        @param ca_coord: the c-alpha coorinates of the residues
        @type ca_coord: Numeric array with length 3

        @raise PDBException: when the fragment is already full
        """
        if self.counter >= self.length:
            raise PDBException("Fragment boundary exceeded.")
        self.resname_list.append(resname)
        self.coords_ca[self.counter] = ca_coord
        self.counter += 1

    def __len__(self):
        """
        @return: length of fragment
        @rtype: int
        """
        return self.length

    def __sub__(self, other):
        """
        Return rmsd between two fragments.

        Example:
            >>> rmsd = fragment1 - fragment2

        @return: rmsd between fragments
        @rtype: float
        """
        superimposer = SVDSuperimposer()
        superimposer.set(self.coords_ca, other.coords_ca)
        superimposer.run()
        return superimposer.get_rms()

    def __repr__(self):
        """
        Returns <Fragment length=L id=ID> where L=length of fragment
        and ID the identifier (rank in the library).
        """
        return "<Fragment length=%i id=%i>" % (self.length, self.fid)
def _make_fragment_list(pp, length):
    """
    Dice up a peptide in overlapping fragments of length "length".

    @param pp: a list of residues (part of one peptide)
    @type pp: [L{Residue}, L{Residue}, ...]

    @param length: fragment length
    @type length: int

    @return: one fragment per window position (id=-1, ie. unclassified)
    @rtype: [L{Fragment}, L{Fragment}, ...]

    @raise PDBException: CHAINBREAK when a residue lacks a well-ordered
        CA atom
    """
    frag_list = []
    for start in range(0, len(pp) - length + 1):
        frag = Fragment(length, -1)
        for residue in pp[start:start + length]:
            resname = residue.get_resname()
            # A missing or disordered CA means the chain is broken here.
            if not residue.has_id("CA"):
                raise PDBException("CHAINBREAK")
            ca = residue["CA"]
            if ca.is_disordered():
                raise PDBException("CHAINBREAK")
            frag.add_residue(resname, ca.get_coord())
        frag_list.append(frag)
    return frag_list
def _map_fragment_list(flist, reflist):
"""
Map all frgaments in flist to the closest
(in RMSD) fragment in reflist.
Returns a list of reflist indices.
@param flist: list of protein fragments
@type flist: [L{Fragment}, L{Fragment}, ...]
@param reflist: list of reference (ie. library) fragments
@type reflist: [L{Fragment}, L{Fragment}, ...]
"""
mapped = []
for f in flist:
rank = []
for i in range(0, len(reflist)):
rf = reflist[i]
rms = f - rf
rank.append((rms, rf))
rank.sort()
fragment = rank[0][1]
mapped.append(fragment)
return mapped
class FragmentMapper(object):
    """
    Map polypeptides in a model to lists of representative fragments.
    """

    def __init__(self, model, lsize=20, flength=5, fdir="."):
        """
        @param model: the model that will be mapped
        @type model: L{Model}

        @param lsize: number of fragments in the library
        @type lsize: int

        @param flength: length of fragments in the library (5 or 7)
        @type flength: int

        @param fdir: directory where the definition files are
            found (default=".")
        @type fdir: string
        """
        if flength == 5:
            self.edge = 2
        elif flength == 7:
            self.edge = 3
        else:
            raise PDBException("Fragment length should be 5 or 7.")
        self.flength = flength
        self.lsize = lsize
        self.reflist = _read_fragments(lsize, flength, fdir)
        self.model = model
        # Maps each mappable Residue to its classified library Fragment.
        self.fd = self._map(self.model)

    def _map(self, model):
        """Build the residue -> library-fragment mapping for *model*.

        @param model: the model that will be mapped
        @type model: L{Model}

        @return: mapping of residues to classified fragments
        @rtype: {L{Residue}: L{Fragment}}
        """
        ppb = PPBuilder()
        ppl = ppb.build_peptides(model)
        fd = {}
        for pp in ppl:
            try:
                # make fragments
                flist = _make_fragment_list(pp, self.flength)
                # classify fragments
                mflist = _map_fragment_list(flist, self.reflist)
                for i in range(0, len(pp)):
                    res = pp[i]
                    if i < self.edge:
                        # start residues: no centered fragment exists
                        continue
                    elif i >= (len(pp) - self.edge):
                        # end residues: no centered fragment exists
                        continue
                    else:
                        # fragment centered at residue i
                        index = i - self.edge
                        assert (index >= 0)
                        fd[res] = mflist[index]
            except PDBException as why:
                # BUG FIX: the original compared the exception object
                # itself to the string 'CHAINBREAK', which is never true
                # in Python 3, so chain breaks were re-raised instead of
                # being skipped. Compare the exception message instead.
                if str(why) == 'CHAINBREAK':
                    # Funny polypeptide - skip
                    pass
                else:
                    # Re-raise unchanged to preserve the traceback
                    # (previously the exception was wrapped in a new
                    # PDBException, losing the original context).
                    raise
        return fd

    def has_key(self, res):
        """(Obsolete)

        @type res: L{Residue}
        """
        import warnings
        from Bio import BiopythonDeprecationWarning
        warnings.warn("has_key is deprecated; use 'res in object' instead", BiopythonDeprecationWarning)
        return (res in self)

    def __contains__(self, res):
        """True if the given residue is in any of the mapped fragments.

        @type res: L{Residue}
        """
        return (res in self.fd)

    def __getitem__(self, res):
        """
        @type res: L{Residue}

        @return: fragment classification
        @rtype: L{Fragment}
        """
        return self.fd[res]
if __name__ == "__main__":
    import sys
    # Demo: map every residue of the first model in the PDB file given
    # on the command line to a library fragment (10 fragments of length
    # 5, definition files expected in ./levitt_data).
    p = PDBParser()
    s = p.get_structure("X", sys.argv[1])
    m = s[0]
    fm = FragmentMapper(m, 10, 5, "levitt_data")
    # Print each residue followed by its fragment classification (edge
    # residues have no centered fragment and are skipped).
    for r in Selection.unfold_entities(m, "R"):
        print("%s:" % r)
        if r in fm:
            print(fm[r])
|
|
from unittest import mock
from django.contrib.contenttypes.models import ContentType
from django.db.models import Count, Q
from django.test import TestCase
from wagtail.core.models import Locale, Page, PageViewRestriction, Site
from wagtail.core.signals import page_unpublished
from wagtail.search.query import MATCH_ALL
from wagtail.tests.testapp.models import EventPage, SimplePage, SingleEventPage, StreamPage
class TestPageQuerySet(TestCase):
    """Exercise the custom filter methods of ``PageQuerySet``.

    Covers live-state filters, menu visibility, tree-relationship filters
    (descendant/ancestor/child/parent/sibling), page-type filters,
    public/private visibility, queryset deletion and translation lookups,
    all against the page tree defined in the ``test.json`` fixture
    (rooted at ``/home/`` with an events section underneath).
    """
    fixtures = ['test.json']
    def test_live(self):
        pages = Page.objects.live()
        # All pages must be live
        for page in pages:
            self.assertTrue(page.live)
        # Check that the homepage is in the results
        homepage = Page.objects.get(url_path='/home/')
        self.assertTrue(pages.filter(id=homepage.id).exists())
    def test_not_live(self):
        pages = Page.objects.not_live()
        # All pages must not be live
        for page in pages:
            self.assertFalse(page.live)
        # Check that "someone elses event" is in the results
        event = Page.objects.get(url_path='/home/events/someone-elses-event/')
        self.assertTrue(pages.filter(id=event.id).exists())
    def test_in_menu(self):
        pages = Page.objects.in_menu()
        # All pages must be in the menus
        for page in pages:
            self.assertTrue(page.show_in_menus)
        # Check that the events index is in the results
        events_index = Page.objects.get(url_path='/home/events/')
        self.assertTrue(pages.filter(id=events_index.id).exists())
    def test_not_in_menu(self):
        pages = Page.objects.not_in_menu()
        # All pages must not be in menus
        for page in pages:
            self.assertFalse(page.show_in_menus)
        # Check that the root page is in the results
        self.assertTrue(pages.filter(id=1).exists())
    def test_page(self):
        homepage = Page.objects.get(url_path='/home/')
        pages = Page.objects.page(homepage)
        # Should only select the homepage
        self.assertEqual(pages.count(), 1)
        self.assertEqual(pages.first(), homepage)
    def test_not_page(self):
        homepage = Page.objects.get(url_path='/home/')
        pages = Page.objects.not_page(homepage)
        # Should select everything except for the homepage
        self.assertEqual(pages.count(), Page.objects.all().count() - 1)
        for page in pages:
            self.assertNotEqual(page, homepage)
    def test_descendant_of(self):
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.descendant_of(events_index)
        # Check that all pages descend from events index
        for page in pages:
            self.assertTrue(page.get_ancestors().filter(id=events_index.id).exists())
    def test_descendant_of_inclusive(self):
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.descendant_of(events_index, inclusive=True)
        # Check that all pages descend from events index, includes event index
        for page in pages:
            self.assertTrue(page == events_index or page.get_ancestors().filter(id=events_index.id).exists())
        # Check that event index was included
        self.assertTrue(pages.filter(id=events_index.id).exists())
    def test_not_descendant_of(self):
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.not_descendant_of(events_index)
        # Check that no pages descend from events_index
        for page in pages:
            self.assertFalse(page.get_ancestors().filter(id=events_index.id).exists())
        # As this is not inclusive, events index should be in the results
        self.assertTrue(pages.filter(id=events_index.id).exists())
    def test_not_descendant_of_inclusive(self):
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.not_descendant_of(events_index, inclusive=True)
        # Check that all pages descend from homepage but not events index
        for page in pages:
            self.assertFalse(page.get_ancestors().filter(id=events_index.id).exists())
        # As this is inclusive, events index should not be in the results
        self.assertFalse(pages.filter(id=events_index.id).exists())
    def test_child_of(self):
        homepage = Page.objects.get(url_path='/home/')
        pages = Page.objects.child_of(homepage)
        # Check that all pages are children of homepage
        for page in pages:
            self.assertEqual(page.get_parent(), homepage)
    def test_not_child_of(self):
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.not_child_of(events_index)
        # Check that all pages are not children of events_index
        for page in pages:
            self.assertNotEqual(page.get_parent(), events_index)
    def test_ancestor_of(self):
        root_page = Page.objects.get(id=1)
        homepage = Page.objects.get(url_path='/home/')
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.ancestor_of(events_index)
        # Ancestors are returned in tree order: root first, then homepage
        self.assertEqual(pages.count(), 2)
        self.assertEqual(pages[0], root_page)
        self.assertEqual(pages[1], homepage)
    def test_ancestor_of_inclusive(self):
        root_page = Page.objects.get(id=1)
        homepage = Page.objects.get(url_path='/home/')
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.ancestor_of(events_index, inclusive=True)
        # Inclusive: the page itself is appended after its ancestors
        self.assertEqual(pages.count(), 3)
        self.assertEqual(pages[0], root_page)
        self.assertEqual(pages[1], homepage)
        self.assertEqual(pages[2], events_index)
    def test_not_ancestor_of(self):
        root_page = Page.objects.get(id=1)
        homepage = Page.objects.get(url_path='/home/')
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.not_ancestor_of(events_index)
        # Test that none of the ancestors are in pages
        for page in pages:
            self.assertNotEqual(page, root_page)
            self.assertNotEqual(page, homepage)
        # Test that events index is in pages
        self.assertTrue(pages.filter(id=events_index.id).exists())
    def test_not_ancestor_of_inclusive(self):
        root_page = Page.objects.get(id=1)
        homepage = Page.objects.get(url_path='/home/')
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.not_ancestor_of(events_index, inclusive=True)
        # Test that none of the ancestors or the events_index are in pages
        for page in pages:
            self.assertNotEqual(page, root_page)
            self.assertNotEqual(page, homepage)
            self.assertNotEqual(page, events_index)
    def test_parent_of(self):
        homepage = Page.objects.get(url_path='/home/')
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.parent_of(events_index)
        # Pages must only contain homepage
        self.assertEqual(pages.count(), 1)
        self.assertEqual(pages[0], homepage)
    def test_not_parent_of(self):
        homepage = Page.objects.get(url_path='/home/')
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.not_parent_of(events_index)
        # Pages must not contain homepage
        for page in pages:
            self.assertNotEqual(page, homepage)
        # Test that events index is in pages
        self.assertTrue(pages.filter(id=events_index.id).exists())
    def test_sibling_of_default(self):
        """
        sibling_of should default to an inclusive definition of sibling
        if 'inclusive' flag not passed
        """
        events_index = Page.objects.get(url_path='/home/events/')
        event = Page.objects.get(url_path='/home/events/christmas/')
        pages = Page.objects.sibling_of(event)
        # Check that all pages are children of events_index
        for page in pages:
            self.assertEqual(page.get_parent(), events_index)
        # Check that the event is included
        self.assertTrue(pages.filter(id=event.id).exists())
    def test_sibling_of_exclusive(self):
        events_index = Page.objects.get(url_path='/home/events/')
        event = Page.objects.get(url_path='/home/events/christmas/')
        pages = Page.objects.sibling_of(event, inclusive=False)
        # Check that all pages are children of events_index
        for page in pages:
            self.assertEqual(page.get_parent(), events_index)
        # Check that the event is not included
        self.assertFalse(pages.filter(id=event.id).exists())
    def test_sibling_of_inclusive(self):
        events_index = Page.objects.get(url_path='/home/events/')
        event = Page.objects.get(url_path='/home/events/christmas/')
        pages = Page.objects.sibling_of(event, inclusive=True)
        # Check that all pages are children of events_index
        for page in pages:
            self.assertEqual(page.get_parent(), events_index)
        # Check that the event is included
        self.assertTrue(pages.filter(id=event.id).exists())
    def test_not_sibling_of_default(self):
        """
        not_sibling_of should default to an inclusive definition of sibling -
        i.e. eliminate self from the results as well -
        if 'inclusive' flag not passed
        """
        events_index = Page.objects.get(url_path='/home/events/')
        event = Page.objects.get(url_path='/home/events/christmas/')
        pages = Page.objects.not_sibling_of(event)
        # Check that all pages are not children of events_index
        for page in pages:
            self.assertNotEqual(page.get_parent(), events_index)
        # Check that the event is not included
        self.assertFalse(pages.filter(id=event.id).exists())
        # Test that events index is in pages
        self.assertTrue(pages.filter(id=events_index.id).exists())
    def test_not_sibling_of_exclusive(self):
        events_index = Page.objects.get(url_path='/home/events/')
        event = Page.objects.get(url_path='/home/events/christmas/')
        pages = Page.objects.not_sibling_of(event, inclusive=False)
        # Check that all pages are not children of events_index
        for page in pages:
            if page != event:
                self.assertNotEqual(page.get_parent(), events_index)
        # Check that the event is included
        self.assertTrue(pages.filter(id=event.id).exists())
        # Test that events index is in pages
        self.assertTrue(pages.filter(id=events_index.id).exists())
    def test_not_sibling_of_inclusive(self):
        events_index = Page.objects.get(url_path='/home/events/')
        event = Page.objects.get(url_path='/home/events/christmas/')
        pages = Page.objects.not_sibling_of(event, inclusive=True)
        # Check that all pages are not children of events_index
        for page in pages:
            self.assertNotEqual(page.get_parent(), events_index)
        # Check that the event is not included
        self.assertFalse(pages.filter(id=event.id).exists())
        # Test that events index is in pages
        self.assertTrue(pages.filter(id=events_index.id).exists())
    def test_type(self):
        pages = Page.objects.type(EventPage)
        # Check that all objects are EventPages
        for page in pages:
            self.assertIsInstance(page.specific, EventPage)
        # Check that "someone elses event" is in the results
        event = Page.objects.get(url_path='/home/events/someone-elses-event/')
        self.assertIn(event, pages)
        # Check that "Saint Patrick" (an instance of SingleEventPage, a subclass of EventPage)
        # is in the results
        event = Page.objects.get(url_path='/home/events/saint-patrick/')
        self.assertIn(event, pages)
    def test_type_with_multiple_models(self):
        pages = Page.objects.type(EventPage, SimplePage)
        # Check that all objects are EventPages or SimplePages
        for page in pages:
            self.assertTrue(
                isinstance(page.specific, (EventPage, SimplePage))
            )
        # Check that "someone elses event" is in the results
        event = Page.objects.get(url_path='/home/events/someone-elses-event/')
        self.assertIn(event, pages)
        # Check that "Saint Patrick" (an instance of SingleEventPage, a subclass of EventPage)
        # is in the results
        event = Page.objects.get(url_path='/home/events/saint-patrick/')
        self.assertIn(event, pages)
        # Check that "About us" (an instance of SimplePage) is in the results
        about_us = Page.objects.get(url_path='/home/about-us/')
        self.assertIn(about_us, pages)
    def test_not_type(self):
        pages = Page.objects.not_type(EventPage)
        # Check that no objects are EventPages
        for page in pages:
            self.assertNotIsInstance(page.specific, EventPage)
        # Check that "About us" is in the results
        about_us = Page.objects.get(url_path='/home/about-us/')
        self.assertIn(about_us, pages)
        # Check that the homepage is in the results
        homepage = Page.objects.get(url_path='/home/')
        self.assertIn(homepage, pages)
    def test_not_type_with_multiple_models(self):
        pages = Page.objects.not_type(EventPage, SimplePage)
        # Check that no objects are EventPages or SimplePages
        for page in pages:
            self.assertFalse(
                isinstance(page.specific, (EventPage, SimplePage))
            )
        # Check that "About us" is NOT in the results
        about_us = Page.objects.get(url_path='/home/about-us/')
        self.assertNotIn(about_us, pages)
        # Check that the homepage IS in the results
        homepage = Page.objects.get(url_path='/home/')
        self.assertIn(homepage, pages)
    def test_exact_type(self):
        pages = Page.objects.exact_type(EventPage)
        # Check that all objects are EventPages (and not a subclass)
        for page in pages:
            self.assertIs(page.specific_class, EventPage)
        # Check that "someone elses event" is in the results
        event = Page.objects.get(url_path='/home/events/someone-elses-event/')
        self.assertIn(event, pages)
        # Check that "Saint Patrick" (an instance of SingleEventPage, a subclass of EventPage)
        # is NOT in the results
        single_event = Page.objects.get(url_path='/home/events/saint-patrick/')
        self.assertNotIn(single_event, pages)
    def test_exact_type_with_multiple_models(self):
        pages = Page.objects.exact_type(EventPage, Page)
        # Check that all objects are EventPages or Pages (and not a subclass)
        for page in pages:
            self.assertIn(page.specific_class, (EventPage, Page))
        # Check that "someone elses event" is in the results
        event = Page.objects.get(url_path='/home/events/someone-elses-event/')
        self.assertIn(event, pages)
        # Check that "Saint Patrick" (an instance of SingleEventPage, a subclass of EventPage
        # and Page) is NOT in the results
        single_event = Page.objects.get(url_path='/home/events/saint-patrick/')
        self.assertNotIn(single_event, pages)
        # Check that the homepage (a generic Page only) is in the results
        homepage = Page.objects.get(url_path='/home/')
        self.assertIn(homepage, pages)
        # Check that "About us" (an instance of SimplePage, a subclass of Page)
        # is NOT in the results
        about_us = Page.objects.get(url_path='/home/about-us/')
        self.assertNotIn(about_us, pages)
    def test_not_exact_type(self):
        pages = Page.objects.not_exact_type(EventPage)
        # Check that no objects are EventPages
        for page in pages:
            self.assertIsNot(page.specific_class, EventPage)
        # Check that the homepage is in the results
        homepage = Page.objects.get(url_path='/home/')
        self.assertIn(homepage, pages)
        # Check that "Saint Patrick" (an instance of SingleEventPage, a subclass of EventPage)
        # is in the results
        event = Page.objects.get(url_path='/home/events/saint-patrick/')
        self.assertIn(event, pages)
    def test_not_exact_type_with_multiple_models(self):
        pages = Page.objects.not_exact_type(EventPage, Page)
        # Check that no objects are EventPages or generic Pages
        for page in pages:
            self.assertNotIn(page.specific_class, (EventPage, Page))
        # Check that "Saint Patrick" (an instance of SingleEventPage, a subclass of EventPage)
        # is in the results
        event = Page.objects.get(url_path='/home/events/saint-patrick/')
        self.assertIn(event, pages)
        # Check that "About us" (an instance of SimplePage, a subclass of Page)
        # is in the results
        about_us = Page.objects.get(url_path='/home/about-us/')
        self.assertIn(about_us, pages)
    def test_public(self):
        """public() excludes pages under any PageViewRestriction."""
        events_index = Page.objects.get(url_path='/home/events/')
        event = Page.objects.get(url_path='/home/events/christmas/')
        homepage = Page.objects.get(url_path='/home/')
        # Add PageViewRestriction to events_index
        PageViewRestriction.objects.create(page=events_index, password='hello')
        with self.assertNumQueries(4):
            # Get public pages
            pages = Page.objects.public()
            # Check that the homepage is in the results
            self.assertTrue(pages.filter(id=homepage.id).exists())
            # Check that the events index is not in the results
            self.assertFalse(pages.filter(id=events_index.id).exists())
            # Check that the event is not in the results
            self.assertFalse(pages.filter(id=event.id).exists())
    def test_not_public(self):
        """not_public() returns only pages under a PageViewRestriction."""
        events_index = Page.objects.get(url_path='/home/events/')
        event = Page.objects.get(url_path='/home/events/christmas/')
        homepage = Page.objects.get(url_path='/home/')
        # Add PageViewRestriction to events_index
        PageViewRestriction.objects.create(page=events_index, password='hello')
        with self.assertNumQueries(4):
            # Get public pages
            pages = Page.objects.not_public()
            # Check that the homepage is not in the results
            self.assertFalse(pages.filter(id=homepage.id).exists())
            # Check that the events index is in the results
            self.assertTrue(pages.filter(id=events_index.id).exists())
            # Check that the event is in the results
            self.assertTrue(pages.filter(id=event.id).exists())
    def test_merge_queries(self):
        """type_q() returns a Q object that can be OR-ed into other queries."""
        type_q = Page.objects.type_q(EventPage)
        query = Q()
        query |= type_q
        self.assertTrue(Page.objects.filter(query).exists())
    def test_delete_queryset(self):
        """delete() is available on the queryset and removes all pages."""
        Page.objects.all().delete()
        self.assertEqual(Page.objects.count(), 0)
    def test_delete_is_not_available_on_manager(self):
        """delete() must not be callable directly on the manager (safety net)."""
        with self.assertRaises(AttributeError):
            Page.objects.delete()
    def test_translation_of(self):
        """translation_of() finds pages sharing a translation_key, optionally
        including the given page itself when inclusive=True."""
        en_homepage = Page.objects.get(url_path='/home/')
        # Create a translation of the homepage
        fr_locale = Locale.objects.create(language_code="fr")
        root_page = Page.objects.get(depth=1)
        fr_homepage = root_page.add_child(instance=Page(
            title="French homepage",
            slug="home-fr",
            locale=fr_locale,
            translation_key=en_homepage.translation_key,
        ))
        with self.assertNumQueries(1):
            translations = Page.objects.translation_of(en_homepage)
            self.assertListEqual(list(translations), [fr_homepage])
        # Now test with inclusive
        with self.assertNumQueries(1):
            translations = Page.objects.translation_of(en_homepage, inclusive=True).order_by('id')
            self.assertListEqual(list(translations), [en_homepage, fr_homepage])
    def test_not_translation_of(self):
        """not_translation_of() excludes translations of the given page;
        with inclusive=True the page itself is excluded as well."""
        en_homepage = Page.objects.get(url_path='/home/')
        # Create a translation of the homepage
        fr_locale = Locale.objects.create(language_code="fr")
        root_page = Page.objects.get(depth=1)
        fr_homepage = root_page.add_child(instance=Page(
            title="French homepage",
            slug="home-fr",
            locale=fr_locale,
            translation_key=en_homepage.translation_key,
        ))
        with self.assertNumQueries(1):
            translations = list(Page.objects.not_translation_of(en_homepage))
        # Check that every single page is in the queryset, except for fr_homepage
        for page in Page.objects.all():
            if page in [fr_homepage]:
                self.assertNotIn(page, translations)
            else:
                self.assertIn(page, translations)
        # Test with inclusive
        with self.assertNumQueries(1):
            translations = list(Page.objects.not_translation_of(en_homepage, inclusive=True))
        # Check that every single page is in the queryset, except for fr_homepage and en_homepage
        for page in Page.objects.all():
            if page in [en_homepage, fr_homepage]:
                self.assertNotIn(page, translations)
            else:
                self.assertIn(page, translations)
class TestPageQueryInSite(TestCase):
    """Tests for ``PageQuerySet.in_site()``: results should be limited to
    pages living under the given site's root page."""
    fixtures = ['test.json']

    def setUp(self):
        # Build a second, independent page tree directly under the root node,
        # then register it as the root of a second (non-default) site.
        root = Page.get_first_root_node()
        self.site_2_page = SimplePage(
            title="Site 2 page",
            slug="site_2_page",
            content="Hello",
        )
        root.add_child(instance=self.site_2_page)
        self.site_2_subpage = SimplePage(
            title="Site 2 subpage",
            slug="site_2_subpage",
            content="Hello again",
        )
        self.site_2_page.add_child(instance=self.site_2_subpage)
        self.site_2 = Site.objects.create(
            hostname='example.com',
            port=8080,
            root_page=Page.objects.get(pk=self.site_2_page.pk),
            is_default_site=False,
        )
        # A page that belongs to the default site only, for the negative check.
        self.about_us_page = SimplePage.objects.get(url_path='/home/about-us/')

    def test_in_site(self):
        site_2_results = SimplePage.objects.in_site(self.site_2)
        # Both pages of the second tree are included...
        self.assertIn(self.site_2_page, site_2_results)
        self.assertIn(self.site_2_subpage, site_2_results)
        # ...while a page from the default site is not.
        self.assertNotIn(self.about_us_page, site_2_results)
class TestPageQuerySetSearch(TestCase):
    """Search behaviour of PageQuerySet (field restriction, operators,
    ordering) plus bulk ``unpublish()`` and its signal side effects."""
    fixtures = ['test.json']
    def test_search(self):
        """search() restricted to the 'location' field finds both moon events."""
        pages = EventPage.objects.search('moon', fields=['location'])
        self.assertEqual(pages.count(), 2)
        self.assertIn(Page.objects.get(url_path='/home/events/tentative-unpublished-event/').specific, pages)
        self.assertIn(Page.objects.get(url_path='/home/events/someone-elses-event/').specific, pages)
    def test_operators(self):
        """'and' requires all terms to match; 'or' matches any term."""
        results = EventPage.objects.search("moon ponies", operator='and')
        self.assertEqual(list(results), [
            Page.objects.get(url_path='/home/events/tentative-unpublished-event/').specific
        ])
        results = EventPage.objects.search("moon ponies", operator='or')
        # Sort by url_path: 'or' results carry no guaranteed ordering
        sorted_results = sorted(results, key=lambda page: page.url_path)
        self.assertEqual(sorted_results, [
            Page.objects.get(url_path='/home/events/someone-elses-event/').specific,
            Page.objects.get(url_path='/home/events/tentative-unpublished-event/').specific,
        ])
    def test_custom_order(self):
        """order_by_relevance=False preserves the queryset's own ordering."""
        pages = EventPage.objects.order_by('url_path').search('moon', fields=['location'], order_by_relevance=False)
        self.assertEqual(list(pages), [
            Page.objects.get(url_path='/home/events/someone-elses-event/').specific,
            Page.objects.get(url_path='/home/events/tentative-unpublished-event/').specific,
        ])
        pages = EventPage.objects.order_by('-url_path').search('moon', fields=['location'], order_by_relevance=False)
        self.assertEqual(list(pages), [
            Page.objects.get(url_path='/home/events/tentative-unpublished-event/').specific,
            Page.objects.get(url_path='/home/events/someone-elses-event/').specific,
        ])
    def test_unpublish(self):
        """unpublish() on a queryset takes live pages offline and fires
        page_unpublished once per page that was actually live."""
        # set up a listener for the unpublish signal
        unpublish_signals_fired = []
        def page_unpublished_handler(sender, instance, **kwargs):
            unpublish_signals_fired.append((sender, instance))
        page_unpublished.connect(page_unpublished_handler)
        events_index = Page.objects.get(url_path='/home/events/')
        events_index.get_children().unpublish()
        # Previously-live children of event index should now be non-live
        christmas = EventPage.objects.get(url_path='/home/events/christmas/')
        saint_patrick = SingleEventPage.objects.get(url_path='/home/events/saint-patrick/')
        unpublished_event = EventPage.objects.get(url_path='/home/events/tentative-unpublished-event/')
        self.assertFalse(christmas.live)
        self.assertFalse(saint_patrick.live)
        # Check that a signal was fired for each unpublished page
        self.assertIn((EventPage, christmas), unpublish_signals_fired)
        self.assertIn((SingleEventPage, saint_patrick), unpublish_signals_fired)
        # a signal should not be fired for pages that were in the queryset
        # but already unpublished
        self.assertNotIn((EventPage, unpublished_event), unpublish_signals_fired)
class TestSpecificQuery(TestCase):
    """
    Test the .specific() queryset method. This is isolated in its own test case
    because it is sensitive to database changes that might happen for other
    tests.
    The fixture sets up a page structure like:
    =========== =========================================
    Type        Path
    =========== =========================================
    Page        /
    Page        /home/
    SimplePage  /home/about-us/
    EventIndex  /home/events/
    EventPage   /home/events/christmas/
    EventPage   /home/events/someone-elses-event/
    EventPage   /home/events/tentative-unpublished-event/
    SimplePage  /home/other/
    EventPage   /home/other/special-event/
    =========== =========================================
    """
    fixtures = ['test_specific.json']
    def test_specific(self):
        """specific() lazily upcasts each result to its most specific model."""
        root = Page.objects.get(url_path='/home/')
        with self.assertNumQueries(0):
            # The query should be lazy.
            qs = root.get_descendants().specific()
        with self.assertNumQueries(4):
            # One query to get page type and ID, one query per page type:
            # EventIndex, EventPage, SimplePage
            pages = list(qs)
        self.assertIsInstance(pages, list)
        self.assertEqual(len(pages), 7)
        for page in pages:
            # An instance of the specific page type should be returned,
            # not wagtailcore.Page.
            content_type = page.content_type
            model = content_type.model_class()
            self.assertIsInstance(page, model)
            # The page should already be the specific type, so this should not
            # need another database query.
            with self.assertNumQueries(0):
                self.assertIs(page, page.specific)
    def test_filtering_before_specific(self):
        """Filters/slices applied before specific() still take effect."""
        # This will get the other events, and then christmas
        # 'someone-elses-event' and the tentative event are unpublished.
        with self.assertNumQueries(0):
            qs = Page.objects.live().order_by('-url_path')[:3].specific()
        with self.assertNumQueries(3):
            # Metadata, EventIndex and EventPage
            pages = list(qs)
        self.assertEqual(len(pages), 3)
        self.assertEqual(pages, [
            Page.objects.get(url_path='/home/other/special-event/').specific,
            Page.objects.get(url_path='/home/other/').specific,
            Page.objects.get(url_path='/home/events/christmas/').specific])
    def test_filtering_after_specific(self):
        """Filters/slices applied after specific() also take effect."""
        # This will get the other events, and then christmas
        # 'someone-elses-event' and the tentative event are unpublished.
        with self.assertNumQueries(0):
            qs = Page.objects.specific().live().in_menu().order_by('-url_path')[:4]
        with self.assertNumQueries(4):
            # Metadata, EventIndex, EventPage, SimplePage.
            pages = list(qs)
        self.assertEqual(len(pages), 4)
        self.assertEqual(pages, [
            Page.objects.get(url_path='/home/other/').specific,
            Page.objects.get(url_path='/home/events/christmas/').specific,
            Page.objects.get(url_path='/home/events/').specific,
            Page.objects.get(url_path='/home/about-us/').specific])
    def test_specific_query_with_annotations_performs_no_additional_queries(self):
        with self.assertNumQueries(5):
            pages = list(Page.objects.live().specific())
            self.assertEqual(len(pages), 7)
        with self.assertNumQueries(5):
            pages = list(Page.objects.live().specific().annotate(count=Count('pk')))
            self.assertEqual(len(pages), 7)
    def test_specific_query_with_annotation(self):
        # Ensure annotations are reapplied to specific() page queries
        pages = Page.objects.live()
        pages.first().save_revision()
        pages.last().save_revision()
        results = Page.objects.live().specific().annotate(revision_count=Count('revisions'))
        self.assertEqual(results.first().revision_count, 1)
        self.assertEqual(results.last().revision_count, 1)
    def test_specific_query_with_search_and_annotation(self):
        # Ensure annotations are reapplied to specific() page queries
        results = Page.objects.live().specific().search(MATCH_ALL).annotate_score('_score')
        for result in results:
            self.assertTrue(hasattr(result, '_score'))
    def test_specific_query_with_search(self):
        # 1276 - The database search backend didn't return results with the
        # specific type when searching a specific queryset.
        pages = list(Page.objects.specific().live().in_menu().search(
            MATCH_ALL, backend='wagtail.search.backends.db'))
        # Check that each page is in the queryset with the correct type.
        # We don't care about order here
        self.assertEqual(len(pages), 4)
        self.assertIn(Page.objects.get(url_path='/home/other/').specific, pages)
        self.assertIn(Page.objects.get(url_path='/home/events/christmas/').specific, pages)
        self.assertIn(Page.objects.get(url_path='/home/events/').specific, pages)
        self.assertIn(Page.objects.get(url_path='/home/about-us/').specific, pages)
    def test_specific_gracefully_handles_missing_models(self):
        # 3567 - PageQuerySet.specific should gracefully handle pages whose class definition
        # is missing, by keeping them as basic Page instances.
        # Create a ContentType that doesn't correspond to a real model
        missing_page_content_type = ContentType.objects.create(app_label='tests', model='missingpage')
        # Turn /home/events/ into this content type
        Page.objects.filter(url_path='/home/events/').update(content_type=missing_page_content_type)
        pages = list(Page.objects.get(url_path='/home/').get_children().specific())
        self.assertEqual(pages, [
            Page.objects.get(url_path='/home/events/'),
            Page.objects.get(url_path='/home/about-us/').specific,
            Page.objects.get(url_path='/home/other/').specific,
        ])
    def test_specific_gracefully_handles_missing_rows(self):
        # 5928 - PageQuerySet.specific should gracefully handle pages whose ContentType
        # row in the specific table no longer exists
        # Trick specific_iterator into always looking for EventPages
        with mock.patch(
            'wagtail.core.query.ContentType.objects.get_for_id',
            return_value=ContentType.objects.get_for_model(EventPage),
        ):
            with self.assertWarnsRegex(RuntimeWarning, "Specific versions of the following pages could not be found"):
                pages = list(Page.objects.get(url_path='/home/').get_children().specific())
            # All missing pages should be supplemented with generic pages
            self.assertEqual(pages, [
                Page.objects.get(url_path='/home/events/'),
                Page.objects.get(url_path='/home/about-us/'),
                Page.objects.get(url_path='/home/other/'),
            ])
    def test_deferred_specific_query(self):
        # Tests the "defer" keyword argument, which defers all specific fields
        root = Page.objects.get(url_path='/home/')
        stream_page = StreamPage(
            title='stream page',
            slug='stream-page',
            body='[{"type": "text", "value": "foo"}]',
        )
        root.add_child(instance=stream_page)
        with self.assertNumQueries(0):
            # The query should be lazy.
            qs = root.get_descendants().specific(defer=True)
        with self.assertNumQueries(1):
            # This did use 5 queries (one for each specific class),
            # But now only performs a single query
            pages = list(qs)
        self.assertIsInstance(pages, list)
        self.assertEqual(len(pages), 8)
        for page in pages:
            # An instance of the specific page type should be returned,
            # not wagtailcore.Page.
            content_type = page.content_type
            model = content_type.model_class()
            self.assertIsInstance(page, model)
            # The page should already be the specific type, so this should not
            # need another database query.
            with self.assertNumQueries(0):
                self.assertIs(page, page.specific)
        # Unlike before, the content fields should be now deferred. This means
        # that accessing them will generate a new query.
        with self.assertNumQueries(2):
            # <EventPage: Christmas>
            pages[1].body
            # <StreamPage: stream page>
            pages[-1].body
class TestFirstCommonAncestor(TestCase):
    """
    Uses the same fixture as TestSpecificQuery. See that class for the layout
    of pages.
    """
    fixtures = ['test_specific.json']
    def setUp(self):
        # all_events: every EventPage in the tree; regular_events: the same
        # minus those living under /home/other/.
        self.root_page = Page.objects.get(url_path='/home/')
        self.all_events = Page.objects.type(EventPage)
        self.regular_events = Page.objects.type(EventPage)\
            .exclude(url_path__contains='/other/')
    def _create_streampage(self):
        """Add a StreamPage under /home/ for the defer_streamfields tests."""
        stream_page = StreamPage(
            title='stream page',
            slug='stream-page',
            body='[{"type": "text", "value": "foo"}]',
        )
        self.root_page.add_child(instance=stream_page)
    def test_bookkeeping(self):
        """Sanity-check the fixture counts the other tests rely on."""
        self.assertEqual(self.all_events.count(), 4)
        self.assertEqual(self.regular_events.count(), 3)
    def test_event_pages(self):
        """Common ancestor for EventPages"""
        # As there are event pages in multiple trees under /home/, the home
        # page is the common ancestor
        self.assertEqual(
            Page.objects.get(slug='home'),
            self.all_events.first_common_ancestor())
    def test_normal_event_pages(self):
        """Common ancestor for EventPages, excluding /other/ events"""
        self.assertEqual(
            Page.objects.get(slug='events'),
            self.regular_events.first_common_ancestor())
    def test_normal_event_pages_include_self(self):
        """
        Common ancestor for EventPages, excluding /other/ events, with
        include_self=True
        """
        self.assertEqual(
            Page.objects.get(slug='events'),
            self.regular_events.first_common_ancestor(include_self=True))
    def test_single_page_no_include_self(self):
        """Test getting a single page, with include_self=False."""
        self.assertEqual(
            Page.objects.get(slug='events'),
            Page.objects.filter(title='Christmas').first_common_ancestor())
    def test_single_page_include_self(self):
        """Test getting a single page, with include_self=True."""
        self.assertEqual(
            Page.objects.get(title='Christmas'),
            Page.objects.filter(title='Christmas').first_common_ancestor(include_self=True))
    def test_all_pages(self):
        """Ancestor of every page falls back to the tree root."""
        self.assertEqual(
            Page.get_first_root_node(),
            Page.objects.first_common_ancestor())
    def test_all_pages_strict(self):
        """strict=True raises instead of falling back to the root."""
        with self.assertRaises(Page.DoesNotExist):
            Page.objects.first_common_ancestor(strict=True)
    def test_all_pages_include_self_strict(self):
        """With include_self=True the root itself satisfies strict mode."""
        self.assertEqual(
            Page.get_first_root_node(),
            Page.objects.first_common_ancestor(include_self=True, strict=True))
    def test_empty_queryset(self):
        """An empty queryset falls back to the tree root."""
        self.assertEqual(
            Page.get_first_root_node(),
            Page.objects.none().first_common_ancestor())
    def test_empty_queryset_strict(self):
        """An empty queryset with strict=True raises DoesNotExist."""
        with self.assertRaises(Page.DoesNotExist):
            Page.objects.none().first_common_ancestor(strict=True)
    def test_defer_streamfields_without_specific(self):
        """defer_streamfields() leaves 'body' deferred until first access."""
        self._create_streampage()
        for page in StreamPage.objects.all().defer_streamfields():
            self.assertNotIn('body', page.__dict__)
            with self.assertNumQueries(1):
                page.body
    def test_defer_streamfields_with_specific(self):
        """defer_streamfields() survives a subsequent .specific() call."""
        self._create_streampage()
        for page in Page.objects.exact_type(StreamPage).defer_streamfields().specific():
            self.assertNotIn('body', page.__dict__)
            with self.assertNumQueries(1):
                page.body
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import weakref
from absl.testing import parameterized
import six
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util as trackable_utils
class NonLayerTrackable(tracking.AutoTrackable):
  """A Trackable that is not a Keras Layer, holding one scalar variable."""
  def __init__(self):
    super(NonLayerTrackable, self).__init__()
    # Register a scalar variable as a checkpoint dependency of this object.
    scalar = trackable_utils.add_variable(
        self, name="a_variable", shape=[])
    self.a_variable = scalar
# pylint: disable=not-callable
class MyModel(training.Model):
  """A concrete Model for testing: two stacked Dense layers plus a
  non-Layer Trackable dependency."""
  def __init__(self):
    super(MyModel, self).__init__()
    self._named_dense = core.Dense(1, use_bias=True)
    self._second = core.Dense(1, use_bias=False)
    # We can still track Trackables which aren't Layers.
    self._non_layer = NonLayerTrackable()
  def call(self, values):
    hidden = self._named_dense(values)
    return self._second(hidden)
class InterfaceTests(test.TestCase):
  """Tests for the basic Trackable/Checkpoint public surface."""
  def testLayerDeduplication(self):
    """A layer reachable through several attributes appears once in .layers."""
    model = training.Model()
    layer_one = core.Dense(1)
    layer_two = core.Dense(1)
    # Track the same two layers via a list attribute and direct attributes;
    # attribute assignment order differs from the list order on purpose.
    model.other_path = [layer_one, layer_two]
    model.l2 = layer_two
    model.l1 = layer_one
    self.assertEqual([layer_one, layer_two], model.layers)
  def testSaveWithOnlyKerasSession(self):
    """Checkpoint.save works when only Keras' implicit session exists."""
    with ops.Graph().as_default():
      inp = input_layer.Input([1])
      dense = core.Dense(1)(inp)
      model = training.Model(inp, dense)
      model.compile(optimizer="sgd", loss="mse")
      # fit() creates Keras' backend session; save() must find and use it.
      model.fit([1.], [2.])
      checkpoint = trackable_utils.Checkpoint(model=model)
      checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
  @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
  def testAddVariable(self):
    """add_variable: initializers, scoping, duplicates, checkpoint names."""
    obj = NonLayerTrackable()
    # Passing both a shape and a concrete (shaped) initializer is an error.
    with self.assertRaisesRegexp(ValueError, "do not specify shape"):
      trackable_utils.add_variable(
          obj, name="shape_specified_twice", shape=[], initializer=1)
    constant_initializer = trackable_utils.add_variable(
        obj, name="constant_initializer", initializer=1)
    with variable_scope.variable_scope("some_variable_scope"):
      ones_initializer = trackable_utils.add_variable(
          obj,
          name="ones_initializer",
          shape=[2],
          initializer=init_ops.ones_initializer(dtype=dtypes.float32))
    bare_initializer = trackable_utils.add_variable(
        obj,
        name="bare_initializer",
        shape=[2, 2],
        dtype=dtypes.float64,
        initializer=init_ops.zeros_initializer)
    # Even in graph mode, there are no naming conflicts between objects, only
    # naming conflicts within an object.
    other_duplicate = resource_variable_ops.ResourceVariable(
        name="duplicate", initial_value=1.)
    duplicate = trackable_utils.add_variable(
        obj, name="duplicate", shape=[])
    with self.assertRaisesRegexp(ValueError, "'duplicate'.*already declared"):
      trackable_utils.add_variable(obj, name="duplicate", shape=[])
    self.evaluate(trackable_utils.gather_initializers(obj))
    self.assertEqual("constant_initializer:0", constant_initializer.name)
    self.assertEqual(1, self.evaluate(constant_initializer))
    self.assertEqual("some_variable_scope/ones_initializer:0",
                     ones_initializer.name)
    self.assertAllEqual([1, 1], self.evaluate(ones_initializer))
    self.assertAllEqual([[0., 0.],
                         [0., 0.]], self.evaluate(bare_initializer))
    self.assertEqual("a_variable:0", obj.a_variable.name)
    self.assertEqual("duplicate:0", other_duplicate.name)
    if context.executing_eagerly():
      # When executing eagerly, there's no uniquification of variable names. The
      # checkpoint name will be the same.
      self.assertEqual("duplicate:0", duplicate.name)
    else:
      # The .name attribute may be globally influenced, but the checkpoint name
      # won't be (tested below).
      self.assertEqual("duplicate_1:0", duplicate.name)
    named_variables, _, _ = (
        graph_view.ObjectGraphView(obj).serialize_object_graph())
    # Checkpoint keys are object-local, so "duplicate" appears unsuffixed even
    # when the graph-mode .name attribute was uniquified above.
    expected_checkpoint_names = (
        "a_variable/.ATTRIBUTES/VARIABLE_VALUE",
        "bare_initializer/.ATTRIBUTES/VARIABLE_VALUE",
        "constant_initializer/.ATTRIBUTES/VARIABLE_VALUE",
        "duplicate/.ATTRIBUTES/VARIABLE_VALUE",
        "ones_initializer/.ATTRIBUTES/VARIABLE_VALUE",
    )
    six.assertCountEqual(
        self, expected_checkpoint_names, [v.name for v in named_variables])
  def testInitNotCalled(self):
    """add_variable tolerates objects whose __init__ skipped super()."""
    class NoInit(tracking.AutoTrackable):
      def __init__(self):
        pass
    # __init__ for Trackable will be called implicitly.
    trackable_utils.add_variable(NoInit(), "var", shape=[])
  def testShapeDtype(self):
    """Explicit dtype is honored with and without an explicit shape."""
    root = tracking.AutoTrackable()
    v1 = trackable_utils.add_variable(
        root, name="v1", initializer=3., dtype=dtypes.float64)
    self.assertEqual(dtypes.float64, v1.dtype)
    v2 = trackable_utils.add_variable(
        root,
        name="v2",
        shape=[3],
        initializer=init_ops.ones_initializer,
        dtype=dtypes.float64)
    self.assertEqual(dtypes.float64, v2.dtype)
    self.assertAllEqual([1., 1., 1.], self.evaluate(v2))
  def testObjectMetadata(self):
    """object_metadata exposes full variable names from a saved checkpoint."""
    with context.eager_mode():
      checkpoint_directory = self.get_temp_dir()
      checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
      dense = core.Dense(1)
      checkpoint = trackable_utils.Checkpoint(dense=dense)
      # Build the layer's variables before saving.
      dense(constant_op.constant([[1.]]))
      save_path = checkpoint.save(checkpoint_prefix)
      objects = trackable_utils.object_metadata(save_path)
      all_variable_names = []
      for obj in objects.nodes:
        for attribute in obj.attributes:
          all_variable_names.append(attribute.full_name)
      self.assertIn("dense/kernel", all_variable_names)
  def testNotTrackable(self):
    """Saving an object mixing in NotTrackable fails, regardless of MRO."""
    class CallsFunctionalStuff(
        tracking.NotTrackable, tracking.AutoTrackable):
      pass
    test_dir = self.get_temp_dir()
    prefix = os.path.join(test_dir, "ckpt")
    checkpoint = trackable_utils.Checkpoint(x=CallsFunctionalStuff())
    with self.assertRaises(NotImplementedError):
      checkpoint.save(prefix)
    class CallsFunctionalStuffOtherMRO(
        tracking.AutoTrackable, tracking.NotTrackable):
      pass
    checkpoint_reversed = trackable_utils.Checkpoint(
        x=CallsFunctionalStuffOtherMRO())
    with self.assertRaises(NotImplementedError):
      checkpoint_reversed.save(prefix)
class _MirroringSaveable(saver_lib.BaseSaverBuilder.SaveableObject):
  """SaveableObject which saves one variable but restores into two.

  Only `primary_variable`'s value is written to the checkpoint; on restore
  the same tensor is assigned to both `primary_variable` and
  `mirrored_variable`.
  """
  def __init__(self, primary_variable, mirrored_variable, name):
    self._primary_variable = primary_variable
    self._mirrored_variable = mirrored_variable
    # The checkpointed value comes from the primary variable only.
    tensor = self._primary_variable.read_value()
    spec = saver_lib.BaseSaverBuilder.SaveSpec(
        tensor=tensor,
        slice_spec="",
        name=name)
    super(_MirroringSaveable, self).__init__(
        tensor, [spec], name)
  def restore(self, restored_tensors, restored_shapes):
    """Restore the same value into both variables."""
    tensor, = restored_tensors
    return control_flow_ops.group(
        self._primary_variable.assign(tensor),
        self._mirrored_variable.assign(tensor))
class _OwnsMirroredVariables(base.Trackable):
  """A Trackable object which returns a more complex SaveableObject."""
  def __init__(self):
    self.non_dep_variable = variable_scope.get_variable(
        name="non_dep_variable", initializer=6., use_resource=True)
    self.mirrored = variable_scope.get_variable(
        name="mirrored", initializer=15., use_resource=True)
  def _gather_saveables_for_checkpoint(self):
    # The checkpoint machinery invokes this factory lazily; it produces a
    # saveable which writes non_dep_variable and restores into both variables.
    def _saveable_factory(name=self.non_dep_variable.name):
      return _MirroringSaveable(
          primary_variable=self.non_dep_variable,
          mirrored_variable=self.mirrored,
          name=name)
    return {base.VARIABLE_VALUE_KEY: _saveable_factory}
  # The Saver sorts by name before parsing, so we need a name property.
  @property
  def name(self):
    return self.non_dep_variable.name
class CheckpointingTests(parameterized.TestCase, test.TestCase):
  @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
  def testNamingWithOptimizer(self):
    """Checks checkpoint keys for a model + Adam optimizer + global step.

    Also verifies that slot variables belonging to an undepended-on model
    (sharing the optimizer) stay out of the checkpoint, and spot-checks the
    serialized object-graph protos.
    """
    input_value = constant_op.constant([[3.]])
    model = MyModel()
    # A nuisance Model using the same optimizer. Its slot variables should not
    # go in the checkpoint, since it is never depended on.
    other_model = MyModel()
    optimizer = adam.Adam(0.001)
    step = training_util.get_or_create_global_step()
    root_trackable = trackable_utils.Checkpoint(
        optimizer=optimizer, model=model, step=step)
    with backprop.GradientTape() as tape:
      loss = model(input_value)
      variables = model.trainable_variables
      gradients = tape.gradient(loss, variables)
      train_op = control_flow_ops.group(
          optimizer.apply_gradients(zip(gradients, variables)),
          step.assign_add(1))
    with backprop.GradientTape() as tape:
      loss = other_model(input_value)
      variables = other_model.trainable_variables
      gradients = tape.gradient(loss, variables)
      optimizer.apply_gradients(zip(gradients, variables))
    self.evaluate(trackable_utils.gather_initializers(
        root_trackable))
    self.evaluate(train_op)
    named_variables, serialized_graph, _ = graph_view.ObjectGraphView(
        root_trackable).serialize_object_graph()
    expected_slot_keys = (
        "model/_second/kernel/.OPTIMIZER_SLOT/optimizer/m",
        "model/_second/kernel/.OPTIMIZER_SLOT/optimizer/v",
        "model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m",
        "model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v",
        "model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m",
        "model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v",
    )
    expected_checkpoint_names = (
        # Created in the root node, so no prefix.
        "step",
        "model/_second/kernel",
        "model/_named_dense/kernel",
        "model/_named_dense/bias",
        # non-Layer dependency of the model
        "model/_non_layer/a_variable",
        "optimizer/learning_rate",
        "optimizer/beta_1",
        "optimizer/beta_2",
        "optimizer/iter",
        "optimizer/decay",
    ) + expected_slot_keys
    suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
    expected_checkpoint_names = [
        name + suffix for name in expected_checkpoint_names]
    named_variables = {v.name: v for v in named_variables}
    six.assertCountEqual(self, expected_checkpoint_names,
                         named_variables.keys())
    # Check that we've mapped to the right variable objects (not exhaustive)
    self.assertEqual(
        "global_step",
        named_variables["step" + suffix].full_name)
    self.assertEqual(
        "my_model/dense_1/kernel",
        named_variables["model/_second/kernel" + suffix].full_name)
    self.assertEqual(
        "my_model/dense/kernel",
        named_variables["model/_named_dense/kernel" + suffix].full_name)
    self.assertEqual("Adam/beta_1",
                     named_variables["optimizer/beta_1" + suffix].full_name)
    self.assertEqual("Adam/beta_2",
                     named_variables["optimizer/beta_2" + suffix].full_name)
    # Spot check the generated protocol buffers.
    self.assertEqual("optimizer",
                     serialized_graph.nodes[0].children[1].local_name)
    optimizer_node = serialized_graph.nodes[serialized_graph.nodes[0].children[
        1].node_id]
    children = [node.local_name for node in optimizer_node.children]
    six.assertCountEqual(
        self,
        # hyper variable dependencies
        ["beta_1", "beta_2", "iter", "decay", "learning_rate"],
        children)
    serialized_slot_keys = []
    for slot in optimizer_node.slot_variables:
      for attribute in (
          serialized_graph.nodes[slot.slot_variable_node_id].attributes):
        serialized_slot_keys.append(attribute.checkpoint_key)
    six.assertCountEqual(
        self,
        [key + suffix for key in expected_slot_keys],
        serialized_slot_keys)
  @test_util.run_in_graph_and_eager_modes
  def testMoreComplexSaveableReturned(self):
    """Object-based save/restore round-trips through _MirroringSaveable.

    After restore, both the primary and mirrored variables must hold the
    value saved from the primary variable.
    """
    v = _OwnsMirroredVariables()
    checkpoint = trackable_utils.Checkpoint(v=v)
    test_dir = self.get_temp_dir()
    prefix = os.path.join(test_dir, "ckpt")
    self.evaluate(v.non_dep_variable.assign(42.))
    save_path = checkpoint.save(prefix)
    # Clobber both variables so the restore is observable.
    self.evaluate(v.non_dep_variable.assign(43.))
    self.evaluate(v.mirrored.assign(44.))
    checkpoint.restore(save_path).assert_consumed().initialize_or_restore()
    self.assertEqual(42., self.evaluate(v.non_dep_variable))
    self.assertEqual(42., self.evaluate(v.mirrored))
    self.evaluate(v.non_dep_variable.assign(44.))
    save_path = checkpoint.save(prefix)
    self.evaluate(v.non_dep_variable.assign(45.))
    checkpoint.restore(save_path).assert_consumed().initialize_or_restore()
    self.assertEqual(44., self.evaluate(v.non_dep_variable))
    self.assertEqual(44., self.evaluate(v.mirrored))
  @test_util.run_in_graph_and_eager_modes
  def testMoreComplexSaveableReturnedWithGlobalName(self):
    """The mirroring saveable also works through the name-based Saver."""
    # The same object can also be saved using the name-based saver.
    v = _OwnsMirroredVariables()
    saver = saver_lib.Saver(var_list=[v])
    test_dir = self.get_temp_dir()
    prefix = os.path.join(test_dir, "ckpt")
    with self.cached_session() as sess:
      self.evaluate(v.non_dep_variable.assign(42.))
      save_path = saver.save(sess, prefix)
      # Clobber both variables, then verify restore mirrors the saved value.
      self.evaluate(v.non_dep_variable.assign(43.))
      self.evaluate(v.mirrored.assign(44.))
      saver.restore(sess, save_path)
      self.assertEqual(42., self.evaluate(v.non_dep_variable))
      self.assertEqual(42., self.evaluate(v.mirrored))
  @test_util.run_in_graph_and_eager_modes
  def testSaveRestore(self):
    """Immediate restore into existing objects, then deferred restore-on-create.

    The second half (restore before variables exist) only runs eagerly, since
    restore-on-create is eager-only.
    """
    model = MyModel()
    optimizer = adam.Adam(0.001)
    root_trackable = trackable_utils.Checkpoint(
        optimizer=optimizer, model=model)
    input_value = constant_op.constant([[3.]])
    with backprop.GradientTape() as tape:
      loss = model(input_value)
      variables = model.trainable_variables
    gradients = tape.gradient(loss, variables)
    train_op = optimizer.apply_gradients(zip(gradients, variables))
    self.assertFalse(root_trackable.save_counter.trainable)
    self.evaluate(trackable_utils.gather_initializers(
        root_trackable))
    self.evaluate(train_op)
    prefix = os.path.join(self.get_temp_dir(), "ckpt")
    # Pin known values into the model bias and its Adam "m" slot so the
    # restore below is observable.
    self.evaluate(state_ops.assign(model._named_dense.variables[1], [42.]))
    m_bias_slot = optimizer.get_slot(model._named_dense.variables[1], "m")
    self.evaluate(state_ops.assign(m_bias_slot, [1.5]))
    save_path = root_trackable.save(file_prefix=prefix)
    self.evaluate(state_ops.assign(model._named_dense.variables[1], [43.]))
    self.evaluate(state_ops.assign(root_trackable.save_counter, 3))
    optimizer_variables = self.evaluate(
        sorted(optimizer.variables(), key=lambda v: v.name))
    self.evaluate(state_ops.assign(m_bias_slot, [-2.]))
    # Immediate restoration
    status = root_trackable.restore(save_path=save_path).assert_consumed()
    status.run_restore_ops()
    self.assertAllEqual([42.], self.evaluate(model._named_dense.variables[1]))
    self.assertAllEqual(1, self.evaluate(root_trackable.save_counter))
    self.assertAllEqual([1.5], self.evaluate(m_bias_slot))
    if not context.executing_eagerly():
      return  # Restore-on-create is only supported when executing eagerly
    on_create_model = MyModel()
    on_create_optimizer = adam.Adam(0.001)
    on_create_root = trackable_utils.Checkpoint(
        optimizer=on_create_optimizer, model=on_create_model)
    # Deferred restoration
    status = on_create_root.restore(save_path=save_path)
    status.assert_nontrivial_match()
    status.assert_existing_objects_matched()
    with self.assertRaises(AssertionError):
      status.assert_consumed()
    on_create_model(constant_op.constant([[3.]]))  # create variables
    self.assertAllEqual(1, self.evaluate(on_create_root.save_counter))
    self.assertAllEqual([42.],
                        self.evaluate(
                            on_create_model._named_dense.variables[1]))
    on_create_m_bias_slot = on_create_optimizer.get_slot(
        on_create_model._named_dense.variables[1], "m")
    status.assert_existing_objects_matched()
    if not context.executing_eagerly():
      with self.assertRaises(AssertionError):
        status.assert_consumed()
    # Optimizer slot variables are created when the original variable is
    # restored.
    self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))
    dummy_var = resource_variable_ops.ResourceVariable([1.])
    on_create_optimizer.minimize(loss=dummy_var.read_value,
                                 var_list=[dummy_var])
    status.assert_existing_objects_matched()
    status.assert_consumed()
    self.assertAllEqual(
        optimizer_variables,
        # Creation order is different, so .variables() needs to be re-sorted.
        self.evaluate(sorted(optimizer.variables(), key=lambda v: v.name)))
  # TODO(allenl): Debug garbage created by this test in python3.
  def testDeferredRestorationUsageEager(self):
    """An idiomatic eager execution example.

    Simulates three training sessions resuming from each other's latest
    checkpoint; the optimizer iteration count must carry across sessions.
    """
    num_training_steps = 10
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    for training_continuation in range(3):
      model = MyModel()
      optimizer = adam.Adam(0.001)
      root = trackable_utils.Checkpoint(
          optimizer=optimizer, model=model)
      # latest_checkpoint is None on the first continuation; restore(None)
      # is a no-op that simply initializes on create.
      root.restore(checkpoint_management.latest_checkpoint(
          checkpoint_directory))
      for _ in range(num_training_steps):
        # TODO(allenl): Use a Dataset and serialize/checkpoint it.
        input_value = constant_op.constant([[3.]])
        with backprop.GradientTape() as tape:
          loss = model(input_value)
          variables = model.trainable_variables
        gradients = tape.gradient(loss, variables)
        optimizer.apply_gradients(zip(gradients, variables))
      root.save(file_prefix=checkpoint_prefix)
      self.assertEqual((training_continuation + 1) * num_training_steps,
                       root.optimizer.iterations.numpy())
  def testUsageGraph(self):
    """Expected usage when graph building.

    Each training continuation rebuilds the graph, restores the latest
    checkpoint via initialize_or_restore, trains, and saves; the optimizer
    iteration count and save counter must accumulate across continuations.
    """
    with context.graph_mode():
      num_training_steps = 10
      checkpoint_directory = self.get_temp_dir()
      checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
      for training_continuation in range(3):
        with ops.Graph().as_default():
          model = MyModel()
          optimizer = adam.Adam(0.001)
          root = trackable_utils.CheckpointV1(
              optimizer=optimizer, model=model)
          input_value = constant_op.constant([[3.]])
          with backprop.GradientTape() as tape:
            loss = model(input_value)
            variables = model.trainable_variables
          gradients = tape.gradient(loss, variables)
          train_op = optimizer.apply_gradients(zip(gradients, variables))
          checkpoint_path = checkpoint_management.latest_checkpoint(
              checkpoint_directory)
          with self.session(graph=ops.get_default_graph()) as session:
            status = root.restore(save_path=checkpoint_path)
            status.initialize_or_restore(session=session)
            if checkpoint_path is None:
              # First continuation: nothing to restore, so the status must
              # not claim a match.
              self.assertEqual(0, training_continuation)
              with self.assertRaises(AssertionError):
                status.assert_consumed()
              with self.assertRaises(AssertionError):
                status.assert_existing_objects_matched()
            else:
              status.assert_consumed()
              status.assert_existing_objects_matched()
            for _ in range(num_training_steps):
              session.run(train_op)
            root.save(file_prefix=checkpoint_prefix, session=session)
            self.assertEqual((training_continuation + 1) * num_training_steps,
                             session.run(root.optimizer.iterations))
            self.assertEqual(training_continuation + 1,
                             session.run(root.save_counter))
  @test_util.run_in_graph_and_eager_modes
  def testAgnosticUsage(self):
    """Graph/eager agnostic usage."""
    # Does create garbage when executing eagerly due to ops.Graph() creation.
    num_training_steps = 10
    checkpoint_directory = self.get_temp_dir()
    def _train_fn(model, input_value):
      # One optimizer step; `optimizer` is bound from the enclosing loop.
      with backprop.GradientTape() as tape:
        loss = model(input_value)
        variables = model.trainable_variables
      gradients = tape.gradient(loss, variables)
      return optimizer.apply_gradients(zip(gradients, variables))
    for training_continuation in range(3):
      with test_util.device(use_gpu=True):
        model = MyModel()
        optimizer = adam.Adam(0.001)
        root = trackable_utils.Checkpoint(
            optimizer=optimizer, model=model)
        manager = checkpoint_management.CheckpointManager(
            root, checkpoint_directory, max_to_keep=1)
        status = root.restore(save_path=manager.latest_checkpoint)
        input_value = constant_op.constant([[3.]])
        train_fn = functools.partial(_train_fn, model, input_value)
        if not context.executing_eagerly():
          # Graph mode: build the train op once, run it via self.evaluate.
          train_fn = functools.partial(self.evaluate, train_fn())
        status.initialize_or_restore()
        for _ in range(num_training_steps):
          train_fn()
        manager.save()
        self.assertEqual((training_continuation + 1) * num_training_steps,
                         self.evaluate(root.optimizer.iterations))
        self.assertEqual(training_continuation + 1,
                         self.evaluate(root.save_counter))
  @test_util.run_in_graph_and_eager_modes
  def testFreezing(self):
    """A frozen saver's checkpoints interoperate with object-based restore."""
    with test_util.use_gpu():
      # Save an object-based checkpoint using a frozen saver
      directory = self.get_temp_dir()
      prefix = os.path.join(directory, "ckpt")
      v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
      checkpoint = trackable_utils.Checkpoint(v=v)
      self.evaluate(v.assign(3))
      # Create the save counter so assert_consumed doesn't complain about it not
      # existing in the checkpoint on restore.
      self.evaluate(checkpoint.save_counter.assign(12))
      saver = trackable_utils.frozen_saver(checkpoint)
      with ops.device("cpu:0"):
        prefix_tensor = constant_op.constant(prefix)
      self.evaluate(saver.save(prefix_tensor))
      self.evaluate(v.assign(10))
      # Use the frozen saver to restore the same object graph
      self.evaluate(saver.restore(prefix_tensor))
      self.assertEqual(3, self.evaluate(v))
      # Restore using another frozen saver on an identical object graph
      del v, checkpoint, saver
      v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
      checkpoint = trackable_utils.Checkpoint(v=v)
      saver = trackable_utils.frozen_saver(checkpoint)
      self.evaluate(saver.restore(prefix_tensor))
      self.assertEqual(3, self.evaluate(v))
      # Restore as an object-based checkpoint
      del v, checkpoint, saver
      checkpoint = trackable_utils.Checkpoint()
      status = checkpoint.restore(prefix)
      v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
      if context.executing_eagerly():
        self.assertEqual(12, self.evaluate(checkpoint.save_counter))
        self.assertEqual(0, self.evaluate(v))
      checkpoint.v = v
      status.assert_consumed().run_restore_ops()
      self.assertEqual(3, self.evaluate(v))
      self.assertEqual(12, self.evaluate(checkpoint.save_counter))
  @test_util.run_in_graph_and_eager_modes
  def testCustomNumbering(self):
    """Checkpoint.write honors a caller-supplied numbering scheme."""
    directory = self.get_temp_dir()
    prefix = os.path.join(directory, "ckpt")
    step = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
    checkpoint = trackable_utils.Checkpoint(step=step)
    self.evaluate(step.initializer)
    for i in range(5):
      # Unlike save(), write() uses exactly the path we pass it, so the
      # suffix should track our own step counter (0, 2, 4, ...).
      path = checkpoint.write("%s-%d" % (prefix, self.evaluate(step)))
      expected_suffix = "-%d" % (2 * i,)
      if not path.endswith(expected_suffix):
        self.fail("%s should have suffix %s" % (path, expected_suffix))
      self.evaluate(step.assign_add(2))
  def testPartialRestoreWarningObject(self):
    """Deleting a partially-restored checkpoint logs the unused nodes.

    The warning fires at garbage collection of the restore status, and must
    name the unrestored object and slot variable but not the restored one.
    """
    with context.eager_mode():
      optimizer = adam.Adam(0.0)
      original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.),
                                                 v2=variables_lib.Variable(3.),
                                                 optimizer=optimizer)
      # Create a slot variable to save
      optimizer.minimize(original_root.v1.read_value, [original_root.v1])
      prefix = os.path.join(self.get_temp_dir(), "ckpt")
      save_path = original_root.save(prefix)
      partial_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(0.))
      weak_partial_root = weakref.ref(partial_root)
      weak_v1 = weakref.ref(partial_root.v1)
      partial_root.restore(save_path)
      self.assertEqual(2., partial_root.v1.numpy())
      with test.mock.patch.object(logging, "warning") as mock_log:
        del partial_root
        self.assertIsNone(weak_partial_root())
        self.assertIsNone(weak_v1())
        messages = str(mock_log.call_args_list)
      # v2 and the optimizer's "m" slot for v1 were never matched; v1 was.
      self.assertIn("(root).v2'", messages)
      self.assertIn("(root).optimizer's state 'm' for (root).v1", messages)
      self.assertNotIn("(root).v1'", messages)
      self.assertIn("expect_partial()", messages)
  def testPartialRestoreWarningAttribute(self):
    """Unused checkpoint attributes are warned about on restore.

    Here v1 maps to a bare Trackable with no variable attribute, so its
    value cannot be restored; the warning should mention v1 only.
    """
    with context.eager_mode():
      original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.),
                                                 v2=variables_lib.Variable(3.))
      prefix = os.path.join(self.get_temp_dir(), "ckpt")
      save_path = original_root.save(prefix)
      partial_root = trackable_utils.Checkpoint(v1=base.Trackable(),
                                                v2=variables_lib.Variable(0.))
      weak_partial_root = weakref.ref(partial_root)
      with test.mock.patch.object(logging, "warning") as mock_log:
        # Note: Unlike in testPartialRestoreWarningObject, the warning actually
        # prints immediately here, since all of the objects have been created
        # and there's no deferred restoration sitting around.
        partial_root.restore(save_path)
        self.assertEqual(3., partial_root.v2.numpy())
        del partial_root
        self.assertIsNone(weak_partial_root())
        messages = str(mock_log.call_args_list)
      self.assertIn("(root).v1", messages)
      self.assertNotIn("(root).v2", messages)
      self.assertIn("expect_partial()", messages)
  def testAttributeException(self):
    """assert_consumed raises and names the attribute that went unused."""
    with context.eager_mode():
      original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.),
                                                 v2=variables_lib.Variable(3.))
      prefix = os.path.join(self.get_temp_dir(), "ckpt")
      save_path = original_root.save(prefix)
      # v1 is a bare Trackable, so the saved v1 value has nowhere to go.
      partial_root = trackable_utils.Checkpoint(v1=base.Trackable(),
                                                v2=variables_lib.Variable(0.))
      status = partial_root.restore(save_path)
      with self.assertRaisesRegexp(
          AssertionError,
          r"Unused attributes(.|\n)*\(root\).v1"):
        status.assert_consumed()
  def testSilencePartialWarning(self):
    """expect_partial() suppresses the partial-restore warning entirely."""
    with context.eager_mode():
      original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.),
                                                 v2=variables_lib.Variable(3.))
      prefix = os.path.join(self.get_temp_dir(), "ckpt")
      save_path = original_root.save(prefix)
      # Restore into a root missing v2, but explicitly opt in to a partial
      # restore.
      partial_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(0.))
      weak_partial_root = weakref.ref(partial_root)
      weak_v1 = weakref.ref(partial_root.v1)
      partial_root.restore(save_path).expect_partial()
      self.assertEqual(2., partial_root.v1.numpy())
      with test.mock.patch.object(logging, "warning") as mock_log:
        del partial_root
        self.assertIsNone(weak_partial_root())
        self.assertIsNone(weak_v1())
        self.assertEmpty(mock_log.call_args_list)
  # pylint: disable=cell-var-from-loop
  @test_util.run_in_graph_and_eager_modes
  @test_util.run_v1_only("b/120545219")
  def testWithDefun(self):
    """Checkpointing works when the model is called inside a tf.function."""
    num_training_steps = 2
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    for training_continuation in range(3):
      with test_util.device(use_gpu=True):
        model = MyModel()
        # Don't actually train so we can test variable values
        optimizer = adam.Adam(0.)
        root = trackable_utils.Checkpoint(
            optimizer=optimizer, model=model)
        checkpoint_path = checkpoint_management.latest_checkpoint(
            checkpoint_directory)
        status = root.restore(save_path=checkpoint_path)
        def train_fn():
          @def_function.function
          def _call_model(x):
            return model(x)
          with backprop.GradientTape() as tape:
            loss = _call_model(constant_op.constant([[3.]]))
          gradients = tape.gradient(loss, model.variables)
          return optimizer.apply_gradients(zip(gradients, model.variables))
        if not context.executing_eagerly():
          train_fn = functools.partial(
              self.evaluate, train_fn())
        status.initialize_or_restore()
        for _ in range(num_training_steps):
          train_fn()
        if training_continuation > 0:
          # Learning rate is zero, so the value pinned below must survive
          # the "training" steps and the restore.
          status.assert_consumed()
          self.assertAllClose([[42.]], self.evaluate(model.variables[0]))
        else:
          self.evaluate(model.variables[0].assign([[42.]]))
        root.save(file_prefix=checkpoint_prefix)
        self.assertEqual((training_continuation + 1) * num_training_steps,
                         self.evaluate(optimizer.iterations))
        self.assertEqual(training_continuation + 1,
                         self.evaluate(root.save_counter))
  # pylint: enable=cell-var-from-loop
def _get_checkpoint_name(self, name):
root = tracking.AutoTrackable()
trackable_utils.add_variable(
root, name=name, shape=[1, 2], dtype=dtypes.float64)
(named_variable,), _, _ = graph_view.ObjectGraphView(
root).serialize_object_graph()
with ops.name_scope("root/" + named_variable.name):
pass # Make sure we can use this as an op name if we prefix it.
return named_variable.name
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testVariableNameEscaping(self):
suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
self.assertEqual(r"a.Sb.Sc" + suffix, self._get_checkpoint_name(r"a/b/c"))
self.assertEqual(r"b" + suffix, self._get_checkpoint_name(r"b"))
self.assertEqual(r"c.S" + suffix, self._get_checkpoint_name(r"c/"))
self.assertEqual(r"d.S..S" + suffix, self._get_checkpoint_name(r"d/.S"))
self.assertEqual(r"d.S..ATTRIBUTES.Sf" + suffix,
self._get_checkpoint_name(r"d/.ATTRIBUTES/f"))
  @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
  def testNumberedPath(self):
    """Checkpoint keys include the attribute path from root to the variable."""
    root = tracking.AutoTrackable()
    leaf = tracking.AutoTrackable()
    root.leaf = leaf
    trackable_utils.add_variable(leaf, name="v", shape=[])
    (named_variable,), _, _ = graph_view.ObjectGraphView(
        root).serialize_object_graph()
    self.assertEqual(r"leaf/v/.ATTRIBUTES/VARIABLE_VALUE", named_variable.name)
  @test_util.run_in_graph_and_eager_modes
  def testLocalNameValidation(self):
    """Reserved dependency names are escaped rather than rejected."""
    root = tracking.AutoTrackable()
    leaf = tracking.AutoTrackable()
    # Dots are escaped, which avoids conflicts with reserved names.
    root._track_trackable(leaf, name=".ATTRIBUTES")
    trackable_utils.add_variable(trackable=leaf, name="a", shape=[])
    (named_variable,), _, _ = graph_view.ObjectGraphView(
        root).serialize_object_graph()
    self.assertEqual("..ATTRIBUTES/a/.ATTRIBUTES/VARIABLE_VALUE",
                     named_variable.name)
  def testAnonymousVarsInInit(self):
    """Variables created directly in __init__ (not via add_variable) are
    tracked and can be saved and trained."""
    class Model(training.Model):
      def __init__(self):
        super(Model, self).__init__()
        self.w = resource_variable_ops.ResourceVariable(0.0)
        self.b = resource_variable_ops.ResourceVariable(0.0)
        self.vars = [self.w, self.b]
      def call(self, x):
        return x * self.w + self.b
    with context.eager_mode():
      model = Model()
      optimizer = adam.Adam(learning_rate=0.05)
      checkpoint_directory = self.get_temp_dir()
      checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
      checkpoint = trackable_utils.Checkpoint(
          model=model, optimizer=optimizer)
      for _ in range(2):
        checkpoint.save(checkpoint_prefix)
        with backprop.GradientTape() as tape:
          loss = (constant_op.constant(1.)
                  - model(constant_op.constant(1.))) ** 2
        grad = tape.gradient(loss, model.vars)
        optimizer.apply_gradients(
            [(g, v) for g, v in zip(grad, model.vars)])
  @test_util.run_in_graph_and_eager_modes
  def testLateDependencyTracking(self):
    """A dependency added after restore() still receives its saved value."""
    class Dependency(tracking.AutoTrackable):
      def build(self):
        self.var = trackable_utils.add_variable(
            self, "var", initializer=0.)
    class LateDependencies(trackable_utils.Checkpoint):
      def add_dep(self):
        self.dep = Dependency()
        self.dep.build()
    original = LateDependencies()
    original.add_dep()
    self.evaluate(state_ops.assign(original.dep.var, 123.))
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    save_path = original.save(checkpoint_prefix)
    load_into = LateDependencies()
    status = load_into.restore(save_path)
    # Before the dependency exists, the restore cannot be complete.
    status.assert_existing_objects_matched()
    with self.assertRaises(AssertionError):
      status.assert_consumed()
    load_into.add_dep()
    status.assert_consumed()
    status.assert_existing_objects_matched().run_restore_ops()
    self.assertEqual(123., self.evaluate(load_into.dep.var))
  @test_util.run_in_graph_and_eager_modes
  def testDepAfterVar(self):
    """Restore works when a dependency's variable exists before the
    dependency itself is attached to the root."""
    class Dependency(tracking.AutoTrackable):
      def build(self):
        self.var = trackable_utils.add_variable(
            self, "var", initializer=0.)
    class DepAfterVar(trackable_utils.Checkpoint):
      def add_dep(self):
        dep = Dependency()
        dep.build()
        self.dep = dep
    dep_after_var = DepAfterVar()
    dep_after_var.add_dep()
    self.evaluate(state_ops.assign(dep_after_var.dep.var, -14.))
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    save_path = dep_after_var.save(checkpoint_prefix)
    loaded_dep_after_var = DepAfterVar()
    status = loaded_dep_after_var.restore(save_path)
    loaded_dep_after_var.add_dep()
    status.assert_consumed()
    status.run_restore_ops()
    self.assertEqual(-14., self.evaluate(loaded_dep_after_var.dep.var))
  @test_util.run_in_graph_and_eager_modes
  def testDeferredSlotRestoration(self):
    """Optimizer slot values restore lazily, after the slot variable exists.

    Loads a slot-containing checkpoint and then a slot-free one on top of it;
    the non-slot value comes from the second checkpoint while the slot value
    still comes from the first once the slot variable is finally created.
    """
    checkpoint_directory = self.get_temp_dir()
    root = trackable_utils.Checkpoint()
    root.var = trackable_utils.add_variable(
        root, name="var", initializer=0.)
    optimizer = adam.Adam(0.1)
    variables = [root.var]
    gradients = [1.]
    train_op = optimizer.apply_gradients(zip(gradients, variables))
    # Note that `optimizer` has not been added as a dependency of
    # `root`. Create a one-off grouping so that slot variables for `root.var`
    # get initialized too.
    self.evaluate(trackable_utils.gather_initializers(
        trackable_utils.Checkpoint(root=root, optimizer=optimizer)))
    self.evaluate(train_op)
    self.evaluate(state_ops.assign(root.var, 12.))
    no_slots_path = root.save(os.path.join(checkpoint_directory, "no_slots"))
    root.optimizer = optimizer
    self.evaluate(state_ops.assign(root.var, 13.))
    self.evaluate(state_ops.assign(
        optimizer.get_slot(slot_name="m", var=root.var),
        14.))
    slots_path = root.save(os.path.join(checkpoint_directory, "with_slots"))
    new_root = trackable_utils.Checkpoint()
    # Load the slot-containing checkpoint (deferred), then immediately overwrite
    # the non-slot variable (also deferred).
    slot_status = new_root.restore(slots_path)
    no_slot_status = new_root.restore(no_slots_path)
    with self.assertRaises(AssertionError):
      no_slot_status.assert_consumed()
    new_root.var = trackable_utils.add_variable(
        new_root, name="var", shape=[])
    no_slot_status.assert_consumed()
    no_slot_status.run_restore_ops()
    self.assertEqual(12., self.evaluate(new_root.var))
    new_root.optimizer = adam.Adam(0.1)
    slot_status.assert_existing_objects_matched()
    if not context.executing_eagerly():
      with self.assertRaisesRegexp(AssertionError, "Unresolved object"):
        slot_status.assert_consumed()
    self.assertEqual(12., self.evaluate(new_root.var))
    if context.executing_eagerly():
      # Slot variables are only created with restoring initializers when
      # executing eagerly.
      self.assertEqual(14., self.evaluate(
          new_root.optimizer.get_slot(slot_name="m", var=new_root.var)))
    else:
      # Slot variables are not created eagerly when graph building.
      with self.assertRaises(KeyError):
        new_root.optimizer.get_slot(slot_name="m", var=new_root.var)
    variables = [new_root.var]
    gradients = [1.]
    train_op = new_root.optimizer.apply_gradients(zip(gradients, variables))
    # The slot variable now exists; restore() didn't create it, but we should
    # now have a restore op for it.
    slot_status.run_restore_ops()
    if not context.executing_eagerly():
      # The train op hasn't run when graph building, so the slot variable has
      # its restored value. It has run in eager, so the value will be different.
      self.assertEqual(14., self.evaluate(
          new_root.optimizer.get_slot(slot_name="m", var=new_root.var)))
    self.evaluate(train_op)
    slot_status.assert_consumed()
@test_util.run_in_graph_and_eager_modes
def testOverlappingRestores(self):
    """Two deferred restores targeting the same variable: last restore wins."""
    checkpoint_directory = self.get_temp_dir()
    save_root = trackable_utils.Checkpoint()
    save_root.dep = tracking.AutoTrackable()
    save_root.dep.var = trackable_utils.add_variable(
        save_root.dep, name="var", initializer=0.)
    # Save two checkpoints with different values (12. then 13.).
    self.evaluate(state_ops.assign(save_root.dep.var, 12.))
    first_path = save_root.save(os.path.join(checkpoint_directory, "first"))
    self.evaluate(state_ops.assign(save_root.dep.var, 13.))
    second_path = save_root.save(os.path.join(checkpoint_directory, "second"))
    # Start both restores before the variable exists (deferred restoration).
    first_root = trackable_utils.Checkpoint()
    second_root = trackable_utils.Checkpoint()
    first_status = first_root.restore(first_path)
    second_status = second_root.restore(second_path)
    load_dep = tracking.AutoTrackable()
    load_dep.var = trackable_utils.add_variable(
        load_dep, name="var", shape=[])
    first_root.dep = load_dep
    first_status.assert_consumed()
    first_status.run_restore_ops()
    self.assertEqual(12., self.evaluate(load_dep.var))
    second_root.dep = load_dep
    second_status.assert_consumed()
    second_status.run_restore_ops()
    self.assertEqual(13., self.evaluate(load_dep.var))
    # Try again with the order of the restore() reversed. The last restore
    # determines the final value.
    first_root = trackable_utils.Checkpoint()
    second_root = trackable_utils.Checkpoint()
    second_status = second_root.restore(second_path)
    first_status = first_root.restore(first_path)
    load_dep = tracking.AutoTrackable()
    load_dep.var = trackable_utils.add_variable(
        load_dep, name="var", shape=[])
    first_root.dep = load_dep
    first_status.assert_consumed()
    first_status.run_restore_ops()
    self.assertEqual(12., self.evaluate(load_dep.var))
    second_root.dep = load_dep
    second_status.assert_consumed()
    second_status.run_restore_ops()
    # The first restore was requested last, so its value (12.) sticks.
    self.assertEqual(12., self.evaluate(load_dep.var))
@test_util.run_in_graph_and_eager_modes
def testAmbiguousLoad(self):
    """A single saved object may not be split into two objects on load."""
    # Not OK to split one checkpoint object into two
    checkpoint_directory = self.get_temp_dir()
    save_root = trackable_utils.Checkpoint()
    save_root.dep_one = tracking.AutoTrackable()
    save_root.dep_two = tracking.AutoTrackable()
    # One shared object reachable through two paths in the saved graph.
    dep_three = tracking.AutoTrackable()
    save_root.dep_one.dep_three = dep_three
    save_root.dep_two.dep_three = dep_three
    trackable_utils.add_variable(dep_three, name="var", initializer=0.)
    self.evaluate(trackable_utils.gather_initializers(save_root))
    save_path = save_root.save(os.path.join(checkpoint_directory, "ckpt"))
    load_root = trackable_utils.Checkpoint()
    status = load_root.restore(save_path)
    load_root.dep_one = tracking.AutoTrackable()
    load_root.dep_two = tracking.AutoTrackable()
    # Distinct objects on load where the checkpoint had one shared object.
    load_root.dep_one.dep_three = tracking.AutoTrackable()
    load_root.dep_two.dep_three = tracking.AutoTrackable()
    trackable_utils.add_variable(
        load_root.dep_one.dep_three, name="var", initializer=0.)
    with self.assertRaises(AssertionError):
        status.assert_consumed()
    with self.assertRaises(AssertionError):
        status.assert_existing_objects_matched()
@test_util.run_in_graph_and_eager_modes
def testObjectsCombined(self):
    """Two saved objects may both map onto one Python object on load."""
    # Currently fine to load two checkpoint objects into one Python object
    checkpoint_directory = self.get_temp_dir()
    save_root = trackable_utils.Checkpoint()
    save_root.dep_one = tracking.AutoTrackable()
    save_root.dep_two = tracking.AutoTrackable()
    trackable_utils.add_variable(
        save_root.dep_one, name="var1", initializer=32., dtype=dtypes.float64)
    trackable_utils.add_variable(
        save_root.dep_two, name="var2", initializer=64., dtype=dtypes.float64)
    self.evaluate(trackable_utils.gather_initializers(save_root))
    save_path = save_root.save(os.path.join(checkpoint_directory, "ckpt"))
    load_root = trackable_utils.Checkpoint()
    load_root.dep_one = tracking.AutoTrackable()
    # Alias: both saved dependencies resolve to the same loaded object.
    load_root.dep_two = load_root.dep_one
    v1 = trackable_utils.add_variable(
        load_root.dep_one, name="var1", shape=[], dtype=dtypes.float64)
    v2 = trackable_utils.add_variable(
        load_root.dep_one, name="var2", shape=[], dtype=dtypes.float64)
    status = load_root.restore(
        save_path).assert_consumed().assert_existing_objects_matched()
    status.run_restore_ops()
    self.assertEqual(32., self.evaluate(v1))
    self.assertEqual(64., self.evaluate(v2))
@test_util.run_in_graph_and_eager_modes
def testDependencyLoop(self):
    """Saving/restoring works across a reference cycle between two objects."""
    # Note: this test creates garbage during eager execution because it
    # purposefully creates a reference cycle.
    first = trackable_utils.Checkpoint()
    second = trackable_utils.Checkpoint()
    first.second = second
    second.first = first
    first.v = trackable_utils.add_variable(
        first, "v1", initializer=[3., 1., 4.])
    second.v = trackable_utils.add_variable(
        second, "v2", initializer=[1., 1., 2., 3.])
    self.evaluate(trackable_utils.gather_initializers(first))
    checkpoint_directory = self.get_temp_dir()
    save_path = first.save(os.path.join(checkpoint_directory, "ckpt"))
    # Test deferred loading
    first_load = trackable_utils.Checkpoint()
    status = first_load.restore(save_path)
    second_load = tracking.AutoTrackable()
    first_load.second = second_load
    second_load.first = first_load
    # Variables not yet created, so the restore cannot be complete.
    with self.assertRaises(AssertionError):
        status.assert_consumed()
    first_load.v = trackable_utils.add_variable(
        first_load, "v1", shape=[3])
    second_load.v = trackable_utils.add_variable(
        second_load, "v2", shape=[4])
    status.assert_consumed()
    status.run_restore_ops()
    self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v))
    self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v))
    # Test loading when variables have already been created
    self.evaluate(first_load.v.assign([2., 7., 1.]))
    self.assertAllEqual([2., 7., 1.], self.evaluate(first_load.v))
    self.evaluate(second_load.v.assign([2., 7., 1., 8.]))
    self.assertAllEqual([2., 7., 1., 8.], self.evaluate(second_load.v))
    status = first_load.restore(save_path).assert_consumed()
    status.run_restore_ops()
    self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v))
    self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v))
@test_util.run_in_graph_and_eager_modes
def testRestoreOnAssign(self):
    """Attaching a variable after restore() still restores its saved value."""
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    first = trackable_utils.Checkpoint()
    first.var1 = variables_lib.Variable(0., name="outside_var")
    first.var2 = variables_lib.Variable(0., name="blah")
    self.evaluate(first.var1.assign(4.))
    self.evaluate(first.var2.assign(8.))
    save_path = first.save(checkpoint_prefix)
    second = trackable_utils.Checkpoint()
    second.var2 = variables_lib.Variable(0., name="blah")
    status = second.restore(save_path)
    # Created but not yet tracked by `second`, so not restored yet.
    recreated_var1 = variables_lib.Variable(0., name="outside_var")
    status.run_restore_ops()
    self.assertEqual(8., self.evaluate(second.var2))
    self.evaluate(recreated_var1.assign(-2.))
    self.assertEqual(-2., self.evaluate(recreated_var1))
    # Assigning the attribute queues a restore for the newly tracked variable.
    second.var1 = recreated_var1
    status.run_restore_ops()
    self.assertEqual(4., self.evaluate(recreated_var1))
def testManySavesGraph(self):
    """Saves after the first should not modify the graph."""
    with context.graph_mode():
        graph = ops.Graph()
        with graph.as_default(), self.session(graph):
            checkpoint_directory = self.get_temp_dir()
            checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
            obj = trackable_utils.Checkpoint()
            obj.var = variables_lib.Variable(0., name="v")
            obj.opt = adam.Adam(0.1)
            variables = [obj.var]
            gradients = [1.]
            obj.opt.apply_gradients(zip(gradients, variables))
            self.evaluate(trackable_utils.gather_initializers(obj))
            obj.save(checkpoint_prefix)
            # After finalize(), any attempt to add ops raises — so a second
            # save passing proves saving reuses the existing graph ops.
            graph.finalize()
            obj.save(checkpoint_prefix)
@test_util.run_in_graph_and_eager_modes
def testCheckpointState(self):
    """Repeated saves keep every checkpoint on disk (no deletion by default)."""
    # No checkpoints are deleted by default
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    obj = tracking.AutoTrackable()
    obj.var = variable_scope.get_variable(name="v", initializer=0.)
    self.evaluate(trackable_utils.gather_initializers(obj))
    saver = trackable_utils.Checkpoint(obj=obj)
    for _ in range(10):
        saver.save(checkpoint_prefix)
    # Expect the state file plus one index file per numbered checkpoint.
    expected_filenames = ["checkpoint"]
    for checkpoint_number in range(1, 11):
        expected_filenames.append("ckpt-%d.index" % (checkpoint_number,))
    self.assertEmpty(
        set(expected_filenames)
        - set(os.listdir(checkpoint_directory)))
@test_util.run_in_graph_and_eager_modes
def testCheckpointStateChangingVarList(self):
    """Checkpoint management stays consistent while the variable set grows."""
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    obj = tracking.AutoTrackable()
    obj.var = variable_scope.get_variable(name="v", initializer=0.)
    self.evaluate(trackable_utils.gather_initializers(obj))
    checkpoint = trackable_utils.Checkpoint(obj=obj)
    looped_variables = []
    # Add one new tracked variable per save, so every checkpoint has a
    # different variable list.
    for iteration in range(10):
        new_variable = resource_variable_ops.ResourceVariable(iteration)
        self.evaluate(new_variable.initializer)
        setattr(checkpoint, "var_%d" % iteration, new_variable)
        checkpoint.save(checkpoint_prefix)
        looped_variables.append(new_variable)
    expected_filenames = ["checkpoint"]
    # We've copied the saver each time, but checkpoint management should still
    # be consistent. Nothing gets deleted.
    for checkpoint_number in range(1, 11):
        expected_filenames.append("ckpt-%d.index" % (checkpoint_number,))
    self.assertEmpty(
        set(expected_filenames)
        - set(os.listdir(checkpoint_directory)))
    self.assertEqual(
        checkpoint_prefix + "-10",
        checkpoint_management.latest_checkpoint(checkpoint_directory))
    # The checkpoint list only contains the most recent checkpoint, but they're
    # all on disk. This means we won't eventually run into proto size limits.
    self.assertEqual(
        [checkpoint_prefix + "-10"],
        (checkpoint_management.get_checkpoint_state(checkpoint_directory)
         .all_model_checkpoint_paths))
    for v in looped_variables:
        self.evaluate(v.assign(314))
    # Checkpoint 6 predates var_6..var_9, so those keep the sentinel 314.
    checkpoint.restore(checkpoint_prefix + "-6").run_restore_ops()
    self.assertEqual(314, self.evaluate(checkpoint.var_9))
    self.assertEqual(314, self.evaluate(checkpoint.var_8))
    self.assertEqual(314, self.evaluate(checkpoint.var_6))
    self.assertEqual(5, self.evaluate(checkpoint.var_5))
    self.assertEqual(1, self.evaluate(checkpoint.var_1))
    self.assertEqual(0, self.evaluate(checkpoint.var_0))
    checkpoint.restore(checkpoint_prefix + "-10").run_restore_ops()
    self.assertEqual(9, self.evaluate(checkpoint.var_9))
    self.assertEqual(8, self.evaluate(checkpoint.var_8))
    self.assertEqual(1, self.evaluate(checkpoint.var_1))
    self.assertEqual(0, self.evaluate(checkpoint.var_0))
def testManyRestoresGraph(self):
    """Restores after the first should not modify the graph."""
    with context.graph_mode():
        graph = ops.Graph()
        with graph.as_default(), self.session(graph):
            checkpoint_directory = self.get_temp_dir()
            checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
            obj = trackable_utils.Checkpoint()
            obj.var = variables_lib.Variable(0., name="v")
            obj.opt = adam.Adam(0.1)
            variables = [obj.var]
            gradients = [1.]
            obj.opt.apply_gradients(zip(gradients, variables))
            self.evaluate(trackable_utils.gather_initializers(obj))
            save_path = obj.save(checkpoint_prefix)
            obj.restore(save_path)
            # A second restore after finalize() must reuse existing ops.
            graph.finalize()
            obj.restore(save_path)
@test_util.run_in_graph_and_eager_modes
def test_sequential(self):
    """Checkpointing a Keras Sequential model, including deferred layer builds."""
    model = sequential.Sequential()
    checkpoint = trackable_utils.Checkpoint(model=model)
    model.add(core.Dense(4))
    second_dense = core.Dense(5)
    model.add(second_dense)
    model(constant_op.constant([[1.]]))  # build the layers' variables
    checkpoint.restore(None).initialize_or_restore()
    self.evaluate(second_dense.bias.assign(
        constant_op.constant([1., 2., 3., 4., 5.])))
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    save_path = checkpoint.save(checkpoint_prefix)
    self.evaluate(second_dense.bias.assign(
        constant_op.constant([5., 6., 7., 8., 9.])))
    checkpoint.restore(save_path).assert_consumed().run_restore_ops()
    self.assertAllEqual([1., 2., 3., 4., 5.], self.evaluate(second_dense.bias))
    # Deferred case: restore before the layers (and their variables) exist.
    deferred_sequential = sequential.Sequential()
    deferred_sequential_checkpoint = trackable_utils.Checkpoint(
        model=deferred_sequential)
    status = deferred_sequential_checkpoint.restore(save_path)
    deferred_sequential.add(core.Dense(4))
    deferred_sequential(constant_op.constant([[1.]]))
    deferred_second_dense = core.Dense(5)
    deferred_sequential.add(deferred_second_dense)
    deferred_sequential(constant_op.constant([[1.]]))
    status.run_restore_ops()
    self.assertAllEqual([1., 2., 3., 4., 5.],
                        self.evaluate(deferred_second_dense.bias))
@test_util.run_in_graph_and_eager_modes
def test_initialize_if_not_restoring(self):
    """initialize_or_restore() initializes when there is nothing to restore,
    and initialization must not clobber values restored later."""
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    optimizer_only_prefix = os.path.join(checkpoint_directory, "opt")
    with test_util.device(use_gpu=True):
        model = MyModel()
        optimizer = adam.Adam(0.001)
        root = trackable_utils.Checkpoint(
            model=model)  # Do not save the optimizer with the checkpoint.
        optimizer_checkpoint = trackable_utils.Checkpoint(
            optimizer=optimizer)

        # Directory is empty at this point, so this yields None (fresh start).
        checkpoint_path = checkpoint_management.latest_checkpoint(
            checkpoint_directory)
        status = root.restore(save_path=checkpoint_path)
        input_value = constant_op.constant([[3.]])

        def train_fn():
            with backprop.GradientTape() as tape:
                loss = model(input_value)
            variables = model.trainable_variables
            gradients = tape.gradient(loss, variables)
            return optimizer.apply_gradients(zip(gradients, variables))

        if not context.executing_eagerly():
            train_fn = functools.partial(self.evaluate, train_fn())
        status.initialize_or_restore()
        # TODO(tanzheny): Add hyper variables to .variables(), and set them with
        # set_weights etc.
        variables_not_in_the_variables_property = [
            obj for obj in optimizer._hyper.values()
            if isinstance(obj, variables_lib.Variable)]
        self.evaluate([v.initializer for v
                       in optimizer.variables()
                       + variables_not_in_the_variables_property])
        train_fn()
        model_save_path = root.save(file_prefix=checkpoint_prefix)
        self.evaluate(optimizer.beta_1.assign(42.))
        optimizer_save_path = optimizer_checkpoint.save(optimizer_only_prefix)
    del train_fn

    # Restore into a graph with the optimizer
    with test_util.device(use_gpu=True):
        model = MyModel()
        optimizer = adam.Adam(0.001)
        root = trackable_utils.Checkpoint(
            optimizer=optimizer, model=model)
        status = root.restore(save_path=model_save_path)
        input_value = constant_op.constant([[3.]])

        def train_fn1():
            with backprop.GradientTape() as tape:
                loss = model(input_value)
            variables = model.trainable_variables
            gradients = tape.gradient(loss, variables)
            return optimizer.apply_gradients(zip(gradients, variables))

        if not context.executing_eagerly():
            train_fn1 = functools.partial(self.evaluate, train_fn1())
        status.initialize_or_restore()
        train_fn1()
        # The model-only checkpoint has no optimizer state, so the status
        # cannot be fully matched or consumed.
        with self.assertRaises(AssertionError):
            status.assert_existing_objects_matched()
        with self.assertRaises(AssertionError):
            status.assert_consumed()
    del train_fn1

    # Make sure initialization doesn't clobber later restores
    with test_util.device(use_gpu=True):
        model = MyModel()
        optimizer = adam.Adam(0.001, beta_1=1.0)
        root = trackable_utils.Checkpoint(
            optimizer=optimizer, model=model)
        opt_root = trackable_utils.Checkpoint(
            optimizer=optimizer)
        status = root.restore(save_path=model_save_path)
        init_only_optimizer_status = opt_root.restore(save_path=None)
        optimizer_status = opt_root.restore(save_path=optimizer_save_path)
        input_value = constant_op.constant([[3.]])

        def train_fn2():
            with backprop.GradientTape() as tape:
                loss = model(input_value)
            variables = model.trainable_variables
            gradients = tape.gradient(loss, variables)
            return optimizer.apply_gradients(zip(gradients, variables))

        if not context.executing_eagerly():
            train_fn2 = functools.partial(self.evaluate, train_fn2())
        optimizer_status.run_restore_ops()
        status.initialize_or_restore()
        init_only_optimizer_status.initialize_or_restore()
        train_fn2()
        # beta_1 keeps the restored 42., not the constructor's 1.0.
        self.assertEqual(42., self.evaluate(optimizer.beta_1))
@test_util.run_in_graph_and_eager_modes
def test_restore_after_adding_empty_trackable_data_structure(self):
    """Restoring succeeds when the loaded object has extra data structures
    that the checkpoint does not contain."""
    model = NonLayerTrackable()
    checkpoint = trackable_utils.Checkpoint(model=model)
    checkpoint.restore(None).initialize_or_restore()
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    save_path = checkpoint.save(checkpoint_prefix)
    del model, checkpoint

    model = NonLayerTrackable()
    model.dict = {"a": 1}
    # NOTE(review): despite the name, this assigns a dict, not a list —
    # presumably intentional for this test; confirm.
    model.list = {"b": 1}
    checkpoint = trackable_utils.Checkpoint(model=model)
    load_status = checkpoint.restore(save_path)
    # Only existing-object matching is asserted; extra structures are allowed.
    load_status.assert_existing_objects_matched().run_restore_ops()
@test_util.run_in_graph_and_eager_modes
def test_write_checkpoint_from_function(self):
    """Checkpoint.write() works when called from inside a tf.function."""
    checkpoint_prefix = os.path.join(self.get_temp_dir(), "ckpt")
    save_checkpoint = trackable_utils.Checkpoint(
        v=variables_lib.Variable(1.))

    @def_function.function
    def _write_checkpoint():
        save_path = save_checkpoint.write(checkpoint_prefix)
        return save_path

    self.evaluate([save_checkpoint.v.initializer])
    self.evaluate(_write_checkpoint())
    load_checkpoint = trackable_utils.Checkpoint(
        v=variables_lib.Variable(0.))
    load_checkpoint.restore(checkpoint_prefix).run_restore_ops()
    self.assertEqual(1., self.evaluate(load_checkpoint.v))
    # A second write from the same traced function picks up the new value.
    self.evaluate(save_checkpoint.v.assign(3.))
    self.evaluate(_write_checkpoint())
    self.evaluate(save_checkpoint.v.assign(0.))
    load_checkpoint.restore(checkpoint_prefix).run_restore_ops()
    self.assertEqual(3., self.evaluate(load_checkpoint.v))
class _ManualScope(tracking.AutoTrackable):
    """Trackable that builds a variable inside an explicit variable_scope,
    capturing it as a checkpoint dependency of this object."""

    def __call__(self):
        with variable_scope.variable_scope("ManualScope") as vs:
            # Keep the scope around so callers/tests can inspect it.
            self.variable_scope = vs
            with trackable_utils.capture_dependencies(template=self):
                return self._build()

    def _build(self):
        return variable_scope.get_variable(name="in_manual_scope", shape=[])
class TemplateTests(parameterized.TestCase, test.TestCase):
    """Object-based checkpointing of variables created via tf.make_template."""

    @test_util.run_in_graph_and_eager_modes
    def test_trackable_save_restore(self):
        """Template variables (including ones from a nested manual scope) are
        tracked, saved, and restored into a second template."""

        def _templated():
            v = variable_scope.get_variable(
                "v", shape=[1], initializer=init_ops.zeros_initializer(),
                use_resource=True)
            v2 = variable_scope.get_variable(
                "v2", shape=[1], initializer=init_ops.zeros_initializer(),
                use_resource=True)
            manual = _ManualScope()
            return v, v + 1., v2, manual, manual()

        save_template = template.make_template("s1", _templated)
        v1_save, _, v2_save, manual_scope, manual_scope_v = save_template()
        # All created objects are reachable from the template.
        six.assertCountEqual(
            self,
            [v1_save, v2_save, manual_scope, manual_scope_v, save_template],
            trackable_utils.list_objects(save_template))
        manual_dep, = manual_scope._checkpoint_dependencies
        self.assertEqual("in_manual_scope", manual_dep.name)
        self.assertIs(manual_scope_v, manual_dep.ref)
        optimizer = adam.Adam(0.0)
        save_root = trackable_utils.Checkpoint(
            my_template=save_template, optimizer=optimizer)
        optimizer.minimize(v1_save.read_value,
                           var_list=[v1_save])
        self.evaluate([v.initializer for v in save_template.variables])
        optimizer_variables = optimizer.variables() + list(
            optimizer._hyper.values())
        self.evaluate([v.initializer for v in optimizer_variables])
        # Known sentinel values to verify after restore.
        self.evaluate(v1_save.assign([12.]))
        self.evaluate(v2_save.assign([14.]))
        checkpoint_directory = self.get_temp_dir()
        checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
        save_path = save_root.save(checkpoint_prefix)

        load_template = template.make_template("s2", _templated)
        load_optimizer = adam.Adam(0.0)
        load_root = trackable_utils.Checkpoint(
            my_template=load_template, optimizer=load_optimizer)
        status = load_root.restore(save_path)
        var, var_plus_one, var2, _, _ = load_template()
        load_optimizer.minimize(var.read_value, var_list=[var])
        self.assertLen(load_template._checkpoint_dependencies, 3)
        self.assertEqual("v", load_template._checkpoint_dependencies[0].name)
        self.assertEqual("v2", load_template._checkpoint_dependencies[1].name)
        self.assertEqual("ManualScope",
                         load_template._checkpoint_dependencies[2].name)
        status.assert_consumed().run_restore_ops()
        self.assertAllEqual([12.], self.evaluate(var))
        self.assertAllEqual([13.], self.evaluate(var_plus_one))
        self.assertAllEqual([14.], self.evaluate(var2))

    @test_util.run_in_graph_and_eager_modes
    def test_trackable_save_restore_nested(self):
        """Nested templates are tracked as a dependency tree and restored."""

        def _inner_template():
            v = variable_scope.get_variable(
                "v", shape=[1], initializer=init_ops.zeros_initializer())
            return v

        def _outer_template():
            first_inner = template.make_template("i1", _inner_template)
            second_inner = template.make_template("i2", _inner_template)
            v1 = first_inner()
            v2 = second_inner()
            v3 = second_inner()  # second call reuses i2's variable
            return (first_inner, second_inner), (v1, v2, v3)

        with variable_scope.variable_scope("ignored"):
            save_template = template.make_template("s1", _outer_template)
            save_root = trackable_utils.Checkpoint(my_template=save_template)
            (inner_template_one, inner_template_two), _ = save_template()
        self.evaluate(inner_template_one.variables[0].assign([20.]))
        self.evaluate(inner_template_two.variables[0].assign([25.]))
        checkpoint_directory = self.get_temp_dir()
        checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
        save_path = save_root.save(checkpoint_prefix)

        load_template = template.make_template("s2", _outer_template)
        load_root = trackable_utils.Checkpoint(my_template=load_template)
        status = load_root.restore(save_path)
        (inner_template_one, inner_template_two), (v1, v2, v3) = load_template()
        outer_template_dependencies = load_root.my_template._checkpoint_dependencies
        self.assertLen(outer_template_dependencies, 2)
        self.assertEqual("i1", outer_template_dependencies[0].name)
        self.assertIs(inner_template_one, outer_template_dependencies[0].ref)
        self.assertEqual("i2", outer_template_dependencies[1].name)
        self.assertIs(inner_template_two, outer_template_dependencies[1].ref)
        self.assertLen(inner_template_one._checkpoint_dependencies, 1)
        self.assertEqual("v", inner_template_one._checkpoint_dependencies[0].name)
        self.assertLen(inner_template_two._checkpoint_dependencies, 1)
        self.assertEqual("v", inner_template_two._checkpoint_dependencies[0].name)
        status.assert_consumed().run_restore_ops()
        self.assertAllEqual([20.], self.evaluate(v1))
        self.assertAllEqual([25.], self.evaluate(v2))
        self.assertAllEqual([25.], self.evaluate(v3))
class CheckpointCompatibilityTests(test.TestCase):
    """Interoperability between name-based (tf.train.Saver) checkpoints and
    the object-based checkpointing API, plus graph/eager round trips."""

    def _initialized_model(self):
        """Build a model + Adam optimizer, run one step, and set sentinel
        values (bias=[1.], slot m=[2.], beta_1=3.) for later verification."""
        input_value = constant_op.constant([[3.]])
        model = MyModel()
        optimizer = adam.Adam(0.001)
        root_trackable = trackable_utils.Checkpoint(
            optimizer=optimizer, model=model)
        with backprop.GradientTape() as tape:
            loss = model(input_value)
        variables = model.trainable_variables
        gradients = tape.gradient(loss, variables)
        train_op = optimizer.apply_gradients(zip(gradients, variables))
        self.evaluate(trackable_utils.gather_initializers(
            root_trackable))
        self.evaluate(train_op)
        # A regular variable, a slot variable, and a non-slot Optimizer variable
        # with known values to check when loading.
        self.evaluate(model._named_dense.bias.assign([1.]))
        self.evaluate(optimizer.get_slot(
            var=model._named_dense.bias, slot_name="m").assign([2.]))
        self.evaluate(optimizer.beta_1.assign(3.))
        return root_trackable

    def _set_sentinels(self, root_trackable):
        """Overwrite the three tracked values so a restore is observable."""
        self.evaluate(root_trackable.model._named_dense.bias.assign([101.]))
        self.evaluate(
            root_trackable.optimizer.get_slot(
                var=root_trackable.model._named_dense.bias, slot_name="m")
            .assign([102.]))
        self.evaluate(root_trackable.optimizer.beta_1.assign(103.))

    def _check_sentinels(self, root_trackable):
        """Assert the three tracked values hold their original (saved) values."""
        self.assertAllEqual(
            [1.], self.evaluate(root_trackable.model._named_dense.bias))
        self.assertAllEqual([2.], self.evaluate(
            root_trackable.optimizer.get_slot(
                var=root_trackable.model._named_dense.bias, slot_name="m")))
        self.assertAllEqual(3.,
                            self.evaluate(root_trackable.optimizer.beta_1))

    def _write_name_based_checkpoint(self):
        """Save the model with the legacy name-based Saver; returns its path."""
        checkpoint_directory = self.get_temp_dir()
        checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
        with context.graph_mode():
            save_graph = ops.Graph()
            with save_graph.as_default(), self.session(
                    graph=save_graph) as session:
                root = self._initialized_model()
                name_saver = saver_lib.Saver()
                return name_saver.save(
                    sess=session, save_path=checkpoint_prefix,
                    global_step=root.optimizer.iterations)

    @test_util.run_in_graph_and_eager_modes
    def testLoadFromNameBasedSaver(self):
        """Save a name-based checkpoint, load it using the object-based API."""
        with test_util.device(use_gpu=True):
            save_path = self._write_name_based_checkpoint()
            root = self._initialized_model()
            self._set_sentinels(root)
            with self.assertRaises(AssertionError):
                self._check_sentinels(root)
            object_saver = trackable_utils.TrackableSaver(
                graph_view.ObjectGraphView(root))
            self._set_sentinels(root)
            status = object_saver.restore(save_path)
            if context.executing_eagerly():
                self._check_sentinels(root)
            if context.executing_eagerly():
                status.assert_consumed()
                status.assert_existing_objects_matched()
                status.assert_nontrivial_match()
            else:
                # When graph building, we haven't read any keys, so we don't know
                # whether the restore will be complete.
                with self.assertRaisesRegexp(AssertionError, "not restored"):
                    status.assert_consumed()
                with self.assertRaisesRegexp(AssertionError, "not restored"):
                    status.assert_existing_objects_matched()
                with self.assertRaisesRegexp(AssertionError, "not restored"):
                    status.assert_nontrivial_match()
            status.run_restore_ops()
            self._check_sentinels(root)
            self._set_sentinels(root)
            status = object_saver.restore(save_path)
            status.initialize_or_restore()
            status.assert_nontrivial_match()
            self._check_sentinels(root)
            # Check that there is no error when keys are missing from the name-based
            # checkpoint.
            root.not_in_name_checkpoint = resource_variable_ops.ResourceVariable([1.])
            status = object_saver.restore(save_path)
            with self.assertRaises(AssertionError):
                status.assert_existing_objects_matched()

    def testSaveGraphLoadEager(self):
        """Object-based checkpoint written in graph mode loads eagerly."""
        checkpoint_directory = self.get_temp_dir()
        checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
        with context.graph_mode():
            save_graph = ops.Graph()
            with save_graph.as_default(), self.session(
                    graph=save_graph):
                root = self._initialized_model()
                save_path = root.save(file_prefix=checkpoint_prefix)
        with context.eager_mode():
            root = self._initialized_model()
            self._set_sentinels(root)
            root.restore(save_path).assert_consumed()
            self._check_sentinels(root)

    def testSaveEagerLoadGraph(self):
        """Object-based checkpoint written eagerly loads in graph mode."""
        checkpoint_directory = self.get_temp_dir()
        checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
        with context.eager_mode():
            root = self._initialized_model()
            save_path = root.save(file_prefix=checkpoint_prefix)
        with context.graph_mode():
            save_graph = ops.Graph()
            with save_graph.as_default(), self.session(
                    graph=save_graph):
                root = self._initialized_model()
                self._set_sentinels(root)
                root.restore(save_path).assert_consumed().run_restore_ops()
                self._check_sentinels(root)
if __name__ == "__main__":
    # These tests exercise both eager and graph paths; enable eager globally.
    ops.enable_eager_execution()
    test.main()
|
|
import uuid, json
from esprit import raw, util, tasks
from copy import deepcopy
import time
class StoreException(Exception):
    """Raised when a store/remove action is malformed or a save cannot
    complete (e.g. a blocking save finds duplicate ids).

    The offending message/value is kept on ``.value``.
    """

    def __init__(self, value):
        # Also hand the message to Exception so .args is populated (better
        # repr/pickling); explicit two-arg super() keeps Python 2 compatibility.
        super(StoreException, self).__init__(value)
        self.value = value

    def __str__(self):
        # Preserved historical behavior: str() shows the repr of the value.
        return repr(self.value)
class DAO(object):
    """Low-level persistence helpers shared by domain objects.

    Provides id generation and execution of queued "remove"/"store" actions
    against an esprit connection.
    """

    @classmethod
    def makeid(cls):
        """Generate a random 32-character hex identifier."""
        return uuid.uuid4().hex

    def actions(self, conn, action_queue):
        """Execute a queue of actions against ``conn``.

        Each item in ``action_queue`` is a single-key dict whose key names the
        operation ("remove" or "store") and whose value is the operation spec.
        """
        for action in action_queue:
            # BUG FIX: the original used action.keys()[0], which raises
            # TypeError on Python 3 (dict views are not indexable).
            # next(iter(...)) yields the single key on both Python 2 and 3.
            op = next(iter(action))
            if op == "remove":
                self._action_remove(conn, action)
            elif op == "store":
                self._action_store(conn, action)

    def _action_remove(self, conn, remove_action):
        """Delete by id or by query; the spec needs "index" plus "id"/"query".

        :raises StoreException: if the spec is missing required keys.
        """
        obj = remove_action.get("remove")
        if "index" not in obj:
            raise StoreException("no index provided for remove action")
        if "id" not in obj and "query" not in obj:
            raise StoreException("no id or query provided for remove action")
        if "id" in obj:
            raw.delete(conn, obj.get("index"), obj.get("id"))
        elif "query" in obj:
            raw.delete_by_query(conn, obj.get("index"), obj.get("query"))

    def _action_store(self, conn, store_action):
        """Store a record; the spec needs "index" and "record" ("id" optional).

        :raises StoreException: if the spec is missing required keys.
        """
        obj = store_action.get("store")
        if "index" not in obj:
            raise StoreException("no index provided for store action")
        if "record" not in obj:
            raise StoreException("no record provided for store action")
        raw.store(conn, obj.get("index"), obj.get("record"), obj.get("id"))
class DomainObject(DAO):
    # Subclasses set __type__ to the index/type name used for reads and
    # writes, and __conn__ to a default esprit connection used whenever a
    # method is called without an explicit conn argument.
    __type__ = None
    __conn__ = None

    def __init__(self, raw=None):
        # NOTE: the parameter name "raw" shadows the module-level esprit
        # "raw" module within this method only; here it is the record dict.
        self.data = raw if raw is not None else {}
@classmethod
def dynamic_read_types(cls):
    """Hook for subclasses: return a type (or list of types) to read from
    instead of __type__. Default None means "use __type__"."""
    return None

@classmethod
def dynamic_write_type(cls):
    """Hook for subclasses: return the type to write to instead of
    __type__. Default None means "use __type__"."""
    return None
@classmethod
def get_read_types(cls, types=None):
    """Resolve the list of types to read from.

    Precedence: the explicit ``types`` argument, then the
    ``dynamic_read_types`` hook, then ``[cls.__type__]``. A bare (non-list)
    value is wrapped in a single-element list.
    """
    for candidate in (types, cls.dynamic_read_types()):
        if candidate is not None:
            return candidate if isinstance(candidate, list) else [candidate]
    return [cls.__type__]
@classmethod
def get_write_type(cls, type=None):
    """Resolve the single type to write to.

    Precedence: the explicit ``type`` argument, then the
    ``dynamic_write_type`` hook, then ``cls.__type__``.
    """
    if type is not None:
        return type
    dynamic = cls.dynamic_write_type()
    return cls.__type__ if dynamic is None else dynamic
@property
def id(self):
    # Record id, or None if not yet assigned.
    return self.data.get('id', None)

@id.setter
def id(self, val):
    self.data["id"] = val

@property
def created_date(self):
    # Timestamp set on first save (see save()).
    return self.data.get("created_date")

@created_date.setter
def created_date(self, val):
    self.data["created_date"] = val

@property
def last_updated(self):
    # Timestamp refreshed on every save (see save()).
    return self.data.get("last_updated")

@last_updated.setter
def last_updated(self, val):
    self.data["last_updated"] = val

@property
def json(self):
    # JSON serialisation of the underlying record dict.
    return json.dumps(self.data)

@property
def raw(self):
    # The underlying record dict itself (not a copy). Note this instance
    # property is distinct from the module-level esprit "raw" module.
    return self.data
@classmethod
def refresh(cls, conn=None):
    """Issue an index refresh via the esprit raw API.

    Falls back to ``cls.__conn__`` when no connection is supplied.
    """
    target = cls.__conn__ if conn is None else conn
    raw.refresh(target)
@classmethod
def pull(cls, id_, conn=None, wrap=True, types=None):
    '''Retrieve object by id.

    Tries each read type in turn; the first non-404 hit is returned,
    wrapped in cls when ``wrap`` is True, otherwise as the raw dict.
    Returns None when id_ is None, nothing is found, or an error occurs.
    '''
    if conn is None:
        conn = cls.__conn__
    types = cls.get_read_types(types)
    if id_ is None:
        return None
    try:
        for t in types:
            resp = raw.get(conn, t, id_)
            if resp.status_code == 404:
                continue
            j = raw.unpack_get(resp)
            return cls(j) if wrap else j
        return None
    except Exception as e:
        # Best-effort read: report and return None rather than propagate.
        # BUG FIX: replaces "print e.message", which is a SyntaxError on
        # Python 3 and relied on the deprecated .message attribute; the
        # parenthesised form works on both Python 2 and 3.
        print(e)
        return None
@classmethod
def query(cls, q='', terms=None, should_terms=None, facets=None, conn=None, types=None, **kwargs):
    '''Perform a query on backend.

    :param q: maps to query_string parameter if string, or query dict if dict.
    :param terms: dictionary of terms to filter on. values should be lists.
        NOTE: this dict is mutated in place (non-list values are wrapped).
    :param should_terms: dictionary of terms appended to the bool "must"
        clause as "terms" filters. Also mutated in place.
    :param facets: dict of facets to return from the query.
    :param kwargs: any keyword args as per
    http://www.elasticsearch.org/guide/reference/api/search/uri-request.html
        The special key "_from" is translated to the ES "from" parameter.
    '''
    if conn is None:
        conn = cls.__conn__
    types = cls.get_read_types(types)
    # Normalise q into a query dict whose "query" is a bool with a "must"
    # list. A dict q is used (and mutated) directly; a string q becomes a
    # query_string clause; empty q becomes match_all.
    if isinstance(q,dict):
        query = q
        if 'bool' not in query['query']:
            boolean = {'bool':{'must': [] }}
            boolean['bool']['must'].append( query['query'] )
            query['query'] = boolean
        if 'must' not in query['query']['bool']:
            query['query']['bool']['must'] = []
    elif q:
        query = {
            'query': {
                'bool': {
                    'must': [
                        {'query_string': { 'query': q }}
                    ]
                }
            }
        }
    else:
        query = {
            'query': {
                'bool': {
                    'must': [
                        {'match_all': {}}
                    ]
                }
            }
        }
    if facets:
        if 'facets' not in query:
            query['facets'] = {}
        for k, v in facets.items():
            query['facets'][k] = {"terms":v}
    if terms:
        # Build a fresh bool filter from the term constraints, folding in
        # the original query, then replace query['query'] with it.
        boolean = {'must': [] }
        for term in terms:
            if not isinstance(terms[term],list): terms[term] = [terms[term]]
            for val in terms[term]:
                obj = {'term': {}}
                obj['term'][ term ] = val
                boolean['must'].append(obj)
        if q and not isinstance(q,dict):
            boolean['must'].append( {'query_string': { 'query': q } } )
        elif q and 'query' in q:
            boolean['must'].append( query['query'] )
        query['query'] = {'bool': boolean}
    for k,v in kwargs.items():
        if k == '_from':
            # "_from" avoids shadowing the Python keyword "from" in callers.
            query['from'] = v
        else:
            query[k] = v
    if should_terms is not None and len(should_terms) > 0:
        for s in should_terms:
            if not isinstance(should_terms[s],list): should_terms[s] = [should_terms[s]]
            query["query"]["bool"]["must"].append({"terms" : {s : should_terms[s]}})
    r = raw.search(conn, types, query)
    return r.json()
@classmethod
def object_query(cls, q='', terms=None, should_terms=None, facets=None, conn=None, types=None, **kwargs):
    """Run query() with the same arguments and wrap each hit in cls."""
    response = cls.query(q=q, terms=terms, should_terms=should_terms,
                         facets=facets, conn=conn, types=types, **kwargs)
    records = raw.unpack_json_result(response)
    return [cls(record) for record in records]
def save(self, conn=None, makeid=True, created=True, updated=True, blocking=False, type=None):
    """Store self.data in the write type's index.

    :param makeid: assign a fresh id when the record has none.
    :param created: set created_date on first save.
    :param updated: refresh last_updated on every save.
    :param blocking: after storing, poll the index until the stored
        last_updated timestamp is visible in search results.
    :raises StoreException: for a blocking save without updated, or if
        multiple records share this id during the blocking poll.
    """
    if conn is None:
        conn = self.__conn__
    type = self.get_write_type(type)
    if blocking and not updated:
        raise StoreException("Unable to do blocking save on record where last_updated is not set")
    now = util.now()
    if blocking:
        # we need the new last_updated time to be later than the new one
        if now == self.last_updated:
            time.sleep(1) # timestamp granularity is seconds, so just sleep for 1
            now = util.now() # update the new timestamp
    # the main body of the save
    if makeid:
        if "id" not in self.data:
            self.id = self.makeid()
    if created:
        if 'created_date' not in self.data:
            self.data['created_date'] = now
    if updated:
        self.data['last_updated'] = now
    raw.store(conn, type, self.data, self.id)
    if blocking:
        q = {
            "query" : {
                "term" : {"id.exact" : self.id}
            },
            "fields" : ["last_updated"]
        }
        # NOTE(review): this poll loop has no timeout — it spins forever if
        # the record never becomes searchable; confirm that is acceptable.
        while True:
            res = raw.search(conn, type, q)
            j = raw.unpack_result(res)
            if len(j) == 0:
                time.sleep(0.5)
                continue
            if len(j) > 1:
                raise StoreException("More than one record with id {x}".format(x=self.id))
            if j[0].get("last_updated")[0] == now: # NOTE: only works on ES > 1.x
                break
            else:
                time.sleep(0.5)
                continue
def delete(self, conn=None, type=None):
    """Delete this record from the index.

    :param conn: ES connection; defaults to the class-level connection.
    :param type: type (or basis for read types) to consider; defaults to
        the class read types.
    """
    if conn is None:
        conn = self.__conn__
    # the record may be in any one of the read types, so we need to check them all
    types = self.get_read_types(type)
    # in the simple case of one type, just get on and issue the delete
    if len(types) == 1:
        raw.delete(conn, types[0], self.id)
        # BUGFIX: previously fell through into the loop below and probed
        # the (already deleted) record again with a wasted get
        return
    # otherwise, check all the types until we find the object, then issue the delete there
    for t in types:
        o = raw.get(conn, t, self.id)
        if o is not None:
            raw.delete(conn, t, self.id)
@classmethod
def delete_by_query(cls, query, conn=None, es_version="0.90.13", type=None):
    """Issue a delete-by-query against the class write type."""
    connection = conn if conn is not None else cls.__conn__
    target_type = cls.get_write_type(type)
    raw.delete_by_query(connection, target_type, query, es_version=es_version)
@classmethod
def iterate(cls, q, page_size=1000, limit=None, wrap=True, **kwargs):
    """Generator over all records matching query dict *q*, fetched in pages.

    :param q: query dict; copied, so the caller's dict is not mutated at
        the top level.
    :param page_size: number of records fetched per search request.
    :param limit: maximum number of records to yield (None for no limit).
    :param wrap: if True yield instances of this class, else plain dicts.
    :param kwargs: passed through to query().
    """
    q = q.copy()
    q["size"] = page_size
    q["from"] = 0
    if "sort" not in q: # to ensure complete coverage on a changing index, sort by id is our best bet
        q["sort"] = [{"id" : {"order" : "asc"}}]
    counter = 0
    while True:
        # apply the limit
        if limit is not None and counter >= limit:
            break
        res = cls.query(q=q, **kwargs)
        # a hit carries its record in _source or in fields, depending on the query
        rs = [r.get("_source") if "_source" in r else r.get("fields") for r in res.get("hits", {}).get("hits", [])]
        # print counter, len(rs), res.get("hits", {}).get("total"), len(res.get("hits", {}).get("hits", [])), json.dumps(q)
        if len(rs) == 0:
            # empty page means we have walked off the end of the result set
            break
        for r in rs:
            # apply the limit (again)
            if limit is not None and counter >= limit:
                break
            counter += 1
            if wrap:
                yield cls(r)
            else:
                yield r
        # advance to the next page
        q["from"] += page_size
@classmethod
def iterall(cls, page_size=1000, limit=None, **kwargs):
    """Iterate over every record in the index via a match_all query."""
    return cls.iterate(deepcopy(all_query), page_size=page_size, limit=limit, **kwargs)
@classmethod
def count(cls, q, **kwargs):
    """Return the total number of hits for *q* without fetching any records."""
    counting_query = deepcopy(q)
    counting_query["size"] = 0  # we only need the total, not the hits themselves
    result = cls.query(q=counting_query, **kwargs)
    return result.get("hits", {}).get("total")
@classmethod
def scroll(cls, q=None, page_size=1000, limit=None, keepalive="1m", conn=None, raise_on_scroll_error=True, types=None):
    """Generator over records using the ES scroll API.

    :param q: query dict; defaults to match_all.
    :param page_size: records per scroll page.
    :param limit: maximum number of records to yield (None for no limit).
    :param keepalive: scroll-context keepalive interval.
    :param conn: ES connection; defaults to the class-level connection.
    :param raise_on_scroll_error: if False, a ScrollException simply ends
        the iteration instead of propagating.
    :param types: read types to scroll; defaults to the class read types.
    """
    if conn is None:
        conn = cls.__conn__
    types = cls.get_read_types(types)
    if q is None:
        q = {"query" : {"match_all" : {}}}
    gen = tasks.scroll(conn, types, q, page_size=page_size, limit=limit, keepalive=keepalive)
    try:
        for o in gen:
            yield cls(o)
    except tasks.ScrollException as e:
        if raise_on_scroll_error:
            raise e
        else:
            # best-effort mode: end iteration quietly
            return
########################################################################
## Some useful ES queries
########################################################################
# Query body matching every record in the index; iterall() deepcopy()s it
# before use so this shared constant is never mutated.
all_query = {
    "query" : {
        "match_all" : { }
    }
}
|
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analytics for extracting facts based on StudentAnswerEntity entries."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import ast
import collections
import datetime
from mapreduce import context
from common import crypto
from common import schema_fields
from common import tags
from models import courses
from models import data_sources
from models import entities
from models import event_transforms
from models import jobs
from models import models
from models import transforms
from tools import verify
from google.appengine.ext import db
from google.appengine.api import app_identity
from google.appengine.api import datastore
# Cap on how many distinct wrong answers are reported per question; the
# remainder are lumped into a single 'Other Incorrect Answers' bucket.
MAX_INCORRECT_REPORT = 5
class QuestionAnswersEntity(entities.BaseEntity):
    """Student answers to individual questions."""
    # JSON-encoded list of answer tuples, keyed by user ID; not indexed
    # because it is only ever fetched by key.
    data = db.TextProperty(indexed=False)
    @classmethod
    def safe_key(cls, db_key, transform_fn):
        # Rebuild the key with transform_fn applied to the key name (the
        # user ID), so exports can anonymize the identifier.
        return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))
class RawAnswersGenerator(jobs.MapReduceJob):
    """Extract answers from all event types into QuestionAnswersEntity table."""
    # Reduce key under which the count of distinct students is aggregated.
    TOTAL_STUDENTS = 'total_students'
    @staticmethod
    def get_description():
        return 'raw question answers'
    @staticmethod
    def entity_class():
        return models.EventEntity
    def build_additional_mapper_params(self, app_context):
        """Precompute course-wide lookup tables shared by all map() calls."""
        return {
            'questions_by_usage_id': (
                event_transforms.get_questions_by_usage_id(app_context)),
            'valid_question_ids': (
                event_transforms.get_valid_question_ids()),
            'group_to_questions': (
                event_transforms.get_group_to_questions()),
            'assessment_weights':
                event_transforms.get_assessment_weights(app_context),
            'unscored_lesson_ids':
                event_transforms.get_unscored_lesson_ids(app_context),
        }
    @staticmethod
    def map(event):
        """Extract question responses from all event types providing them."""
        if event.source not in (
            'submit-assessment',
            'attempt-lesson',
            'tag-assessment'):
            return
        # Fetch global params set up in build_additional_mapper_params(), above.
        params = context.get().mapreduce_spec.mapper.params
        questions_info = params['questions_by_usage_id']
        valid_question_ids = params['valid_question_ids']
        group_to_questions = params['group_to_questions']
        assessment_weights = params['assessment_weights']
        timestamp = int(
            (event.recorded_on - datetime.datetime(1970, 1, 1)).total_seconds())
        content = transforms.loads(event.data)
        # BUGFIX: 'answers' was previously left unbound when an event carried
        # an unrecognized payload version, raising NameError at the yield
        # below.  Initialize it and only emit answers when we parsed some.
        answers = None
        if event.source == 'submit-assessment':
            answer_data = content.get('values', {})
            # TODO(mgainer): handle assessment-as-form submissions. Current
            # implementation only understands Question and QuestionGroup;
            # forms are simply submitted as lists of fields.
            # TODO(mgainer): Handle peer-review scoring
            if not isinstance(answer_data, dict):
                return
            version = answer_data.get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, valid_question_ids, assessment_weights,
                    group_to_questions, answer_data, timestamp)
        elif event.source == 'attempt-lesson':
            # Very odd that the version should be in the answers map....
            version = content.get('answers', {}).get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, valid_question_ids, assessment_weights,
                    group_to_questions, content, timestamp)
        elif event.source == 'tag-assessment':
            answers = event_transforms.unpack_check_answers(
                content, questions_info, valid_question_ids, assessment_weights,
                group_to_questions, timestamp)
        if answers is not None:
            yield (event.user_id, [list(answer) for answer in answers])
        # Always count the student as having produced an answer event.
        yield (RawAnswersGenerator.TOTAL_STUDENTS, event.user_id)
    @staticmethod
    def reduce(key, answers_lists):
        """Stores values to DB, and emits one aggregate: Count of students."""
        if key == RawAnswersGenerator.TOTAL_STUDENTS:
            # Values are user IDs; de-duplicate to count distinct students.
            student_ids = set(answers_lists)
            yield (key, len(student_ids))
            return
        answers = []
        for data in answers_lists:
            # Values arrive as serialized Python literals; decode and merge.
            answers += ast.literal_eval(data)
        data = transforms.dumps(answers)
        QuestionAnswersEntity(key_name=key, data=data).put()
# Stand-in for a models.Student row when the user ID recorded in an answers
# entity can no longer be found in the datastore.
StudentPlaceholder = collections.namedtuple(
    'StudentPlaceholder', ['user_id', 'name', 'email'])
class RawAnswersDataSource(data_sources.SynchronousQuery,
                           data_sources.AbstractDbTableRestDataSource):
    """Make raw answers from QuestionAnswersEntity available via REST."""
    # Interactive (in-dashboard) download is only offered for courses with
    # at most this many students.
    MAX_INTERACTIVE_DOWNLOAD_SIZE = 100
    @staticmethod
    def required_generators():
        return [RawAnswersGenerator]
    @staticmethod
    def fill_values(app_context, template_values, raw_answers_job):
        # Populate the Jinja template dict from the map/reduce job results.
        results = jobs.MapReduceJob.get_results(raw_answers_job)
        if not results:
            template_values['any_results'] = False
        else:
            template_values['any_results'] = True
            template_values['max_interactive_download_size'] = (
                RawAnswersDataSource.MAX_INTERACTIVE_DOWNLOAD_SIZE)
            # results is a list of (key, value) pairs; convert for lookup.
            # NOTE(review): assumes the TOTAL_STUDENTS aggregate is always
            # present when any results exist - confirm against the generator.
            results = {k: v for k, v in results}
            template_values['interactive_download_allowed'] = (
                results[RawAnswersGenerator.TOTAL_STUDENTS] <=
                RawAnswersDataSource.MAX_INTERACTIVE_DOWNLOAD_SIZE)
            template_values['course_slug'] = app_context.get_slug()
            template_values['app_id'] = (
                app_identity.get_application_id())
            template_values['hostname'] = (
                app_identity.get_default_version_hostname())
    @classmethod
    def get_entity_class(cls):
        return QuestionAnswersEntity
    @classmethod
    def get_name(cls):
        return 'raw_student_answers'
    @classmethod
    def get_title(cls):
        return 'Raw Student Answers'
    @classmethod
    def get_default_chunk_size(cls):
        # Selecting answers by student turns into a where-in clause, which
        # in turn turns into N different '==' filters, and AppEngine supports
        # at most 30.
        # TODO(mgainer): Do something clever so that the students who have
        # non-blank data here are returned in the earlier pages.
        # TODO(mgainer): For students with no data, return blank items so
        # we at least see rows for them in the UI, even if there are no scores.
        return 25
    @classmethod
    def get_schema(cls, unused_app_context, unused_catch_and_log,
                   unused_source_context):
        # Declares the REST schema for one (student, question, answer) row;
        # keep in sync with the dicts built in _postprocess_rows() below.
        reg = schema_fields.FieldRegistry(
            'Raw Student Answers',
            description='Raw data of answers to all uses of all graded '
            'questions (excludes self-check non-graded questions in lessons) '
            'in the course.')
        reg.add_property(schema_fields.SchemaField(
            'user_id', 'User ID', 'string',
            description='ID of the student providing this answer.'))
        reg.add_property(schema_fields.SchemaField(
            'user_name', 'User Name', 'string',
            description='Name of the student providing this answer.'))
        reg.add_property(schema_fields.SchemaField(
            'user_email', 'User Email', 'string',
            description='Email address of the student providing this answer.'))
        reg.add_property(schema_fields.SchemaField(
            'unit_id', 'Unit ID', 'string',
            description='ID of unit or assessment for this score.'))
        reg.add_property(schema_fields.SchemaField(
            'lesson_id', 'Lesson ID', 'string', optional=True,
            description='ID of lesson for this score.'))
        reg.add_property(schema_fields.SchemaField(
            'sequence', 'Sequence', 'integer',
            description='0-based order within containing assessment/lesson.'))
        reg.add_property(schema_fields.SchemaField(
            'question_id', 'Question ID', 'string',
            description='ID of question. Key to models.QuestionDAO'))
        reg.add_property(schema_fields.SchemaField(
            'question_type', 'Question Type', 'string',
            description='Kind of question. E.g., "SaQuestion" or "McQuestion" '
            'for single-answer and multiple-choice, respectively.'))
        reg.add_property(schema_fields.SchemaField(
            'timestamp', 'Question ID', 'integer',
            description='Seconds since 1970-01-01 in GMT when answer given.'))
        choice_type = schema_fields.SchemaField(
            'answer', 'Answer', 'string',
            description='An answer to the question')
        reg.add_property(schema_fields.FieldArray(
            'answers', 'Answers', item_type=choice_type,
            description='The answer from the student. Note that '
            'this may be an array for questions permitting multiple answers.'))
        reg.add_property(schema_fields.SchemaField(
            'score', 'Score', 'number',
            description='Value from the Question indicating the score for '
            'this answer or set of answers.'))
        reg.add_property(schema_fields.SchemaField(
            'weighted_score', 'Weighted Score', 'number',
            description='Question score, multiplied by weights in '
            'containing Question Group, Assessment, etc.'))
        reg.add_property(schema_fields.SchemaField(
            'tallied', 'Tallied', 'boolean',
            description='Whether the score counts towards the overall grade. '
            'Lessons by default do not contribute to course score, but may '
            'be marked as graded.'))
        return reg.get_json_schema_dict()['properties']
    @classmethod
    def _postprocess_rows(cls, app_context, source_context, schema, log,
                          page_number, rows):
        """Unpack all responses from single student into separate rows."""
        # Fill in responses with actual student name, not just ID.
        ids = []
        for entity in rows:
            ids.append(entity.key().id_or_name())
        # Chunkify student lookups; 'in' has max of 30
        students = []
        size = datastore.MAX_ALLOWABLE_QUERIES
        for ids_chunk in [ids[i:i + size] for i in xrange(0, len(ids), size)]:
            students_chunk = (models.Student
                              .all()
                              .filter('user_id in', ids_chunk)
                              .fetch(len(ids_chunk)))
            students_by_id = {s.user_id: s for s in students_chunk}
            for student_id in ids_chunk:
                if student_id in students_by_id:
                    students += [students_by_id[student_id]]
                else:
                    # Student record missing; keep a placeholder so that the
                    # zip() with rows below stays aligned.
                    students += [StudentPlaceholder(
                        student_id, '<unknown>', '<unknown>')]
        # Prepare to convert multiple-choice question indices to answer strings.
        mc_choices = {}
        for question in models.QuestionDAO.get_all():
            if 'choices' in question.dict:
                mc_choices[str(question.id)] = [
                    choice['text'] for choice in question.dict['choices']]
        ret = []
        for entity, student in zip(rows, students):
            raw_answers = transforms.loads(entity.data)
            answers = [event_transforms.QuestionAnswerInfo(*parts)
                       for parts in raw_answers]
            for answer in answers:
                if answer.question_id in mc_choices:
                    # Map stored choice indices back to their display text.
                    choices = mc_choices[answer.question_id]
                    given_answers = []
                    for i in answer.answers:
                        given_answers.append(
                            choices[i] if i < len(choices)
                            else '[deleted choice]')
                else:
                    given_answers = answer.answers
                    if not isinstance(given_answers, list):
                        given_answers = [given_answers]
                # NOTE: keep members in parallel with get_schema() above.
                ret.append({
                    'user_id': student.user_id,
                    'user_name': student.name or '<blank>',
                    'user_email': student.email or '<blank>',
                    'unit_id': str(answer.unit_id),
                    'lesson_id': str(answer.lesson_id),
                    'sequence': answer.sequence,
                    'question_id': str(answer.question_id),
                    'question_type': answer.question_type,
                    'timestamp': answer.timestamp,
                    'answers': given_answers,
                    'score': float(answer.score),
                    'weighted_score': float(answer.weighted_score),
                    'tallied': answer.tallied,
                })
        return ret
class AnswersDataSource(RawAnswersDataSource):
    """Exposes user-ID-obscured versions of all answers to all questions.
    This data source is meant to be used for aggregation or export to
    BigQuery (in contrast to RawAnswersDataSource, which should only ever
    be used within CourseBuilder, as that class exposes un-obscured user
    IDs and names).
    """
    @classmethod
    def get_name(cls):
        return 'answers'
    @classmethod
    def get_title(cls):
        return 'Answers'
    @classmethod
    def get_default_chunk_size(cls):
        # Export-only source, so large pages are fine (cf. parent's 25).
        return 1000
    @classmethod
    def get_schema(cls, app_context, log, source_context):
        # Parent schema minus the personally-identifying user_name field.
        schema = super(AnswersDataSource, cls).get_schema(app_context, log,
                                                          source_context)
        schema.pop('user_name')
        return schema
    @classmethod
    def _postprocess_rows(cls, app_context, source_context, schema, log,
                          page_number, rows):
        # Strip names and one-way-obscure user IDs in the parent's rows.
        items = super(AnswersDataSource, cls)._postprocess_rows(
            app_context, source_context, schema, log, page_number, rows)
        for item in items:
            item.pop('user_name')
            # HMAC keyed on the per-export PII secret: stable within one
            # export, not reversible to the real user ID.
            item['user_id'] = crypto.hmac_sha_2_256_transform(
                source_context.pii_secret, item['user_id'])
        return items
class OrderedQuestionsDataSource(data_sources.SynchronousQuery):
    """Simple "analytic" giving names of each question, in course order.
    This class cooperates with the Jinja template in gradebook.html to
    generate the header for the Gradebook analytics sub-tab.  It also
    generates the expected list of questions, in course order.  This
    set of questions sets the order for the question responses
    provided by RawAnswersDataSource (above).
    """
    @staticmethod
    def fill_values(app_context, template_values):
        """Sets values into the dict used to fill out the Jinja template."""
        def _find_q_ids(html, groups):
            """Returns the list of question IDs referenced from rich HTML."""
            question_ids = []
            for component in tags.get_components_from_html(html):
                if component['cpt_name'] == 'question':
                    question_ids.append(int(component['quid']))
                elif component['cpt_name'] == 'question-group':
                    # Expand a question group into its member questions.
                    qgid = int(component['qgid'])
                    if qgid in groups:
                        for question_id in groups[qgid]:
                            question_ids.append(int(question_id))
            return question_ids
        def _look_up_questions(questions, question_ids):
            """Build a dict used to build HTML for one column for one question.
            Args:
              questions: Map from question ID to QuestionDAO
              question_ids: Set of IDS for which we want to build helper dicts.
            Returns:
              An array of dicts, one per question named in question_ids.
            """
            ret = []
            for qid in list(question_ids):
                # Drop references to questions deleted since authoring;
                # also prune them from the caller's list.
                if qid not in questions:
                    question_ids.remove(qid)
                    continue
                ret.append({
                    'id': qid,
                    'description': questions[qid],
                    'href': 'dashboard?action=edit_question&key=%s' % qid,
                })
            return ret
        def _q_key(unit_id, lesson_id, question_id):
            # Unique key for one usage of a question within the course.
            return '%s.%s.%s' % (unit_id, lesson_id, question_id)
        def _add_assessment(unit):
            # Questions appearing directly on a top-level assessment.
            q_ids = _find_q_ids(unit.html_content, groups)
            return (
                [_q_key(unit.unit_id, None, q_id) for q_id in q_ids],
                {
                    'unit_id': None,
                    'title': None,
                    'questions': _look_up_questions(questions, q_ids)
                })
        def _add_sub_assessment(unit, assessment):
            # Questions on a pre/post assessment contained in a unit.
            q_ids = _find_q_ids(assessment.html_content, groups)
            return (
                [_q_key(assessment.unit_id, None, q_id) for q_id in q_ids],
                {
                    'href': 'unit?unit=%s&assessment=%s' % (
                        unit.unit_id, assessment.unit_id),
                    'unit_id': assessment.unit_id,
                    'title': assessment.title,
                    'questions': _look_up_questions(questions, q_ids),
                    'tallied': True,
                })
        def _add_lesson(unit, lesson):
            # Questions referenced from a lesson's rich-text objectives.
            q_ids = _find_q_ids(lesson.objectives, groups)
            return (
                [_q_key(unit.unit_id, lesson.lesson_id, qid) for qid in q_ids],
                {
                    'href': 'unit?unit=%s&lesson=%s' % (
                        unit.unit_id, lesson.lesson_id),
                    'lesson_id': lesson.lesson_id,
                    'title': lesson.title,
                    'questions': _look_up_questions(questions, q_ids),
                    'tallied': lesson.scored,
                })
        def _count_colspans(units):
            # Compute HTML colspan values for the two-row table header.
            for unit in units:
                unit_colspan = 0
                for item in unit['contents']:
                    # answer/score for each question, plus subtotal for section.
                    item['colspan'] = len(item['questions']) * 2
                    unit_colspan += item['colspan']
                # If a unit contains more than one sub-unit, we need a subtotal
                # column.
                if len(unit['contents']) > 1:
                    for item in unit['contents']:
                        if len(item['questions']) > 1 and item['tallied']:
                            item['colspan'] += 1
                            unit_colspan += 1
                # +1 for unit total column
                unit['colspan'] = unit_colspan + 1
        course = courses.Course(None, app_context)
        questions = {q.id: q.description for q in models.QuestionDAO.get_all()}
        groups = {
            g.id: g.question_ids for g in models.QuestionGroupDAO.get_all()}
        units = []
        question_keys = []
        # Walk through the course in display order, gathering all items
        # that may contain questions.  This is used to build up the HTML
        # table headers for display.
        for unit in course.get_units():
            # Skip contained pre/post assessments; these will be done in their
            # containing unit.
            if course.get_parent_unit(unit.unit_id):
                continue
            # Only deal with known unit types
            if unit.type == verify.UNIT_TYPE_ASSESSMENT:
                href = 'assessment?name=%s' % unit.unit_id
            elif unit.type == verify.UNIT_TYPE_UNIT:
                # BUGFIX: a stray trailing comma previously made this a
                # one-element tuple rather than a string.
                href = 'unit?unit=%s' % unit.unit_id
            else:
                continue
            unit_contents = []
            if unit.type == verify.UNIT_TYPE_ASSESSMENT:
                q_keys, contents = _add_assessment(unit)
                if q_keys:
                    question_keys += q_keys
                    unit_contents.append(contents)
            if unit.pre_assessment:
                assessment = course.find_unit_by_id(unit.pre_assessment)
                if assessment:
                    q_keys, contents = _add_sub_assessment(unit, assessment)
                    if q_keys:
                        question_keys += q_keys
                        if len(q_keys) > 1:
                            question_keys += ['subtotal']
                        unit_contents.append(contents)
            for lesson in course.get_lessons(unit.unit_id):
                q_keys, contents = _add_lesson(unit, lesson)
                if q_keys:
                    question_keys += q_keys
                    if len(q_keys) > 1 and contents['tallied']:
                        question_keys += ['subtotal']
                    unit_contents.append(contents)
            if unit.post_assessment:
                assessment = course.find_unit_by_id(unit.post_assessment)
                if assessment:
                    q_keys, contents = _add_sub_assessment(unit, assessment)
                    if q_keys:
                        question_keys += q_keys
                        if len(q_keys) > 1:
                            question_keys += ['subtotal']
                        unit_contents.append(contents)
            if unit_contents:
                units.append({
                    'href': href,
                    'unit_id': unit.unit_id,
                    'title': unit.title,
                    'contents': unit_contents,
                })
        # Final course-total column, then colspans for the assembled header.
        question_keys.append('total')
        _count_colspans(units)
        template_values['units'] = units
        template_values['gradebook_js_vars'] = transforms.dumps(
            {'question_keys': question_keys})
class StudentAnswersStatsGenerator(jobs.MapReduceJob):
    """Map/reduce job tallying answer counts per question usage."""
    @staticmethod
    def get_description():
        return 'student answers'
    @staticmethod
    def entity_class():
        return models.StudentAnswersEntity
    def build_additional_mapper_params(self, app_context):
        # Course-wide lookup tables shared by all map() invocations.
        return {
            'questions_by_usage_id': (
                event_transforms.get_questions_by_usage_id(app_context)),
            'valid_question_ids': (
                event_transforms.get_valid_question_ids()),
            'group_to_questions': (
                event_transforms.get_group_to_questions()),
            'assessment_weights':
                event_transforms.get_assessment_weights(app_context),
            'unscored_lesson_ids':
                event_transforms.get_unscored_lesson_ids(app_context),
        }
    @staticmethod
    def build_key(unit, sequence, question_id, question_type):
        # Pack the grouping fields into a single reduce-key string.
        return '%s_%d_%s_%s' % (unit, sequence, question_id, question_type)
    @staticmethod
    def parse_key(key):
        # Inverse of build_key(); assumes the individual components never
        # contain '_' themselves.
        unit, sequence, question_id, question_type = key.split('_')
        return unit, int(sequence), question_id, question_type
    @staticmethod
    def map(student_answers):
        # Emit ((unit, seq, qid, qtype), (answers, score)) per answer.
        params = context.get().mapreduce_spec.mapper.params
        questions_by_usage_id = params['questions_by_usage_id']
        valid_question_ids = params['valid_question_ids']
        group_to_questions = params['group_to_questions']
        assessment_weights = params['assessment_weights']
        all_answers = transforms.loads(student_answers.data)
        for unit_id, unit_responses in all_answers.items():
            # Is this a CourseBuilder Question/QuestionGroup set of answers?
            if ('containedTypes' in unit_responses and
                unit_responses['version'] == '1.5'):
                for answer in event_transforms.unpack_student_answer_1_5(
                        questions_by_usage_id, valid_question_ids,
                        assessment_weights, group_to_questions, unit_responses,
                        timestamp=0):
                    yield (StudentAnswersStatsGenerator.build_key(
                        unit_id, answer.sequence, answer.question_id,
                        answer.question_type), (answer.answers, answer.score))
            # TODO(mgainer): Emit warning counter here if we don't grok
            # the response type.  We will need to cope with Oppia and
            # XBlocks responses.  Do that in a follow-on CL.
    @staticmethod
    def reduce(key, answers_and_score_list):
        # Tally correct/incorrect counts for one question usage.
        correct_answers = {}
        incorrect_answers = {}
        unit_id, sequence, question_id, question_type = (
            StudentAnswersStatsGenerator.parse_key(key))
        unit_id = int(unit_id)
        question_id = long(question_id)  # NOTE: Python 2 'long'
        for packed_data in answers_and_score_list:
            # Values arrive as serialized Python literals from the mapper.
            answers, score = ast.literal_eval(packed_data)
            if question_type == 'SaQuestion':
                if score > 0:
                    # Note: 'answers' only contains one item (not a list) for
                    # SaQuestion.
                    correct_answers.setdefault(answers, 0)
                    correct_answers[answers] += 1
                else:
                    incorrect_answers.setdefault(answers, 0)
                    incorrect_answers[answers] += 1
            elif question_type == 'McQuestion':
                # For multiple-choice questions, we only get one overall score
                # for the question as a whole.  This means that some choices
                # may be incorrect.  Happily, though, the only reason we care
                # about the distinction between correct/incorrect is to limit
                # the quantity of output for incorrect answers.  Since
                # multiple-choice questions are inherently limited, just
                # call all of the answers 'correct'.
                for sub_answer in answers:
                    correct_answers.setdefault(sub_answer, 0)
                    correct_answers[sub_answer] += 1
        def build_reduce_dict(unit_id, sequence, question_id, is_valid,
                              answer, count):
            # NOTE: maintain members in parallel with get_schema() below.
            if not isinstance(answer, basestring):
                answer = str(answer) # Convert numbers to strings.
            return ({'unit_id': str(unit_id),
                     'sequence': sequence,
                     'question_id': str(question_id),
                     'is_valid': is_valid,
                     'answer': answer,
                     'count': count})
        # Emit tuples for each of the correct answers.
        for answer, count in correct_answers.items():
            yield(build_reduce_dict(unit_id, sequence, question_id, True,
                                    answer, count))
        # Emit tuples for incorrect answers.  Free-form answer fields can have
        # a lot of wrong answers.  Only report the most-commonly-occuring N
        # answers, and report a total for the rest.
        if incorrect_answers:
            # Sort (count, answer) pairs descending by count.
            sorted_incorrect = [(v, k) for k, v in incorrect_answers.items()]
            sorted_incorrect.sort()
            sorted_incorrect.reverse()
            for count, answer in sorted_incorrect[0:MAX_INCORRECT_REPORT]:
                yield(build_reduce_dict(unit_id, sequence, question_id, False,
                                        answer, count))
            # Lump everything past the report cap into a single bucket.
            total_other_incorrect = 0
            for count, _ in sorted_incorrect[MAX_INCORRECT_REPORT:]:
                total_other_incorrect += count
            if total_other_incorrect:
                yield(build_reduce_dict(unit_id, sequence, question_id, False,
                                        'Other Incorrect Answers',
                                        total_other_incorrect))
class QuestionAnswersDataSource(data_sources.AbstractSmallRestDataSource):
    """Exposes summarized per-question answer counts via REST."""
    @staticmethod
    def required_generators():
        return [StudentAnswersStatsGenerator]
    @classmethod
    def get_name(cls):
        return 'question_answers'
    @classmethod
    def get_title(cls):
        return 'Question Answers'
    @classmethod
    def get_schema(cls, unused_app_context, unused_catch_and_log,
                   unused_source_context):
        # NOTE: maintain members in parallel with build_reduce_dict() above.
        reg = schema_fields.FieldRegistry(
            'Question Answers',
            description='Summarized results for each use of each question')
        reg.add_property(schema_fields.SchemaField(
            'unit_id', 'Unit ID', 'string',
            description='ID of unit in which question appears. Key to Unit'))
        reg.add_property(schema_fields.SchemaField(
            'sequence', 'Sequence', 'integer',
            description='Ordering within course for question.'))
        reg.add_property(schema_fields.SchemaField(
            'question_id', 'Question ID', 'string',
            description='ID of question. Key to models.QuestionDAO'))
        reg.add_property(schema_fields.SchemaField(
            'is_valid', 'Is Valid', 'boolean',
            description='Whether the answer is "valid". An answer is '
            'valid if it is one of the defined answers to the question. '
            'All answers to multiple-choice questions, correct or incorrect '
            'are considered valid. Answers to single-answer questions '
            '(i.e., type-in-an-answer) questions are only considered valid '
            'if they earned a positive score. The most-commonly guessed '
            'wrong answers are also reported with this field set to False. '
            'The count of the rest of the wrong answers is lumped into a '
            'single item, "Other Incorrect Answers".'))
        reg.add_property(schema_fields.SchemaField(
            'answer', 'Answer', 'string',
            description='The actually-selected answer'))
        reg.add_property(schema_fields.SchemaField(
            'count', 'Count', 'integer',
            description='The number of times this answer was given.'))
        return reg.get_json_schema_dict()['properties']
    @classmethod
    def fetch_values(cls, app_context, unused_source_context, unused_schema,
                     unused_catch_and_log, unused_page_number,
                     student_answers_job):
        # Return all job results in one page, sorted for display.
        def ordering(a1, a2):
            # Python 2 cmp-style comparator: by unit, then sequence, then
            # valid-before-invalid, then answer text.
            return (cmp(a1['unit_id'], a2['unit_id']) or
                    cmp(a1['sequence'], a2['sequence']) or
                    cmp(a2['is_valid'], a1['is_valid']) or
                    cmp(a1['answer'], a2['answer']))
        ret = list(jobs.MapReduceJob.get_results(student_answers_job))
        ret.sort(ordering)
        return ret, 0
class CourseQuestionsDataSource(data_sources.AbstractSmallRestDataSource):
    """Exposes facts about each question defined in the course."""
    @classmethod
    def get_name(cls):
        return 'course_questions'
    @classmethod
    def get_title(cls):
        return 'Course Questions'
    @classmethod
    def exportable(cls):
        return True
    @classmethod
    def get_schema(cls, unused_app_context, unused_catch_and_log,
                   unused_source_context):
        reg = schema_fields.FieldRegistry(
            'Course Questions',
            description='Facts about each usage of each question in a course.')
        reg.add_property(schema_fields.SchemaField(
            'question_id', 'Question ID', 'string',
            description='ID of question. Key to models.QuestionDAO'))
        reg.add_property(schema_fields.SchemaField(
            'description', 'Description', 'string',
            description='User-entered description of question.'))
        reg.add_property(schema_fields.SchemaField(
            'text', 'Text', 'string',
            description='Text of the question.'))
        # pylint: disable=unused-variable
        arrayMember = schema_fields.SchemaField(
            'option_text', 'Option Text', 'string',
            description='Text of the multiple-choice option')
        reg.add_property(schema_fields.FieldArray(
            'choices', 'Choices', item_type=arrayMember,
            description='Multiple-choice question options'))
        return reg.get_json_schema_dict()['properties']
    @classmethod
    def fetch_values(cls, app_context, unused_source_context, unused_schema,
                     unused_catch_and_log, unused_page_number):
        # Look up questions from DB.
        questions = []
        for question in models.QuestionDAO.get_all():
            item = {
                'question_id': str(question.id),
                'description': question.dict['description'],
                'text': question.dict['question'],
            }
            # Only multiple-choice questions carry a 'choices' member;
            # emit an empty list for the rest so the schema stays uniform.
            if 'choices' in question.dict:
                item['choices'] = [c['text'] for c in question.dict['choices']]
            else:
                item['choices'] = []
            questions.append(item)
        return questions, 0
class CourseUnitsDataSource(data_sources.AbstractSmallRestDataSource):
    """Exposes facts about each unit/assessment/link in the course."""
    @classmethod
    def get_name(cls):
        return 'course_units'
    @classmethod
    def get_title(cls):
        return 'Course Units'
    @classmethod
    def exportable(cls):
        return True
    @classmethod
    def get_schema(cls, unused_app_context, unused_catch_and_log,
                   unused_source_context):
        # NOTE: maintain members in parallel with fetch_values() below.
        reg = schema_fields.FieldRegistry(
            'Units',
            description='Units (units, assessments, links) in a course')
        reg.add_property(schema_fields.SchemaField(
            'unit_id', 'Unit ID', 'string',
            description='ID of unit in which question appears. Key to Unit'))
        reg.add_property(schema_fields.SchemaField(
            'now_available', 'Now Available', 'boolean',
            description='Whether the unit is publicly available'))
        reg.add_property(schema_fields.SchemaField(
            'type', 'Type', 'string',
            description='Type of unit. "U":unit, "A":assessment, "L":link'))
        reg.add_property(schema_fields.SchemaField(
            'title', 'Title', 'string',
            description='Display title of the unit'))
        reg.add_property(schema_fields.SchemaField(
            'release_date', 'Release Date', 'string',
            description='Date the unit is to be made publicly available'))
        reg.add_property(schema_fields.SchemaField(
            'props', 'Properties', 'string',
            description='Site-specific additional properties added to unit'))
        reg.add_property(schema_fields.SchemaField(
            'weight', 'Weight', 'number',
            description='Weight to give to the unit when scoring.'))
        return reg.get_json_schema_dict()['properties']
    @classmethod
    def fetch_values(cls, app_context, unused_source_context, unused_schema,
                     unused_catch_and_log, unused_page_number):
        # Return one row per unit, all in a single page.
        units = []
        course = courses.Course(None, app_context=app_context)
        for unit in course.get_units():
            units.append({
                'unit_id': str(unit.unit_id),
                'type': unit.type,
                'title': unit.title,
                'release_date': unit.release_date,
                'now_available': course.is_unit_available(unit),
                'props': str(unit.properties),
                'weight': float(unit.weight)
            })
        return units, 0
|
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Message registry for apitools."""
import collections
import contextlib
import json
import six
from apitools.base.protorpclite import descriptor
from apitools.base.protorpclite import messages
from apitools.gen import extended_descriptor
from apitools.gen import util
# Pairing of a discovery-document type name with the protorpclite field
# variant used to represent it (see the *_MAP tables below).
TypeInfo = collections.namedtuple('TypeInfo', ('type_name', 'variant'))
class MessageRegistry(object):
    """Registry for message types.

    This closely mirrors a messages.FileDescriptor, but adds additional
    attributes (such as message and field descriptions) and some extra
    code for validation and cycle detection.
    """

    # Type information from these two maps comes from here:
    # https://developers.google.com/discovery/v1/type-format
    PRIMITIVE_TYPE_INFO_MAP = {
        'string': TypeInfo(type_name='string',
                           variant=messages.StringField.DEFAULT_VARIANT),
        'integer': TypeInfo(type_name='integer',
                            variant=messages.IntegerField.DEFAULT_VARIANT),
        'boolean': TypeInfo(type_name='boolean',
                            variant=messages.BooleanField.DEFAULT_VARIANT),
        'number': TypeInfo(type_name='number',
                           variant=messages.FloatField.DEFAULT_VARIANT),
        'any': TypeInfo(type_name='extra_types.JsonValue',
                        variant=messages.Variant.MESSAGE),
    }

    PRIMITIVE_FORMAT_MAP = {
        'int32': TypeInfo(type_name='integer',
                          variant=messages.Variant.INT32),
        'uint32': TypeInfo(type_name='integer',
                           variant=messages.Variant.UINT32),
        'int64': TypeInfo(type_name='string',
                          variant=messages.Variant.INT64),
        'uint64': TypeInfo(type_name='string',
                           variant=messages.Variant.UINT64),
        'double': TypeInfo(type_name='number',
                           variant=messages.Variant.DOUBLE),
        'float': TypeInfo(type_name='number',
                          variant=messages.Variant.FLOAT),
        'byte': TypeInfo(type_name='byte',
                         variant=messages.BytesField.DEFAULT_VARIANT),
        'date': TypeInfo(type_name='extra_types.DateField',
                         variant=messages.Variant.STRING),
        'date-time': TypeInfo(
            type_name=('apitools.base.protorpclite.message_types.'
                       'DateTimeMessage'),
            variant=messages.Variant.MESSAGE),
    }

    def __init__(self, client_info, names, description, root_package_dir,
                 base_files_package, protorpc_package):
        self.__names = names
        self.__client_info = client_info
        self.__package = client_info.package
        self.__description = util.CleanDescription(description)
        self.__root_package_dir = root_package_dir
        self.__base_files_package = base_files_package
        self.__protorpc_package = protorpc_package
        self.__file_descriptor = extended_descriptor.ExtendedFileDescriptor(
            package=self.__package, description=self.__description)
        # Add required imports
        self.__file_descriptor.additional_imports = [
            'from %s import messages as _messages' % self.__protorpc_package,
        ]
        # Map from scoped names (i.e. Foo.Bar) to MessageDescriptors.
        self.__message_registry = collections.OrderedDict()
        # A set of types that we're currently adding (for cycle detection).
        self.__nascent_types = set()
        # A set of types for which we've seen a reference but no
        # definition; if this set is nonempty, validation fails.
        self.__unknown_types = set()
        # Used for tracking paths during message creation
        self.__current_path = []
        # Where to register created messages
        self.__current_env = self.__file_descriptor

    # TODO(craigcitro): Add a `Finalize` method.

    @property
    def file_descriptor(self):
        """The validated ExtendedFileDescriptor for all registered types."""
        self.Validate()
        return self.__file_descriptor

    def WriteProtoFile(self, printer):
        """Write the messages file to out as proto."""
        self.Validate()
        extended_descriptor.WriteMessagesFile(
            self.__file_descriptor, self.__package, self.__client_info.version,
            printer)

    def WriteFile(self, printer):
        """Write the messages file to out."""
        self.Validate()
        extended_descriptor.WritePythonFile(
            self.__file_descriptor, self.__package, self.__client_info.version,
            printer)

    def Validate(self):
        """Raise ValueError if any type is half-created or never defined."""
        # `or` picks whichever set is nonempty (nascent types first).
        mysteries = self.__nascent_types or self.__unknown_types
        if mysteries:
            raise ValueError('Malformed MessageRegistry: %s' % mysteries)

    def __ComputeFullName(self, name):
        # Scope `name` under the path of the message currently being built.
        return '.'.join(map(six.text_type, self.__current_path[:] + [name]))

    def __AddImport(self, new_import):
        # Deduplicate: only record an import line the first time we see it.
        if new_import not in self.__file_descriptor.additional_imports:
            self.__file_descriptor.additional_imports.append(new_import)

    def __DeclareDescriptor(self, name):
        # Mark `name` as under construction for cycle detection.
        self.__nascent_types.add(self.__ComputeFullName(name))

    def __RegisterDescriptor(self, new_descriptor):
        """Register the given descriptor in this registry."""
        if not isinstance(new_descriptor, (
                extended_descriptor.ExtendedMessageDescriptor,
                extended_descriptor.ExtendedEnumDescriptor)):
            raise ValueError('Cannot add descriptor of type %s' % (
                type(new_descriptor),))
        full_name = self.__ComputeFullName(new_descriptor.name)
        if full_name in self.__message_registry:
            raise ValueError(
                'Attempt to re-register descriptor %s' % full_name)
        # Registration must be preceded by __DeclareDescriptor.
        if full_name not in self.__nascent_types:
            raise ValueError('Directly adding types is not supported')
        new_descriptor.full_name = full_name
        self.__message_registry[full_name] = new_descriptor
        if isinstance(new_descriptor,
                      extended_descriptor.ExtendedMessageDescriptor):
            self.__current_env.message_types.append(new_descriptor)
        elif isinstance(new_descriptor,
                        extended_descriptor.ExtendedEnumDescriptor):
            self.__current_env.enum_types.append(new_descriptor)
        # The type is now fully defined: it is neither unknown nor nascent.
        self.__unknown_types.discard(full_name)
        self.__nascent_types.remove(full_name)

    def LookupDescriptor(self, name):
        """Return the descriptor registered as `name`, or None."""
        return self.__GetDescriptorByName(name)

    def LookupDescriptorOrDie(self, name):
        """Return the descriptor registered as `name`; raise if missing."""
        message_descriptor = self.LookupDescriptor(name)
        if message_descriptor is None:
            # Bug fix: the name was previously passed as a second exception
            # argument, leaving the literal "%s" in the message.
            raise ValueError('No message descriptor named "%s"' % name)
        return message_descriptor

    def __GetDescriptor(self, name):
        return self.__GetDescriptorByName(self.__ComputeFullName(name))

    def __GetDescriptorByName(self, name):
        if name in self.__message_registry:
            return self.__message_registry[name]
        if name in self.__nascent_types:
            raise ValueError(
                'Cannot retrieve type currently being created: %s' % name)
        return None

    @contextlib.contextmanager
    def __DescriptorEnv(self, message_descriptor):
        """Temporarily scope creation/lookup under message_descriptor."""
        # TODO(craigcitro): Typecheck?
        previous_env = self.__current_env
        self.__current_path.append(message_descriptor.name)
        self.__current_env = message_descriptor
        yield
        self.__current_path.pop()
        self.__current_env = previous_env

    def AddEnumDescriptor(self, name, description,
                          enum_values, enum_descriptions):
        """Add a new EnumDescriptor named name with the given enum values."""
        message = extended_descriptor.ExtendedEnumDescriptor()
        message.name = self.__names.ClassName(name)
        message.description = util.CleanDescription(description)
        self.__DeclareDescriptor(message.name)
        for index, (enum_name, enum_description) in enumerate(
                zip(enum_values, enum_descriptions)):
            enum_value = extended_descriptor.ExtendedEnumValueDescriptor()
            enum_value.name = self.__names.NormalizeEnumName(enum_name)
            if enum_value.name != enum_name:
                # Record the JSON <-> Python name mapping when normalization
                # changed the name; encoding support is needed to apply it.
                message.enum_mappings.append(
                    extended_descriptor.ExtendedEnumDescriptor.JsonEnumMapping(
                        python_name=enum_value.name, json_name=enum_name))
                self.__AddImport('from %s import encoding' %
                                 self.__base_files_package)
            enum_value.number = index
            enum_value.description = util.CleanDescription(
                enum_description or '<no description>')
            message.values.append(enum_value)
        self.__RegisterDescriptor(message)

    def __DeclareMessageAlias(self, schema, alias_for):
        """Declare schema as an alias for alias_for."""
        # TODO(craigcitro): This is a hack. Remove it.
        message = extended_descriptor.ExtendedMessageDescriptor()
        message.name = self.__names.ClassName(schema['id'])
        message.alias_for = alias_for
        self.__DeclareDescriptor(message.name)
        self.__AddImport('from %s import extra_types' %
                         self.__base_files_package)
        self.__RegisterDescriptor(message)

    def __AddAdditionalProperties(self, message, schema, properties):
        """Add an additionalProperties field to message."""
        additional_properties_info = schema['additionalProperties']
        entries_type_name = self.__AddAdditionalPropertyType(
            message.name, additional_properties_info)
        description = util.CleanDescription(
            additional_properties_info.get('description'))
        if description is None:
            description = 'Additional properties of type %s' % message.name
        attrs = {
            'items': {
                '$ref': entries_type_name,
            },
            'description': description,
            'type': 'array',
        }
        field_name = 'additionalProperties'
        message.fields.append(self.__FieldDescriptorFromProperties(
            field_name, len(properties) + 1, attrs))
        self.__AddImport('from %s import encoding' % self.__base_files_package)
        message.decorators.append(
            'encoding.MapUnrecognizedFields(%r)' % field_name)

    def AddDescriptorFromSchema(self, schema_name, schema):
        """Add a new MessageDescriptor named schema_name based on schema."""
        # TODO(craigcitro): Is schema_name redundant?
        if self.__GetDescriptor(schema_name):
            return
        if schema.get('enum'):
            self.__DeclareEnum(schema_name, schema)
            return
        if schema.get('type') == 'any':
            self.__DeclareMessageAlias(schema, 'extra_types.JsonValue')
            return
        if schema.get('type') != 'object':
            # Bug fix: the type was previously passed as a second exception
            # argument instead of being %-formatted into the message.
            raise ValueError('Cannot create message descriptors for type '
                             '%s' % schema.get('type'))
        message = extended_descriptor.ExtendedMessageDescriptor()
        message.name = self.__names.ClassName(schema['id'])
        message.description = util.CleanDescription(schema.get(
            'description', 'A %s object.' % message.name))
        self.__DeclareDescriptor(message.name)
        with self.__DescriptorEnv(message):
            properties = schema.get('properties', {})
            for index, (name, attrs) in enumerate(sorted(properties.items())):
                field = self.__FieldDescriptorFromProperties(
                    name, index + 1, attrs)
                message.fields.append(field)
                if field.name != name:
                    # Keep a JSON <-> Python name mapping for cleaned names.
                    message.field_mappings.append(
                        type(message).JsonFieldMapping(
                            python_name=field.name, json_name=name))
                    self.__AddImport(
                        'from %s import encoding' % self.__base_files_package)
            if 'additionalProperties' in schema:
                self.__AddAdditionalProperties(message, schema, properties)
        self.__RegisterDescriptor(message)

    def __AddAdditionalPropertyType(self, name, property_schema):
        """Add a new nested AdditionalProperty message."""
        new_type_name = 'AdditionalProperty'
        property_schema = dict(property_schema)
        # We drop the description here on purpose, so the resulting
        # messages are less repetitive.
        property_schema.pop('description', None)
        description = 'An additional property for a %s object.' % name
        schema = {
            'id': new_type_name,
            'type': 'object',
            'description': description,
            'properties': {
                'key': {
                    'type': 'string',
                    'description': 'Name of the additional property.',
                },
                'value': property_schema,
            },
        }
        self.AddDescriptorFromSchema(new_type_name, schema)
        return new_type_name

    def __AddEntryType(self, entry_type_name, entry_schema, parent_name):
        """Add a type for a list entry."""
        entry_schema.pop('description', None)
        description = 'Single entry in a %s.' % parent_name
        schema = {
            'id': entry_type_name,
            'type': 'object',
            'description': description,
            'properties': {
                'entry': {
                    'type': 'array',
                    'items': entry_schema,
                },
            },
        }
        self.AddDescriptorFromSchema(entry_type_name, schema)
        return entry_type_name

    def __FieldDescriptorFromProperties(self, name, index, attrs):
        """Create a field descriptor for these attrs."""
        field = descriptor.FieldDescriptor()
        field.name = self.__names.CleanName(name)
        field.number = index
        field.label = self.__ComputeLabel(attrs)
        new_type_name_hint = self.__names.ClassName(
            '%sValue' % self.__names.ClassName(name))
        type_info = self.__GetTypeInfo(attrs, new_type_name_hint)
        field.type_name = type_info.type_name
        field.variant = type_info.variant
        if 'default' in attrs:
            # TODO(craigcitro): Correctly handle non-primitive default values.
            default = attrs['default']
            if not (field.type_name == 'string' or
                    field.variant == messages.Variant.ENUM):
                # Non-string, non-enum defaults arrive JSON-encoded.
                default = str(json.loads(default))
            if field.variant == messages.Variant.ENUM:
                default = self.__names.NormalizeEnumName(default)
            field.default_value = default
        extended_field = extended_descriptor.ExtendedFieldDescriptor()
        extended_field.name = field.name
        extended_field.description = util.CleanDescription(
            attrs.get('description', 'A %s attribute.' % field.type_name))
        extended_field.field_descriptor = field
        return extended_field

    @staticmethod
    def __ComputeLabel(attrs):
        """Map schema attrs to a proto field label (REQUIRED/REPEATED/...)."""
        if attrs.get('required', False):
            return descriptor.FieldDescriptor.Label.REQUIRED
        elif attrs.get('type') == 'array':
            return descriptor.FieldDescriptor.Label.REPEATED
        elif attrs.get('repeated'):
            return descriptor.FieldDescriptor.Label.REPEATED
        return descriptor.FieldDescriptor.Label.OPTIONAL

    def __DeclareEnum(self, enum_name, attrs):
        """Register an enum from attrs and return its TypeInfo."""
        description = util.CleanDescription(attrs.get('description', ''))
        enum_values = attrs['enum']
        enum_descriptions = attrs.get(
            'enumDescriptions', [''] * len(enum_values))
        self.AddEnumDescriptor(enum_name, description,
                               enum_values, enum_descriptions)
        self.__AddIfUnknown(enum_name)
        return TypeInfo(type_name=enum_name, variant=messages.Variant.ENUM)

    def __AddIfUnknown(self, type_name):
        """Track a referenced type we have not (yet) seen a definition for."""
        type_name = self.__names.ClassName(type_name)
        full_type_name = self.__ComputeFullName(type_name)
        # Membership test directly on the dict (no need for .keys()).
        if (full_type_name not in self.__message_registry and
                type_name not in self.__message_registry):
            self.__unknown_types.add(type_name)

    def __GetTypeInfo(self, attrs, name_hint):
        """Return a TypeInfo object for attrs, creating one if needed."""
        type_ref = self.__names.ClassName(attrs.get('$ref'))
        type_name = attrs.get('type')
        if not (type_ref or type_name):
            raise ValueError('No type found for %s' % attrs)

        if type_ref:
            self.__AddIfUnknown(type_ref)
            # We don't actually know this is a message -- it might be an
            # enum. However, we can't check that until we've created all the
            # types, so we come back and fix this up later.
            return TypeInfo(
                type_name=type_ref, variant=messages.Variant.MESSAGE)

        if 'enum' in attrs:
            enum_name = '%sValuesEnum' % name_hint
            return self.__DeclareEnum(enum_name, attrs)

        if 'format' in attrs:
            type_info = self.PRIMITIVE_FORMAT_MAP.get(attrs['format'])
            if type_info is None:
                # If we don't recognize the format, the spec says we fall back
                # to just using the type name.
                if type_name in self.PRIMITIVE_TYPE_INFO_MAP:
                    return self.PRIMITIVE_TYPE_INFO_MAP[type_name]
                raise ValueError('Unknown type/format "%s"/"%s"' % (
                    attrs['format'], type_name))
            if type_info.type_name.startswith((
                    'apitools.base.protorpclite.message_types.',
                    'message_types.')):
                self.__AddImport(
                    'from %s import message_types as _message_types' %
                    self.__protorpc_package)
            if type_info.type_name.startswith('extra_types.'):
                self.__AddImport(
                    'from %s import extra_types' % self.__base_files_package)
            return type_info

        if type_name in self.PRIMITIVE_TYPE_INFO_MAP:
            type_info = self.PRIMITIVE_TYPE_INFO_MAP[type_name]
            if type_info.type_name.startswith('extra_types.'):
                self.__AddImport(
                    'from %s import extra_types' % self.__base_files_package)
            return type_info

        if type_name == 'array':
            items = attrs.get('items')
            if not items:
                raise ValueError('Array type with no item type: %s' % attrs)
            entry_name_hint = self.__names.ClassName(
                items.get('title') or '%sListEntry' % name_hint)
            entry_label = self.__ComputeLabel(items)
            if entry_label == descriptor.FieldDescriptor.Label.REPEATED:
                # Array-of-array: wrap the inner list in a synthetic entry
                # message, since proto fields cannot be doubly repeated.
                parent_name = self.__names.ClassName(
                    items.get('title') or name_hint)
                entry_type_name = self.__AddEntryType(
                    entry_name_hint, items.get('items'), parent_name)
                return TypeInfo(type_name=entry_type_name,
                                variant=messages.Variant.MESSAGE)
            return self.__GetTypeInfo(items, entry_name_hint)
        elif type_name == 'any':
            self.__AddImport('from %s import extra_types' %
                             self.__base_files_package)
            return self.PRIMITIVE_TYPE_INFO_MAP['any']
        elif type_name == 'object':
            # TODO(craigcitro): Think of a better way to come up with names.
            if not name_hint:
                raise ValueError(
                    'Cannot create subtype without some name hint')
            schema = dict(attrs)
            schema['id'] = name_hint
            self.AddDescriptorFromSchema(name_hint, schema)
            self.__AddIfUnknown(name_hint)
            return TypeInfo(
                type_name=name_hint, variant=messages.Variant.MESSAGE)

        raise ValueError('Unknown type: %s' % type_name)

    def FixupMessageFields(self):
        """Fix variants of fields whose referenced type turned out an enum."""
        for message_type in self.file_descriptor.message_types:
            self._FixupMessage(message_type)

    def _FixupMessage(self, message_type):
        with self.__DescriptorEnv(message_type):
            for field in message_type.fields:
                if field.field_descriptor.variant == messages.Variant.MESSAGE:
                    field_type_name = field.field_descriptor.type_name
                    field_type = self.LookupDescriptor(field_type_name)
                    if isinstance(field_type,
                                  extended_descriptor.ExtendedEnumDescriptor):
                        field.field_descriptor.variant = messages.Variant.ENUM
            for submessage_type in message_type.message_types:
                self._FixupMessage(submessage_type)
|
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import os
import time
import tempfile
import re
import traceback
sys.path.insert(0, os.path.abspath(os.path.realpath(os.path.dirname(sys.argv[0]))))
from automation import Automation
from remoteautomation import RemoteAutomation
from runtests import Mochitest
from runtests import MochitestOptions
from runtests import MochitestServer
import devicemanager, devicemanagerADB, devicemanagerSUT
import manifestparser
class RemoteOptions(MochitestOptions):
    """Command-line options for running mochitests on a remote device.

    Extends the desktop MochitestOptions with device-specific flags
    (transport, addresses, remote paths, robocop support) and the
    verification logic that resolves them against a device manager.
    """

    def __init__(self, automation, scriptdir, **kwargs):
        defaults = {}
        MochitestOptions.__init__(self, automation, scriptdir)

        self.add_option("--remote-app-path", action="store",
                    type = "string", dest = "remoteAppPath",
                    help = "Path to remote executable relative to device root using only forward slashes. Either this or app must be specified but not both")
        defaults["remoteAppPath"] = None

        self.add_option("--deviceIP", action="store",
                    type = "string", dest = "deviceIP",
                    help = "ip address of remote device to test")
        defaults["deviceIP"] = None

        self.add_option("--dm_trans", action="store",
                    type = "string", dest = "dm_trans",
                    help = "the transport to use to communicate with device: [adb|sut]; default=sut")
        defaults["dm_trans"] = "sut"

        self.add_option("--devicePort", action="store",
                    type = "string", dest = "devicePort",
                    help = "port of remote device to test")
        defaults["devicePort"] = 20701

        self.add_option("--remote-product-name", action="store",
                    type = "string", dest = "remoteProductName",
                    help = "The executable's name of remote product to test - either fennec or firefox, defaults to fennec")
        defaults["remoteProductName"] = "fennec"

        self.add_option("--remote-logfile", action="store",
                    type = "string", dest = "remoteLogFile",
                    help = "Name of log file on the device relative to the device root. PLEASE ONLY USE A FILENAME.")
        defaults["remoteLogFile"] = None

        self.add_option("--remote-webserver", action = "store",
                    type = "string", dest = "remoteWebServer",
                    help = "ip address where the remote web server is hosted at")
        defaults["remoteWebServer"] = None

        self.add_option("--http-port", action = "store",
                    type = "string", dest = "httpPort",
                    help = "http port of the remote web server")
        defaults["httpPort"] = automation.DEFAULT_HTTP_PORT

        self.add_option("--ssl-port", action = "store",
                    type = "string", dest = "sslPort",
                    help = "ssl port of the remote web server")
        defaults["sslPort"] = automation.DEFAULT_SSL_PORT

        self.add_option("--pidfile", action = "store",
                    type = "string", dest = "pidFile",
                    help = "name of the pidfile to generate")
        defaults["pidFile"] = ""

        self.add_option("--robocop", action = "store",
                    type = "string", dest = "robocop",
                    help = "name of the .ini file containing the list of tests to run")
        defaults["robocop"] = ""

        self.add_option("--robocop-path", action = "store",
                    type = "string", dest = "robocopPath",
                    help = "Path to the folder where robocop.apk is located at.  Primarily used for ADB test running")
        defaults["robocopPath"] = ""

        self.add_option("--robocop-ids", action = "store",
                    type = "string", dest = "robocopIds",
                    help = "name of the file containing the view ID map (fennec_ids.txt)")
        defaults["robocopIds"] = ""

        # Remote runs always autorun against a device-resident log/profile.
        defaults["remoteTestRoot"] = None
        defaults["logFile"] = "mochitest.log"
        defaults["autorun"] = True
        defaults["closeWhenDone"] = True
        defaults["testPath"] = ""
        defaults["app"] = None

        self.set_defaults(**defaults)

    def verifyRemoteOptions(self, options, automation):
        """Resolve and validate device-side options; return None on error."""
        options.remoteTestRoot = automation._devicemanager.getDeviceRoot()
        productRoot = options.remoteTestRoot + "/" + automation._product

        # If utilityPath is still the desktop default, point it at the device.
        if (options.utilityPath == self._automation.DIST_BIN):
            options.utilityPath = productRoot + "/bin"

        if options.remoteWebServer == None:
            if os.name != "nt":
                options.remoteWebServer = automation.getLanIp()
            else:
                print "ERROR: you must specify a --remote-webserver=<ip address>\n"
                return None

        options.webServer = options.remoteWebServer

        if (options.deviceIP == None):
            print "ERROR: you must provide a device IP"
            return None

        if (options.remoteLogFile == None):
            options.remoteLogFile = options.remoteTestRoot + '/logs/mochitest.log'

        if (options.remoteLogFile.count('/') < 1):
            options.remoteLogFile = options.remoteTestRoot + '/' + options.remoteLogFile

        # remoteAppPath or app must be specified to find the product to launch
        if (options.remoteAppPath and options.app):
            print "ERROR: You cannot specify both the remoteAppPath and the app setting"
            return None
        elif (options.remoteAppPath):
            options.app = options.remoteTestRoot + "/" + options.remoteAppPath
        elif (options.app == None):
            # Neither remoteAppPath nor app are set -- error
            print "ERROR: You must specify either appPath or app"
            return None

        # Only reset the xrePath if it wasn't provided
        if (options.xrePath == None):
            if (automation._product == "fennec"):
                options.xrePath = productRoot + "/xulrunner"
            else:
                options.xrePath = options.utilityPath

        # Record the harness pid so external tooling can find/kill us.
        if (options.pidFile != ""):
            f = open(options.pidFile, 'w')
            f.write("%s" % os.getpid())
            f.close()

        # Robocop specific options
        if options.robocop != "":
            if not os.path.exists(options.robocop):
                print "ERROR: Unable to find specified manifest '%s'" % options.robocop
                return None
            options.robocop = os.path.abspath(options.robocop)

        if options.robocopPath != "":
            if not os.path.exists(os.path.join(options.robocopPath, 'robocop.apk')):
                print "ERROR: Unable to find robocop.apk in path '%s'" % options.robocopPath
                return None
            options.robocopPath = os.path.abspath(options.robocopPath)

        if options.robocopIds != "":
            if not os.path.exists(options.robocopIds):
                print "ERROR: Unable to find specified IDs file '%s'" % options.robocopIds
                return None
            options.robocopIds = os.path.abspath(options.robocopIds)

        return options

    def verifyOptions(self, options, mochitest):
        """Run the base-class verification without clobbering remote values."""
        # since we are reusing verifyOptions, it will exit if App is not found
        # Temporarily substitute host-side values, then restore the remote
        # app/ports/webserver afterwards.
        temp = options.app
        options.app = sys.argv[0]
        tempPort = options.httpPort
        tempSSL = options.sslPort
        tempIP = options.webServer
        options = MochitestOptions.verifyOptions(self, options, mochitest)
        options.webServer = tempIP
        options.app = temp
        options.sslPort = tempSSL
        options.httpPort = tempPort

        return options
class MochiRemote(Mochitest):
_automation = None
_dm = None
localProfile = None
logLines = []
def __init__(self, automation, devmgr, options):
self._automation = automation
Mochitest.__init__(self, self._automation)
self._dm = devmgr
self.runSSLTunnel = False
self.remoteProfile = options.remoteTestRoot + "/profile"
self._automation.setRemoteProfile(self.remoteProfile)
self.remoteLog = options.remoteLogFile
self.localLog = options.logFile
def cleanup(self, manifest, options):
if self._dm.fileExists(self.remoteLog):
self._dm.getFile(self.remoteLog, self.localLog)
self._dm.removeFile(self.remoteLog)
else:
print "WARNING: Unable to retrieve log file (%s) from remote " \
"device" % self.remoteLog
self._dm.removeDir(self.remoteProfile)
if (options.pidFile != ""):
try:
os.remove(options.pidFile)
os.remove(options.pidFile + ".xpcshell.pid")
except:
print "Warning: cleaning up pidfile '%s' was unsuccessful from the test harness" % options.pidFile
def findPath(self, paths, filename = None):
for path in paths:
p = path
if filename:
p = os.path.join(p, filename)
if os.path.exists(self.getFullPath(p)):
return path
return None
def startWebServer(self, options):
""" Create the webserver on the host and start it up """
remoteXrePath = options.xrePath
remoteProfilePath = options.profilePath
remoteUtilityPath = options.utilityPath
localAutomation = Automation()
localAutomation.IS_WIN32 = False
localAutomation.IS_LINUX = False
localAutomation.IS_MAC = False
localAutomation.UNIXISH = False
hostos = sys.platform
if (hostos == 'mac' or hostos == 'darwin'):
localAutomation.IS_MAC = True
elif (hostos == 'linux' or hostos == 'linux2'):
localAutomation.IS_LINUX = True
localAutomation.UNIXISH = True
elif (hostos == 'win32' or hostos == 'win64'):
localAutomation.BIN_SUFFIX = ".exe"
localAutomation.IS_WIN32 = True
paths = [options.xrePath, localAutomation.DIST_BIN, self._automation._product, os.path.join('..', self._automation._product)]
options.xrePath = self.findPath(paths)
if options.xrePath == None:
print "ERROR: unable to find xulrunner path for %s, please specify with --xre-path" % (os.name)
sys.exit(1)
paths.append("bin")
paths.append(os.path.join("..", "bin"))
xpcshell = "xpcshell"
if (os.name == "nt"):
xpcshell += ".exe"
if (options.utilityPath):
paths.insert(0, options.utilityPath)
options.utilityPath = self.findPath(paths, xpcshell)
if options.utilityPath == None:
print "ERROR: unable to find utility path for %s, please specify with --utility-path" % (os.name)
sys.exit(1)
options.profilePath = tempfile.mkdtemp()
self.server = MochitestServer(localAutomation, options)
self.server.start()
if (options.pidFile != ""):
f = open(options.pidFile + ".xpcshell.pid", 'w')
f.write("%s" % self.server._process.pid)
f.close()
self.server.ensureReady(self.SERVER_STARTUP_TIMEOUT)
options.xrePath = remoteXrePath
options.utilityPath = remoteUtilityPath
options.profilePath = remoteProfilePath
def stopWebServer(self, options):
self.server.stop()
def buildProfile(self, options):
if self.localProfile:
options.profilePath = self.localProfile
manifest = Mochitest.buildProfile(self, options)
self.localProfile = options.profilePath
self._dm.removeDir(self.remoteProfile)
try:
self._dm.pushDir(options.profilePath, self.remoteProfile)
except devicemanager.DMError:
print "Automation Error: Unable to copy profile to device."
raise
options.profilePath = self.remoteProfile
return manifest
def buildURLOptions(self, options, env):
self.localLog = options.logFile
options.logFile = self.remoteLog
options.profilePath = self.localProfile
retVal = Mochitest.buildURLOptions(self, options, env)
#we really need testConfig.js (for browser chrome)
try:
self._dm.pushDir(options.profilePath, self.remoteProfile)
except devicemanager.DMError:
print "Automation Error: Unable to copy profile to device."
raise
options.profilePath = self.remoteProfile
options.logFile = self.localLog
return retVal
def installChromeFile(self, filename, options):
parts = options.app.split('/')
if (parts[0] == options.app):
return "NO_CHROME_ON_DROID"
path = '/'.join(parts[:-1])
manifest = path + "/chrome/" + os.path.basename(filename)
try:
self._dm.pushFile(filename, manifest)
except devicemanager.DMError:
print "Automation Error: Unable to install Chrome files on device."
raise
return manifest
def getLogFilePath(self, logFile):
return logFile
# In the future we could use LogParser: http://hg.mozilla.org/automation/logparser/
def addLogData(self):
with open(self.localLog) as currentLog:
data = currentLog.readlines()
restart = re.compile('0 INFO SimpleTest START.*')
reend = re.compile('([0-9]+) INFO TEST-START . Shutdown.*')
start_found = False
end_found = False
for line in data:
if reend.match(line):
end_found = True
start_found = False
return
if start_found and not end_found:
# Append the line without the number to increment
self.logLines.append(' '.join(line.split(' ')[1:]))
if restart.match(line):
start_found = True
def printLog(self):
passed = 0
failed = 0
todo = 0
incr = 1
logFile = []
logFile.append("0 INFO SimpleTest START")
for line in self.logLines:
if line.startswith("INFO TEST-PASS"):
passed += 1
elif line.startswith("INFO TEST-UNEXPECTED"):
failed += 1
elif line.startswith("INFO TEST-KNOWN"):
todo += 1
incr += 1
logFile.append("%s INFO TEST-START | Shutdown" % incr)
incr += 1
logFile.append("%s INFO Passed: %s" % (incr, passed))
incr += 1
logFile.append("%s INFO Failed: %s" % (incr, failed))
incr += 1
logFile.append("%s INFO Todo: %s" % (incr, todo))
incr += 1
logFile.append("%s INFO SimpleTest FINISHED" % incr)
# TODO: Consider not printing to stdout because we might be duplicating output
print '\n'.join(logFile)
with open(self.localLog, 'w') as localLog:
localLog.write('\n'.join(logFile))
if failed > 0:
return 1
return 0
def main():
scriptdir = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
auto = RemoteAutomation(None, "fennec")
parser = RemoteOptions(auto, scriptdir)
options, args = parser.parse_args()
if (options.dm_trans == "adb"):
if (options.deviceIP):
dm = devicemanagerADB.DeviceManagerADB(options.deviceIP, options.devicePort)
else:
dm = devicemanagerADB.DeviceManagerADB()
else:
dm = devicemanagerSUT.DeviceManagerSUT(options.deviceIP, options.devicePort)
auto.setDeviceManager(dm)
options = parser.verifyRemoteOptions(options, auto)
if (options == None):
print "ERROR: Invalid options specified, use --help for a list of valid options"
sys.exit(1)
productPieces = options.remoteProductName.split('.')
if (productPieces != None):
auto.setProduct(productPieces[0])
else:
auto.setProduct(options.remoteProductName)
mochitest = MochiRemote(auto, dm, options)
options = parser.verifyOptions(options, mochitest)
if (options == None):
sys.exit(1)
logParent = os.path.dirname(options.remoteLogFile)
dm.mkDir(logParent);
auto.setRemoteLog(options.remoteLogFile)
auto.setServerInfo(options.webServer, options.httpPort, options.sslPort)
print dm.getInfo()
procName = options.app.split('/')[-1]
if (dm.processExist(procName)):
dm.killProcess(procName)
if options.robocop != "":
mp = manifestparser.TestManifest(strict=False)
# TODO: pull this in dynamically
mp.read(options.robocop)
robocop_tests = mp.active_tests(exists=False)
fHandle = tempfile.NamedTemporaryFile(suffix='.config',
prefix='robotium-',
dir=os.getcwd(),
delete=False)
fHandle.write("profile=%s\n" % (mochitest.remoteProfile))
fHandle.write("logfile=%s\n" % (options.remoteLogFile))
fHandle.write("host=http://mochi.test:8888/tests\n")
fHandle.write("rawhost=http://%s:%s/tests\n" % (options.remoteWebServer, options.httpPort))
fHandle.close()
deviceRoot = dm.getDeviceRoot()
dm.removeFile(os.path.join(deviceRoot, "fennec_ids.txt"))
dm.removeFile(os.path.join(deviceRoot, "robotium.config"))
dm.pushFile(fHandle.name, os.path.join(deviceRoot, "robotium.config"))
os.unlink(fHandle.name)
fennec_ids = os.path.abspath("fennec_ids.txt")
if not os.path.exists(fennec_ids) and options.robocopIds:
fennec_ids = options.robocopIds
dm.pushFile(fennec_ids, os.path.join(deviceRoot, "fennec_ids.txt"))
options.extraPrefs.append('robocop.logfile="%s/robocop.log"' % deviceRoot)
options.extraPrefs.append('browser.search.suggest.enabled=true')
options.extraPrefs.append('browser.search.suggest.prompted=true')
if (options.dm_trans == 'adb' and options.robocopPath):
dm._checkCmd(["install", "-r", os.path.join(options.robocopPath, "robocop.apk")])
appname = options.app
retVal = None
logcat = []
for test in robocop_tests:
if options.testPath and options.testPath != test['name']:
continue
options.app = "am"
options.browserArgs = ["instrument", "-w", "-e", "deviceroot", deviceRoot, "-e", "class"]
options.browserArgs.append("%s.tests.%s" % (appname, test['name']))
options.browserArgs.append("org.mozilla.roboexample.test/%s.FennecInstrumentationTestRunner" % appname)
try:
dm.recordLogcat()
retVal = mochitest.runTests(options)
logcat = dm.getLogcat()
mochitest.addLogData()
except:
print "Automation Error: Exception caught while running tests"
traceback.print_exc()
mochitest.stopWebServer(options)
mochitest.stopWebSocketServer(options)
try:
self.cleanup(None, options)
except:
pass
sys.exit(1)
if retVal is None:
print "No tests run. Did you pass an invalid TEST_PATH?"
retVal = 1
retVal = mochitest.printLog()
else:
try:
dm.recordLogcat()
retVal = mochitest.runTests(options)
logcat = dm.getLogcat()
except:
print "Automation Error: Exception caught while running tests"
traceback.print_exc()
mochitest.stopWebServer(options)
mochitest.stopWebSocketServer(options)
try:
self.cleanup(None, options)
except:
pass
sys.exit(1)
print ''.join(logcat[-500:-1])
print dm.getInfo()
sys.exit(retVal)
if __name__ == "__main__":
main()
|
|
"""Config flow for Plex."""
import copy
import logging
from aiohttp import web_response
import plexapi.exceptions
from plexapi.gdm import GDM
from plexauth import PlexAuth
import requests.exceptions
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_HOST,
CONF_PORT,
CONF_SSL,
CONF_TOKEN,
CONF_URL,
CONF_VERIFY_SSL,
)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.network import get_url
from .const import ( # pylint: disable=unused-import
AUTH_CALLBACK_NAME,
AUTH_CALLBACK_PATH,
AUTOMATIC_SETUP_STRING,
CONF_IGNORE_NEW_SHARED_USERS,
CONF_IGNORE_PLEX_WEB_CLIENTS,
CONF_MONITORED_USERS,
CONF_SERVER,
CONF_SERVER_IDENTIFIER,
CONF_USE_EPISODE_ART,
DEFAULT_PORT,
DEFAULT_SSL,
DEFAULT_VERIFY_SSL,
DOMAIN,
MANUAL_SETUP_STRING,
PLEX_SERVER_CONFIG,
SERVERS,
X_PLEX_DEVICE_NAME,
X_PLEX_PLATFORM,
X_PLEX_PRODUCT,
X_PLEX_VERSION,
)
from .errors import NoServersFound, ServerNotSpecified
from .server import PlexServer
_LOGGER = logging.getLogger(__package__)
@callback
def configured_servers(hass):
    """Return the set of Plex server identifiers already configured."""
    identifiers = set()
    for entry in hass.config_entries.async_entries(DOMAIN):
        identifiers.add(entry.data[CONF_SERVER_IDENTIFIER])
    return identifiers
async def async_discover(hass):
    """Scan the local network for Plex servers and feed each hit to the flow."""
    scanner = GDM()
    # GDM scanning is blocking network I/O, so run it in the executor.
    await hass.async_add_executor_job(scanner.scan)
    for entry_data in scanner.entries:
        await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_INTEGRATION_DISCOVERY},
            data=entry_data,
        )
class PlexFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a Plex config flow."""
    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return PlexOptionsFlowHandler(config_entry)
    def __init__(self):
        """Initialize the Plex flow."""
        # Server config currently being validated; reused when the user must
        # pick one of several servers in async_step_select_server().
        self.current_login = {}
        self.available_servers = None
        self.plexauth = None
        self.token = None
        self.client_id = None
        # True once the user chose manual setup; routes validation errors
        # back to the manual form instead of the initial step.
        self._manual = False
    async def async_step_user(
        self, user_input=None, errors=None
    ): # pylint: disable=arguments-differ
        """Handle a flow initialized by the user."""
        if user_input is not None:
            # Any submission from the basic form starts external auth.
            return await self.async_step_plex_website_auth()
        if self.show_advanced_options:
            return await self.async_step_user_advanced(errors=errors)
        return self.async_show_form(step_id="user", errors=errors)
    async def async_step_user_advanced(self, user_input=None, errors=None):
        """Handle an advanced mode flow initialized by the user."""
        if user_input is not None:
            if user_input.get("setup_method") == MANUAL_SETUP_STRING:
                self._manual = True
                return await self.async_step_manual_setup()
            return await self.async_step_plex_website_auth()
        data_schema = vol.Schema(
            {
                vol.Required("setup_method", default=AUTOMATIC_SETUP_STRING): vol.In(
                    [AUTOMATIC_SETUP_STRING, MANUAL_SETUP_STRING]
                )
            }
        )
        return self.async_show_form(
            step_id="user_advanced", data_schema=data_schema, errors=errors
        )
    async def async_step_manual_setup(self, user_input=None, errors=None):
        """Begin manual configuration."""
        if user_input is not None and errors is None:
            # Rebuild CONF_URL from host/port/ssl; drop any stale value first.
            user_input.pop(CONF_URL, None)
            host = user_input.get(CONF_HOST)
            if host:
                port = user_input[CONF_PORT]
                prefix = "https" if user_input.get(CONF_SSL) else "http"
                user_input[CONF_URL] = f"{prefix}://{host}:{port}"
            elif CONF_TOKEN not in user_input:
                # Neither a host nor a token: re-show the form with an error.
                return await self.async_step_manual_setup(
                    user_input=user_input, errors={"base": "host_or_token"}
                )
            return await self.async_step_server_validate(user_input)
        # Pre-fill the form with the previous (possibly invalid) submission.
        previous_input = user_input or {}
        data_schema = vol.Schema(
            {
                vol.Optional(
                    CONF_HOST,
                    description={"suggested_value": previous_input.get(CONF_HOST)},
                ): str,
                vol.Required(
                    CONF_PORT, default=previous_input.get(CONF_PORT, DEFAULT_PORT)
                ): int,
                vol.Required(
                    CONF_SSL, default=previous_input.get(CONF_SSL, DEFAULT_SSL)
                ): bool,
                vol.Required(
                    CONF_VERIFY_SSL,
                    default=previous_input.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL),
                ): bool,
                vol.Optional(
                    CONF_TOKEN,
                    description={"suggested_value": previous_input.get(CONF_TOKEN)},
                ): str,
            }
        )
        return self.async_show_form(
            step_id="manual_setup", data_schema=data_schema, errors=errors
        )
    async def async_step_server_validate(self, server_config):
        """Validate a provided configuration."""
        errors = {}
        self.current_login = server_config
        plex_server = PlexServer(self.hass, server_config)
        try:
            # connect() does blocking network I/O, so run it in the executor.
            await self.hass.async_add_executor_job(plex_server.connect)
        except NoServersFound:
            _LOGGER.error("No servers linked to Plex account")
            errors["base"] = "no_servers"
        except (plexapi.exceptions.BadRequest, plexapi.exceptions.Unauthorized):
            _LOGGER.error("Invalid credentials provided, config not created")
            errors[CONF_TOKEN] = "faulty_credentials"
        except requests.exceptions.SSLError as error:
            _LOGGER.error("SSL certificate error: [%s]", error)
            errors["base"] = "ssl_error"
        except (plexapi.exceptions.NotFound, requests.exceptions.ConnectionError):
            server_identifier = (
                server_config.get(CONF_URL) or plex_server.server_choice or "Unknown"
            )
            _LOGGER.error("Plex server could not be reached: %s", server_identifier)
            errors[CONF_HOST] = "not_found"
        except ServerNotSpecified as available_servers:
            # Account has multiple servers; let the user pick one.
            self.available_servers = available_servers.args[0]
            return await self.async_step_select_server()
        except Exception as error: # pylint: disable=broad-except
            _LOGGER.exception("Unknown error connecting to Plex server: %s", error)
            return self.async_abort(reason="unknown")
        if errors:
            # Route the errors back to whichever form started validation.
            if self._manual:
                return await self.async_step_manual_setup(
                    user_input=server_config, errors=errors
                )
            return await self.async_step_user(errors=errors)
        # One config entry per physical server, keyed by machine identifier.
        server_id = plex_server.machine_identifier
        await self.async_set_unique_id(server_id)
        self._abort_if_unique_id_configured()
        url = plex_server.url_in_use
        token = server_config.get(CONF_TOKEN)
        entry_config = {CONF_URL: url}
        if self.client_id:
            entry_config[CONF_CLIENT_ID] = self.client_id
        if token:
            entry_config[CONF_TOKEN] = token
        if url.startswith("https"):
            entry_config[CONF_VERIFY_SSL] = server_config.get(
                CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL
            )
        _LOGGER.debug("Valid config created for %s", plex_server.friendly_name)
        return self.async_create_entry(
            title=plex_server.friendly_name,
            data={
                CONF_SERVER: plex_server.friendly_name,
                CONF_SERVER_IDENTIFIER: server_id,
                PLEX_SERVER_CONFIG: entry_config,
            },
        )
    async def async_step_select_server(self, user_input=None):
        """Use selected Plex server."""
        config = dict(self.current_login)
        if user_input is not None:
            config[CONF_SERVER] = user_input[CONF_SERVER]
            return await self.async_step_server_validate(config)
        configured = configured_servers(self.hass)
        # Offer only servers that are not already set up.
        available_servers = [
            name
            for (name, server_id) in self.available_servers
            if server_id not in configured
        ]
        if not available_servers:
            return self.async_abort(reason="all_configured")
        if len(available_servers) == 1:
            # Only one candidate: skip the form and validate directly.
            config[CONF_SERVER] = available_servers[0]
            return await self.async_step_server_validate(config)
        return self.async_show_form(
            step_id="select_server",
            data_schema=vol.Schema(
                {vol.Required(CONF_SERVER): vol.In(available_servers)}
            ),
            errors={},
        )
    async def async_step_integration_discovery(self, discovery_info):
        """Handle GDM discovery."""
        machine_identifier = discovery_info["data"]["Resource-Identifier"]
        # Abort quietly if this server already has a config entry.
        await self.async_set_unique_id(machine_identifier)
        self._abort_if_unique_id_configured()
        host = f"{discovery_info['from'][0]}:{discovery_info['data']['Port']}"
        name = discovery_info["data"]["Name"]
        self.context["title_placeholders"] = { # pylint: disable=no-member
            "host": host,
            "name": name,
        }
        return await self.async_step_user()
    async def async_step_plex_website_auth(self):
        """Begin external auth flow on Plex website."""
        self.hass.http.register_view(PlexAuthorizationCallbackView)
        payload = {
            "X-Plex-Device-Name": X_PLEX_DEVICE_NAME,
            "X-Plex-Version": X_PLEX_VERSION,
            "X-Plex-Product": X_PLEX_PRODUCT,
            "X-Plex-Device": self.hass.config.location_name,
            "X-Plex-Platform": X_PLEX_PLATFORM,
            "X-Plex-Model": "Plex OAuth",
        }
        session = async_get_clientsession(self.hass)
        self.plexauth = PlexAuth(payload, session)
        await self.plexauth.initiate_auth()
        # plex.tv redirects back to our callback view with this flow's id.
        forward_url = f"{get_url(self.hass)}{AUTH_CALLBACK_PATH}?flow_id={self.flow_id}"
        auth_url = self.plexauth.auth_url(forward_url)
        return self.async_external_step(step_id="obtain_token", url=auth_url)
    async def async_step_obtain_token(self, user_input=None):
        """Obtain token after external auth completed."""
        # NOTE(review): the argument is presumably a polling timeout in
        # seconds -- confirm against the plexauth library documentation.
        token = await self.plexauth.token(10)
        if not token:
            return self.async_external_step_done(next_step_id="timed_out")
        self.token = token
        self.client_id = self.plexauth.client_identifier
        return self.async_external_step_done(next_step_id="use_external_token")
    async def async_step_timed_out(self, user_input=None):
        """Abort flow when time expires."""
        return self.async_abort(reason="token_request_timeout")
    async def async_step_use_external_token(self, user_input=None):
        """Continue server validation with external token."""
        server_config = {CONF_TOKEN: self.token}
        return await self.async_step_server_validate(server_config)
class PlexOptionsFlowHandler(config_entries.OptionsFlow):
    """Handle Plex options."""
    def __init__(self, config_entry):
        """Initialize Plex options flow."""
        # Deep copy so edits do not mutate the live config entry options.
        self.options = copy.deepcopy(dict(config_entry.options))
        self.server_id = config_entry.data[CONF_SERVER_IDENTIFIER]
    async def async_step_init(self, user_input=None):
        """Manage the Plex options."""
        return await self.async_step_plex_mp_settings()
    async def async_step_plex_mp_settings(self, user_input=None):
        """Manage the Plex media_player options."""
        plex_server = self.hass.data[DOMAIN][SERVERS][self.server_id]
        if user_input is not None:
            # NOTE(review): assumes self.options already contains an
            # MP_DOMAIN mapping -- verify this is guaranteed by setup.
            self.options[MP_DOMAIN][CONF_USE_EPISODE_ART] = user_input[
                CONF_USE_EPISODE_ART
            ]
            self.options[MP_DOMAIN][CONF_IGNORE_NEW_SHARED_USERS] = user_input[
                CONF_IGNORE_NEW_SHARED_USERS
            ]
            self.options[MP_DOMAIN][CONF_IGNORE_PLEX_WEB_CLIENTS] = user_input[
                CONF_IGNORE_PLEX_WEB_CLIENTS
            ]
            # Persist every known account with an enabled/disabled flag.
            account_data = {
                user: {"enabled": bool(user in user_input[CONF_MONITORED_USERS])}
                for user in plex_server.accounts
            }
            self.options[MP_DOMAIN][CONF_MONITORED_USERS] = account_data
            return self.async_create_entry(title="", data=self.options)
        available_accounts = {name: name for name in plex_server.accounts}
        available_accounts[plex_server.owner] += " [Owner]"
        default_accounts = plex_server.accounts
        known_accounts = set(plex_server.option_monitored_users)
        if known_accounts:
            # Pre-select only the accounts previously marked enabled.
            default_accounts = {
                user
                for user in plex_server.option_monitored_users
                if plex_server.option_monitored_users[user]["enabled"]
            }
        for user in plex_server.accounts:
            if user not in known_accounts:
                available_accounts[user] += " [New]"
        if not plex_server.option_ignore_new_shared_users:
            # Newly-seen accounts default to monitored unless ignored.
            for new_user in plex_server.accounts - known_accounts:
                default_accounts.add(new_user)
        return self.async_show_form(
            step_id="plex_mp_settings",
            data_schema=vol.Schema(
                {
                    vol.Required(
                        CONF_USE_EPISODE_ART,
                        default=plex_server.option_use_episode_art,
                    ): bool,
                    vol.Optional(
                        CONF_MONITORED_USERS, default=default_accounts
                    ): cv.multi_select(available_accounts),
                    vol.Required(
                        CONF_IGNORE_NEW_SHARED_USERS,
                        default=plex_server.option_ignore_new_shared_users,
                    ): bool,
                    vol.Required(
                        CONF_IGNORE_PLEX_WEB_CLIENTS,
                        default=plex_server.option_ignore_plexweb_clients,
                    ): bool,
                }
            ),
        )
class PlexAuthorizationCallbackView(HomeAssistantView):
    """Receive the redirect from plex.tv after external authorization."""

    url = AUTH_CALLBACK_PATH
    name = AUTH_CALLBACK_NAME
    requires_auth = False

    async def get(self, request):
        """Resume the waiting config flow, then tell the user to close the tab."""
        hass = request.app["hass"]
        flow_id = request.query["flow_id"]
        await hass.config_entries.flow.async_configure(
            flow_id=flow_id, user_input=None
        )
        return web_response.Response(
            headers={"content-type": "text/html"},
            text="<script>window.close()</script>Success! This window can be closed",
        )
|
|
import boto
import six
class FieldLists:
    """Attribute names copied off each boto result object when marshalling
    it into a plain dict (see ResultSets below)."""

    # boto.ec2.address.Address
    ADDRESS = [
        'allocation_id', 'association_id', 'domain', 'instance_id',
        'network_interface_id', 'network_interface_owner_id',
        'private_ip_address', 'public_ip',
    ]
    # boto.ec2.blockdevicemapping.BlockDeviceType
    BLOCK_DEVICE_TYPE = [
        'attach_time', 'delete_on_termination', 'encrypted', 'ephemeral_name',
        'iops', 'size', 'snapshot_id', 'status', 'volume_id', 'volume_type',
    ]
    # S3 bucket fields
    BUCKET = [
        'connection', 'creation_date', 'LoggingGroup', 'name',
    ]
    # boto.ec2.zone.Zone (availability zone)
    EC2ZONE = [
        'messages', 'name', 'region_name', 'state',
    ]
    # boto.ec2.instance.Instance
    INSTANCE = [
        'ami_launch_index', 'architecture', 'hypervisor', 'id', 'image_id',
        'instance_type', 'ip_address', 'kernel', 'key_name', 'launch_time',
        'monitored', 'monitoring_state', 'placement', 'placement_group',
        'placement_tenancy', 'platform', 'previous_state',
        'previous_state_code', 'private_dns_name', 'private_ip_address',
        'public_dns_name', 'ramdisk', 'root_device_name', 'root_device_type',
        'spot_instance_request_id', 'state', 'state_code', 'state_reason',
        'subnet_id', 'virtualization_type', 'vpc_id',
    ]
    # boto.route53.record.Record
    RECORD = [
        'alias_dns_name', 'alias_evaluate_target_health',
        'alias_hosted_zone_id', 'failover', 'health_check', 'identifier',
        'name', 'region', 'resource_records', 'ttl', 'type', 'weight',
    ]
    # boto.route53.zone.Zone (hosted zone)
    R53ZONE = [
        'callerreference', 'config', 'id', 'name', 'resourcerecordsetcount',
    ]
    # boto.route53.status.Status
    R53STATUS = [
        'comment', 'id', 'status', 'submittedat',
    ]
    # boto.ec2.volume.Volume
    VOLUME = [
        'create_time', 'encrypted', 'id', 'iops', 'size', 'snapshot_id',
        'status', 'type', 'zone',
    ]
class ResultSets(object):
    """Marshal boto result objects into plain dicts/lists of basic types
    so they can be serialized (e.g. to JSON)."""

    def __init__(self):
        # Placeholder attribute kept for backwards compatibility.
        self.foo = ''

    def selector(self, output):
        """Dispatch *output* to the parser matching its boto type.

        Unrecognized objects are returned unchanged. The generic
        EC2Object branch must stay last: the specific EC2 types above
        are all EC2Object subclasses.
        """
        if isinstance(output, boto.ec2.instance.Reservation):
            return self.parseReservation(output)
        elif isinstance(output, boto.ec2.instance.Instance):
            return self.parseInstance(output)
        elif isinstance(output, boto.ec2.volume.Volume):
            return self.parseVolume(output)
        elif isinstance(output, boto.ec2.blockdevicemapping.BlockDeviceType):
            return self.parseBlockDeviceType(output)
        elif isinstance(output, boto.ec2.zone.Zone):
            return self.parseEC2Zone(output)
        elif isinstance(output, boto.ec2.address.Address):
            return self.parseAddress(output)
        elif isinstance(output, boto.route53.record.Record):
            return self.parseRecord(output)
        elif isinstance(output, boto.route53.zone.Zone):
            return self.parseR53Zone(output)
        elif isinstance(output, boto.route53.status.Status):
            return self.parseR53Status(output)
        elif isinstance(output, boto.ec2.ec2object.EC2Object):
            return self.parseEC2Object(output)
        else:
            return output

    def formatter(self, output):
        """Recursively marshal lists and dicts, applying selector() to leaves."""
        if isinstance(output, list):
            return [self.formatter(item) for item in output]
        elif isinstance(output, dict):
            return {key: self.formatter(value) for key, value in six.iteritems(output)}
        else:
            return self.selector(output)

    def parseReservation(self, output):
        """Flatten a Reservation into a list of instance dicts, each
        annotated with the reservation's owner_id."""
        instance_list = []
        for instance in output.instances:
            instance_data = self.parseInstance(instance)
            instance_data['owner_id'] = output.owner_id
            instance_list.append(instance_data)
        return instance_list

    def parseAddress(self, output):
        """Extract the Address fields listed in FieldLists.ADDRESS."""
        instance_data = {field: getattr(output, field) for field in FieldLists.ADDRESS}
        return instance_data

    def parseInstance(self, output):
        """Extract the Instance fields listed in FieldLists.INSTANCE."""
        instance_data = {field: getattr(output, field) for field in FieldLists.INSTANCE}
        return instance_data

    def parseVolume(self, output):
        """Extract the Volume fields listed in FieldLists.VOLUME."""
        volume_data = {field: getattr(output, field) for field in FieldLists.VOLUME}
        return volume_data

    def parseBlockDeviceType(self, output):
        """Extract the fields listed in FieldLists.BLOCK_DEVICE_TYPE."""
        data = {field: getattr(output, field) for field in FieldLists.BLOCK_DEVICE_TYPE}
        return data

    def parseEC2Zone(self, output):
        """Extract the availability-zone fields in FieldLists.EC2ZONE."""
        zone_data = {field: getattr(output, field) for field in FieldLists.EC2ZONE}
        return zone_data

    def parseRecord(self, output):
        """Extract the Route53 record fields in FieldLists.RECORD."""
        record_data = {field: getattr(output, field) for field in FieldLists.RECORD}
        return record_data

    def parseR53Zone(self, output):
        """Extract the Route53 hosted-zone fields in FieldLists.R53ZONE."""
        zone_data = {field: getattr(output, field) for field in FieldLists.R53ZONE}
        return zone_data

    def parseR53Status(self, output):
        """Extract the Route53 status fields in FieldLists.R53STATUS."""
        status_data = {field: getattr(output, field) for field in FieldLists.R53STATUS}
        return status_data

    def parseBucket(self, output):
        """Extract the S3 bucket fields in FieldLists.BUCKET."""
        bucket_data = {field: getattr(output, field) for field in FieldLists.BUCKET}
        return bucket_data

    def parseEC2Object(self, output):
        """Best-effort marshalling for any other EC2Object subclass.

        Looks like everything that is an EC2Object pretty much only has these
        extra 'unparseable' properties, so handle region and connection
        specially, then stringify anything else that will not serialize.
        """
        output = vars(output)
        del output['connection']
        # Special handling for region since name here is better than id.
        region = output.get('region', None)
        output['region'] = region.name if region else ''
        # Materialize the items so we can reassign values while iterating.
        for k, v in list(six.iteritems(output)):
            if isinstance(v, boto.ec2.ec2object.EC2Object):
                # Better not to assume each EC2Object has an id. If not found
                # resort to the str of the object, which should be meaningful.
                output[k] = getattr(v, 'id', str(v))
            # Unmarshallable objects might also be hiding inside lists.
            if isinstance(v, list):
                v_list = []
                for item in v:
                    # Leave basic types alone; stringify everything else.
                    # (Bug fix: previously appended the whole list ``v``
                    # instead of ``item``. Also replaced the Python-2-only
                    # ``basestring``/``long`` names with their six
                    # equivalents.)
                    if isinstance(item, (six.string_types, bool,
                                         six.integer_types, float)):
                        v_list.append(item)
                    else:
                        v_list.append(str(item))
                output[k] = v_list
        return output
|
|
import math
import operator
import sys
import pickle
import multiprocessing
import ctypes
import warnings
from distutils.version import LooseVersion
import re
import numpy as np
from numba import njit, jit, vectorize, guvectorize, objmode
from numba.core import types, errors, typing, compiler, cgutils
from numba.core.typed_passes import type_inference_stage
from numba.core.registry import cpu_target
from numba.core.compiler import compile_isolated
from numba.tests.support import (
TestCase,
captured_stdout,
temp_directory,
override_config,
run_in_new_process_in_cache_dir,
skip_if_typeguard,
)
from numba.core.errors import LoweringError
import unittest
from numba.extending import (
typeof_impl,
type_callable,
lower_builtin,
lower_cast,
overload,
overload_attribute,
overload_method,
models,
register_model,
box,
unbox,
NativeValue,
intrinsic,
_Intrinsic,
register_jitable,
get_cython_function_address,
is_jitted,
overload_classmethod,
)
from numba.core.typing.templates import (
ConcreteTemplate,
signature,
infer,
infer_global,
AbstractTemplate,
)
# Pandas-like API implementation
from .pdlike_usecase import Index, Series
# Optional dependency: scipy.special.cython_special first appeared in SciPy
# 0.19; ``sc`` stays None when SciPy is missing or too old so tests can skip.
try:
    import scipy
    if LooseVersion(scipy.__version__) < "0.19":
        sc = None
    else:
        import scipy.special.cython_special as sc
except ImportError:
    sc = None
# -----------------------------------------------------------------------
# Define a custom type and an implicit cast on it
class MyDummy(object):
    """Plain Python object given a custom opaque Numba type below."""
    pass
class MyDummyType(types.Opaque):
    """Opaque Numba type for MyDummy with an implicit cast to numbers."""
    # Allow implicit (safe) conversion to any Number type; implicitly
    # returns None (no conversion) for every other target type.
    def can_convert_to(self, context, toty):
        if isinstance(toty, types.Number):
            from numba.core.typeconv import Conversion
            return Conversion.safe
# Singleton Numba type plus a matching Python instance used across the tests.
mydummy_type = MyDummyType("mydummy")
mydummy = MyDummy()
@typeof_impl.register(MyDummy)
def typeof_mydummy(val, c):
    # Every MyDummy instance maps to the single opaque type.
    return mydummy_type
@lower_cast(MyDummyType, types.Number)
def mydummy_to_number(context, builder, fromty, toty, val):
    """
    Implicit conversion from MyDummy to any Number type: the incoming
    value is discarded and the constant 42 is materialized instead.
    """
    return context.get_constant(toty, 42)
def get_dummy():
    """Return the module-level MyDummy instance (used to test casts)."""
    return mydummy
# MyDummyType carries no data, so the opaque data model suffices.
register_model(MyDummyType)(models.OpaqueModel)
@unbox(MyDummyType)
def unbox_index(typ, obj, c):
    # Nothing to extract from the PyObject: produce a dummy LLVM value.
    return NativeValue(c.context.get_dummy_value())
# -----------------------------------------------------------------------
# Define a second custom type but w/o implicit cast to Number
def base_dummy_type_factory(name):
    """Create a fresh opaque dummy type without an implicit Number cast.

    Returns (DynTypeType, DynType, dyn_type_type): the Numba type class,
    the Python class, and the singleton type instance registered for it.
    """
    class DynType(object):
        pass
    class DynTypeType(types.Opaque):
        pass
    dyn_type_type = DynTypeType(name)
    @typeof_impl.register(DynType)
    def typeof_mydummy(val, c):
        return dyn_type_type
    register_model(DynTypeType)(models.OpaqueModel)
    return DynTypeType, DynType, dyn_type_type
# Second dummy type: no can_convert_to, so no implicit cast to Number.
MyDummyType2, MyDummy2, mydummy_type_2 = base_dummy_type_factory("mydummy2")
@unbox(MyDummyType2)
def unbox_index2(typ, obj, c):
    # Same dummy unboxing as MyDummyType: no payload to extract.
    return NativeValue(c.context.get_dummy_value())
# -----------------------------------------------------------------------
# Define a function's typing and implementation using the classical
# two-step API
def func1(x=None):
    """Stub resolved by Numba typing/lowering; not callable in pure Python."""
    raise NotImplementedError
def type_func1_(context):
    """Typing for func1: int32 for no-arg/None, identity for floats,
    and (implicitly) None for any other argument type."""
    def typer(x=None):
        if x in (None, types.none):
            # 0-arg or 1-arg with None
            return types.int32
        elif isinstance(x, types.Float):
            # 1-arg with float
            return x
    return typer
# Keep the return value so tests can check type_callable() hands it back.
type_func1 = type_callable(func1)(type_func1_)
@lower_builtin(func1)
@lower_builtin(func1, types.none)
def func1_nullary(context, builder, sig, args):
    # Lowering for func1() and func1(None): the constant 42.
    return context.get_constant(sig.return_type, 42)
@lower_builtin(func1, types.Float)
def func1_unary(context, builder, sig, args):
    # Lowering for func1(float): sqrt(2*x), compiled from a Python kernel.
    def func1_impl(x):
        return math.sqrt(2 * x)
    return context.compile_internal(builder, func1_impl, sig, args)
# We can do the same for a known internal operation, here "print_item"
# which we extend to support MyDummyType.
@infer
class PrintDummy(ConcreteTemplate):
    # Typing: print_item(mydummy) -> none
    key = "print_item"
    cases = [signature(types.none, mydummy_type)]
@lower_builtin("print_item", MyDummyType)
def print_dummy(context, builder, sig, args):
    # Lowering: ignore the value and print the constant string "hello!"
    # through the CPython C API.
    [x] = args
    pyapi = context.get_python_api(builder)
    strobj = pyapi.unserialize(pyapi.serialize_object("hello!"))
    pyapi.print_object(strobj)
    pyapi.decref(strobj)
    return context.get_dummy_value()
# -----------------------------------------------------------------------
# Define an overloaded function (combined API)
def where(cond, x, y):
    """Stub implemented for Numba by the @overload definitions below."""
    raise NotImplementedError
def np_where(cond, x, y):
    """
    Wrap np.where() to allow for keyword arguments
    """
    return np.where(cond, x, y)
def call_where(cond, x, y):
    """Call where() with keyword arguments in swapped order."""
    return where(cond, y=y, x=x)
@overload(where)
def overload_where_arrays(cond, x, y):
    """
    Implement where() for arrays.
    """
    # Choose implementation based on argument types.
    if isinstance(cond, types.Array):
        if x.dtype != y.dtype:
            raise errors.TypingError("x and y should have the same dtype")
        # Array where() => return an array of the same shape
        if all(ty.layout == "C" for ty in (cond, x, y)):
            def where_impl(cond, x, y):
                """
                Fast implementation for C-contiguous arrays
                """
                shape = cond.shape
                if x.shape != shape or y.shape != shape:
                    raise ValueError("all inputs should have the same shape")
                res = np.empty_like(x)
                cf = cond.flat
                xf = x.flat
                yf = y.flat
                rf = res.flat
                for i in range(cond.size):
                    rf[i] = xf[i] if cf[i] else yf[i]
                return res
        else:
            def where_impl(cond, x, y):
                """
                Generic implementation for other arrays
                """
                shape = cond.shape
                if x.shape != shape or y.shape != shape:
                    raise ValueError("all inputs should have the same shape")
                res = np.empty_like(x)
                for idx, c in np.ndenumerate(cond):
                    res[idx] = x[idx] if c else y[idx]
                return res
        return where_impl
    # Implicitly returns None for non-array cond so other overloads are tried.
# We can define another overload function for the same function, they
# will be tried in turn until one succeeds.
@overload(where)
def overload_where_scalars(cond, x, y):
    """
    Implement where() for scalars.
    """
    if not isinstance(cond, types.Array):
        if x != y:
            raise errors.TypingError("x and y should have the same type")
        def where_impl(cond, x, y):
            """
            Scalar where() => return a 0-dim array
            """
            scal = x if cond else y
            # Can't use full_like() on Numpy < 1.8
            arr = np.empty_like(scal)
            arr[()] = scal
            return arr
        return where_impl
# -----------------------------------------------------------------------
# Overload an already defined built-in function, extending it for new types.
@overload(len)
def overload_len_dummy(arg):
    # Extend the built-in len(): MyDummy values report length 13.
    if isinstance(arg, MyDummyType):
        def len_impl(arg):
            return 13
        return len_impl
@overload(operator.add)
def overload_add_dummy(arg1, arg2):
    # Adding any combination of the two dummy types yields the constant 42.
    if isinstance(arg1, (MyDummyType, MyDummyType2)) and isinstance(
        arg2, (MyDummyType, MyDummyType2)
    ):
        def dummy_add_impl(arg1, arg2):
            return 42
        return dummy_add_impl
@overload(operator.delitem)
def overload_dummy_delitem(obj, idx):
    # del obj[idx] on MyDummy: side effect only (prints the operands).
    if isinstance(obj, MyDummyType) and isinstance(idx, types.Integer):
        def dummy_delitem_impl(obj, idx):
            print("del", obj, idx)
        return dummy_delitem_impl
@overload(operator.getitem)
def overload_dummy_getitem(obj, idx):
    # obj[idx] on MyDummy: returns idx + 123 so tests can verify dispatch.
    if isinstance(obj, MyDummyType) and isinstance(idx, types.Integer):
        def dummy_getitem_impl(obj, idx):
            return idx + 123
        return dummy_getitem_impl
@overload(operator.setitem)
def overload_dummy_setitem(obj, idx, val):
    # obj[idx] = val on MyDummy: side effect only (prints idx and val).
    if all(
        [
            isinstance(obj, MyDummyType),
            isinstance(idx, types.Integer),
            isinstance(val, types.Integer),
        ]
    ):
        def dummy_setitem_impl(obj, idx, val):
            print(idx, val)
        return dummy_setitem_impl
def call_add_operator(arg1, arg2):
    """Add via operator.add (exercises the function-call form)."""
    return operator.add(arg1, arg2)
def call_add_binop(arg1, arg2):
    """Add via the '+' binary operator."""
    return arg1 + arg2
@overload(operator.iadd)
def overload_iadd_dummy(arg1, arg2):
    # In-place add on any combination of the two dummy types yields 42.
    if isinstance(arg1, (MyDummyType, MyDummyType2)) and isinstance(
        arg2, (MyDummyType, MyDummyType2)
    ):
        def dummy_iadd_impl(arg1, arg2):
            return 42
        return dummy_iadd_impl
def call_iadd_operator(arg1, arg2):
    """In-place add via operator.iadd (exercises the function-call form).

    Bug fix: this previously called operator.add, so the iadd overload
    registered above was never exercised through the operator-call form.
    """
    return operator.iadd(arg1, arg2)
def call_iadd_binop(arg1, arg2):
    """In-place add via the '+=' augmented-assignment operator."""
    arg1 += arg2
    return arg1
def call_delitem(obj, idx):
    """del obj[idx] (exercises the operator.delitem overload)."""
    del obj[idx]
def call_getitem(obj, idx):
    """obj[idx] (exercises the operator.getitem overload)."""
    return obj[idx]
def call_setitem(obj, idx, val):
    """obj[idx] = val (exercises the operator.setitem overload)."""
    obj[idx] = val
@overload_method(MyDummyType, "length")
def overload_method_length(arg):
    # Expose a .length() method on MyDummy that defers to len().
    def imp(arg):
        return len(arg)
    return imp
def cache_overload_method_usecase(x):
    """Call the overloaded .length() method (named for the caching tests)."""
    return x.length()
def call_func1_nullary():
    """Call func1 with no arguments (typed as returning int32)."""
    return func1()
def call_func1_unary(x):
    """Call func1 with one argument (None or a float)."""
    return func1(x)
def len_usecase(x):
    """len() on an arbitrary object (exercises the len overload)."""
    return len(x)
def print_usecase(x):
    """print() an arbitrary object (exercises print_item lowering)."""
    print(x)
def getitem_usecase(x, key):
    """Index or slice an arbitrary object."""
    return x[key]
def npyufunc_usecase(x):
    """Nested Numpy ufunc calls."""
    return np.cos(np.sin(x))
def get_data_usecase(x):
    """Read the _data attribute (exposed with make_attribute_wrapper())."""
    return x._data
def get_index_usecase(x):
    """Read the _index attribute of a Series."""
    return x._index
def is_monotonic_usecase(x):
    """Read the is_monotonic_increasing overloaded attribute."""
    return x.is_monotonic_increasing
def make_series_usecase(data, index):
    """Construct a Series inside compiled code."""
    return Series(data, index)
def clip_usecase(x, lo, hi):
    """Call the clip() overloaded method."""
    return x.clip(lo, hi)
# -----------------------------------------------------------------------
def return_non_boxable():
    """Return the np module itself -- a value with no CPython representation."""
    return np
@overload(return_non_boxable)
def overload_return_non_boxable():
    def imp():
        return np
    return imp
def non_boxable_ok_usecase(sz):
    # The module never escapes to CPython; only the array result is boxed.
    mod = return_non_boxable()
    return mod.arange(sz)
def non_boxable_bad_usecase():
    # Returning the module to CPython should fail at boxing time.
    return return_non_boxable()
def mk_func_input(f):
    """Stub taking a function object; typing is supplied by MkFuncTyping."""
    pass
@infer_global(mk_func_input)
class MkFuncTyping(AbstractTemplate):
    def generic(self, args, kws):
        # A lambda argument must arrive typed as a MakeFunctionLiteral.
        assert isinstance(args[0], types.MakeFunctionLiteral)
        return signature(types.none, *args)
def mk_func_test_impl():
    # Passes a lambda so the frontend produces a make_function literal.
    mk_func_input(lambda a: a)
# -----------------------------------------------------------------------
@overload(np.exp)
def overload_np_exp(obj):
    # Extend np.exp: MyDummy inputs return a recognizable constant.
    if isinstance(obj, MyDummyType):
        def imp(obj):
            # Returns a constant if a MyDummyType is seen
            return 0xDEADBEEF
        return imp
class TestLowLevelExtending(TestCase):
    """
    Test the low-level two-tier extension API.
    """
    # We check with both @jit and compile_isolated(), to exercise the
    # registration logic.
    def test_func1(self):
        pyfunc = call_func1_nullary
        cfunc = jit(nopython=True)(pyfunc)
        self.assertPreciseEqual(cfunc(), 42)
        pyfunc = call_func1_unary
        cfunc = jit(nopython=True)(pyfunc)
        self.assertPreciseEqual(cfunc(None), 42)
        # func1(float) lowers to sqrt(2*x): sqrt(36) == 6.
        self.assertPreciseEqual(cfunc(18.0), 6.0)
    def test_func1_isolated(self):
        pyfunc = call_func1_nullary
        cr = compile_isolated(pyfunc, ())
        self.assertPreciseEqual(cr.entry_point(), 42)
        pyfunc = call_func1_unary
        cr = compile_isolated(pyfunc, (types.float64,))
        self.assertPreciseEqual(cr.entry_point(18.0), 6.0)
    def test_type_callable_keeps_function(self):
        # type_callable() must return the decorated function unchanged.
        self.assertIs(type_func1, type_func1_)
        self.assertIsNotNone(type_func1)
    def test_cast_mydummy(self):
        # Forcing a float64 return exercises the implicit MyDummy -> Number
        # cast, which materializes the constant 42.
        pyfunc = get_dummy
        cr = compile_isolated(pyfunc, (), types.float64)
        self.assertPreciseEqual(cr.entry_point(), 42.0)
    def test_mk_func_literal(self):
        """make sure make_function is passed to typer class as a literal
        """
        test_ir = compiler.run_frontend(mk_func_test_impl)
        typingctx = cpu_target.typing_context
        targetctx = cpu_target.target_context
        typingctx.refresh()
        targetctx.refresh()
        typing_res = type_inference_stage(typingctx, targetctx, test_ir, (),
                                          None)
        self.assertTrue(
            any(
                isinstance(a, types.MakeFunctionLiteral)
                for a in typing_res.typemap.values()
            )
        )
class TestPandasLike(TestCase):
    """
    Test implementing a pandas-like Index object.
    Also stresses most of the high-level API.
    """
    def test_index_len(self):
        i = Index(np.arange(3))
        cfunc = jit(nopython=True)(len_usecase)
        self.assertPreciseEqual(cfunc(i), 3)
    def test_index_getitem(self):
        i = Index(np.int32([42, 8, -5]))
        cfunc = jit(nopython=True)(getitem_usecase)
        self.assertPreciseEqual(cfunc(i, 1), 8)
        # Slicing should wrap the result back into an Index.
        ii = cfunc(i, slice(1, None))
        self.assertIsInstance(ii, Index)
        self.assertEqual(list(ii), [8, -5])
    def test_index_ufunc(self):
        """
        Check Numpy ufunc on an Index object.
        """
        i = Index(np.int32([42, 8, -5]))
        cfunc = jit(nopython=True)(npyufunc_usecase)
        ii = cfunc(i)
        self.assertIsInstance(ii, Index)
        self.assertPreciseEqual(ii._data, np.cos(np.sin(i._data)))
    def test_index_get_data(self):
        # The _data attribute is exposed with make_attribute_wrapper()
        i = Index(np.int32([42, 8, -5]))
        cfunc = jit(nopython=True)(get_data_usecase)
        data = cfunc(i)
        # Boxing must hand back the very same underlying array object.
        self.assertIs(data, i._data)
    def test_index_is_monotonic(self):
        # The is_monotonic_increasing attribute is exposed with
        # overload_attribute()
        cfunc = jit(nopython=True)(is_monotonic_usecase)
        for values, expected in [
            ([8, 42, 5], False),
            ([5, 8, 42], True),
            ([], True),
        ]:
            i = Index(np.int32(values))
            got = cfunc(i)
            self.assertEqual(got, expected)
    def test_series_len(self):
        i = Index(np.int32([2, 4, 3]))
        s = Series(np.float64([1.5, 4.0, 2.5]), i)
        cfunc = jit(nopython=True)(len_usecase)
        self.assertPreciseEqual(cfunc(s), 3)
    def test_series_get_index(self):
        i = Index(np.int32([2, 4, 3]))
        s = Series(np.float64([1.5, 4.0, 2.5]), i)
        cfunc = jit(nopython=True)(get_index_usecase)
        got = cfunc(s)
        self.assertIsInstance(got, Index)
        self.assertIs(got._data, i._data)
    def test_series_ufunc(self):
        """
        Check Numpy ufunc on an Series object.
        """
        i = Index(np.int32([42, 8, -5]))
        s = Series(np.int64([1, 2, 3]), i)
        cfunc = jit(nopython=True)(npyufunc_usecase)
        ss = cfunc(s)
        # The result keeps the same index but has transformed values.
        self.assertIsInstance(ss, Series)
        self.assertIsInstance(ss._index, Index)
        self.assertIs(ss._index._data, i._data)
        self.assertPreciseEqual(ss._values, np.cos(np.sin(s._values)))
    def test_series_constructor(self):
        i = Index(np.int32([42, 8, -5]))
        d = np.float64([1.5, 4.0, 2.5])
        cfunc = jit(nopython=True)(make_series_usecase)
        got = cfunc(d, i)
        self.assertIsInstance(got, Series)
        self.assertIsInstance(got._index, Index)
        self.assertIs(got._index._data, i._data)
        self.assertIs(got._values, d)
    def test_series_clip(self):
        i = Index(np.int32([42, 8, -5]))
        s = Series(np.float64([1.5, 4.0, 2.5]), i)
        cfunc = jit(nopython=True)(clip_usecase)
        ss = cfunc(s, 1.6, 3.0)
        self.assertIsInstance(ss, Series)
        self.assertIsInstance(ss._index, Index)
        self.assertIs(ss._index._data, i._data)
        self.assertPreciseEqual(ss._values, np.float64([1.6, 3.0, 2.5]))
class TestHighLevelExtending(TestCase):
    """
    Test the high-level combined API.
    """
    def test_where(self):
        """
        Test implementing a function with @overload.
        """
        pyfunc = call_where
        cfunc = jit(nopython=True)(pyfunc)
        def check(*args, **kwargs):
            # The jitted overload must match the pure-Python reference.
            expected = np_where(*args, **kwargs)
            got = cfunc(*args, **kwargs)
            self.assertPreciseEqual(expected, got)
        check(x=3, cond=True, y=8)
        check(True, 3, 8)
        check(
            np.bool_([True, False, True]),
            np.int32([1, 2, 3]),
            np.int32([4, 5, 5]),
        )
        # The typing error is propagated
        with self.assertRaises(errors.TypingError) as raises:
            cfunc(np.bool_([]), np.int32([]), np.int64([]))
        self.assertIn(
            "x and y should have the same dtype", str(raises.exception)
        )
    def test_len(self):
        """
        Test re-implementing len() for a custom type with @overload.
        """
        cfunc = jit(nopython=True)(len_usecase)
        self.assertPreciseEqual(cfunc(MyDummy()), 13)
        self.assertPreciseEqual(cfunc([4, 5]), 2)
    def test_print(self):
        """
        Test re-implementing print() for a custom type with @overload.
        """
        cfunc = jit(nopython=True)(print_usecase)
        with captured_stdout():
            cfunc(MyDummy())
            self.assertEqual(sys.stdout.getvalue(), "hello!\n")
    def test_add_operator(self):
        """
        Test re-implementing operator.add() for a custom type with @overload.
        """
        pyfunc = call_add_operator
        cfunc = jit(nopython=True)(pyfunc)
        self.assertPreciseEqual(cfunc(1, 2), 3)
        self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42)
        # this will call add(Number, Number) as MyDummy implicitly casts to
        # Number
        self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84)
    def test_add_binop(self):
        """
        Test re-implementing '+' for a custom type via @overload(operator.add).
        """
        pyfunc = call_add_binop
        cfunc = jit(nopython=True)(pyfunc)
        self.assertPreciseEqual(cfunc(1, 2), 3)
        self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42)
        # this will call add(Number, Number) as MyDummy implicitly casts to
        # Number
        self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84)
    def test_iadd_operator(self):
        """
        Test re-implementing operator.add() for a custom type with @overload.
        """
        pyfunc = call_iadd_operator
        cfunc = jit(nopython=True)(pyfunc)
        self.assertPreciseEqual(cfunc(1, 2), 3)
        self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42)
        # this will call add(Number, Number) as MyDummy implicitly casts to
        # Number
        self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84)
    def test_iadd_binop(self):
        """
        Test re-implementing '+' for a custom type via @overload(operator.add).
        """
        pyfunc = call_iadd_binop
        cfunc = jit(nopython=True)(pyfunc)
        self.assertPreciseEqual(cfunc(1, 2), 3)
        self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42)
        # this will call add(Number, Number) as MyDummy implicitly casts to
        # Number
        self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84)
    def test_delitem(self):
        """
        Test re-implementing 'del obj[idx]' for a custom type with @overload.
        """
        pyfunc = call_delitem
        cfunc = jit(nopython=True)(pyfunc)
        obj = MyDummy()
        e = None
        # Re-raise outside the stdout capture so a failure's traceback is
        # printed normally instead of being swallowed by the capture.
        with captured_stdout() as out:
            try:
                cfunc(obj, 321)
            except Exception as exc:
                e = exc
        if e is not None:
            raise e
        self.assertEqual(out.getvalue(), "del hello! 321\n")
    def test_getitem(self):
        """
        Test re-implementing 'obj[idx]' for a custom type with @overload.
        """
        pyfunc = call_getitem
        cfunc = jit(nopython=True)(pyfunc)
        self.assertPreciseEqual(cfunc(MyDummy(), 321), 321 + 123)
    def test_setitem(self):
        """
        Test re-implementing 'obj[idx] = val' for a custom type with @overload.
        """
        pyfunc = call_setitem
        cfunc = jit(nopython=True)(pyfunc)
        obj = MyDummy()
        e = None
        # Same capture-then-reraise pattern as test_delitem.
        with captured_stdout() as out:
            try:
                cfunc(obj, 321, 123)
            except Exception as exc:
                e = exc
        if e is not None:
            raise e
        self.assertEqual(out.getvalue(), "321 123\n")
    def test_no_cpython_wrapper(self):
        """
        Test overloading whose return value cannot be represented in CPython.
        """
        # Test passing Module type from a @overload implementation to ensure
        # that the *no_cpython_wrapper* flag works
        ok_cfunc = jit(nopython=True)(non_boxable_ok_usecase)
        n = 10
        got = ok_cfunc(n)
        expect = non_boxable_ok_usecase(n)
        np.testing.assert_equal(expect, got)
        # Verify that the Module type cannot be returned to CPython
        bad_cfunc = jit(nopython=True)(non_boxable_bad_usecase)
        with self.assertRaises(TypeError) as raises:
            bad_cfunc()
        errmsg = str(raises.exception)
        expectmsg = "cannot convert native Module"
        self.assertIn(expectmsg, errmsg)
    def test_typing_vs_impl_signature_mismatch_handling(self):
        """
        Tests that an overload which has a differing typing and implementing
        signature raises an exception.
        """
        def gen_ol(impl=None):
            # Build a fresh stub + overload pair so each sub-case gets its
            # own registration, then return a jitted caller for it.
            def myoverload(a, b, c, kw=None):
                pass
            @overload(myoverload)
            def _myoverload_impl(a, b, c, kw=None):
                return impl
            @jit(nopython=True)
            def foo(a, b, c, d):
                myoverload(a, b, c, kw=d)
            return foo
        # Marker text common to every signature-mismatch error message.
        sentinel = "Typing and implementation arguments differ in"
        # kwarg value is different
        def impl1(a, b, c, kw=12):
            if a > 10:
                return 1
            else:
                return -1
        with self.assertRaises(errors.TypingError) as e:
            gen_ol(impl1)(1, 2, 3, 4)
        msg = str(e.exception)
        self.assertIn(sentinel, msg)
        self.assertIn("keyword argument default values", msg)
        self.assertIn('<Parameter "kw=12">', msg)
        self.assertIn('<Parameter "kw=None">', msg)
        # kwarg name is different
        def impl2(a, b, c, kwarg=None):
            if a > 10:
                return 1
            else:
                return -1
        with self.assertRaises(errors.TypingError) as e:
            gen_ol(impl2)(1, 2, 3, 4)
        msg = str(e.exception)
        self.assertIn(sentinel, msg)
        self.assertIn("keyword argument names", msg)
        self.assertIn('<Parameter "kwarg=None">', msg)
        self.assertIn('<Parameter "kw=None">', msg)
        # arg name is different
        def impl3(z, b, c, kw=None):
            if a > 10: # noqa: F821
                return 1
            else:
                return -1
        with self.assertRaises(errors.TypingError) as e:
            gen_ol(impl3)(1, 2, 3, 4)
        msg = str(e.exception)
        self.assertIn(sentinel, msg)
        self.assertIn("argument names", msg)
        self.assertFalse("keyword" in msg)
        self.assertIn('<Parameter "a">', msg)
        self.assertIn('<Parameter "z">', msg)
        from .overload_usecases import impl4, impl5
        with self.assertRaises(errors.TypingError) as e:
            gen_ol(impl4)(1, 2, 3, 4)
        msg = str(e.exception)
        self.assertIn(sentinel, msg)
        self.assertIn("argument names", msg)
        self.assertFalse("keyword" in msg)
        self.assertIn("First difference: 'z'", msg)
        with self.assertRaises(errors.TypingError) as e:
            gen_ol(impl5)(1, 2, 3, 4)
        msg = str(e.exception)
        self.assertIn(sentinel, msg)
        self.assertIn("argument names", msg)
        self.assertFalse("keyword" in msg)
        self.assertIn('<Parameter "a">', msg)
        self.assertIn('<Parameter "z">', msg)
        # too many args
        def impl6(a, b, c, d, e, kw=None):
            if a > 10:
                return 1
            else:
                return -1
        with self.assertRaises(errors.TypingError) as e:
            gen_ol(impl6)(1, 2, 3, 4)
        msg = str(e.exception)
        self.assertIn(sentinel, msg)
        self.assertIn("argument names", msg)
        self.assertFalse("keyword" in msg)
        self.assertIn('<Parameter "d">', msg)
        self.assertIn('<Parameter "e">', msg)
        # too few args
        def impl7(a, b, kw=None):
            if a > 10:
                return 1
            else:
                return -1
        with self.assertRaises(errors.TypingError) as e:
            gen_ol(impl7)(1, 2, 3, 4)
        msg = str(e.exception)
        self.assertIn(sentinel, msg)
        self.assertIn("argument names", msg)
        self.assertFalse("keyword" in msg)
        self.assertIn('<Parameter "c">', msg)
        # too many kwargs
        def impl8(a, b, c, kw=None, extra_kwarg=None):
            if a > 10:
                return 1
            else:
                return -1
        with self.assertRaises(errors.TypingError) as e:
            gen_ol(impl8)(1, 2, 3, 4)
        msg = str(e.exception)
        self.assertIn(sentinel, msg)
        self.assertIn("keyword argument names", msg)
        self.assertIn('<Parameter "extra_kwarg=None">', msg)
        # too few kwargs
        def impl9(a, b, c):
            if a > 10:
                return 1
            else:
                return -1
        with self.assertRaises(errors.TypingError) as e:
            gen_ol(impl9)(1, 2, 3, 4)
        msg = str(e.exception)
        self.assertIn(sentinel, msg)
        self.assertIn("keyword argument names", msg)
        self.assertIn('<Parameter "kw=None">', msg)
    def test_typing_vs_impl_signature_mismatch_handling_var_positional(self):
        """
        Tests that an overload which has a differing typing and implementing
        signature raises an exception and uses VAR_POSITIONAL (*args) in typing
        """
        def myoverload(a, kw=None):
            pass
        from .overload_usecases import var_positional_impl
        overload(myoverload)(var_positional_impl)
        @jit(nopython=True)
        def foo(a, b):
            return myoverload(a, b, 9, kw=11)
        with self.assertRaises(errors.TypingError) as e:
            foo(1, 5)
        msg = str(e.exception)
        self.assertIn("VAR_POSITIONAL (e.g. *args) argument kind", msg)
        self.assertIn("offending argument name is '*star_args_token'", msg)
    def test_typing_vs_impl_signature_mismatch_handling_var_keyword(self):
        """
        Tests that an overload which uses **kwargs (VAR_KEYWORD)
        """
        def gen_ol(impl, strict=True):
            # As in the positional test: fresh stub + registration per case.
            def myoverload(a, kw=None):
                pass
            overload(myoverload, strict=strict)(impl)
            @jit(nopython=True)
            def foo(a, b):
                return myoverload(a, kw=11)
            return foo
        # **kwargs in typing
        def ol1(a, **kws):
            def impl(a, kw=10):
                return a
            return impl
        gen_ol(ol1, False)(1, 2) # no error if strictness not enforced
        with self.assertRaises(errors.TypingError) as e:
            gen_ol(ol1)(1, 2)
        msg = str(e.exception)
        self.assertIn("use of VAR_KEYWORD (e.g. **kwargs) is unsupported", msg)
        self.assertIn("offending argument name is '**kws'", msg)
        # **kwargs in implementation
        def ol2(a, kw=0):
            def impl(a, **kws):
                return a
            return impl
        with self.assertRaises(errors.TypingError) as e:
            gen_ol(ol2)(1, 2)
        msg = str(e.exception)
        self.assertIn("use of VAR_KEYWORD (e.g. **kwargs) is unsupported", msg)
        self.assertIn("offending argument name is '**kws'", msg)
    def test_overload_method_kwargs(self):
        # Issue #3489
        @overload_method(types.Array, "foo")
        def fooimpl(arr, a_kwarg=10):
            def impl(arr, a_kwarg=10):
                return a_kwarg
            return impl
        @njit
        def bar(A):
            # Exercise default, positional and keyword forms of the kwarg.
            return A.foo(), A.foo(20), A.foo(a_kwarg=30)
        Z = np.arange(5)
        self.assertEqual(bar(Z), (10, 20, 30))
    def test_overload_method_literal_unpack(self):
        # Issue #3683
        @overload_method(types.Array, "litfoo")
        def litfoo(arr, val):
            # Must be an integer
            if isinstance(val, types.Integer):
                # Must not be literal
                if not isinstance(val, types.Literal):
                    def impl(arr, val):
                        return val
                    return impl
        @njit
        def bar(A):
            return A.litfoo(0xCAFE)
        A = np.zeros(1)
        bar(A)
        self.assertEqual(bar(A), 0xCAFE)
    def test_overload_ufunc(self):
        # Issue #4133.
        # Use an extended type (MyDummyType) to use with a customized
        # ufunc (np.exp).
        @njit
        def test():
            return np.exp(mydummy)
        self.assertEqual(test(), 0xDEADBEEF)
    def test_overload_method_stararg(self):
        @overload_method(MyDummyType, "method_stararg")
        def _ov_method_stararg(obj, val, val2, *args):
            def get(obj, val, val2, *args):
                return (val, val2, args)
            return get
        @njit
        def foo(obj, *args):
            # Test with expanding stararg
            return obj.method_stararg(*args)
        obj = MyDummy()
        self.assertEqual(foo(obj, 1, 2), (1, 2, ()))
        self.assertEqual(foo(obj, 1, 2, 3), (1, 2, (3,)))
        self.assertEqual(foo(obj, 1, 2, 3, 4), (1, 2, (3, 4)))
        @njit
        def bar(obj):
            # Test with explicit argument
            return (
                obj.method_stararg(1, 2),
                obj.method_stararg(1, 2, 3),
                obj.method_stararg(1, 2, 3, 4),
            )
        self.assertEqual(
            bar(obj), ((1, 2, ()), (1, 2, (3,)), (1, 2, (3, 4))),
        )
        # Check cases that put tuple type into stararg
        # NOTE: the expected result has an extra tuple because of stararg.
        self.assertEqual(
            foo(obj, 1, 2, (3,)), (1, 2, ((3,),)),
        )
        self.assertEqual(
            foo(obj, 1, 2, (3, 4)), (1, 2, ((3, 4),)),
        )
        self.assertEqual(
            foo(obj, 1, 2, (3, (4, 5))), (1, 2, ((3, (4, 5)),)),
        )
    def test_overload_classmethod(self):
        # Add classmethod to a subclass of Array
        class MyArray(types.Array):
            pass
        @overload_classmethod(MyArray, "array_alloc")
        def ol_array_alloc(cls, nitems):
            def impl(cls, nitems):
                arr = np.arange(nitems)
                return arr
            return impl
        @njit
        def foo(nitems):
            return MyArray.array_alloc(nitems)
        nitems = 13
        self.assertPreciseEqual(foo(nitems), np.arange(nitems))
        # Check that the base type doesn't get the classmethod
        @njit
        def no_classmethod_in_base(nitems):
            return types.Array.array_alloc(nitems)
        with self.assertRaises(errors.TypingError) as raises:
            no_classmethod_in_base(nitems)
        self.assertIn(
            "Unknown attribute 'array_alloc' of",
            str(raises.exception),
        )
def _assert_cache_stats(cfunc, expect_hit, expect_misses):
hit = cfunc._cache_hits[cfunc.signatures[0]]
if hit != expect_hit:
raise AssertionError("cache not used")
miss = cfunc._cache_misses[cfunc.signatures[0]]
if miss != expect_misses:
raise AssertionError("cache not used")
@skip_if_typeguard
class TestOverloadMethodCaching(TestCase):
    """Check on-disk caching of functions that use @overload_method
    implementations, both in-process and from a spawned subprocess."""
    # Nested multiprocessing.Pool raises AssertionError:
    # "daemonic processes are not allowed to have children"
    _numba_parallel_test_ = False
    def test_caching_overload_method(self):
        # Use a private cache directory so other runs cannot interfere.
        self._cache_dir = temp_directory(self.__class__.__name__)
        with override_config("CACHE_DIR", self._cache_dir):
            self.run_caching_overload_method()
    def run_caching_overload_method(self):
        cfunc = jit(nopython=True, cache=True)(cache_overload_method_usecase)
        self.assertPreciseEqual(cfunc(MyDummy()), 13)
        # First compilation: expect 0 hits, 1 miss.
        _assert_cache_stats(cfunc, 0, 1)
        llvmir = cfunc.inspect_llvm((mydummy_type,))
        # Ensure the inner method is not a declaration
        decls = [
            ln
            for ln in llvmir.splitlines()
            if ln.startswith("declare") and "overload_method_length" in ln
        ]
        self.assertEqual(len(decls), 0)
        # Test in a separate process
        try:
            ctx = multiprocessing.get_context("spawn")
        except AttributeError:
            ctx = multiprocessing
        q = ctx.Queue()
        p = ctx.Process(
            target=run_caching_overload_method, args=(q, self._cache_dir)
        )
        p.start()
        q.put(MyDummy())
        p.join()
        # Ensure subprocess exited normally
        self.assertEqual(p.exitcode, 0)
        res = q.get(timeout=1)
        self.assertEqual(res, 13)
def run_caching_overload_method(q, cache_dir):
    """
    Used by TestOverloadMethodCaching.test_caching_overload_method

    Runs in a child process: *q* is a multiprocessing queue used to receive
    the argument and to send back the result; *cache_dir* is the cache
    directory that the parent process already populated, so compiling here
    should be a pure cache hit.
    """
    with override_config("CACHE_DIR", cache_dir):
        arg = q.get()
        cfunc = jit(nopython=True, cache=True)(cache_overload_method_usecase)
        res = cfunc(arg)
        q.put(res)
        # Check cache stat
        _assert_cache_stats(cfunc, 1, 0)
class TestIntrinsic(TestCase):
    """Tests for the @intrinsic decorator: codegen, serialization and
    metadata propagation."""
    def test_void_return(self):
        """
        Verify that returning a None from codegen function is handled
        automatically for void functions, otherwise raise exception.
        """
        @intrinsic
        def void_func(typingctx, a):
            sig = types.void(types.int32)
            def codegen(context, builder, signature, args):
                pass # do nothing, return None, should be turned into
                # dummy value
            return sig, codegen
        @intrinsic
        def non_void_func(typingctx, a):
            sig = types.int32(types.int32)
            def codegen(context, builder, signature, args):
                pass # oops, should be returning a value here, raise exception
            return sig, codegen
        @jit(nopython=True)
        def call_void_func():
            void_func(1)
            return 0
        @jit(nopython=True)
        def call_non_void_func():
            non_void_func(1)
            return 0
        # void func should work
        self.assertEqual(call_void_func(), 0)
        # not void function should raise exception
        with self.assertRaises(LoweringError) as e:
            call_non_void_func()
        self.assertIn("non-void function returns None", e.exception.msg)
    def test_ll_pointer_cast(self):
        """
        Usecase test: custom reinterpret cast to turn int values to pointers
        """
        from ctypes import CFUNCTYPE, POINTER, c_float, c_int
        # Use intrinsic to make a reinterpret_cast operation
        def unsafe_caster(result_type):
            assert isinstance(result_type, types.CPointer)
            @intrinsic
            def unsafe_cast(typingctx, src):
                self.assertIsInstance(typingctx, typing.Context)
                if isinstance(src, types.Integer):
                    sig = result_type(types.uintp)
                    # defines the custom code generation
                    def codegen(context, builder, signature, args):
                        [src] = args
                        rtype = signature.return_type
                        llrtype = context.get_value_type(rtype)
                        return builder.inttoptr(src, llrtype)
                    return sig, codegen
            return unsafe_cast
        # make a nopython function to use our cast op.
        # this is not usable from cpython due to the returning of a pointer.
        def unsafe_get_ctypes_pointer(src):
            raise NotImplementedError("not callable from python")
        @overload(unsafe_get_ctypes_pointer, strict=False)
        def array_impl_unsafe_get_ctypes_pointer(arrtype):
            if isinstance(arrtype, types.Array):
                unsafe_cast = unsafe_caster(types.CPointer(arrtype.dtype))
                def array_impl(arr):
                    return unsafe_cast(src=arr.ctypes.data)
                return array_impl
        # the ctype wrapped function for use in nopython mode
        def my_c_fun_raw(ptr, n):
            for i in range(n):
                print(ptr[i])
        prototype = CFUNCTYPE(None, POINTER(c_float), c_int)
        my_c_fun = prototype(my_c_fun_raw)
        # Call our pointer-cast in a @jit compiled function and use
        # the pointer in a ctypes function
        @jit(nopython=True)
        def foo(arr):
            ptr = unsafe_get_ctypes_pointer(arr)
            my_c_fun(ptr, arr.size)
        # Test
        arr = np.arange(10, dtype=np.float32)
        with captured_stdout() as buf:
            foo(arr)
            got = buf.getvalue().splitlines()
        buf.close()
        expect = list(map(str, arr))
        self.assertEqual(expect, got)
    def test_serialization(self):
        """
        Test serialization of intrinsic objects
        """
        # define a intrinsic
        @intrinsic
        def identity(context, x):
            def codegen(context, builder, signature, args):
                return args[0]
            sig = x(x)
            return sig, codegen
        # use in a jit function
        @jit(nopython=True)
        def foo(x):
            return identity(x)
        self.assertEqual(foo(1), 1)
        # get serialization memo
        memo = _Intrinsic._memo
        memo_size = len(memo)
        # pickle foo and check memo size
        serialized_foo = pickle.dumps(foo)
        # increases the memo size
        memo_size += 1
        self.assertEqual(memo_size, len(memo))
        # unpickle
        foo_rebuilt = pickle.loads(serialized_foo)
        self.assertEqual(memo_size, len(memo))
        # check rebuilt foo
        self.assertEqual(foo(1), foo_rebuilt(1))
        # pickle identity directly
        serialized_identity = pickle.dumps(identity)
        # memo size unchanged
        self.assertEqual(memo_size, len(memo))
        # unpickle
        identity_rebuilt = pickle.loads(serialized_identity)
        # must be the same object
        self.assertIs(identity, identity_rebuilt)
        # memo size unchanged
        self.assertEqual(memo_size, len(memo))
    def test_deserialization(self):
        """
        Test deserialization of intrinsic
        """
        def defn(context, x):
            def codegen(context, builder, signature, args):
                return args[0]
            return x(x), codegen
        memo = _Intrinsic._memo
        memo_size = len(memo)
        # invoke _Intrinsic indirectly to avoid registration which keeps an
        # internal reference inside the compiler
        original = _Intrinsic("foo", defn)
        self.assertIs(original._defn, defn)
        pickled = pickle.dumps(original)
        # by pickling, a new memo entry is created
        memo_size += 1
        self.assertEqual(memo_size, len(memo))
        del original # remove original before unpickling
        # by deleting, the memo entry is NOT removed due to recent
        # function queue
        self.assertEqual(memo_size, len(memo))
        # Manually force clear of _recent queue
        _Intrinsic._recent.clear()
        memo_size -= 1
        self.assertEqual(memo_size, len(memo))
        rebuilt = pickle.loads(pickled)
        # verify that the rebuilt object is different
        self.assertIsNot(rebuilt._defn, defn)
        # the second rebuilt object is the same as the first
        second = pickle.loads(pickled)
        self.assertIs(rebuilt._defn, second._defn)
    def test_docstring(self):
        # Check that @intrinsic preserves the wrapped function's metadata
        # (module, names, annotations, docstring).
        @intrinsic
        def void_func(typingctx, a: int):
            """void_func docstring"""
            sig = types.void(types.int32)
            def codegen(context, builder, signature, args):
                pass # do nothing, return None, should be turned into
                # dummy value
            return sig, codegen
        self.assertEqual("numba.tests.test_extending", void_func.__module__)
        self.assertEqual("void_func", void_func.__name__)
        self.assertEqual("TestIntrinsic.test_docstring.<locals>.void_func",
                         void_func.__qualname__)
        self.assertDictEqual({'a': int}, void_func.__annotations__)
        self.assertEqual("void_func docstring", void_func.__doc__)
class TestRegisterJitable(unittest.TestCase):
    """Tests for register_jitable, with and without compiler flags."""
    def test_no_flags(self):
        # A register_jitable function is callable from jitted code without
        # needing its own explicit @jit decoration.
        @register_jitable
        def foo(x, y):
            return x + y
        def bar(x, y):
            return foo(x, y)
        cbar = jit(nopython=True)(bar)
        expect = bar(1, 2)
        got = cbar(1, 2)
        self.assertEqual(expect, got)
    def test_flags_no_nrt(self):
        # With _nrt=False the callee may not allocate; pure Python still
        # works, but the jitted version must fail typing.
        @register_jitable(_nrt=False)
        def foo(n):
            return np.arange(n)
        def bar(n):
            return foo(n)
        self.assertEqual(bar(3).tolist(), [0, 1, 2])
        cbar = jit(nopython=True)(bar)
        with self.assertRaises(errors.TypingError) as raises:
            cbar(2)
        msg = (
            "Only accept returning of array passed into the function as "
            "argument"
        )
        self.assertIn(msg, str(raises.exception))
class TestImportCythonFunction(unittest.TestCase):
    """Tests for get_cython_function_address using SciPy's cython_special
    capsule as a real-world provider."""
    @unittest.skipIf(sc is None, "Only run if SciPy >= 0.19 is installed")
    def test_getting_function(self):
        addr = get_cython_function_address(
            "scipy.special.cython_special", "j0"
        )
        functype = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)
        _j0 = functype(addr)
        j0 = jit(nopython=True)(lambda x: _j0(x))
        # Bessel J0 at 0 is exactly 1.
        self.assertEqual(j0(0), 1)
    def test_missing_module(self):
        with self.assertRaises(ImportError) as raises:
            get_cython_function_address("fakemodule", "fakefunction")
        # The quoting of the module name in the message has varied between
        # interpreter versions, so the quotes are matched optionally.
        msg = "No module named '?fakemodule'?"
        match = re.match(msg, str(raises.exception))
        self.assertIsNotNone(match)
    @unittest.skipIf(sc is None, "Only run if SciPy >= 0.19 is installed")
    def test_missing_function(self):
        with self.assertRaises(ValueError) as raises:
            get_cython_function_address(
                "scipy.special.cython_special", "foo"
            )
        msg = (
            "No function 'foo' found in __pyx_capi__ of "
            "'scipy.special.cython_special'"
        )
        self.assertEqual(msg, str(raises.exception))
@overload_method(
    MyDummyType, "method_jit_option_check_nrt", jit_options={"_nrt": True}
)
def ov_method_jit_option_check_nrt(obj):
    """Overload of ``method_jit_option_check_nrt``: allocates a new array,
    which is permitted because ``_nrt`` is enabled."""
    def impl(obj):
        return np.arange(10)
    return impl
@overload_method(
    MyDummyType, "method_jit_option_check_no_nrt", jit_options={"_nrt": False}
)
def ov_method_jit_option_check_no_nrt(obj):
    """Overload of ``method_jit_option_check_no_nrt``: allocates a new array
    even though ``_nrt`` is disabled, so compiling it must fail typing."""
    def impl(obj):
        return np.arange(10)
    return impl
@overload_attribute(
    MyDummyType, "attr_jit_option_check_nrt", jit_options={"_nrt": True}
)
def ov_attr_jit_option_check_nrt(obj):
    """Overload of the ``attr_jit_option_check_nrt`` attribute: allocates a
    new array, which is permitted because ``_nrt`` is enabled."""
    def impl(obj):
        return np.arange(10)
    return impl
@overload_attribute(
    MyDummyType, "attr_jit_option_check_no_nrt", jit_options={"_nrt": False}
)
def ov_attr_jit_option_check_no_nrt(obj):
    """Overload of the ``attr_jit_option_check_no_nrt`` attribute: allocates
    a new array even though ``_nrt`` is disabled, so it must fail typing."""
    def impl(obj):
        return np.arange(10)
    return impl
class TestJitOptionsNoNRT(TestCase):
    # Test overload*(jit_options={...}) by turning off _nrt
    def check_error_no_nrt(self, func, *args, **kwargs):
        # Check that the compilation fails with a complaint about dynamic array
        msg = (
            "Only accept returning of array passed into "
            "the function as argument"
        )
        with self.assertRaises(errors.TypingError) as raises:
            func(*args, **kwargs)
        self.assertIn(msg, str(raises.exception))
    def no_nrt_overload_check(self, flag):
        # Register an overload with _nrt set to *flag* and verify it either
        # compiles (flag=True) or fails typing (flag=False).
        def dummy():
            return np.arange(10)
        @overload(dummy, jit_options={"_nrt": flag})
        def ov_dummy():
            def dummy():
                return np.arange(10)
            return dummy
        @njit
        def foo():
            return dummy()
        if flag:
            self.assertPreciseEqual(foo(), np.arange(10))
        else:
            self.check_error_no_nrt(foo)
    def test_overload_no_nrt(self):
        self.no_nrt_overload_check(True)
        self.no_nrt_overload_check(False)
    def test_overload_method_no_nrt(self):
        # Uses the module-level ov_method_jit_option_check_* overloads.
        @njit
        def udt(x):
            return x.method_jit_option_check_nrt()
        self.assertPreciseEqual(udt(mydummy), np.arange(10))
        @njit
        def udt(x):
            return x.method_jit_option_check_no_nrt()
        self.check_error_no_nrt(udt, mydummy)
    def test_overload_attribute_no_nrt(self):
        # Uses the module-level ov_attr_jit_option_check_* overloads.
        @njit
        def udt(x):
            return x.attr_jit_option_check_nrt
        self.assertPreciseEqual(udt(mydummy), np.arange(10))
        @njit
        def udt(x):
            return x.attr_jit_option_check_no_nrt
        self.check_error_no_nrt(udt, mydummy)
class TestBoxingCallingJIT(TestCase):
    """Tests for calling jit-compiled code from inside custom boxing and
    unboxing functions via pyapi.call_jit_code."""
    def setUp(self):
        super().setUp()
        # Fresh dummy type per test so box/unbox registrations don't leak
        # between test methods.
        many = base_dummy_type_factory("mydummy2")
        self.DynTypeType, self.DynType, self.dyn_type_type = many
        self.dyn_type = self.DynType()
    def test_unboxer_basic(self):
        # Implements an unboxer on DynType that calls an intrinsic into the
        # unboxer code.
        magic_token = 0xCAFE
        magic_offset = 123
        @intrinsic
        def my_intrinsic(typingctx, val):
            # An intrinsic that returns `val + magic_offset`
            def impl(context, builder, sig, args):
                [val] = args
                return builder.add(val, val.type(magic_offset))
            sig = signature(val, val)
            return sig, impl
        @unbox(self.DynTypeType)
        def unboxer(typ, obj, c):
            # The unboxer that calls some jitcode
            def bridge(x):
                # proof that this is a jit'ed context by calling jit only
                # intrinsic
                return my_intrinsic(x)
            args = [c.context.get_constant(types.intp, magic_token)]
            sig = signature(types.voidptr, types.intp)
            is_error, res = c.pyapi.call_jit_code(bridge, sig, args)
            return NativeValue(res, is_error=is_error)
        @box(self.DynTypeType)
        def boxer(typ, val, c):
            # The boxer that returns an integer representation
            res = c.builder.ptrtoint(val, cgutils.intp_t)
            return c.pyapi.long_from_ssize_t(res)
        @njit
        def passthru(x):
            return x
        out = passthru(self.dyn_type)
        self.assertEqual(out, magic_token + magic_offset)
    def test_unboxer_raise(self):
        # Testing exception raising in jitcode called from unboxing.
        @unbox(self.DynTypeType)
        def unboxer(typ, obj, c):
            # The unboxer that calls some jitcode
            def bridge(x):
                if x > 0:
                    raise ValueError("cannot be x > 0")
                return x
            args = [c.context.get_constant(types.intp, 1)]
            sig = signature(types.voidptr, types.intp)
            is_error, res = c.pyapi.call_jit_code(bridge, sig, args)
            return NativeValue(res, is_error=is_error)
        @box(self.DynTypeType)
        def boxer(typ, val, c):
            # The boxer that returns an integer representation
            res = c.builder.ptrtoint(val, cgutils.intp_t)
            return c.pyapi.long_from_ssize_t(res)
        @njit
        def passthru(x):
            return x
        with self.assertRaises(ValueError) as raises:
            passthru(self.dyn_type)
        self.assertIn(
            "cannot be x > 0", str(raises.exception),
        )
    def test_boxer(self):
        # Call jitcode inside the boxer
        magic_token = 0xCAFE
        magic_offset = 312
        @intrinsic
        def my_intrinsic(typingctx, val):
            # An intrinsic that returns `val + magic_offset`
            def impl(context, builder, sig, args):
                [val] = args
                return builder.add(val, val.type(magic_offset))
            sig = signature(val, val)
            return sig, impl
        @unbox(self.DynTypeType)
        def unboxer(typ, obj, c):
            return NativeValue(c.context.get_dummy_value())
        @box(self.DynTypeType)
        def boxer(typ, val, c):
            # Note: this doesn't do proper error handling
            def bridge(x):
                return my_intrinsic(x)
            args = [c.context.get_constant(types.intp, magic_token)]
            sig = signature(types.intp, types.intp)
            is_error, res = c.pyapi.call_jit_code(bridge, sig, args)
            return c.pyapi.long_from_ssize_t(res)
        @njit
        def passthru(x):
            return x
        r = passthru(self.dyn_type)
        self.assertEqual(r, magic_token + magic_offset)
    def test_boxer_raise(self):
        # Call jitcode inside the boxer
        @unbox(self.DynTypeType)
        def unboxer(typ, obj, c):
            return NativeValue(c.context.get_dummy_value())
        @box(self.DynTypeType)
        def boxer(typ, val, c):
            def bridge(x):
                if x > 0:
                    raise ValueError("cannot do x > 0")
                return x
            args = [c.context.get_constant(types.intp, 1)]
            sig = signature(types.intp, types.intp)
            is_error, res = c.pyapi.call_jit_code(bridge, sig, args)
            # The error handling
            retval = cgutils.alloca_once(c.builder, c.pyapi.pyobj, zfill=True)
            with c.builder.if_then(c.builder.not_(is_error)):
                obj = c.pyapi.long_from_ssize_t(res)
                c.builder.store(obj, retval)
            return c.builder.load(retval)
        @njit
        def passthru(x):
            return x
        with self.assertRaises(ValueError) as raises:
            passthru(self.dyn_type)
        self.assertIn(
            "cannot do x > 0", str(raises.exception),
        )
def with_objmode_cache_ov_example(x):
    """Function stub overloaded inside
    TestCachingOverloadObjmode.test_caching_overload_objmode; the
    pure-Python version is never executed."""
@skip_if_typeguard
class TestCachingOverloadObjmode(TestCase):
    """Test caching of the use of overload implementations that use
    `with objmode`
    """
    _numba_parallel_test_ = False
    def setUp(self):
        # Turn Numba warnings into errors so silent caching problems fail
        # the test.
        warnings.simplefilter("error", errors.NumbaWarning)
    def tearDown(self):
        warnings.resetwarnings()
    def test_caching_overload_objmode(self):
        cache_dir = temp_directory(self.__class__.__name__)
        with override_config("CACHE_DIR", cache_dir):
            def realwork(x):
                # uses numpy code
                arr = np.arange(x) / x
                return np.linalg.norm(arr)
            def python_code(x):
                # create indirections
                return realwork(x)
            @overload(with_objmode_cache_ov_example)
            def _ov_with_objmode_cache_ov_example(x):
                def impl(x):
                    with objmode(y="float64"):
                        y = python_code(x)
                    return y
                return impl
            @njit(cache=True)
            def testcase(x):
                return with_objmode_cache_ov_example(x)
            expect = realwork(123)
            got = testcase(123)
            self.assertEqual(got, expect)
            # Recompile from the on-disk cache and check the result agrees.
            testcase_cached = njit(cache=True)(testcase.py_func)
            got = testcase_cached(123)
            self.assertEqual(got, expect)
    @classmethod
    def check_objmode_cache_ndarray(cls):
        # Classmethod (not a test) so it can run in a child process too.
        def do_this(a, b):
            return np.sum(a + b)
        def do_something(a, b):
            return np.sum(a + b)
        @overload(do_something)
        def overload_do_something(a, b):
            def _do_something_impl(a, b):
                with objmode(y='float64'):
                    y = do_this(a, b)
                return y
            return _do_something_impl
        @njit(cache=True)
        def test_caching():
            a = np.arange(20)
            b = np.arange(20)
            return do_something(a, b)
        got = test_caching()
        expect = test_caching.py_func()
        # Check result
        if got != expect:
            raise AssertionError("incorrect result")
        return test_caching
    @classmethod
    def check_objmode_cache_ndarray_check_cache(cls):
        # Same as above but additionally asserts a pure cache hit.
        disp = cls.check_objmode_cache_ndarray()
        if len(disp.stats.cache_misses) != 0:
            raise AssertionError('unexpected cache miss')
        if len(disp.stats.cache_hits) <= 0:
            raise AssertionError("unexpected missing cache hit")
    def test_check_objmode_cache_ndarray(self):
        # See issue #6130.
        # Env is missing after cache load.
        cache_dir = temp_directory(self.__class__.__name__)
        with override_config("CACHE_DIR", cache_dir):
            # Test in local process to populate the cache.
            self.check_objmode_cache_ndarray()
            # Run in new process to use the cache in a fresh process.
            res = run_in_new_process_in_cache_dir(
                self.check_objmode_cache_ndarray_check_cache, cache_dir
            )
        self.assertEqual(res['exitcode'], 0)
class TestMisc(TestCase):
    def test_is_jitted(self):
        # is_jitted must be True only for dispatcher objects produced by
        # jit/njit, not for ufunc/gufunc wrappers or plain functions.
        def foo(x):
            pass
        self.assertFalse(is_jitted(foo))
        self.assertTrue(is_jitted(njit(foo)))
        self.assertFalse(is_jitted(vectorize(foo)))
        self.assertFalse(is_jitted(vectorize(parallel=True)(foo)))
        self.assertFalse(
            is_jitted(guvectorize("void(float64[:])", "(m)")(foo))
        )
class TestOverloadPreferLiteral(TestCase):
    """Tests for the prefer_literal option of @overload and
    @overload_method."""
    def test_overload(self):
        def prefer_lit(x):
            pass
        def non_lit(x):
            pass
        def ov(x):
            if isinstance(x, types.IntegerLiteral):
                # With prefer_literal=False, this branch will not be reached.
                if x.literal_value == 1:
                    def impl(x):
                        return 0xcafe
                    return impl
                else:
                    raise errors.TypingError('literal value')
            else:
                def impl(x):
                    return x * 100
                return impl
        overload(prefer_lit, prefer_literal=True)(ov)
        overload(non_lit)(ov)
        @njit
        def check_prefer_lit(x):
            # Literal 1 takes the literal branch; everything else falls
            # back to the generic implementation.
            return prefer_lit(1), prefer_lit(2), prefer_lit(x)
        a, b, c = check_prefer_lit(3)
        self.assertEqual(a, 0xcafe)
        self.assertEqual(b, 200)
        self.assertEqual(c, 300)
        @njit
        def check_non_lit(x):
            return non_lit(1), non_lit(2), non_lit(x)
        a, b, c = check_non_lit(3)
        self.assertEqual(a, 100)
        self.assertEqual(b, 200)
        self.assertEqual(c, 300)
    def test_overload_method(self):
        def ov(self, x):
            if isinstance(x, types.IntegerLiteral):
                # With prefer_literal=False, this branch will not be reached.
                if x.literal_value == 1:
                    def impl(self, x):
                        return 0xcafe
                    return impl
                else:
                    raise errors.TypingError('literal value')
            else:
                def impl(self, x):
                    return x * 100
                return impl
        overload_method(
            MyDummyType, "method_prefer_literal",
            prefer_literal=True,
        )(ov)
        overload_method(
            MyDummyType, "method_non_literal",
            prefer_literal=False,
        )(ov)
        @njit
        def check_prefer_lit(dummy, x):
            return (
                dummy.method_prefer_literal(1),
                dummy.method_prefer_literal(2),
                dummy.method_prefer_literal(x),
            )
        a, b, c = check_prefer_lit(MyDummy(), 3)
        self.assertEqual(a, 0xcafe)
        self.assertEqual(b, 200)
        self.assertEqual(c, 300)
        @njit
        def check_non_lit(dummy, x):
            return (
                dummy.method_non_literal(1),
                dummy.method_non_literal(2),
                dummy.method_non_literal(x),
            )
        a, b, c = check_non_lit(MyDummy(), 3)
        self.assertEqual(a, 100)
        self.assertEqual(b, 200)
        self.assertEqual(c, 300)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
|
|
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Prefix related APIs.
"""
import logging
from ryu.lib.packet.bgp import EvpnEsi
from ryu.lib.packet.bgp import EvpnNLRI
from ryu.lib.packet.bgp import EvpnEthernetAutoDiscoveryNLRI
from ryu.lib.packet.bgp import EvpnMacIPAdvertisementNLRI
from ryu.lib.packet.bgp import EvpnInclusiveMulticastEthernetTagNLRI
from ryu.lib.packet.bgp import EvpnEthernetSegmentNLRI
from ryu.lib.packet.bgp import EvpnIpPrefixNLRI
from ryu.lib.packet.bgp import BGPPathAttributePmsiTunnel
from ryu.lib.packet.bgp import FlowSpecIPv4NLRI
from ryu.lib.packet.bgp import FlowSpecIPv6NLRI
from ryu.lib.packet.bgp import FlowSpecVPNv4NLRI
from ryu.lib.packet.bgp import FlowSpecVPNv6NLRI
from ryu.lib.packet.bgp import FlowSpecL2VPNNLRI
from ryu.lib.packet.bgp import BGPFlowSpecTrafficRateCommunity
from ryu.lib.packet.bgp import BGPFlowSpecTrafficActionCommunity
from ryu.lib.packet.bgp import BGPFlowSpecRedirectCommunity
from ryu.lib.packet.bgp import BGPFlowSpecTrafficMarkingCommunity
from ryu.lib.packet.bgp import BGPFlowSpecVlanActionCommunity
from ryu.lib.packet.bgp import BGPFlowSpecTPIDActionCommunity
from ryu.services.protocols.bgp.api.base import EVPN_ROUTE_TYPE
from ryu.services.protocols.bgp.api.base import EVPN_ESI
from ryu.services.protocols.bgp.api.base import EVPN_ETHERNET_TAG_ID
from ryu.services.protocols.bgp.api.base import REDUNDANCY_MODE
from ryu.services.protocols.bgp.api.base import MAC_ADDR
from ryu.services.protocols.bgp.api.base import IP_ADDR
from ryu.services.protocols.bgp.api.base import IP_PREFIX
from ryu.services.protocols.bgp.api.base import GW_IP_ADDR
from ryu.services.protocols.bgp.api.base import MPLS_LABELS
from ryu.services.protocols.bgp.api.base import NEXT_HOP
from ryu.services.protocols.bgp.api.base import PREFIX
from ryu.services.protocols.bgp.api.base import RegisterWithArgChecks
from ryu.services.protocols.bgp.api.base import ROUTE_DISTINGUISHER
from ryu.services.protocols.bgp.api.base import VPN_LABEL
from ryu.services.protocols.bgp.api.base import EVPN_VNI
from ryu.services.protocols.bgp.api.base import TUNNEL_TYPE
from ryu.services.protocols.bgp.api.base import PMSI_TUNNEL_TYPE
from ryu.services.protocols.bgp.api.base import FLOWSPEC_FAMILY
from ryu.services.protocols.bgp.api.base import FLOWSPEC_RULES
from ryu.services.protocols.bgp.api.base import FLOWSPEC_ACTIONS
from ryu.services.protocols.bgp.base import add_bgp_error_metadata
from ryu.services.protocols.bgp.base import PREFIX_ERROR_CODE
from ryu.services.protocols.bgp.base import validate
from ryu.services.protocols.bgp.core import BgpCoreError
from ryu.services.protocols.bgp.core_manager import CORE_MANAGER
from ryu.services.protocols.bgp.rtconf.base import ConfigValueError
from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError
from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF
from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_IPV4
from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_L2_EVPN
from ryu.services.protocols.bgp.utils import validation
LOG = logging.getLogger('bgpspeaker.api.prefix')

# Maximum value of the Ethernet Tag ID
EVPN_MAX_ET = EvpnNLRI.MAX_ET

# ESI Types
ESI_TYPE_ARBITRARY = EvpnEsi.ARBITRARY
ESI_TYPE_LACP = EvpnEsi.LACP
ESI_TYPE_L2_BRIDGE = EvpnEsi.L2_BRIDGE
ESI_TYPE_MAC_BASED = EvpnEsi.MAC_BASED
ESI_TYPE_ROUTER_ID = EvpnEsi.ROUTER_ID
ESI_TYPE_AS_BASED = EvpnEsi.AS_BASED
SUPPORTED_ESI_TYPES = [
    ESI_TYPE_ARBITRARY,
    ESI_TYPE_LACP,
    ESI_TYPE_L2_BRIDGE,
    ESI_TYPE_MAC_BASED,
    ESI_TYPE_ROUTER_ID,
    ESI_TYPE_AS_BASED,
]

# Constants used in API calls for EVPN
EVPN_ETH_AUTO_DISCOVERY = EvpnEthernetAutoDiscoveryNLRI.ROUTE_TYPE_NAME
EVPN_MAC_IP_ADV_ROUTE = EvpnMacIPAdvertisementNLRI.ROUTE_TYPE_NAME
EVPN_MULTICAST_ETAG_ROUTE = (
    EvpnInclusiveMulticastEthernetTagNLRI.ROUTE_TYPE_NAME)
EVPN_ETH_SEGMENT = EvpnEthernetSegmentNLRI.ROUTE_TYPE_NAME
EVPN_IP_PREFIX_ROUTE = EvpnIpPrefixNLRI.ROUTE_TYPE_NAME
SUPPORTED_EVPN_ROUTE_TYPES = [
    EVPN_ETH_AUTO_DISCOVERY,
    EVPN_MAC_IP_ADV_ROUTE,
    EVPN_MULTICAST_ETAG_ROUTE,
    EVPN_ETH_SEGMENT,
    EVPN_IP_PREFIX_ROUTE,
]

# Constants used in API calls for Flow Specification
FLOWSPEC_FAMILY_IPV4 = FlowSpecIPv4NLRI.FLOWSPEC_FAMILY
FLOWSPEC_FAMILY_IPV6 = FlowSpecIPv6NLRI.FLOWSPEC_FAMILY
FLOWSPEC_FAMILY_VPNV4 = FlowSpecVPNv4NLRI.FLOWSPEC_FAMILY
FLOWSPEC_FAMILY_VPNV6 = FlowSpecVPNv6NLRI.FLOWSPEC_FAMILY
FLOWSPEC_FAMILY_L2VPN = FlowSpecL2VPNNLRI.FLOWSPEC_FAMILY
SUPPORTED_FLOWSPEC_FAMILIES = (
    FLOWSPEC_FAMILY_IPV4,
    FLOWSPEC_FAMILY_IPV6,
    FLOWSPEC_FAMILY_VPNV4,
    FLOWSPEC_FAMILY_VPNV6,
    FLOWSPEC_FAMILY_L2VPN,
)

# Constants for the Traffic Filtering Actions of Flow Specification.
# NOTE: the tuple name below keeps its historical misspelling
# ("SUPPORTTED") because it is part of the module's public namespace.
FLOWSPEC_ACTION_TRAFFIC_RATE = BGPFlowSpecTrafficRateCommunity.ACTION_NAME
FLOWSPEC_ACTION_TRAFFIC_ACTION = BGPFlowSpecTrafficActionCommunity.ACTION_NAME
FLOWSPEC_ACTION_REDIRECT = BGPFlowSpecRedirectCommunity.ACTION_NAME
FLOWSPEC_ACTION_TRAFFIC_MARKING = BGPFlowSpecTrafficMarkingCommunity.ACTION_NAME
FLOWSPEC_ACTION_VLAN = BGPFlowSpecVlanActionCommunity.ACTION_NAME
FLOWSPEC_ACTION_TPID = BGPFlowSpecTPIDActionCommunity.ACTION_NAME
SUPPORTTED_FLOWSPEC_ACTIONS = (
    FLOWSPEC_ACTION_TRAFFIC_RATE,
    FLOWSPEC_ACTION_TRAFFIC_ACTION,
    FLOWSPEC_ACTION_REDIRECT,
    FLOWSPEC_ACTION_TRAFFIC_MARKING,
    FLOWSPEC_ACTION_VLAN,
    FLOWSPEC_ACTION_TPID,
)

# Constants for ESI Label extended community
REDUNDANCY_MODE_ALL_ACTIVE = 'all_active'
REDUNDANCY_MODE_SINGLE_ACTIVE = 'single_active'
SUPPORTED_REDUNDANCY_MODES = [
    REDUNDANCY_MODE_ALL_ACTIVE,
    REDUNDANCY_MODE_SINGLE_ACTIVE,
]

# Constants for BGP Tunnel Encapsulation Attribute
TUNNEL_TYPE_VXLAN = 'vxlan'
TUNNEL_TYPE_NVGRE = 'nvgre'
TUNNEL_TYPE_MPLS = 'mpls'
TUNNEL_TYPE_MPLS_IN_GRE = 'mpls_in_gre'
TUNNEL_TYPE_VXLAN_GRE = 'vxlan_gre'
SUPPORTED_TUNNEL_TYPES = [
    TUNNEL_TYPE_VXLAN,
    TUNNEL_TYPE_NVGRE,
    TUNNEL_TYPE_MPLS,
    TUNNEL_TYPE_MPLS_IN_GRE,
    TUNNEL_TYPE_VXLAN_GRE,
]

# Constants for PMSI Tunnel Attribute
PMSI_TYPE_NO_TUNNEL_INFO = (
    BGPPathAttributePmsiTunnel.TYPE_NO_TUNNEL_INFORMATION_PRESENT
)
PMSI_TYPE_INGRESS_REP = (
    BGPPathAttributePmsiTunnel.TYPE_INGRESS_REPLICATION
)
SUPPORTED_PMSI_TUNNEL_TYPES = [
    PMSI_TYPE_NO_TUNNEL_INFO,
    PMSI_TYPE_INGRESS_REP,
]
@add_bgp_error_metadata(code=PREFIX_ERROR_CODE,
                        sub_code=1,
                        def_desc='Unknown error related to operation on '
                                 'prefixes')
class PrefixError(RuntimeConfigError):
    """Raised by the prefix APIs when an operation on a prefix fails."""
    pass
@validate(name=PREFIX)
def is_valid_prefix(prefix):
    """Reject *prefix* unless it is a valid IPv4 or IPv6 prefix string."""
    valid = (validation.is_valid_ipv4_prefix(prefix)
             or validation.is_valid_ipv6_prefix(prefix))
    if not valid:
        raise ConfigValueError(conf_name=PREFIX, conf_value=prefix)
@validate(name=NEXT_HOP)
def is_valid_next_hop(next_hop):
    """Reject *next_hop* unless it is a valid IPv4 or IPv6 address."""
    valid = (validation.is_valid_ipv4(next_hop)
             or validation.is_valid_ipv6(next_hop))
    if not valid:
        raise ConfigValueError(conf_name=NEXT_HOP, conf_value=next_hop)
@validate(name=EVPN_ROUTE_TYPE)
def is_valid_evpn_route_type(route_type):
    """Reject *route_type* unless it is one of the supported EVPN types."""
    if route_type not in SUPPORTED_EVPN_ROUTE_TYPES:
        raise ConfigValueError(conf_name=EVPN_ROUTE_TYPE,
                               conf_value=route_type)
@validate(name=EVPN_ESI)
def is_valid_esi(esi):
    """Reject *esi* unless it is a valid Ethernet Segment Identifier."""
    if not validation.is_valid_esi(esi):
        raise ConfigValueError(conf_name=EVPN_ESI, conf_value=esi)
@validate(name=EVPN_ETHERNET_TAG_ID)
def is_valid_ethernet_tag_id(ethernet_tag_id):
    """Reject *ethernet_tag_id* unless it is a valid Ethernet Tag ID."""
    if not validation.is_valid_ethernet_tag_id(ethernet_tag_id):
        raise ConfigValueError(conf_name=EVPN_ETHERNET_TAG_ID,
                               conf_value=ethernet_tag_id)
@validate(name=REDUNDANCY_MODE)
def is_valid_redundancy_mode(redundancy_mode):
    """Reject *redundancy_mode* unless it is a supported mode name."""
    if redundancy_mode not in SUPPORTED_REDUNDANCY_MODES:
        raise ConfigValueError(conf_name=REDUNDANCY_MODE,
                               conf_value=redundancy_mode)
@validate(name=MAC_ADDR)
def is_valid_mac_addr(addr):
    """Reject *addr* unless it is a valid MAC address string."""
    if not validation.is_valid_mac(addr):
        raise ConfigValueError(conf_name=MAC_ADDR, conf_value=addr)
@validate(name=IP_ADDR)
def is_valid_ip_addr(addr):
    """Reject *addr* unless it is None or a valid IPv4/IPv6 address.

    None (an empty IP address, i.e. length=0) is deliberately accepted,
    e.g. for L2VPN MAC advertisements from Cisco NX-OS.
    """
    valid = (addr is None
             or validation.is_valid_ipv4(addr)
             or validation.is_valid_ipv6(addr))
    if not valid:
        raise ConfigValueError(conf_name=IP_ADDR, conf_value=addr)
@validate(name=IP_PREFIX)
def is_valid_ip_prefix(prefix):
    """Reject *prefix* unless it is a valid IPv4 or IPv6 prefix string."""
    valid = (validation.is_valid_ipv4_prefix(prefix)
             or validation.is_valid_ipv6_prefix(prefix))
    if not valid:
        raise ConfigValueError(conf_name=IP_PREFIX, conf_value=prefix)
@validate(name=GW_IP_ADDR)
def is_valid_gw_ip_addr(addr):
    """Reject *addr* unless it is a valid IPv4 or IPv6 gateway address."""
    valid = (validation.is_valid_ipv4(addr)
             or validation.is_valid_ipv6(addr))
    if not valid:
        raise ConfigValueError(conf_name=GW_IP_ADDR, conf_value=addr)
@validate(name=MPLS_LABELS)
def is_valid_mpls_labels(labels):
    """Reject *labels* unless it is a valid list of MPLS labels."""
    if not validation.is_valid_mpls_labels(labels):
        raise ConfigValueError(conf_name=MPLS_LABELS, conf_value=labels)
@validate(name=EVPN_VNI)
def is_valid_vni(vni):
    """Reject *vni* unless it is a valid VXLAN Network Identifier."""
    if not validation.is_valid_vni(vni):
        raise ConfigValueError(conf_name=EVPN_VNI, conf_value=vni)
@validate(name=TUNNEL_TYPE)
def is_valid_tunnel_type(tunnel_type):
    """Reject *tunnel_type* unless it is a supported tunnel type name."""
    if tunnel_type not in SUPPORTED_TUNNEL_TYPES:
        raise ConfigValueError(conf_name=TUNNEL_TYPE,
                               conf_value=tunnel_type)
@validate(name=PMSI_TUNNEL_TYPE)
def is_valid_pmsi_tunnel_type(pmsi_tunnel_type):
    """Reject *pmsi_tunnel_type* unless it is a supported PMSI type."""
    if pmsi_tunnel_type not in SUPPORTED_PMSI_TUNNEL_TYPES:
        raise ConfigValueError(conf_name=PMSI_TUNNEL_TYPE,
                               conf_value=pmsi_tunnel_type)
@validate(name=FLOWSPEC_FAMILY)
def is_valid_flowspec_family(flowspec_family):
    """Reject *flowspec_family* unless it is a supported Flow Spec family."""
    if flowspec_family not in SUPPORTED_FLOWSPEC_FAMILIES:
        raise ConfigValueError(conf_name=FLOWSPEC_FAMILY,
                               conf_value=flowspec_family)
@validate(name=FLOWSPEC_RULES)
def is_valid_flowspec_rules(rules):
    """Reject *rules* unless it is a dictionary of Flow Spec rules."""
    if not isinstance(rules, dict):
        raise ConfigValueError(conf_name=FLOWSPEC_RULES, conf_value=rules)
@validate(name=FLOWSPEC_ACTIONS)
def is_valid_flowspec_actions(actions):
    """Reject *actions* when any of its keys is not a supported action."""
    for action_name in actions:
        if action_name not in SUPPORTTED_FLOWSPEC_ACTIONS:
            raise ConfigValueError(conf_name=FLOWSPEC_ACTIONS,
                                   conf_value=actions)
@RegisterWithArgChecks(name='prefix.add_local',
                       req_args=[ROUTE_DISTINGUISHER, PREFIX, NEXT_HOP],
                       opt_args=[VRF_RF])
def add_local(route_dist, prefix, next_hop, route_family=VRF_RF_IPV4):
    """Adds *prefix* from VRF identified by *route_dist* and sets the source as
    network controller.
    """
    try:
        # Insert the new path into the VRF table for route_dist.
        table_manager = CORE_MANAGER.get_core_service().table_manager
        label = table_manager.update_vrf_table(
            route_dist, prefix, next_hop, route_family)
        # Only one label is allocated per local prefix, so report the
        # first one from the returned list.
        label = label[0] if label else label
        # Success response, including the new label.
        return [{ROUTE_DISTINGUISHER: route_dist, PREFIX: prefix,
                 VRF_RF: route_family, VPN_LABEL: label}]
    except BgpCoreError as e:
        raise PrefixError(desc=e)
@RegisterWithArgChecks(name='prefix.delete_local',
                       req_args=[ROUTE_DISTINGUISHER, PREFIX],
                       opt_args=[VRF_RF])
def delete_local(route_dist, prefix, route_family=VRF_RF_IPV4):
    """Deletes/withdraws *prefix* from VRF identified by *route_dist* and
    source as network controller.
    """
    try:
        # Withdraw the path from the VRF table for route_dist.
        table_manager = CORE_MANAGER.get_core_service().table_manager
        table_manager.update_vrf_table(route_dist, prefix,
                                       route_family=route_family,
                                       is_withdraw=True)
        # Success response.
        return [{ROUTE_DISTINGUISHER: route_dist, PREFIX: prefix,
                 VRF_RF: route_family}]
    except BgpCoreError as e:
        raise PrefixError(desc=e)
# =============================================================================
# BGP EVPN Routes related APIs
# =============================================================================
@RegisterWithArgChecks(name='evpn_prefix.add_local',
                       req_args=[EVPN_ROUTE_TYPE, ROUTE_DISTINGUISHER,
                                 NEXT_HOP],
                       opt_args=[EVPN_ESI, EVPN_ETHERNET_TAG_ID,
                                 REDUNDANCY_MODE, MAC_ADDR, IP_ADDR, IP_PREFIX,
                                 GW_IP_ADDR, EVPN_VNI, TUNNEL_TYPE,
                                 PMSI_TUNNEL_TYPE])
def add_evpn_local(route_type, route_dist, next_hop, **kwargs):
    """Adds EVPN route from VRF identified by *route_dist*.

    Returns a one-element list containing the stored route attributes
    plus the allocated VPN label.
    :raises ConfigValueError: when a zero ESI is given for route types
        that require a non-zero one.
    :raises PrefixError: when the core service rejects the route.
    """
    # Ethernet A-D and Ethernet Segment routes require a non-zero ESI.
    if (route_type in [EVPN_ETH_AUTO_DISCOVERY, EVPN_ETH_SEGMENT]
            and kwargs['esi'] == 0):
        raise ConfigValueError(conf_name=EVPN_ESI,
                               conf_value=kwargs['esi'])
    try:
        # Create new path and insert into appropriate VRF table.
        tm = CORE_MANAGER.get_core_service().table_manager
        label = tm.update_vrf_table(route_dist, next_hop=next_hop,
                                    route_family=VRF_RF_L2_EVPN,
                                    route_type=route_type, **kwargs)
        # Currently we only allocate one label per local route,
        # so we share first label from the list.
        if label:
            label = label[0]
        # BUG FIX: the original returned [{...}.update(kwargs)], but
        # dict.update() returns None, so the API always answered [None].
        # Build the response dict first, then merge kwargs into it.
        reply = {EVPN_ROUTE_TYPE: route_type,
                 ROUTE_DISTINGUISHER: route_dist,
                 VRF_RF: VRF_RF_L2_EVPN,
                 VPN_LABEL: label}
        reply.update(kwargs)
        return [reply]
    except BgpCoreError as e:
        raise PrefixError(desc=e)
@RegisterWithArgChecks(name='evpn_prefix.delete_local',
                       req_args=[EVPN_ROUTE_TYPE, ROUTE_DISTINGUISHER],
                       opt_args=[EVPN_ESI, EVPN_ETHERNET_TAG_ID, MAC_ADDR,
                                 IP_ADDR, IP_PREFIX, EVPN_VNI])
def delete_evpn_local(route_type, route_dist, **kwargs):
    """Deletes/withdraws EVPN route from VRF identified by *route_dist*.

    Returns a one-element list describing the withdrawn route.
    :raises PrefixError: when the core service rejects the withdrawal.
    """
    try:
        tm = CORE_MANAGER.get_core_service().table_manager
        tm.update_vrf_table(route_dist,
                            route_family=VRF_RF_L2_EVPN,
                            route_type=route_type, is_withdraw=True, **kwargs)
        # BUG FIX: the original returned [{...}.update(kwargs)], but
        # dict.update() returns None, so the API always answered [None].
        reply = {EVPN_ROUTE_TYPE: route_type,
                 ROUTE_DISTINGUISHER: route_dist,
                 VRF_RF: VRF_RF_L2_EVPN}
        reply.update(kwargs)
        return [reply]
    except BgpCoreError as e:
        raise PrefixError(desc=e)
# =============================================================================
# BGP Flow Specification Routes related APIs
# =============================================================================
@RegisterWithArgChecks(
    name='flowspec.add_local',
    req_args=[FLOWSPEC_FAMILY, ROUTE_DISTINGUISHER, FLOWSPEC_RULES],
    opt_args=[FLOWSPEC_ACTIONS])
def add_flowspec_local(flowspec_family, route_dist, rules, **kwargs):
    """Adds Flow Specification route from VRF identified by *route_dist*.

    Returns a one-element list describing the added route.
    :raises PrefixError: when the core service rejects the route.
    """
    try:
        # Create new path and insert into appropriate VRF table.
        tm = CORE_MANAGER.get_core_service().table_manager
        tm.update_flowspec_vrf_table(
            flowspec_family=flowspec_family, route_dist=route_dist,
            rules=rules, **kwargs)
        # BUG FIX: the original returned [{...}.update(kwargs)], but
        # dict.update() returns None, so the API always answered [None].
        reply = {FLOWSPEC_FAMILY: flowspec_family,
                 ROUTE_DISTINGUISHER: route_dist,
                 FLOWSPEC_RULES: rules}
        reply.update(kwargs)
        return [reply]
    except BgpCoreError as e:
        raise PrefixError(desc=e)
@RegisterWithArgChecks(
    name='flowspec.del_local',
    req_args=[FLOWSPEC_FAMILY, ROUTE_DISTINGUISHER, FLOWSPEC_RULES])
def del_flowspec_local(flowspec_family, route_dist, rules):
    """Deletes/withdraws Flow Specification route from VRF identified
    by *route_dist*.
    """
    try:
        # Withdraw the Flow Spec path from the VRF table.
        table_manager = CORE_MANAGER.get_core_service().table_manager
        table_manager.update_flowspec_vrf_table(
            flowspec_family=flowspec_family, route_dist=route_dist,
            rules=rules, is_withdraw=True)
        # Success response.
        return [{FLOWSPEC_FAMILY: flowspec_family,
                 ROUTE_DISTINGUISHER: route_dist,
                 FLOWSPEC_RULES: rules}]
    except BgpCoreError as e:
        raise PrefixError(desc=e)
|
|
#! /usr/bin/env python
__author__ = "Jose Caballero"
__email__ = "jcaballero@bnl.gov"
"""
Code to store and manipulate data.
-------------------------------------------------------------------------------
class StatusInfo
-------------------------------------------------------------------------------
This is the only class implemented that is meant to be public.
The data stored by instances of class StatusInfo must be a list of items.
These items can be anything, including objects.
A typical example is when data is a list of HTCondor ClassAds, where each
item in the data list represents an HTCondor job.
Class StatusInfo has several methods to manipulate the data,
but in all cases the output of the method is a new instance of one of the
classes implemented: StatusInfo, _DictStatusInfo, etc.
Methods never modify the current instance data.
This allows to perform different manipulations from the same source object.
There are two types of methods in class StatusInfo:
- methods whose object output accepts further processing.
Examples are methods indexby(), filter(), and map().
- methods whose object output can not be processed anymore.
An attempt to call any method on these instances
will raise an Exception.
Examples are methods reduce(), and process().
The method indexby() is somehow special.
It is being used to split the stored data into a dictionary,
according to whatever rule is provided.
The values of this dictionary are themselves new StatusInfo instances.
Therefore, the output of calling indexby() once is a _DictStatusInfo object
with data:
self.data = {
key1: <StatusInfo>,
key2: <StatusInfo>,
...
keyN: <StatusInfo>
}
-------------------------------------------------------------------------------
The UML source for the classes is as follows:
@startuml
object <|-- _Base
_Base <|-- _BaseDict
_Base <|-- StatusInfo
_Base <|-- _NonMutableStatusInfo
_AnalysisInterface <|-- StatusInfo
_AnalysisInterface <|-- _DictStatusInfo
_BaseDict <|-- _DictStatusInfo
_BaseDict <|-- _NonMutableDictStatusInfo
_GetRawBase <|-- StatusInfo
_GetRawBase <|-- _NonMutableStatusInfo
@enduml
+--------+
| object |
+--------+
^
|
+--------------------+ +-------+
| _AnalysisInterface | +------------------------------->| _Base |<-----------------+
+--------------------+ | +-------+ |
^ ^ | +-------------+ ^ +-----------+
| | | | _GetRawBase | | | _BaseDict |
| | | +-------------+ | +-----------+
| | | ^ ^ | ^ ^
| | | | | | | |
| | | | | | | |
| | | | | | | |
| | | | | | | |
| +------------+ | | +-----------------------+ | |
| | StatusInfo |-------+ +---| _NonMutableStatusInfo | | |
| +------------+ +-----------------------+ | |
| +-----------------+ | +---------------------------+
+------------------------------------| _DictStatusInfo |-----------------------+ | _NonMutableDictStatusInfo |
+-----------------+ +---------------------------+
-------------------------------------------------------------------------------
Analyzers
-------------------------------------------------------------------------------
The input to all methods is an object of type Analyzer.
Analyzers are classes that implement the rules or policies to be used
for each method call.
For example:
- a call to method indexby() expects an object of type AnalyzerIndexBy
- a call to method map() expects an object of type AnalyzerMap
- a call to method reduce() expects an object of type AnalyzerReduce
- etc.
Each Analyzer object must have implemented a method
with the same name that the StatusInfo's method it is intended for.
For example:
- classes AnalyzerIndexBy must implement method indexby()
- classes AnalyzerMap must implement method map()
- classes AnalyzerReduce must implement method reduce()
- ...
Passing an analyzer object that does not implement the right method will
raise an IncorrectAnalyzer Exception.
A few basic pre-made Analyzers have been implemented, ready to use.
"""
import copy
import datetime
import inspect
import logging
import logging.handlers
import threading
import time
import traceback
import os
import pwd
import sys
# =============================================================================
# Decorators
#
# Note:
# the decorator must be implemented before the classes using it
# otherwise, they do not find it
# =============================================================================
def validate_call(method):
    """
    Decorator validating calls to the data-processing methods.

    It checks that the method is being called with the right type of
    Analyzer: the analyzer's ``analyzertype`` attribute must equal the
    name of the decorated method.
    An IncorrectAnalyzer exception is raised when that criterion is
    not met.
    """
    # Local import keeps this module's top-level imports untouched.
    from functools import wraps

    # FIX: use functools.wraps so the wrapped method keeps its
    # __name__/__doc__ (helps debugging and introspection).
    @wraps(method)
    def wrapper(self, analyzer, *k, **kw):
        method_name = method.__name__
        analyzertype = analyzer.analyzertype
        if analyzertype != method_name:
            msg = 'Analyzer object {obj} is not type {name}. Raising exception.'
            msg = msg.format(obj=analyzer,
                             name=method_name)
            self.log.error(msg)
            raise IncorrectAnalyzer(analyzer, analyzertype, method_name)
        out = method(self, analyzer, *k, **kw)
        return out
    return wrapper
def catch_exception(method):
    """
    Decorator converting any exception raised during data processing
    into an AnalyzerFailure whose message records the original exception
    type and content, the failing method and the analyzer in use.
    """
    # Local import keeps this module's top-level imports untouched.
    from functools import wraps

    # FIX: use functools.wraps so the wrapped method keeps its
    # __name__/__doc__ (helps debugging and introspection).
    @wraps(method)
    def wrapper(self, analyzer):
        try:
            out = method(self, analyzer)
        except Exception as ex:
            msg = 'Exception of type "%s" ' % ex.__class__.__name__
            msg += 'with content "%s" ' % ex
            msg += 'while calling "%s" ' % method.__name__
            msg += 'with analyzer "%s"' % analyzer
            raise AnalyzerFailure(msg)
        else:
            return out
    return wrapper
# =============================================================================
# Base classes and interfaces
# =============================================================================
class _Base(object):
    """
    Common base class: stores the payload in self.data and records a
    creation timestamp (epoch seconds, defaulting to "now").
    """
    def __init__(self, data, timestamp=None):
        """
        :param data: the data to be recorded
        :param timestamp: the time when this object was created
        """
        # All classes in this module share the 'info' logger; a
        # NullHandler avoids "no handler" warnings when the hosting
        # application configures no logging.
        self.log = logging.getLogger('info')
        self.log.addHandler(logging.NullHandler())
        msg ='Initializing object with input options: \
              data={data}, timestamp={timestamp}'
        msg = msg.format(data=data,
                         timestamp=timestamp)
        self.log.debug(msg)
        self.data = data
        # A falsy timestamp (None or 0) is replaced by the current time.
        if not timestamp:
            timestamp = int(time.time())
            msg = 'Setting timestamp to %s' %timestamp
            self.log.debug(msg)
        self.timestamp = timestamp
        self.log.debug('Object initialized')

    def get(self, *key_l):
        """
        returns the data hosted by the Info object in the
        tree structure pointed by all keys
        The output is the data, either a dictionary or the original raw list
        :param key_l list: list of keys for each nested dictionary
        :rtype data:
        :raises MissingKey: when a key is not present at its level
        """
        # No keys left: this node's payload is the answer.
        if len(key_l) == 0:
            return self.data
        else:
            # Descend one level and recurse with the remaining keys.
            key = key_l[0]
            if key not in self.data.keys():
                raise MissingKey(key)
            data = self.data[key]
            return data.get(*key_l[1:])
class _BaseDict(_Base):
    """
    _Base variant whose payload must be a dictionary (typically of
    StatusInfo objects keyed by the indexby() grouping value).
    """
    def __init__(self, data, timestamp=None):
        super(_BaseDict, self).__init__(data, timestamp)
        # Note: deliberately an exact type check, not isinstance().
        if type(self.data) is not dict:
            raise IncorrectInputDataType(dict)

    def getraw(self):
        """Recursively unwrap the contained objects into plain data."""
        return dict((key, value.getraw())
                    for key, value in self.data.items())

    def __getitem__(self, key):
        """
        returns the Info object pointed by the key
        :param key: the key in the higher level dictionary
        :rtype StatusInfo:
        """
        if key not in self.data:
            raise MissingKey(key)
        return self.data[key]
# extra get methods
class _GetRawBase:
def getraw(self):
return self.data
# interfaces
class _AnalysisInterface:
def indexby(self, analyzer):
raise NotImplementedError
def map(self, analyzer):
raise NotImplementedError
def filter(self, analyzer):
raise NotImplementedError
def reduce(self, analyzer):
raise NotImplementedError
def transform(self, analyzer):
raise NotImplementedError
def process(self, analyzer):
raise NotImplementedError
# =============================================================================
# Info class
# =============================================================================
class StatusInfo(_Base, _AnalysisInterface, _GetRawBase):
    """
    Public container for a list of items (e.g. HTCondor job ClassAds).

    Every processing method returns a NEW object and never modifies this
    instance, so several manipulations can start from the same source.
    """
    def __init__(self, data, timestamp=None):
        """
        :param list data: the list of items to record
        :param timestamp: creation time; defaults to now (epoch seconds)
        :raises IncorrectInputDataType: when data is not a list
        """
        super(StatusInfo, self).__init__(data, timestamp)
        if type(self.data) is not list:
            # FIX: the original message said "is not a dict", but this
            # class requires a list.
            msg = 'Input data %s is not a list. Raising exception' % data
            self.log.error(msg)
            raise IncorrectInputDataType(list)

    def analyze(self, analyzer):
        """
        generic method that picks the right one
        based on the type of analyzer
        :param analyzer: an Analyzer object
        :rtype StatusInfo:
        :raises NotAnAnalyzer: when the object has no valid analyzertype
        """
        self.log.debug('Starting')
        if analyzer.analyzertype == 'indexby':
            return self.indexby(analyzer)
        elif analyzer.analyzertype == 'filter':
            return self.filter(analyzer)
        elif analyzer.analyzertype == 'map':
            return self.map(analyzer)
        elif analyzer.analyzertype == 'reduce':
            return self.reduce(analyzer)
        elif analyzer.analyzertype == 'transform':
            return self.transform(analyzer)
        elif analyzer.analyzertype == 'process':
            return self.process(analyzer)
        else:
            # FIX: the original logged the raw format string without
            # interpolating the analyzer; let logging substitute it.
            self.log.error(
                'Input object %s is not a valid analyzer. Raising exception.',
                analyzer)
            raise NotAnAnalyzer()

    def apply_algorithm(self, algorithm):
        """
        invoke all steps in an Algorithm object
        and returns the final output
        :param Algorithm algorithm:
        :rtype StatusInfo:
        """
        return algorithm.analyze(self)

    # -------------------------------------------------------------------------
    # methods to manipulate the data
    # -------------------------------------------------------------------------
    @validate_call
    def indexby(self, analyzer):
        """
        groups the items recorded in self.data into a dictionary
        and creates a new StatusInfo object with it.
            1. make a dictionary grouping items according to rules in analyzer
            2. convert that dictionary into a dictionary of StatusInfo objects
            3. make a new StatusInfo with that dictionary
        :param analyzer: an instance of AnalyzerIndexBy-type class
                         implementing method indexby()
        :rtype StatusInfo:
        """
        self.log.debug('Starting with analyzer %s' % analyzer)
        new_data = self.__indexby(analyzer)
        new_info = _DictStatusInfo(new_data, timestamp=self.timestamp)
        return new_info

    @catch_exception
    def __indexby(self, analyzer):
        # 1. group items by the key computed by the analyzer;
        #    a None key means "drop this item".
        tmp_new_data = {}
        for item in self.data:
            key = analyzer.indexby(item)
            if key is not None:
                if key not in tmp_new_data.keys():
                    tmp_new_data[key] = []
                tmp_new_data[key].append(item)
        # 2. wrap each group in its own StatusInfo
        new_data = {}
        for k, v in tmp_new_data.items():
            new_data[k] = StatusInfo(v, timestamp=self.timestamp)
        return new_data

    # -------------------------------------------------------------------------
    @validate_call
    def map(self, analyzer):
        """
        modifies each item in self.data according to rules
        in analyzer
        :param analyzer: an instance of AnalyzerMap-type class
                         implementing method map()
        :rtype StatusInfo:
        """
        self.log.debug('Starting with analyzer %s' % analyzer)
        new_data = self.__map(analyzer)
        new_info = StatusInfo(new_data, timestamp=self.timestamp)
        return new_info

    @catch_exception
    def __map(self, analyzer):
        new_data = []
        for item in self.data:
            new_item = analyzer.map(item)
            new_data.append(new_item)
        return new_data

    # -------------------------------------------------------------------------
    @validate_call
    def filter(self, analyzer):
        """
        eliminates the items in self.data that do not pass
        the filter implemented in analyzer
        :param analyzer: an instance of AnalyzerFilter-type class
                         implementing method filter()
        :rtype StatusInfo:
        """
        self.log.debug('Starting with analyzer %s' % analyzer)
        new_data = self.__filter(analyzer)
        new_info = StatusInfo(new_data, timestamp=self.timestamp)
        return new_info

    @catch_exception
    def __filter(self, analyzer):
        new_data = []
        for item in self.data:
            if analyzer.filter(item):
                new_data.append(item)
        return new_data

    # -------------------------------------------------------------------------
    @validate_call
    def reduce(self, analyzer):
        """
        process the entire self.data at the raw level and accumulate values
        :param analyzer: an instance of AnalyzerReduce-type class
                         implementing method reduce()
        :rtype StatusInfo:
        """
        self.log.debug('Starting with analyzer %s' % analyzer)
        new_data = self.__reduce(analyzer)
        # The result is terminal: it cannot be processed any further.
        new_info = _NonMutableStatusInfo(new_data,
                                         timestamp=self.timestamp)
        return new_info

    @catch_exception
    def __reduce(self, analyzer):
        value = analyzer.init_value
        for item in self.data:
            value = analyzer.reduce(value, item)
        return value

    # -------------------------------------------------------------------------
    @validate_call
    def transform(self, analyzer):
        """
        process the entire self.data at the raw level
        :param analyzer: an instance of AnalyzerTransform-type class
                         implementing method transform()
        :rtype StatusInfo:
        """
        self.log.debug('Starting with analyzer %s' % analyzer)
        new_data = self.__transform(analyzer)
        new_info = StatusInfo(new_data, timestamp=self.timestamp)
        return new_info

    @catch_exception
    def __transform(self, analyzer):
        new_data = analyzer.transform(self.data)
        return new_data

    # -------------------------------------------------------------------------
    @validate_call
    def process(self, analyzer):
        """
        process the entire self.data at the raw level
        :param analyzer: an instance of AnalyzerProcess-type class
                         implementing method process()
        :rtype StatusInfo:
        """
        self.log.debug('Starting with analyzer %s' % analyzer)
        new_data = self.__process(analyzer)
        # The result is terminal: it cannot be processed any further.
        new_info = _NonMutableStatusInfo(new_data, timestamp=self.timestamp)
        return new_info

    @catch_exception
    def __process(self, analyzer):
        new_data = analyzer.process(self.data)
        return new_data
# =============================================================================
class _DictStatusInfo(_BaseDict, _AnalysisInterface):
    """
    Container returned by indexby(): a dictionary of StatusInfo objects.

    Each processing method is applied key by key to the contained
    StatusInfo objects and the per-key results are collected into a new
    container, leaving this instance untouched.
    """
    # -------------------------------------------------------------------------
    # methods to manipulate the data
    # -------------------------------------------------------------------------
    def _apply(self, method_name, analyzer, out_class):
        # Shared driver for all processing methods (the six public
        # methods below were identical except for the delegated method
        # and the class of the returned container).
        new_data = {}
        for key, statusinfo in self.data.items():
            self.log.debug(
                'calling %s() for content in key %s' % (method_name, key))
            new_data[key] = getattr(statusinfo, method_name)(analyzer)
        return out_class(new_data, timestamp=self.timestamp)

    @validate_call
    def indexby(self, analyzer):
        """Apply indexby() per key; returns a new _DictStatusInfo."""
        return self._apply('indexby', analyzer, _DictStatusInfo)

    @validate_call
    def map(self, analyzer):
        """Apply map() per key; returns a new _DictStatusInfo."""
        return self._apply('map', analyzer, _DictStatusInfo)

    @validate_call
    def filter(self, analyzer):
        """Apply filter() per key; returns a new _DictStatusInfo."""
        return self._apply('filter', analyzer, _DictStatusInfo)

    @validate_call
    def reduce(self, analyzer):
        """Apply reduce() per key; returns a terminal container."""
        return self._apply('reduce', analyzer, _NonMutableDictStatusInfo)

    @validate_call
    def transform(self, analyzer):
        """Apply transform() per key; returns a new _DictStatusInfo."""
        return self._apply('transform', analyzer, _DictStatusInfo)

    @validate_call
    def process(self, analyzer):
        """Apply process() per key; returns a terminal container."""
        return self._apply('process', analyzer, _NonMutableDictStatusInfo)
class _NonMutableStatusInfo(_Base, _GetRawBase):
    # Terminal container returned by reduce()/process(): it implements
    # none of the _AnalysisInterface methods, so any further processing
    # attempt fails with AttributeError.
    pass
class _NonMutableDictStatusInfo(_BaseDict):
    # Terminal dictionary container returned by _DictStatusInfo.reduce()
    # and .process(): it implements none of the _AnalysisInterface
    # methods, so any further processing attempt fails with AttributeError.
    pass
# =============================================================================
# Analyzers
# =============================================================================
class Analyzer(object):
    # Common base class for all analyzer types.
    pass
class AnalyzerIndexBy(Analyzer):
    # Marker checked by the validate_call decorator.
    analyzertype = "indexby"

    def indexby(self):
        """Subclasses must return the grouping key for one item, or None
        to drop the item."""
        raise NotImplementedError
class AnalyzerFilter(Analyzer):
    # Marker checked by the validate_call decorator.
    analyzertype = "filter"

    def filter(self):
        """Subclasses must return True to keep an item, False to drop it."""
        raise NotImplementedError
class AnalyzerMap(Analyzer):
    # Marker checked by the validate_call decorator.
    analyzertype = "map"

    def map(self):
        """Subclasses must return the transformed version of one item."""
        raise NotImplementedError
class AnalyzerReduce(Analyzer):
    # Marker checked by the validate_call decorator.
    analyzertype = "reduce"

    def __init__(self, init_value=None):
        # Seed value for the accumulation performed by reduce().
        self.init_value = init_value

    def reduce(self):
        """Subclasses must combine the accumulated value with one item
        and return the new accumulated value."""
        raise NotImplementedError
class AnalyzerTransform(Analyzer):
    # Marker checked by the validate_call decorator.
    analyzertype = "transform"

    def transform(self):
        """Subclasses must return a new data list built from the whole
        input data."""
        raise NotImplementedError
class AnalyzerProcess(Analyzer):
    # Marker checked by the validate_call decorator.
    analyzertype = "process"

    def process(self):
        """Subclasses must return the final result computed from the
        whole input data."""
        raise NotImplementedError
class Algorithm(object):
    """
    container for multiple Analyzer objects,
    applied in the order they were added
    """
    def __init__(self):
        self.analyzer_l = []

    def add(self, analyzer):
        """Append one analyzer to the pipeline."""
        self.analyzer_l.append(analyzer)

    def analyze(self, input_data):
        """Feed input_data through every registered analyzer, chaining
        each step's output into the next."""
        current = input_data
        for analyzer in self.analyzer_l:
            current = current.analyze(analyzer)
        return current
# =============================================================================
# Some basic pre-made Analyzers
# =============================================================================
class IndexByKey(AnalyzerIndexBy):
    """Group items by the value stored under a fixed key."""

    def __init__(self, key):
        self.key = key

    def indexby(self, job):
        """Return job[key]; items where the key cannot be read are
        dropped (None)."""
        try:
            return job[self.key]
        except Exception:
            return None
class IndexByKeyRemap(AnalyzerIndexBy):
    """Group items by str(job[key]) translated through a mapping dict;
    items whose value is missing or unmapped are dropped (None)."""

    def __init__(self, key, mapping_d):
        self.key = key
        self.mapping_d = mapping_d

    def indexby(self, job):
        try:
            raw = str(job[self.key])
        except Exception:
            return None
        return self.mapping_d.get(raw, None)
class AttributeValue(AnalyzerFilter):
    """
    Filter analyzer keeping only the jobs whose *attribute* equals
    *value*.
    """
    def __init__(self, attribute, value):
        # FIX: filter() used self.log, but no logger was ever created,
        # so the error path raised AttributeError instead of logging.
        # Use the same 'info' logger as the rest of the module.
        self.log = logging.getLogger('info')
        self.log.addHandler(logging.NullHandler())
        self.attribute = attribute
        self.value = value

    def filter(self, job):
        """Return True when job[attribute] == value.
        :raises AnalyzerFailure: when the job lacks the attribute
        """
        if self.attribute not in job.keys():
            msg = 'job {job} does not have key {key}.'
            msg = msg.format(job=job,
                             key=self.attribute)
            logmsg = msg + ' Raising Exception.'
            self.log.error(logmsg)
            raise AnalyzerFailure(msg)
        return job[self.attribute] == self.value
class Count(AnalyzerProcess):
    """Process analyzer returning the number of items in the data."""

    def process(self, data):
        return len(data)
class TotalRunningTimeFromRunningJobs(AnalyzerReduce):
    """Accumulate the seconds each job has spent in its current status,
    measured against the time this analyzer was created."""

    def __init__(self):
        self.now = int(time.time())
        super(TotalRunningTimeFromRunningJobs, self).__init__(0)

    def reduce(self, value, job):
        elapsed = self.now - int(job['enteredcurrentstatus'])
        if value:
            elapsed += value
        return elapsed
class TotalRunningTimeFromRunningAndFinishedJobs(AnalyzerReduce):
    """
    Accumulate running time: for running jobs (jobstatus 2) the time
    spent in the current status, for completed/removed jobs (jobstatus
    3 or 4) the recorded remote wall-clock time.
    """
    def __init__(self):
        self.now = int(time.time())
        super(TotalRunningTimeFromRunningAndFinishedJobs, self).__init__(0)

    def reduce(self, value, job):
        if job['jobstatus'] == 2:
            running = self.now - int(job['enteredcurrentstatus'])
        elif job['jobstatus'] == 3 or \
           job['jobstatus'] == 4:
            try:
                running = int(job['remotewallclocktime'])
            except Exception:
                # FIX: was a bare "except:", which also swallowed
                # SystemExit/KeyboardInterrupt.
                # unclear if a finished job that is still in condor_q
                # but not yet in condor_history
                # has classad remotewallclocktime
                running = 0
        else:
            running = 0
        if value:
            running += value
        return running
class IdleTime(AnalyzerMap):
    """Map each job to the seconds elapsed since it entered its current
    status, measured against the time this analyzer was created."""

    def __init__(self):
        self.now = int(time.time())

    def map(self, job):
        entered = int(job['enteredcurrentstatus'])
        return self.now - entered
class ApplyFunction(AnalyzerProcess):
    """Process analyzer applying an arbitrary callable to the whole
    data set; falsy data (e.g. an empty list) yields None."""

    def __init__(self, func):
        self.func = func

    def process(self, data):
        return self.func(data) if data else None
class CreateANY(AnalyzerTransform):
"""
duplicates the list of jobs,
adding a class MATCH_APF_QUEUE=ANY to the new ones
"""
def transform(self, job_l):
new_job_l = []
for job in job_l:
new_job = copy.copy(job)
new_job['match_apf_queue'] = 'ANY'
new_job_l.append(job)
new_job_l.append(new_job)
return new_job_l
# =============================================================================
# Exceptions
# =============================================================================
class IncorrectInputDataType(Exception):
    """Raised when a container is built with a payload of the wrong type."""

    def __init__(self, type):
        msg = 'Type of input data is not %s' % type
        self.value = msg

    def __str__(self):
        return repr(self.value)
class NotAnAnalyzer(Exception):
    """Raised when an object lacks a valid analyzertype attribute."""

    def __init__(self):
        self.value = 'object does not have a valid analyzertype value'

    def __str__(self):
        return repr(self.value)
class IncorrectAnalyzer(Exception):
    """Raised when an analyzer is passed to the wrong processing method."""

    def __init__(self, analyzer, analyzertype, methodname):
        template = "Analyzer object {ana} is of type '{atype}' but used for '{call}()'"
        self.value = template.format(ana=analyzer,
                                     atype=analyzertype,
                                     call=methodname)

    def __str__(self):
        return repr(self.value)
class MissingKey(Exception):
    """Raised when a requested key is absent from the data dictionary."""

    def __init__(self, key):
        msg = "Key %s is not in the data dictionary" % key
        self.value = msg

    def __str__(self):
        return repr(self.value)
class AnalyzerFailure(Exception):
    """Generic exception for any otherwise unclassified analyzer failure."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
# =============================================================================
# class DataItem
# =============================================================================
class DataItem(object):
    """
    Container for an arbitrary dictionary whose entries can also be
    read as if they were attributes of the instance: self.foo returns
    self._data_d['foo'].
    """

    def __init__(self, data_d=None, default=0, timestamp=None):
        """
        :param dict data_d: input data; a fresh empty dict is created
            when omitted.  (BUG FIX: the original signature was
            ``data_d={}``, a mutable default argument, so every
            instance created without an explicit dict shared -- and
            mutated -- the same object.)
        :param default: value returned when the attribute being read
            is not a key in the dictionary
        :param timestamp: creation time (epoch seconds); any falsy
            value is replaced with the current time
        """
        self.log = logging.getLogger('info')
        self.log.addHandler(logging.NullHandler())
        msg = ('Initializing object with input options: '
               'data_d={data_d}, default={default}, timestamp={timestamp}')
        msg = msg.format(data_d=data_d,
                         default=default,
                         timestamp=timestamp)
        self.log.debug(msg)
        self._data_d = {} if data_d is None else data_d
        self._default = default
        if not timestamp:
            timestamp = int(time.time())
        msg = 'Setting timestamp to %s' % timestamp
        self.log.debug(msg)
        self.timestamp = timestamp

    def __getattr__(self, attr):
        """
        Read the values in the dictionary as if the keys were
        attributes of the class; missing keys yield the default.
        NOTE: this also answers lookups for special methods the class
        does not define (e.g. during copy/pickle), returning the
        default for them too -- unchanged from the original behavior.
        """
        return self._data_d.get(attr, self._default)

    def __setitem__(self, attr, value):
        """
        Allow dict-style assignment: self[attr] = value.
        """
        self._data_d[attr] = value

    def __getitem__(self, attr):
        """
        Allow dict-style access: self[attr].
        """
        return self.__getattr__(attr)

    def __str__(self):
        # 'key: value' pairs joined with commas, in insertion order.
        str_l = ['%s: %s' % pair for pair in self._data_d.items()]
        return ', '.join(str_l)

    def __repr__(self):
        return str(self)
|
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: xmlrpclib.py
"""
An XML-RPC client interface for Python.
The marshalling and response parser code can also be used to
implement XML-RPC servers.
Exported exceptions:
Error Base class for client errors
ProtocolError Indicates an HTTP protocol error
ResponseError Indicates a broken response package
Fault Indicates an XML-RPC fault package
Exported classes:
ServerProxy Represents a logical connection to an XML-RPC server
MultiCall Executor of boxcared xmlrpc requests
Boolean boolean wrapper to generate a "boolean" XML-RPC value
DateTime dateTime wrapper for an ISO 8601 string or time tuple or
localtime integer value to generate a "dateTime.iso8601"
XML-RPC value
Binary binary data wrapper
SlowParser Slow but safe standard parser (based on xmllib)
Marshaller Generate an XML-RPC params chunk from a Python data structure
Unmarshaller Unmarshal an XML-RPC response from incoming XML event message
Transport Handles an HTTP transaction to an XML-RPC server
SafeTransport Handles an HTTPS transaction to an XML-RPC server
Exported constants:
True
False
Exported functions:
boolean Convert any Python value to an XML-RPC boolean
getparser Create instance of the fastest available parser & attach
to an unmarshalling object
dumps Convert an argument tuple or a Fault instance to an XML-RPC
request (or response, if the methodresponse option is used).
loads Convert an XML-RPC packet to unmarshalled data plus a method
name (None if not present).
"""
import re
import string
import time
import operator
from types import *
import socket
import errno
import httplib
# Feature detection: optional imports/probes tolerated to fail so the
# module degrades gracefully on older interpreters.
try:
    import gzip
except ImportError:
    gzip = None  # gzip content-encoding support disabled

try:
    unicode
except NameError:
    unicode = None  # interpreter has no unicode type

try:
    import datetime
except ImportError:
    datetime = None

try:
    # True on Pythons where bool is a real builtin type.
    _bool_is_builtin = False.__class__.__name__ == 'bool'
except NameError:
    _bool_is_builtin = 0
def _decode(data, encoding, is8bit=re.compile('[\x80-\xff]').search):
    """Decode an 8-bit string to unicode using *encoding*.

    Strings containing no bytes above 0x7f are returned unchanged.
    BUG FIX: the extracted copy had the character class mangled to
    '[-]' (which matches a literal hyphen); restored the upstream
    xmlrpclib pattern matching any byte in the 0x80-0xff range.
    """
    if unicode and encoding and is8bit(data):
        data = unicode(data, encoding)
    return data
def escape(s, replace=string.replace):
    """Escape the XML special characters '&', '<' and '>' in *s*.

    BUG FIX: in the extracted copy the entity references had been
    unescaped (e.g. replacing '&' with '&'), turning the function into
    a no-op and producing ill-formed XML.  Restored the upstream
    replacements; '&' must be replaced first.
    """
    s = replace(s, '&', '&amp;')
    s = replace(s, '<', '&lt;')
    return replace(s, '>', '&gt;')
# _stringify: squeeze unicode back to an 8-bit ascii string when possible.
if unicode:
    def _stringify(string):
        # NOTE: the parameter intentionally shadows the 'string' module
        # inside this function (unchanged from upstream).
        try:
            return string.encode('ascii')
        except UnicodeError:
            return string
else:
    def _stringify(string):
        # No unicode support available: nothing to convert.
        return string
__version__ = '1.0.1'

# Range of permitted XML-RPC <int> values (signed 32-bit).
MAXINT = 2147483647
MININT = -2147483648

# Fault codes from the XML-RPC specification.
PARSE_ERROR = -32700
SERVER_ERROR = -32600
APPLICATION_ERROR = -32500
SYSTEM_ERROR = -32400
TRANSPORT_ERROR = -32300

# Specific errors.
NOT_WELLFORMED_ERROR = -32700
UNSUPPORTED_ENCODING = -32701
INVALID_ENCODING_CHAR = -32702
INVALID_XMLRPC = -32600
METHOD_NOT_FOUND = -32601
INVALID_METHOD_PARAMS = -32602
INTERNAL_ERROR = -32603
class Error(Exception):
    """Root of the XML-RPC client exception hierarchy.

    String conversion is delegated to ``repr`` so subclasses only need
    to customize ``__repr__``.
    """

    def __str__(self):
        return repr(self)
class ProtocolError(Error):
    """Indicates an HTTP protocol error."""

    def __init__(self, url, errcode, errmsg, headers):
        # url: target of the failed request; errcode/errmsg: HTTP status
        # and reason; headers: the response headers object.
        Error.__init__(self)
        self.url = url
        self.errcode = errcode
        self.errmsg = errmsg
        self.headers = headers

    def __repr__(self):
        return '<ProtocolError for %s: %s %s>' % (
            self.url, self.errcode, self.errmsg)
class ResponseError(Error):
    """Indicates a broken response package."""
    # Raised by Unmarshaller.close() when the response is structurally bad.
    pass
class Fault(Error):
    """Indicates an XML-RPC fault package."""

    def __init__(self, faultCode, faultString, **extra):
        # Extra keyword arguments are accepted for compatibility but
        # deliberately ignored.
        Error.__init__(self)
        self.faultCode = faultCode
        self.faultString = faultString

    def __repr__(self):
        return '<Fault %s: %s>' % (
            self.faultCode, repr(self.faultString))
# Boolean support: on interpreters with a builtin bool, export it
# directly; otherwise install a wrapper class.  'True'/'False' are
# injected into the module namespace either way.
from sys import modules
mod_dict = modules[__name__].__dict__
if _bool_is_builtin:
    boolean = Boolean = bool
    mod_dict['True'] = True
    mod_dict['False'] = False
else:
    class Boolean:
        """Boolean-value wrapper.

        Use True or False to generate a "boolean" XML-RPC value.
        """

        def __init__(self, value=0):
            # Normalize any value to 0/1.
            self.value = operator.truth(value)

        def encode(self, out):
            # Serialize as an XML-RPC <boolean> element.
            out.write('<value><boolean>%d</boolean></value>\n' % self.value)

        def __cmp__(self, other):
            if isinstance(other, Boolean):
                other = other.value
            return cmp(self.value, other)

        def __repr__(self):
            if self.value:
                return '<Boolean True at %x>' % id(self)
            else:
                return '<Boolean False at %x>' % id(self)

        def __int__(self):
            return self.value

        def __nonzero__(self):
            return self.value

    mod_dict['True'] = Boolean(1)
    mod_dict['False'] = Boolean(0)

    def boolean(value, _truefalse=(
            False, True)):
        """Convert any Python value to XML-RPC 'boolean'."""
        return _truefalse[operator.truth(value)]

# Drop the temporaries used for the namespace injection above.
del modules
del mod_dict
def _strftime(value):
    """Format *value* as an ISO 8601 'YYYYMMDDTHH:MM:SS' string.

    Accepts a datetime.datetime, a time tuple / struct_time, or an
    integer timestamp (0 meaning "now").
    """
    if datetime:
        if isinstance(value, datetime.datetime):
            return '%04d%02d%02dT%02d:%02d:%02d' % (
                value.year, value.month, value.day,
                value.hour, value.minute, value.second)
    if not isinstance(value, (TupleType, time.struct_time)):
        if value == 0:
            value = time.time()
        value = time.localtime(value)
    return '%04d%02d%02dT%02d:%02d:%02d' % value[:6]
class DateTime:
    """DateTime wrapper for an ISO 8601 string or time tuple or
    localtime integer value to generate 'dateTime.iso8601' XML-RPC
    value.
    """

    def __init__(self, value=0):
        # Strings are stored verbatim; anything else is formatted via
        # _strftime (0 means "now").
        if isinstance(value, StringType):
            self.value = value
        else:
            self.value = _strftime(value)

    def make_comparable(self, other):
        # Normalize (self, other) into a comparable pair of values.
        if isinstance(other, DateTime):
            s = self.value
            o = other.value
        elif datetime and isinstance(other, datetime.datetime):
            s = self.value
            o = other.strftime('%Y%m%dT%H:%M:%S')
        elif isinstance(other, (str, unicode)):
            s = self.value
            o = other
        elif hasattr(other, 'timetuple'):
            s = self.timetuple()
            o = other.timetuple()
        else:
            otype = hasattr(other, '__class__') and other.__class__.__name__ or type(other)
            raise TypeError("Can't compare %s and %s" % (
                self.__class__.__name__, otype))
        return (s, o)

    def __lt__(self, other):
        s, o = self.make_comparable(other)
        return s < o

    def __le__(self, other):
        s, o = self.make_comparable(other)
        return s <= o

    def __gt__(self, other):
        s, o = self.make_comparable(other)
        return s > o

    def __ge__(self, other):
        s, o = self.make_comparable(other)
        return s >= o

    def __eq__(self, other):
        s, o = self.make_comparable(other)
        return s == o

    def __ne__(self, other):
        s, o = self.make_comparable(other)
        return s != o

    def timetuple(self):
        return time.strptime(self.value, '%Y%m%dT%H:%M:%S')

    def __cmp__(self, other):
        s, o = self.make_comparable(other)
        return cmp(s, o)

    def __str__(self):
        return self.value

    def __repr__(self):
        return '<DateTime %s at %x>' % (repr(self.value), id(self))

    def decode(self, data):
        # Accept raw XML character data, stripping surrounding whitespace.
        data = str(data)
        self.value = string.strip(data)

    def encode(self, out):
        out.write('<value><dateTime.iso8601>')
        out.write(self.value)
        out.write('</dateTime.iso8601></value>\n')
def _datetime(data):
    # Decode XML character data into a DateTime wrapper instance.
    value = DateTime()
    value.decode(data)
    return value
def _datetime_type(data):
    # Decode XML character data into a native datetime.datetime.
    t = time.strptime(data, '%Y%m%dT%H:%M:%S')
    return datetime.datetime(*tuple(t)[:6])
import base64
try:
import cStringIO as StringIO
except ImportError:
import StringIO
class Binary:
    """Wrapper for binary data, transported as base64 text."""

    def __init__(self, data=None):
        self.data = data

    def __str__(self):
        return self.data or ''

    def __cmp__(self, other):
        if isinstance(other, Binary):
            other = other.data
        return cmp(self.data, other)

    def decode(self, data):
        # data: base64-encoded text from the XML stream.
        self.data = base64.decodestring(data)

    def encode(self, out):
        out.write('<value><base64>\n')
        base64.encode(StringIO.StringIO(self.data), out)
        out.write('</base64></value>\n')
def _binary(data):
    # Decode base64 XML character data into a Binary wrapper instance.
    value = Binary()
    value.decode(data)
    return value
# Types that carry their own encode() method when marshalled.
WRAPPERS = (
    DateTime, Binary)
if not _bool_is_builtin:
    WRAPPERS = WRAPPERS + (Boolean,)

# Optional C accelerators from the _xmlrpclib extension module.
try:
    import _xmlrpclib
    FastParser = _xmlrpclib.Parser
    FastUnmarshaller = _xmlrpclib.Unmarshaller
except (AttributeError, ImportError):
    FastParser = FastUnmarshaller = None

try:
    import _xmlrpclib
    FastMarshaller = _xmlrpclib.Marshaller
except (AttributeError, ImportError):
    FastMarshaller = None

# Prefer expat when available; otherwise fall back to SlowParser below.
try:
    from xml.parsers import expat
    if not hasattr(expat, 'ParserCreate'):
        raise ImportError
except ImportError:
    ExpatParser = None
else:
    class ExpatParser:
        # Bridges expat parser events to the unmarshalling target.
        def __init__(self, target):
            self._parser = parser = expat.ParserCreate(None, None)
            self._target = target
            parser.StartElementHandler = target.start
            parser.EndElementHandler = target.end
            parser.CharacterDataHandler = target.data
            encoding = None
            if not parser.returns_unicode:
                encoding = 'utf-8'
            target.xml(encoding, None)
            return

        def feed(self, data):
            self._parser.Parse(data, 0)

        def close(self):
            # Signal end of input, then break parser/target cycles.
            self._parser.Parse('', 1)
            del self._target
            del self._parser
class SlowParser:
    """Default XML parser (based on xmllib.XMLParser)."""

    def __init__(self, target):
        import xmllib
        # xmllib is grafted in as a base class lazily so importing this
        # module does not require xmllib unless SlowParser is used.
        if xmllib.XMLParser not in SlowParser.__bases__:
            SlowParser.__bases__ = (
                xmllib.XMLParser,)
        self.handle_xml = target.xml
        self.unknown_starttag = target.start
        self.handle_data = target.data
        self.handle_cdata = target.data
        self.unknown_endtag = target.end
        try:
            xmllib.XMLParser.__init__(self, accept_utf8=1)
        except TypeError:
            # Older xmllib versions lack the accept_utf8 keyword.
            xmllib.XMLParser.__init__(self)
class Marshaller:
    """Generate an XML-RPC params chunk from a Python data structure.

    Create a Marshaller instance for each set of parameters, and use
    the "dumps" method to convert your data (represented as a tuple)
    to an XML-RPC params chunk. To write a fault response, pass a
    Fault instance instead. You may prefer to use the "dumps" module
    function for this purpose.
    """

    def __init__(self, encoding=None, allow_none=0):
        # memo tracks container ids currently being serialized, to
        # detect recursive (self-referencing) structures.
        self.memo = {}
        self.data = None
        self.encoding = encoding
        self.allow_none = allow_none
        return

    # Maps Python type -> the dump_* method that serializes it.
    dispatch = {}

    def dumps(self, values):
        # Serialize *values* (a tuple of parameters, or a Fault) to XML.
        out = []
        write = out.append
        dump = self.__dump
        if isinstance(values, Fault):
            # Fault instance: serialize as a <fault> response.
            write('<fault>\n')
            dump({'faultCode': values.faultCode,'faultString': values.faultString
                  }, write)
            write('</fault>\n')
        else:
            write('<params>\n')
            for v in values:
                write('<param>\n')
                dump(v, write)
                write('</param>\n')
            write('</params>\n')
        result = string.join(out, '')
        return result

    def __dump(self, value, write):
        # Route *value* to its type-specific serializer.
        try:
            f = self.dispatch[type(value)]
        except KeyError:
            try:
                value.__dict__
            except:
                raise TypeError, 'cannot marshal %s objects' % type(value)
            # NOTE(review): decompiled control flow -- this rejects any
            # instance whose class inherits from a dispatchable type;
            # confirm against the upstream xmlrpclib before changing.
            for type_ in type(value).__mro__:
                if type_ in self.dispatch.keys():
                    raise TypeError, 'cannot marshal %s objects' % type(value)

            f = self.dispatch[InstanceType]
        f(self, value, write)

    def dump_nil(self, value, write):
        # <nil/> is an XML-RPC extension; only legal with allow_none.
        if not self.allow_none:
            raise TypeError, 'cannot marshal None unless allow_none is enabled'
        write('<value><nil/></value>')
    dispatch[NoneType] = dump_nil

    def dump_int(self, value, write):
        # XML-RPC <int> is a signed 32-bit quantity.
        if value > MAXINT or value < MININT:
            raise OverflowError, 'int exceeds XML-RPC limits'
        write('<value><int>')
        write(str(value))
        write('</int></value>\n')
    dispatch[IntType] = dump_int

    if _bool_is_builtin:
        def dump_bool(self, value, write):
            write('<value><boolean>')
            write(value and '1' or '0')
            write('</boolean></value>\n')
        dispatch[bool] = dump_bool

    def dump_long(self, value, write):
        # Longs are emitted as <int>, so they must fit 32 bits too.
        if value > MAXINT or value < MININT:
            raise OverflowError, 'long int exceeds XML-RPC limits'
        write('<value><int>')
        write(str(int(value)))
        write('</int></value>\n')
    dispatch[LongType] = dump_long

    def dump_double(self, value, write):
        write('<value><double>')
        write(repr(value))
        write('</double></value>\n')
    dispatch[FloatType] = dump_double

    def dump_string(self, value, write, escape=escape):
        write('<value><string>')
        write(escape(value))
        write('</string></value>\n')
    dispatch[StringType] = dump_string

    if unicode:
        def dump_unicode(self, value, write, escape=escape):
            # Unicode strings are encoded to the packet encoding first.
            value = value.encode(self.encoding)
            write('<value><string>')
            write(escape(value))
            write('</string></value>\n')
        dispatch[UnicodeType] = dump_unicode

    def dump_array(self, value, write):
        i = id(value)
        if i in self.memo:
            raise TypeError, 'cannot marshal recursive sequences'
        self.memo[i] = None
        dump = self.__dump
        write('<value><array><data>\n')
        for v in value:
            dump(v, write)
        write('</data></array></value>\n')
        del self.memo[i]
        return
    dispatch[TupleType] = dump_array
    dispatch[ListType] = dump_array

    def dump_struct(self, value, write, escape=escape):
        i = id(value)
        if i in self.memo:
            raise TypeError, 'cannot marshal recursive dictionaries'
        self.memo[i] = None
        dump = self.__dump
        write('<value><struct>\n')
        for k, v in value.items():
            write('<member>\n')
            # Keys must be (or be encodable to) 8-bit strings.
            if type(k) is not StringType:
                if unicode and type(k) is UnicodeType:
                    k = k.encode(self.encoding)
                else:
                    raise TypeError, 'dictionary key must be string'
            write('<name>%s</name>\n' % escape(k))
            dump(v, write)
            write('</member>\n')
        write('</struct></value>\n')
        del self.memo[i]
        return
    dispatch[DictType] = dump_struct

    if datetime:
        def dump_datetime(self, value, write):
            write('<value><dateTime.iso8601>')
            write(_strftime(value))
            write('</dateTime.iso8601></value>\n')
        dispatch[datetime.datetime] = dump_datetime

    def dump_instance(self, value, write):
        # Wrapper types (DateTime, Binary, Boolean) encode themselves;
        # any other instance is marshalled as a struct of its __dict__.
        if value.__class__ in WRAPPERS:
            self.write = write
            value.encode(self)
            del self.write
        else:
            self.dump_struct(value.__dict__, write)
    dispatch[InstanceType] = dump_instance
class Unmarshaller:
    """Unmarshal an XML-RPC response, based on incoming XML event
    messages (start, data, end). Call close() to get the resulting
    data structure.

    Note that this reader is fairly tolerant, and gladly accepts bogus
    XML-RPC data without complaining (but not bogus XML).
    """

    def __init__(self, use_datetime=0):
        self._type = None        # 'params', 'fault' or 'methodName'
        self._stack = []         # values decoded so far
        self._marks = []         # stack offsets where containers began
        self._data = []          # character data of the current element
        self._methodname = None
        self._encoding = 'utf-8'
        self.append = self._stack.append
        self._use_datetime = use_datetime
        if use_datetime and not datetime:
            raise ValueError, 'the datetime module is not available'
        return

    def close(self):
        # Return the response tuple, or raise the decoded Fault.
        if self._type is None or self._marks:
            raise ResponseError()
        if self._type == 'fault':
            raise Fault(**self._stack[0])
        return tuple(self._stack)

    def getmethodname(self):
        return self._methodname

    def xml(self, encoding, standalone):
        self._encoding = encoding

    def start(self, tag, attrs):
        # Containers remember where their contents start on the stack.
        if tag == 'array' or tag == 'struct':
            self._marks.append(len(self._stack))
        self._data = []
        self._value = tag == 'value'

    def data(self, text):
        self._data.append(text)

    def end(self, tag, join=string.join):
        # Dispatch to the matching end_* handler with the joined text.
        try:
            f = self.dispatch[tag]
        except KeyError:
            pass
        else:
            return f(self, join(self._data, ''))

    def end_dispatch(self, tag, data):
        # Like end(), but the text is already joined (fast-parser path).
        try:
            f = self.dispatch[tag]
        except KeyError:
            pass
        else:
            return f(self, data)

    # Maps element tag -> end_* handler.
    dispatch = {}

    def end_nil(self, data):
        self.append(None)
        self._value = 0
        return
    dispatch['nil'] = end_nil

    def end_boolean(self, data):
        if data == '0':
            self.append(False)
        elif data == '1':
            self.append(True)
        else:
            raise TypeError, 'bad boolean value'
        self._value = 0
    dispatch['boolean'] = end_boolean

    def end_int(self, data):
        self.append(int(data))
        self._value = 0
    dispatch['i4'] = end_int
    dispatch['i8'] = end_int
    dispatch['int'] = end_int

    def end_double(self, data):
        self.append(float(data))
        self._value = 0
    dispatch['double'] = end_double

    def end_string(self, data):
        if self._encoding:
            data = _decode(data, self._encoding)
        self.append(_stringify(data))
        self._value = 0
    dispatch['string'] = end_string
    dispatch['name'] = end_string

    def end_array(self, data):
        mark = self._marks.pop()
        # Collapse everything pushed since the mark into one list.
        self._stack[mark:] = [
            self._stack[mark:]]
        self._value = 0
    dispatch['array'] = end_array

    def end_struct(self, data):
        mark = self._marks.pop()
        # Values since the mark alternate key, value, key, value, ...
        dict = {}
        items = self._stack[mark:]
        for i in range(0, len(items), 2):
            dict[_stringify(items[i])] = items[i + 1]
        self._stack[mark:] = [
            dict]
        self._value = 0
    dispatch['struct'] = end_struct

    def end_base64(self, data):
        value = Binary()
        value.decode(data)
        self.append(value)
        self._value = 0
    dispatch['base64'] = end_base64

    def end_dateTime(self, data):
        value = DateTime()
        value.decode(data)
        if self._use_datetime:
            value = _datetime_type(data)
        self.append(value)
    dispatch['dateTime.iso8601'] = end_dateTime

    def end_value(self, data):
        # A <value> with no explicit type element defaults to string.
        if self._value:
            self.end_string(data)
    dispatch['value'] = end_value

    def end_params(self, data):
        self._type = 'params'
    dispatch['params'] = end_params

    def end_fault(self, data):
        self._type = 'fault'
    dispatch['fault'] = end_fault

    def end_methodName(self, data):
        if self._encoding:
            data = _decode(data, self._encoding)
        self._methodname = data
        self._type = 'methodName'
    dispatch['methodName'] = end_methodName
class _MultiCallMethod:
    # Records one queued method call (name + args) on a shared call list.
    def __init__(self, call_list, name):
        self.__call_list = call_list
        self.__name = name

    def __getattr__(self, name):
        # Support dotted method names: multicall.foo.bar(...).
        return _MultiCallMethod(self.__call_list, '%s.%s' % (self.__name, name))

    def __call__(self, *args):
        self.__call_list.append((self.__name, args))
class MultiCallIterator:
    """Iterates over the results of a multicall. Exceptions are
    thrown in response to xmlrpc faults."""

    def __init__(self, results):
        self.results = results

    def __getitem__(self, i):
        item = self.results[i]
        # Per the system.multicall convention: a dict describes a fault,
        # a one-element list wraps a successful return value.
        if type(item) == type({}):
            raise Fault(item['faultCode'], item['faultString'])
        else:
            if type(item) == type([]):
                return item[0]
            raise ValueError, 'unexpected type in multicall result'
class MultiCall:
    """server -> a object used to boxcar method calls

    server should be a ServerProxy object.

    Methods can be added to the MultiCall using normal
    method call syntax e.g.:

    multicall = MultiCall(server_proxy)
    multicall.add(2,3)
    multicall.get_address("Guido")

    To execute the multicall, call the MultiCall object e.g.:

    add_result, address = multicall()
    """

    def __init__(self, server):
        self.__server = server
        self.__call_list = []    # queued (methodname, args) pairs

    def __repr__(self):
        return '<MultiCall at %x>' % id(self)

    __str__ = __repr__

    def __getattr__(self, name):
        return _MultiCallMethod(self.__call_list, name)

    def __call__(self):
        # Ship all queued calls in one system.multicall request.
        marshalled_list = []
        for name, args in self.__call_list:
            marshalled_list.append({'methodName': name,'params': args})
        return MultiCallIterator(self.__server.system.multicall(marshalled_list))
def getparser(use_datetime=0):
    """getparser() -> parser, unmarshaller

    Create an instance of the fastest available parser, and attach it
    to an unmarshalling object. Return both objects.
    """
    if use_datetime and not datetime:
        raise ValueError, 'the datetime module is not available'
    if FastParser and FastUnmarshaller:
        # C-accelerated path: both parser and unmarshaller available.
        if use_datetime:
            mkdatetime = _datetime_type
        else:
            mkdatetime = _datetime
        target = FastUnmarshaller(True, False, _binary, mkdatetime, Fault)
        parser = FastParser(target)
    else:
        target = Unmarshaller(use_datetime=use_datetime)
        # Pick the best available parser: fast C, expat, or xmllib.
        if FastParser:
            parser = FastParser(target)
        elif ExpatParser:
            parser = ExpatParser(target)
        else:
            parser = SlowParser(target)
    return (
        parser, target)
def dumps(params, methodname=None, methodresponse=None, encoding=None, allow_none=0):
    """data [,options] -> marshalled data

    Convert an argument tuple or a Fault instance to an XML-RPC
    request (or response, if the methodresponse option is used).

    In addition to the data object, the following options can be given
    as keyword arguments:

        methodname: the method name for a methodCall packet

        methodresponse: true to create a methodResponse packet.
        If this option is used with a tuple, the tuple must be
        a singleton (i.e. it can contain only one element).

        encoding: the packet encoding (default is UTF-8)

    All 8-bit strings in the data structure are assumed to use the
    packet encoding. Unicode strings are automatically converted,
    where necessary.
    """
    if isinstance(params, Fault):
        methodresponse = 1
    elif methodresponse and isinstance(params, TupleType):
        # NOTE(review): upstream xmlrpclib asserts len(params) == 1 here;
        # the decompiled copy dropped that check.
        pass
    if not encoding:
        encoding = 'utf-8'
    if FastMarshaller:
        m = FastMarshaller(encoding)
    else:
        m = Marshaller(encoding, allow_none)
    data = m.dumps(params)
    if encoding != 'utf-8':
        xmlheader = "<?xml version='1.0' encoding='%s'?>\n" % str(encoding)
    else:
        # utf-8 is the XML default; omit the encoding declaration.
        xmlheader = "<?xml version='1.0'?>\n"
    if methodname:
        # A method call packet.
        if not isinstance(methodname, StringType):
            methodname = methodname.encode(encoding)
        data = (xmlheader,
                '<methodCall>\n<methodName>',
                methodname, '</methodName>\n',
                data,
                '</methodCall>\n')
    elif methodresponse:
        # A method response, or a fault structure.
        data = (
            xmlheader,
            '<methodResponse>\n',
            data,
            '</methodResponse>\n')
    else:
        return data
    return string.join(data, '')
def loads(data, use_datetime=0):
    """data -> unmarshalled data, method name

    Convert an XML-RPC packet to unmarshalled data plus a method
    name (None if not present).

    If the XML-RPC packet represents a fault condition, this function
    raises a Fault exception.
    """
    p, u = getparser(use_datetime=use_datetime)
    p.feed(data)
    p.close()
    return (
        u.close(), u.getmethodname())
def gzip_encode(data):
    """data -> gzip encoded data

    Encode data using the gzip content encoding as described in RFC 1952
    """
    if not gzip:
        raise NotImplementedError
    f = StringIO.StringIO()
    # compresslevel=1 favours speed over ratio for request bodies.
    gzf = gzip.GzipFile(mode='wb', fileobj=f, compresslevel=1)
    gzf.write(data)
    # Close the GzipFile first so the trailer is flushed into f.
    gzf.close()
    encoded = f.getvalue()
    f.close()
    return encoded
def gzip_decode(data):
    """gzip encoded data -> unencoded data

    Decode data using the gzip content encoding as described in RFC 1952.

    :raises NotImplementedError: when the gzip module is unavailable
    :raises ValueError: when *data* is not valid gzip data
    """
    if not gzip:
        raise NotImplementedError
    f = StringIO.StringIO(data)
    gzf = gzip.GzipFile(mode='rb', fileobj=f)
    try:
        try:
            decoded = gzf.read()
        except IOError:
            raise ValueError('invalid data')
    finally:
        # BUG FIX: the original closed the file objects only on success,
        # leaking both buffers when the payload was malformed.
        gzf.close()
        f.close()
    return decoded
class GzipDecodedResponse(gzip.GzipFile if gzip else object):
    """a file-like object to decode a response encoded with the gzip
    method, as described in RFC 1952.
    """

    def __init__(self, response):
        if not gzip:
            raise NotImplementedError
        # The whole response is buffered first: GzipFile needs a
        # seekable file object to read from.
        self.stringio = StringIO.StringIO(response.read())
        gzip.GzipFile.__init__(self, mode='rb', fileobj=self.stringio)

    def close(self):
        # Close the gzip layer, then release the underlying buffer.
        gzip.GzipFile.close(self)
        self.stringio.close()
class _Method:
    # Binds an XML-RPC method name to a request sender: attribute access
    # builds dotted method names, calling issues the request.
    def __init__(self, send, name):
        self.__send = send
        self.__name = name

    def __getattr__(self, name):
        return _Method(self.__send, '%s.%s' % (self.__name, name))

    def __call__(self, *args):
        return self.__send(self.__name, args)
class Transport:
    """Handles an HTTP transaction to an XML-RPC server."""

    user_agent = 'xmlrpclib.py/%s (by www.pythonware.com)' % __version__
    # Advertise gzip response support when the gzip module is available.
    accept_gzip_encoding = True
    # Request bodies longer than this many bytes are gzip-compressed;
    # None disables request compression.
    encode_threshold = None

    def __init__(self, use_datetime=0):
        self._use_datetime = use_datetime
        self._connection = (None, None)   # (host key, httplib connection)
        self._extra_headers = []
        return None

    def request(self, host, handler, request_body, verbose=0):
        # Retry once when a cached keep-alive connection was dropped by
        # the peer (connection reset/aborted/broken pipe, bad status line).
        for i in (0, 1):
            try:
                return self.single_request(host, handler, request_body, verbose)
            except socket.error as e:
                if i or e.errno not in (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE):
                    raise
            except httplib.BadStatusLine:
                if i:
                    raise

    def single_request(self, host, handler, request_body, verbose=0):
        # Issue one HTTP POST and parse the XML-RPC response.
        h = self.make_connection(host)
        if verbose:
            h.set_debuglevel(1)
        try:
            self.send_request(h, handler, request_body)
            self.send_host(h, host)
            self.send_user_agent(h)
            self.send_content(h, request_body)
            response = h.getresponse(buffering=True)
            if response.status == 200:
                self.verbose = verbose
                return self.parse_response(response)
        except Fault:
            raise
        except Exception:
            # Discard the possibly broken connection before re-raising.
            self.close()
            raise

        # Non-200: drain the body so the connection stays usable, then fail.
        if response.getheader('content-length', 0):
            response.read()
        raise ProtocolError(host + handler, response.status, response.reason, response.msg)

    def getparser(self):
        return getparser(use_datetime=self._use_datetime)

    def get_host_info(self, host):
        # host may be a (host, x509-dict) tuple or a 'user:pass@host' string.
        x509 = {}
        if isinstance(host, TupleType):
            host, x509 = host
        import urllib
        auth, host = urllib.splituser(host)
        if auth:
            import base64
            auth = base64.encodestring(urllib.unquote(auth))
            # Strip the newlines base64.encodestring inserts.
            auth = string.join(string.split(auth), '')
            extra_headers = [
                (
                    'Authorization', 'Basic ' + auth)]
        else:
            extra_headers = None
        return (
            host, extra_headers, x509)

    def make_connection(self, host):
        # Reuse the cached keep-alive connection for the same host.
        if self._connection and host == self._connection[0]:
            return self._connection[1]
        chost, self._extra_headers, x509 = self.get_host_info(host)
        self._connection = (
            host, httplib.HTTPConnection(chost))
        return self._connection[1]

    def close(self):
        if self._connection[1]:
            self._connection[1].close()
            self._connection = (None, None)
        return None

    def send_request(self, connection, handler, request_body):
        if self.accept_gzip_encoding and gzip:
            connection.putrequest('POST', handler, skip_accept_encoding=True)
            connection.putheader('Accept-Encoding', 'gzip')
        else:
            connection.putrequest('POST', handler)

    def send_host(self, connection, host):
        # Despite the name, this emits the cached extra headers (e.g.
        # Authorization); httplib sends the Host header itself.
        extra_headers = self._extra_headers
        if extra_headers:
            if isinstance(extra_headers, DictType):
                extra_headers = extra_headers.items()
            for key, value in extra_headers:
                connection.putheader(key, value)

    def send_user_agent(self, connection):
        connection.putheader('User-Agent', self.user_agent)

    def send_content(self, connection, request_body):
        connection.putheader('Content-Type', 'text/xml')
        # Optionally gzip the request body past the configured threshold.
        if self.encode_threshold is not None and self.encode_threshold < len(request_body) and gzip:
            connection.putheader('Content-Encoding', 'gzip')
            request_body = gzip_encode(request_body)
        connection.putheader('Content-Length', str(len(request_body)))
        connection.endheaders(request_body)
        return

    def parse_response(self, response):
        # Wrap gzip-encoded responses in a decoding stream.
        if hasattr(response, 'getheader'):
            if response.getheader('Content-Encoding', '') == 'gzip':
                stream = GzipDecodedResponse(response)
            else:
                stream = response
        else:
            stream = response
        p, u = self.getparser()
        while 1:
            data = stream.read(1024)
            if not data:
                break
            if self.verbose:
                print 'body:', repr(data)
            p.feed(data)
        if stream is not response:
            stream.close()
        p.close()
        return u.close()
class SafeTransport(Transport):
    """Handles an HTTPS transaction to an XML-RPC server."""

    def make_connection(self, host):
        # Same connection caching as Transport, but over HTTPS.
        if self._connection and host == self._connection[0]:
            return self._connection[1]
        else:
            try:
                HTTPS = httplib.HTTPSConnection
            except AttributeError:
                raise NotImplementedError("your version of httplib doesn't support HTTPS")
            else:
                # x509 carries client-certificate keywords from get_host_info.
                chost, self._extra_headers, x509 = self.get_host_info(host)
                self._connection = (host, HTTPS(chost, None, **(x509 or {})))
                return self._connection[1]
        return
class ServerProxy:
    """uri [,options] -> a logical connection to an XML-RPC server

    uri is the connection point on the server, given as
    scheme://host/target.

    The standard implementation always supports the "http" scheme. If
    SSL socket support is available (Python 2.0), it also supports
    "https".

    If the target part and the slash preceding it are both omitted,
    "/RPC2" is assumed.

    The following options can be given as keyword arguments:

        transport: a transport factory
        encoding: the request encoding (default is UTF-8)

    All 8-bit strings passed to the server proxy are assumed to use
    the given encoding.
    """

    def __init__(self, uri, transport=None, encoding=None, verbose=0, allow_none=0, use_datetime=0):
        # Parse the URI and select an HTTP or HTTPS transport.
        import urllib
        type, uri = urllib.splittype(uri)
        if type not in ('http', 'https'):
            raise IOError, 'unsupported XML-RPC protocol'
        self.__host, self.__handler = urllib.splithost(uri)
        if not self.__handler:
            self.__handler = '/RPC2'
        if transport is None:
            if type == 'https':
                transport = SafeTransport(use_datetime=use_datetime)
            else:
                transport = Transport(use_datetime=use_datetime)
        self.__transport = transport
        self.__encoding = encoding
        self.__verbose = verbose
        self.__allow_none = allow_none
        return

    def __close(self):
        self.__transport.close()

    def __request(self, methodname, params):
        # Marshal the call, send it, and unwrap single-value responses.
        request = dumps(params, methodname, encoding=self.__encoding, allow_none=self.__allow_none)
        response = self.__transport.request(self.__host, self.__handler, request, verbose=self.__verbose)
        if len(response) == 1:
            response = response[0]
        return response

    def __repr__(self):
        return '<ServerProxy for %s%s>' % (
            self.__host, self.__handler)

    __str__ = __repr__

    def __getattr__(self, name):
        # Magic method dispatcher: proxy.foo.bar(...) becomes a request.
        return _Method(self.__request, name)

    def __call__(self, attr):
        """A workaround to get special attributes on the ServerProxy
        without interfering with the magic __getattr__
        """
        if attr == 'close':
            return self.__close
        if attr == 'transport':
            return self.__transport
        raise AttributeError('Attribute %r not found' % (attr,))

# Backwards-compatible alias.
Server = ServerProxy
if __name__ == '__main__':
    # Self-test: query a public XML-RPC time server directly and via a
    # boxcarred multicall (requires network access).
    server = ServerProxy('http://time.xmlrpc.com/RPC2')
    print server
    try:
        print server.currentTime.getCurrentTime()
    except Error as v:
        print 'ERROR',
        print v
    multi = MultiCall(server)
    multi.currentTime.getCurrentTime()
    multi.currentTime.getCurrentTime()
    try:
        for response in multi():
            print response
    except Error as v:
        print 'ERROR', v
|
|
from celery.schedules import crontab
import djcelery
from django.conf.global_settings import EMAIL_BACKEND
import os, sys, logging
import subprocess
###############################
#            MISC             #
##############################
# Directory containing this settings module; used to build absolute paths.
ROOT_PATH = os.path.dirname(__file__)
def to_absolute_path(path):
    """Resolve *path*, taken relative to this settings file, into a
    real (symlink-free) absolute path."""
    joined = os.path.join(ROOT_PATH, path)
    return os.path.realpath(joined)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DONATION_DEBUG = True #So we don't have to rely on django's debug.
BLOCK_FB_POSTS = True
#ROOT_PATH = os.path.dirname(__file__)
EXTRA_PATHS = [
'lib',
]
for path in EXTRA_PATHS:
path = to_absolute_path(path)
if path not in sys.path:
sys.path.append(path)
PROXY_SERVER = "PROXY_SERVER"
IGNORE_HTTPS = False
###############################
# CAMPAIGN SETTINGS #
##############################
MAX_PAYMENT_RETRIES = 1
PAYMENT_RETRY_SCHEDULE = [1, 3, 7]
JUMOEIN = ""
###############################
# ADMIN SETTINGS #
##############################
ADMINS = (
('Jumo Site Error', 'EMAIL@HERE'),
)
MANAGERS = ADMINS
###############################
# STATIC SETTINGS #
##############################
SERVE_STATIC_FILES = False
STATIC_URL = ''
NO_STATIC_HASH = False
###############################
# DB SETTINGS #
##############################
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'jumodjango',
'USER': 'jumo',
'PASSWORD': 'DB_PASSWORD',
'HOST': '',
'PORT': '',
},
}
#Map the db name to path of matching schema file.
DATABASE_CREATE_SCHEMAS = {
'default':to_absolute_path('data/schema/jumodjango.schema'),
}
###############################
# SOLR SETTINGS #
##############################
SOLR_CONN = 'http://SOLRSERVER:8983/solr'
###############################
# DISQUS SETTINGS #
##############################
DISQUS_API_VERSION = '3.0'
DISQUS_FORUM_NAME = 'jumoprod'
DISQUS_SECRET_KEY = 'SOME_DISQUS_SECRET_KEY' #jumo_prod_app
DISQUS_PUBLIC_KEY = 'SOME_DISQUS_PUBLIC_KEY' #jumo_prod_app
DISQUS_DEV_MODE = 0 # 1 for dev, 0 for prod and stage
###############################
# EMAIL SETTINGS #
##############################
DEFAULT_FROM_EMAIL = 'FROM@USER'
EMAIL_HOST = ''
EMAIL_PORT = 25
EMAIL_HOST_USER = 'EMAIL@HOSTUSER'
EMAIL_HOST_PASSWORD = 'SOME_EMAIL_HOST_PASSWORD'
EMAIL_USER_TLS = False
CELERY_EMAIL_BACKEND = EMAIL_BACKEND
EMAIL_REAL_PEOPLE = False
CRYPTO_SECRET = r'CRYPTO_SECRET_HERE'
###############################
# CELERY SETTINGS #
##############################
# AMQP setup for Celery
BROKER_HOST = ""
BROKER_PORT = 5672
BROKER_USER = "jumo"
BROKER_PASSWORD = "SOME_BROKER_PASSWORD"
BROKER_VHOST = "/"
CELERY_DEFAULT_QUEUE = "now"
CELERY_QUEUES = {
"now": {
"binding_key": "task.#",
},
"deferred": {
"binding_key": "deferred.#",
},
"billing": {
"binding_key": "billing.#",
},
}
CELERY_DEFAULT_EXCHANGE = "tasks"
CELERY_DEFAULT_EXCHANGE_TYPE = "topic"
CELERY_DEFAULT_ROUTING_KEY = "task.default"
CELERY_ROUTES = {"mailer.reader_tasks.send_jumo_reader_email":
{"queue": "deferred",
"routing_key": "deferred.reader"
},
"donation.tasks.process_donation":
{"queue": "billing",
"routing_key": "billing.process_donation"}
}
CELERY_IMPORTS = ('mailer.notification_tasks',
'mailer.reader_tasks',
'donation.tasks',
'mailer.messager_tasks',)
###############################
#  DJANGO SETTINGS  #
##############################
CONSOLE_MIDDLEWARE_DEBUGGER = True
APPEND_SLASH = False
#SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
# Local memcached with a 24-hour default timeout.
CACHE_BACKEND = 'memcached://127.0.0.1:11211?timeout=86400'
AUTHENTICATION_BACKENDS = (
    'etc.backend.JumoBackend',
)
TIME_ZONE = 'America/New_York'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1337
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = to_absolute_path('static')
MEDIA_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/media/admin/'
HTTP_HOST = 'www.ogbon.com'
SECRET_KEY = 'SOME_SECRET_KEY_HERE'
MIDDLEWARE_CLASSES = (
    'etc.middleware.SSLMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    #'django.contrib.auth.middleware.AuthenticationMiddleware',
    #'django.contrib.messages.middleware.MessageMiddleware',
    'etc.middleware.DetectUserMiddleware',
    'etc.middleware.SourceTagCollectionMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware',
    'etc.middleware.AddExceptionMessageMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
    to_absolute_path('templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.debug',
    'django.core.context_processors.auth',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.request',
    'etc.context_processors.general',
)
# NOTE(review): 'cust_admin' is listed twice below -- probably unintentional;
# confirm no code depends on it before deduplicating.
INSTALLED_APPS = (
    'grappelli',
    'djcelery',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    #'django.contrib.sessions',
    'django.contrib.sites',
    #'django.contrib.messages',
    'django.contrib.admin',
    'django.contrib.humanize',
    'cust_admin',
    'users',
    'issue',
    'org',
    'data',
    'cust_admin',
    'etc',
    'api',
    'lib',
    'search',
    'utils',
    'mailer',
    'donation',
    'message',
    'sourcing',
    'popularity',
    'django_jenkins',
    'tastypie',
    'action',
    'entity_items',
    'commitment',
    'debug_toolbar',
    'discovery',
)
###############################
#  API SETTINGS  #
##############################
API_VERSION = 'v1'

###############################
#  TESTING SETTINGS  #
##############################
FIXTURE_DIRS = ("data/fixtures/",)
TEST_RUNNER = 'jumodjango.test.test_runner.JumoTestSuiteRunner'
JENKINS_TEST_RUNNER = 'jumodjango.test.test_runner.JumoTestSuiteRunner'
EXCLUDED_TEST_PACKAGES = ['django',]
# Apps that django-jenkins runs its reports/coverage against.
PROJECT_APPS = (
    'users',
    'issue',
    'org',
    'mailer',
    'donation',
    'message',
    'sourcing',
    'popularity',
)

###############################
#  API KEY SETTINGS  #
##############################
# Placeholder credentials; real values are expected to be supplied via
# local_settings overrides (imported at the bottom of this module).
MIXPANEL_TOKEN = 'SOME_MIXPANEL_TOKEN'
FACEBOOK_APP_ID = 'SOME_FACEBOOK_APP_ID'
FACEBOOK_API_KEY = 'SOME_FACEBOOK_API_KEY'
FACEBOOK_SECRET = 'SOME_FACEBOOK_SECRET'
FACEBOOK_ACCESS_TOKEN = 'SOME_FACEBOOK_ACCESS_TOKEN'
AWS_ACCESS_KEY = 'SOME_AWS_ACCESS_KEY'
AWS_SECRET_KEY = 'SOME_AWS_SECRET'
AWS_PHOTO_UPLOAD_BUCKET = "jumoimgs"

###############################################################
# DATAMINE SETTINGS - serve miner.views if IS_DATAMINE is True
###############################################################
IS_DATAMINE = False

###############################
# DATA SCIENCE TOOLKIT SETTINGS
###############################
# Use their AMI in production,
DSTK_API_BASE = "http://DSTK_HOST"

##############################
# DATAMINE SERVER
##############################
DATAMINE_BASE = "http://DATAMINE_HOST"

##############################
# LOGGER SETTINGS
##############################
LOG_DIR = '/cloud/logs/'

###############################
# DEBUG TOOLBAR SETTINGS
###############################
INTERNAL_IPS = ('127.0.0.1',)
# Toolbar is effectively disabled: the show-toolbar callback always returns False.
DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,
    'SHOW_TOOLBAR_CALLBACK': lambda x: False
}
DEBUG_TOOLBAR_PANELS = (
    'debug_toolbar.panels.version.VersionDebugPanel',
    'debug_toolbar.panels.timer.TimerDebugPanel',
    'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
    'debug_toolbar.panels.headers.HeaderDebugPanel',
    'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
    'debug_toolbar.panels.template.TemplateDebugPanel',
    'debug_toolbar.panels.sql.SQLDebugPanel',
    'debug_toolbar.panels.signals.SignalDebugPanel',
    'debug_toolbar.panels.logger.LoggingPanel',
)
###############################
#  LOCAL SETTINGS  #
##############################
# Per-machine overrides; a missing local_settings module is not an error.
try:
    from local_settings import *
except ImportError:
    pass

# NOTE(review): NO_STATIC_HASH, DEBUG, and logging are expected to be defined
# earlier in this module or by local_settings; a NameError is raised here
# otherwise -- confirm.
if NO_STATIC_HASH:
    ASSET_HASH = 'abcdefg'
else:
    # Use the short git SHA of HEAD as a cache-busting asset fingerprint.
    import git
    repo = git.Repo(to_absolute_path('.'), odbt=git.GitCmdObjectDB)
    ASSET_HASH = repo.head.commit.hexsha[0:7]
    del(repo)

if IS_DATAMINE:
    # Datamine hosts additionally serve miner.views under gunicorn.
    INSTALLED_APPS += ('miner',
                       'gunicorn')

RELATED_SEARCH_MODEL_BASE_DIR = '/cloud/data'

LOG_LEVEL = logging.DEBUG if DEBUG else logging.INFO
LOG_FORMAT = '%(asctime)s %(levelname)s %(message)s'
logging.basicConfig(level=LOG_LEVEL, format=LOG_FORMAT)
log = logging.getLogger('jumo')
|
|
from __future__ import print_function, absolute_import, division
import pytest
import warnings
from astropy.io import fits
import numpy as np
from ..wcs_utils import (WCS, drop_axis, wcs_swapaxes, add_stokes_axis_to_wcs,
axis_names, slice_wcs, check_equality, strip_wcs_from_header)
from . import path
def test_wcs_dropping():
    """Dropping axis i removes entry i from the PC (or CD) diagonal."""
    wcs = WCS(naxis=4)
    wcs.wcs.pc = np.zeros([4, 4])
    np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5))
    pc = wcs.wcs.pc  # reused below for the CD-matrix variant

    cases = [
        (0, [2, 3, 4]),
        (1, [1, 3, 4]),
        (2, [1, 2, 4]),
        (3, [1, 2, 3]),
    ]
    for axis, diag in cases:
        reduced = drop_axis(wcs, axis)
        assert np.all(reduced.wcs.get_pc().diagonal() == np.array(diag))

    # Same behaviour when the transform is stored as a CD matrix.
    wcs = WCS(naxis=4)
    wcs.wcs.cd = pc
    for axis, diag in cases:
        reduced = drop_axis(wcs, axis)
        assert np.all(reduced.wcs.get_pc().diagonal() == np.array(diag))
def test_wcs_swapping():
    """Swapping two axes permutes the PC (or CD) diagonal accordingly."""
    wcs = WCS(naxis=4)
    wcs.wcs.pc = np.zeros([4, 4])
    np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5))
    pc = wcs.wcs.pc  # reused below for the CD-matrix variant

    cases = [
        ((0, 1), [2, 1, 3, 4]),
        ((0, 3), [4, 2, 3, 1]),
        ((2, 3), [1, 2, 4, 3]),
    ]
    for (ax1, ax2), diag in cases:
        swapped = wcs_swapaxes(wcs, ax1, ax2)
        assert np.all(swapped.wcs.get_pc().diagonal() == np.array(diag))

    # Same behaviour when the transform is stored as a CD matrix.
    wcs = WCS(naxis=4)
    wcs.wcs.cd = pc
    for (ax1, ax2), diag in cases:
        swapped = wcs_swapaxes(wcs, ax1, ax2)
        assert np.all(swapped.wcs.get_pc().diagonal() == np.array(diag))
def test_add_stokes():
    """Inserting a Stokes axis at any of the four positions yields naxis == 4."""
    base = WCS(naxis=3)
    for position in (0, 1, 2, 3):
        augmented = add_stokes_axis_to_wcs(base, position)
        assert augmented.wcs.naxis == 4
def test_axis_names(data_adv, data_vad):
    """axis_names follows the CTYPE ordering of each header fixture."""
    assert axis_names(WCS(str(data_adv))) == ['RA', 'DEC', 'VOPT']
    assert axis_names(WCS(str(data_vad))) == ['VOPT', 'RA', 'DEC']
def test_wcs_slice():
    """Slicing shifts CRPIX by each slice's start offset."""
    wcs = WCS(naxis=3)
    wcs.wcs.crpix = [50., 45., 30.]
    view = (slice(10, 20), slice(None), slice(20, 30))
    sliced = slice_wcs(wcs, view)
    np.testing.assert_allclose(sliced.wcs.crpix, [30., 45., 20.])
def test_wcs_slice_reversal():
    # Reversing an axis (step -1) must flip the world coordinates: pixel i of
    # the sliced WCS maps to the world value of pixel (n-1-i) of the original.
    wcs = WCS(naxis=3)
    wcs.wcs.crpix = [50., 45., 30.]
    wcs.wcs.crval = [0., 0., 0.]
    wcs.wcs.cdelt = [1., 1., 1.]
    wcs_new = slice_wcs(wcs, (slice(None, None, -1), slice(None), slice(None)),
                        shape=[100., 150., 200.])
    spaxis = wcs.sub([0]).wcs_pix2world(np.arange(100), 0)
    new_spaxis = wcs_new.sub([0]).wcs_pix2world(np.arange(100), 0)
    np.testing.assert_allclose(spaxis, new_spaxis[::-1])
def test_reversal_roundtrip():
    # Reversing an axis repeatedly must stay self-consistent: each single
    # reversal flips the spectral axis, and a double reversal is equivalent
    # (though not bit-identical) to no reversal at all.
    wcs = WCS(naxis=3)
    wcs.wcs.crpix = [50., 45., 30.]
    wcs.wcs.crval = [0., 0., 0.]
    wcs.wcs.cdelt = [1., 1., 1.]
    wcs_new = slice_wcs(wcs, (slice(None, None, -1), slice(None), slice(None)),
                        shape=[100., 150., 200.])
    spaxis = wcs.sub([0]).wcs_pix2world(np.arange(100), 0)
    new_spaxis = wcs_new.sub([0]).wcs_pix2world(np.arange(100), 0)
    np.testing.assert_allclose(spaxis, new_spaxis[::-1])
    re_reverse = slice_wcs(wcs_new, (slice(None, None, -1), slice(None), slice(None)),
                           shape=[100., 150., 200.])
    new_spaxis = re_reverse.sub([0]).wcs_pix2world(np.arange(100), 0)
    np.testing.assert_allclose(spaxis, new_spaxis[::-1])

    #These are NOT equal, but they are equivalent: CRVAL and CRPIX are shifted
    #by an acceptable amount
    # assert check_equality(wcs, re_reverse)

    re_re_reverse = slice_wcs(re_reverse, (slice(None, None, -1), slice(None),
                                           slice(None)),
                              shape=[100., 150., 200.])
    re_re_re_reverse = slice_wcs(re_re_reverse, (slice(None, None, -1),
                                                 slice(None), slice(None)),
                                 shape=[100., 150., 200.])
    # Triple reversal must equal a single reversal exactly.
    assert check_equality(re_re_re_reverse, re_reverse)
def test_wcs_comparison():
    """check_equality ignores dtype but honours wcs_tolerance for CRPIX diffs."""
    def make(crpix, dtype):
        w = WCS(naxis=3)
        w.wcs.crpix = np.array(crpix, dtype=dtype)
        return w

    wcs1 = make([50., 45., 30.], 'float32')
    wcs2 = make([50., 45., 30.], 'float64')
    wcs3 = make([50., 45., 31.], 'float64')
    wcs4 = make([50., 45., 30.0001], 'float64')

    assert check_equality(wcs1, wcs2)      # dtype alone does not matter
    assert not check_equality(wcs1, wcs3)  # off by a whole pixel
    assert check_equality(wcs1, wcs3, wcs_tolerance=1.0e1)
    assert not check_equality(wcs1, wcs4)  # off by 1e-4
    assert check_equality(wcs1, wcs4, wcs_tolerance=1e-3)
@pytest.mark.parametrize('fn', ('cubewcs1.hdr', 'cubewcs2.hdr'))
def test_strip_wcs(fn):
    # Stripping WCS keywords must give the same result whether or not the
    # header text file contains blank lines.
    header1 = fits.Header.fromtextfile(path(fn))
    header1_stripped = strip_wcs_from_header(header1)

    # Write a copy of the header with two blank lines injected.
    with open(path(fn), 'r') as fh:
        hdrlines = fh.readlines()

    newfn = fn.replace('.hdr', '_blanks.hdr')
    hdrlines.insert(-20, "\n")
    hdrlines.insert(-1, "\n")
    with open(path(newfn), 'w') as fh:
        fh.writelines(hdrlines)

    header2 = fits.Header.fromtextfile(path(newfn))
    header2_stripped = strip_wcs_from_header(header2)

    assert header1_stripped == header2_stripped
def test_wcs_slice_unmatched_celestial():
    # Removing one half of a celestial (RA/Dec) pair must emit a warning,
    # since the remaining axis can no longer be interpreted on the sky.
    wcs = WCS(naxis=3)
    wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ']
    wcs.wcs.crpix = [50., 45., 30.]

    # drop RA
    with warnings.catch_warnings(record=True) as wrn:
        wcs_new = drop_axis(wcs, 0)
        assert 'is being removed' in str(wrn[-1].message)

    # drop Dec
    with warnings.catch_warnings(record=True) as wrn:
        wcs_new = drop_axis(wcs, 1)
        assert 'is being removed' in str(wrn[-1].message)

    # Slicing away a degenerate Dec axis (integer index) warns as well.
    with warnings.catch_warnings(record=True) as wrn:
        wcs_new = slice_wcs(wcs, (slice(10, 20), 0, slice(20, 30)),
                            drop_degenerate=True)
        assert 'is being removed' in str(wrn[-1].message)
def test_wcs_downsampling():
    """
    Regression tests for #525

    These are a series of simple tests I verified with pen and paper, but it's
    always worth checking me again.
    """
    wcs = WCS(naxis=1)
    wcs.wcs.ctype = ['FREQ', ]
    wcs.wcs.crpix = [1., ]

    # (slice applied, expected CRPIX of the resulting WCS)
    cases = [
        (slice(0, None, 1), 1),
        (slice(0, None, 2), 0.75),
        (slice(0, None, 4), 0.625),
        (slice(2, None, 1), -1),
        (slice(2, None, 2), -0.25),
        (slice(2, None, 4), 0.125),
    ]
    for view, expected_crpix in cases:
        assert slice_wcs(wcs, view).wcs.crpix[0] == expected_crpix
|
|
"""Module/script to byte-compile all .py files to .pyc (or .pyo) files.
When called as a script with arguments, this compiles the directories
given as arguments recursively; the -l option prevents it from
recursing into directories.
Without arguments, if compiles all modules on sys.path, without
recursing into subdirectories. (Even though it should do so for
packages -- for now, you'll have to deal with packages separately.)
See module py_compile for details of the actual byte-compilation.
"""
import os
import sys
import errno
import importlib.util
import py_compile
import struct
__all__ = ["compile_dir","compile_file","compile_path"]
def compile_dir(dir, maxlevels=10, ddir=None, force=False, rx=None,
                quiet=False, legacy=False, optimize=-1):
    """Byte-compile all modules in the given directory tree.

    Arguments (only dir is required):

    dir:       the directory to byte-compile
    maxlevels: maximum recursion level (default 10)
    ddir:      the directory that will be prepended to the path to the
               file as it is compiled into each byte-code file.
    force:     if True, force compilation, even if timestamps are up-to-date
    rx:        if given, a compiled regex of paths to skip
    quiet:     if True, be quiet during compilation
    legacy:    if True, produce legacy pyc paths instead of PEP 3147 paths
    optimize:  optimization level or -1 for level of the interpreter

    Returns 1 if every file compiled successfully, 0 otherwise.
    """
    if not quiet:
        print('Listing {!r}...'.format(dir))
    try:
        names = os.listdir(dir)
    except OSError:
        print("Can't list {!r}".format(dir))
        names = []
    names.sort()
    success = 1
    for name in names:
        if name == '__pycache__':
            # Never descend into byte-code cache directories.
            continue
        fullname = os.path.join(dir, name)
        if ddir is not None:
            dfile = os.path.join(ddir, name)
        else:
            dfile = None
        if not os.path.isdir(fullname):
            if not compile_file(fullname, ddir, force, rx, quiet,
                                legacy, optimize):
                success = 0
        elif (maxlevels > 0 and name != os.curdir and name != os.pardir and
              not os.path.islink(fullname)):
            # fullname is already known to be a directory here (the branch
            # above handled everything else), so the original re-check of
            # os.path.isdir was redundant and has been dropped. Symlinked
            # directories are skipped to avoid cycles.
            if not compile_dir(fullname, maxlevels - 1, dfile, force, rx,
                               quiet, legacy, optimize):
                success = 0
    return success
def compile_file(fullname, ddir=None, force=False, rx=None, quiet=False,
                 legacy=False, optimize=-1):
    """Byte-compile one file.

    Arguments (only fullname is required):

    fullname: the file to byte-compile
    ddir:     if given, the directory name compiled in to the
              byte-code file.
    force:    if True, force compilation, even if timestamps are up-to-date
    rx:       if given, a compiled regex; matching paths are skipped
    quiet:    if True, be quiet during compilation
    legacy:   if True, produce legacy pyc paths instead of PEP 3147 paths
    optimize: optimization level or -1 for level of the interpreter

    Returns 1 on success (or when the file is skipped), 0 on failure.
    (Fix: removed the unused local ``cache_dir`` from the original.)
    """
    success = 1
    name = os.path.basename(fullname)
    if ddir is not None:
        dfile = os.path.join(ddir, name)
    else:
        dfile = None
    if rx is not None:
        # Paths matching the exclusion regex are silently skipped.
        mo = rx.search(fullname)
        if mo:
            return success
    if os.path.isfile(fullname):
        if legacy:
            cfile = fullname + ('c' if __debug__ else 'o')
        else:
            if optimize >= 0:
                cfile = importlib.util.cache_from_source(
                    fullname, debug_override=not optimize)
            else:
                cfile = importlib.util.cache_from_source(fullname)
        head, tail = name[:-3], name[-3:]
        if tail == '.py':
            if not force:
                # Skip recompilation when the .pyc header already records
                # the current magic number and the source's mtime.
                try:
                    mtime = int(os.stat(fullname).st_mtime)
                    expect = struct.pack('<4sl', importlib.util.MAGIC_NUMBER,
                                         mtime)
                    with open(cfile, 'rb') as chandle:
                        actual = chandle.read(8)
                    if expect == actual:
                        return success
                except OSError:
                    pass
            if not quiet:
                print('Compiling {!r}...'.format(fullname))
            try:
                ok = py_compile.compile(fullname, cfile, dfile, True,
                                        optimize=optimize)
            except py_compile.PyCompileError as err:
                if quiet:
                    print('*** Error compiling {!r}...'.format(fullname))
                else:
                    print('*** ', end='')
                # escape non-printable characters in msg
                msg = err.msg.encode(sys.stdout.encoding,
                                     errors='backslashreplace')
                msg = msg.decode(sys.stdout.encoding)
                print(msg)
                success = 0
            except (SyntaxError, UnicodeError, OSError) as e:
                if quiet:
                    print('*** Error compiling {!r}...'.format(fullname))
                else:
                    print('*** ', end='')
                print(e.__class__.__name__ + ':', e)
                success = 0
            else:
                if ok == 0:
                    success = 0
    return success
def compile_path(skip_curdir=1, maxlevels=0, force=False, quiet=False,
                 legacy=False, optimize=-1):
    """Byte-compile all module on sys.path.

    Arguments (all optional):

    skip_curdir: if true, skip current directory (default True)
    maxlevels:   max recursion level (default 0)
    force:       as for compile_dir() (default False)
    quiet:       as for compile_dir() (default False)
    legacy:      as for compile_dir() (default False)
    optimize:    as for compile_dir() (default -1)
    """
    success = 1
    for entry in sys.path:
        # '' and '.' both denote the current directory.
        if (not entry or entry == os.curdir) and skip_curdir:
            print('Skipping current directory')
            continue
        success = success and compile_dir(entry, maxlevels, None,
                                          force, quiet=quiet,
                                          legacy=legacy, optimize=optimize)
    return success
def main():
    """Script main program.

    Parses command-line options and compiles the requested files/directories
    (or, with no arguments, everything on sys.path). Returns True on
    success, False on any failure or interrupt.
    """
    import argparse

    parser = argparse.ArgumentParser(
        description='Utilities to support installing Python libraries.')
    parser.add_argument('-l', action='store_const', const=0,
                        default=10, dest='maxlevels',
                        help="don't recurse into subdirectories")
    parser.add_argument('-f', action='store_true', dest='force',
                        help='force rebuild even if timestamps are up to date')
    parser.add_argument('-q', action='store_true', dest='quiet',
                        help='output only error messages')
    parser.add_argument('-b', action='store_true', dest='legacy',
                        help='use legacy (pre-PEP3147) compiled file locations')
    parser.add_argument('-d', metavar='DESTDIR', dest='ddir', default=None,
                        help=('directory to prepend to file paths for use in '
                              'compile-time tracebacks and in runtime '
                              'tracebacks in cases where the source file is '
                              'unavailable'))
    parser.add_argument('-x', metavar='REGEXP', dest='rx', default=None,
                        help=('skip files matching the regular expression; '
                              'the regexp is searched for in the full path '
                              'of each file considered for compilation'))
    parser.add_argument('-i', metavar='FILE', dest='flist',
                        help=('add all the files and directories listed in '
                              'FILE to the list considered for compilation; '
                              'if "-", names are read from stdin'))
    parser.add_argument('compile_dest', metavar='FILE|DIR', nargs='*',
                        help=('zero or more file and directory names '
                              'to compile; if no arguments given, defaults '
                              'to the equivalent of -l sys.path'))
    args = parser.parse_args()

    compile_dests = args.compile_dest

    # -d only makes sense with a single directory destination.
    if (args.ddir and (len(compile_dests) != 1
            or not os.path.isdir(compile_dests[0]))):
        parser.exit('-d destdir requires exactly one directory argument')
    if args.rx:
        import re
        args.rx = re.compile(args.rx)

    # if flist is provided then load it
    if args.flist:
        try:
            with (sys.stdin if args.flist=='-' else open(args.flist)) as f:
                for line in f:
                    compile_dests.append(line.strip())
        except OSError:
            print("Error reading file list {}".format(args.flist))
            return False

    success = True
    try:
        if compile_dests:
            for dest in compile_dests:
                # Files are compiled directly; anything else is treated
                # as a directory tree.
                if os.path.isfile(dest):
                    if not compile_file(dest, args.ddir, args.force, args.rx,
                                        args.quiet, args.legacy):
                        success = False
                else:
                    if not compile_dir(dest, args.maxlevels, args.ddir,
                                       args.force, args.rx, args.quiet,
                                       args.legacy):
                        success = False
            return success
        else:
            return compile_path(legacy=args.legacy, force=args.force,
                                quiet=args.quiet)
    except KeyboardInterrupt:
        print("\n[interrupted]")
        return False
    # NOTE(review): unreachable -- both branches inside the try return.
    return True
if __name__ == '__main__':
    # Shell convention: exit status 0 on success, 1 on any failure.
    exit_status = int(not main())
    sys.exit(exit_status)
|
|
"""
sentry.testutils.cases
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
__all__ = (
'TestCase', 'TransactionTestCase', 'APITestCase', 'AuthProviderTestCase',
'RuleTestCase', 'PermissionTestCase', 'PluginTestCase', 'CliTestCase',
'AcceptanceTestCase',
)
import base64
import os
import os.path
import pytest
import urllib
from click.testing import CliRunner
from contextlib import contextmanager
from django.conf import settings
from django.contrib.auth import login
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from django.test import TestCase, TransactionTestCase
from django.utils.importlib import import_module
from exam import before, fixture, Exam
from rest_framework.test import APITestCase as BaseAPITestCase
from sentry import auth
from sentry.auth.providers.dummy import DummyProvider
from sentry.constants import MODULE_ROOT
from sentry.models import GroupMeta, ProjectOption
from sentry.plugins import plugins
from sentry.rules import EventState
from sentry.utils import json
from .fixtures import Fixtures
from .helpers import AuthProvider, Feature, get_auth_header, TaskRunner, override_options
class BaseTestCase(Fixtures, Exam):
    """Common helpers shared by all Sentry test-case flavours.

    Combines fixture factories (``Fixtures``) with exam's ``@before`` hooks
    and adds helpers for sessions, authentication, and posting events to the
    store/CSP endpoints.
    """
    # URLconf used by Django's test client for every subclass.
    urls = 'sentry.web.urls'

    def assertRequiresAuthentication(self, path, method='GET'):
        # Unauthenticated requests must redirect (302) to the login view.
        resp = getattr(self.client, method.lower())(path)
        assert resp.status_code == 302
        assert resp['Location'].startswith('http://testserver' + reverse('sentry-login'))

    @before
    def setup_dummy_auth_provider(self):
        # Register the dummy auth provider for the duration of each test.
        auth.register('dummy', DummyProvider)
        self.addCleanup(auth.unregister, 'dummy', DummyProvider)

    @before
    def setup_session(self):
        # Create and persist a real session so login_as/save_session work.
        engine = import_module(settings.SESSION_ENGINE)

        session = engine.SessionStore()
        session.save()

        self.session = session

    def tasks(self):
        # Context manager under which celery tasks run eagerly.
        return TaskRunner()

    def feature(self, name, active=True):
        """
        >>> with self.feature('feature:name')
        >>>     # ...
        """
        return Feature(name, active)

    def auth_provider(self, name, cls):
        """
        >>> with self.auth_provider('name', Provider)
        >>>     # ...
        """
        return AuthProvider(name, cls)

    def save_session(self):
        # Persist the session and attach its cookie to the test client.
        self.session.save()

        cookie_data = {
            'max-age': None,
            'path': '/',
            'domain': settings.SESSION_COOKIE_DOMAIN,
            'secure': settings.SESSION_COOKIE_SECURE or None,
            'expires': None,
        }

        session_cookie = settings.SESSION_COOKIE_NAME
        self.client.cookies[session_cookie] = self.session.session_key
        self.client.cookies[session_cookie].update(cookie_data)

    def login_as(self, user):
        # Log the user in directly against the session, bypassing the
        # login view.
        user.backend = settings.AUTHENTICATION_BACKENDS[0]

        request = HttpRequest()
        request.session = self.session

        login(request, user)
        request.user = user

        # Save the session values.
        self.save_session()

    def load_fixture(self, filepath):
        # Read raw bytes from a file under <MODULE_ROOT>/tests/fixtures.
        filepath = os.path.join(
            MODULE_ROOT,
            'tests',
            'fixtures',
            filepath,
        )
        with open(filepath, 'rb') as fp:
            return fp.read()

    def _pre_setup(self):
        # Clear process-local caches so tests don't leak state into each other.
        super(BaseTestCase, self)._pre_setup()

        cache.clear()
        ProjectOption.objects.clear_local_cache()
        GroupMeta.objects.clear_local_cache()

    def _post_teardown(self):
        super(BaseTestCase, self)._post_teardown()

    def _makeMessage(self, data):
        # Serialize an event payload for the store endpoint.
        return json.dumps(data)

    def _makePostMessage(self, data):
        # Store POST bodies are base64-encoded JSON.
        return base64.b64encode(self._makeMessage(data))

    def _postWithHeader(self, data, key=None, secret=None, protocol=None):
        # POST an event to the store endpoint, authenticating via the
        # X-Sentry-Auth header. Defaults to this test's project key.
        if key is None:
            key = self.projectkey.public_key
            secret = self.projectkey.secret_key

        message = self._makePostMessage(data)
        with self.tasks():
            resp = self.client.post(
                reverse('sentry-api-store'), message,
                content_type='application/octet-stream',
                HTTP_X_SENTRY_AUTH=get_auth_header(
                    '_postWithHeader/0.0.0',
                    key,
                    secret,
                    protocol,
                ),
            )
        return resp

    def _postCspWithHeader(self, data, key=None, **extra):
        # NOTE(review): `key` is accepted but never used, and `body` is only
        # bound when data is a dict or a string -- any other type raises
        # NameError below. Confirm both before relying on this helper.
        if isinstance(data, dict):
            body = json.dumps({'csp-report': data})
        elif isinstance(data, basestring):
            body = data
        path = reverse('sentry-api-csp-report', kwargs={'project_id': self.project.id})
        path += '?sentry_key=%s' % self.projectkey.public_key
        with self.tasks():
            return self.client.post(
                path, data=body,
                content_type='application/csp-report',
                HTTP_USER_AGENT='awesome',
                **extra
            )

    def _getWithReferer(self, data, key=None, referer='getsentry.com', protocol='4'):
        # Simulate a raven-js GET store request (payload in the query string).
        if key is None:
            key = self.projectkey.public_key

        headers = {}
        if referer is not None:
            headers['HTTP_REFERER'] = referer

        message = self._makeMessage(data)
        qs = {
            'sentry_version': protocol,
            'sentry_client': 'raven-js/lol',
            'sentry_key': key,
            'sentry_data': message,
        }
        with self.tasks():
            resp = self.client.get(
                '%s?%s' % (reverse('sentry-api-store', args=(self.project.pk,)), urllib.urlencode(qs)),
                **headers
            )
        return resp

    def _postWithReferer(self, data, key=None, referer='getsentry.com', protocol='4'):
        # Simulate a raven-js POST store request (payload in the body).
        if key is None:
            key = self.projectkey.public_key

        headers = {}
        if referer is not None:
            headers['HTTP_REFERER'] = referer

        message = self._makeMessage(data)
        qs = {
            'sentry_version': protocol,
            'sentry_client': 'raven-js/lol',
            'sentry_key': key,
        }
        with self.tasks():
            resp = self.client.post(
                '%s?%s' % (reverse('sentry-api-store', args=(self.project.pk,)), urllib.urlencode(qs)),
                data=message,
                content_type='application/json',
                **headers
            )
        return resp

    def options(self, options):
        """
        A context manager that temporarily sets a global option and reverts
        back to the original value when exiting the context.
        """
        return override_options(options)

    @contextmanager
    def dsn(self, dsn):
        """
        A context manager that temporarily sets the internal client's DSN
        """
        from raven.contrib.django.models import client

        try:
            client.set_dsn(dsn)
            yield
        finally:
            client.set_dsn(None)

    # Backwards-compatible aliases for older call sites.
    _postWithSignature = _postWithHeader
    _postWithNewSignature = _postWithHeader
class TestCase(BaseTestCase, TestCase):
    """Standard Django TestCase with Sentry's shared helpers mixed in."""
    pass
class TransactionTestCase(BaseTestCase, TransactionTestCase):
    """Django TransactionTestCase with Sentry's shared helpers mixed in."""
    pass
class APITestCase(BaseTestCase, BaseAPITestCase):
    """DRF APITestCase with Sentry's shared helpers mixed in."""
    pass
class AuthProviderTestCase(TestCase):
    """Registers a custom auth provider for the duration of each test."""
    # Override these in subclasses to exercise a different provider.
    provider = DummyProvider
    provider_name = 'dummy'

    def setUp(self):
        super(AuthProviderTestCase, self).setUp()
        # TestCase automatically sets up dummy provider
        if self.provider_name != 'dummy' or self.provider != DummyProvider:
            auth.register(self.provider_name, self.provider)
            self.addCleanup(auth.unregister, self.provider_name, self.provider)
class RuleTestCase(TestCase):
    """Base class for exercising a single rule implementation (``rule_cls``)."""
    rule_cls = None

    def get_event(self):
        return self.event

    def get_rule(self, data=None):
        return self.rule_cls(
            project=self.project,
            data=data or {},
        )

    def get_state(self, **kwargs):
        # Fill in defaults for any state flags the caller did not specify.
        defaults = {
            'is_new': True,
            'is_regression': True,
            'is_sample': True,
            'rule_is_active': False,
            'rule_last_active': None,
        }
        for key, value in defaults.items():
            kwargs.setdefault(key, value)
        return EventState(**kwargs)

    def assertPasses(self, rule, event=None, **kwargs):
        event = self.event if event is None else event
        assert rule.passes(event, self.get_state(**kwargs)) is True

    def assertDoesNotPass(self, rule, event=None, **kwargs):
        event = self.event if event is None else event
        assert rule.passes(event, self.get_state(**kwargs)) is False
class PermissionTestCase(TestCase):
    """Asserts which organization roles can reach a given path.

    Each public ``assert_*`` helper creates a fresh non-superuser with the
    given role/team membership in ``self.organization`` and checks whether
    that user can (2xx) or cannot (>=300) access ``path``. The twelve
    near-identical helpers from the original are preserved with unchanged
    signatures but now share one private implementation.
    """
    def setUp(self):
        super(PermissionTestCase, self).setUp()
        self.owner = self.create_user(is_superuser=False)
        self.organization = self.create_organization(
            owner=self.owner,
            flags=0,  # disable default allow_joinleave access
        )
        self.team = self.create_team(organization=self.organization)

    def assert_can_access(self, user, path, method='GET'):
        # Any 2xx response counts as accessible.
        self.login_as(user)
        resp = getattr(self.client, method.lower())(path)
        assert resp.status_code >= 200 and resp.status_code < 300

    def assert_cannot_access(self, user, path, method='GET'):
        # Redirects and errors (>= 300) count as inaccessible.
        self.login_as(user)
        resp = getattr(self.client, method.lower())(path)
        assert resp.status_code >= 300

    def _assert_role_access(self, path, role, teams, can_access):
        # Shared implementation: create a member with the given role/teams
        # and assert accessibility of `path` for them.
        user = self.create_user(is_superuser=False)
        self.create_member(
            user=user, organization=self.organization,
            role=role, teams=teams,
        )
        if can_access:
            self.assert_can_access(user, path)
        else:
            self.assert_cannot_access(user, path)

    def assert_member_can_access(self, path):
        self._assert_role_access(path, 'member', [self.team], True)

    def assert_teamless_member_can_access(self, path):
        self._assert_role_access(path, 'member', [], True)

    def assert_member_cannot_access(self, path):
        self._assert_role_access(path, 'member', [self.team], False)

    def assert_teamless_member_cannot_access(self, path):
        self._assert_role_access(path, 'member', [], False)

    def assert_team_admin_can_access(self, path):
        self._assert_role_access(path, 'admin', [self.team], True)

    def assert_teamless_admin_can_access(self, path):
        self._assert_role_access(path, 'admin', [], True)

    def assert_team_admin_cannot_access(self, path):
        self._assert_role_access(path, 'admin', [self.team], False)

    def assert_teamless_admin_cannot_access(self, path):
        self._assert_role_access(path, 'admin', [], False)

    def assert_team_owner_can_access(self, path):
        self._assert_role_access(path, 'owner', [self.team], True)

    def assert_owner_can_access(self, path):
        self._assert_role_access(path, 'owner', [self.team], True)

    def assert_owner_cannot_access(self, path):
        self._assert_role_access(path, 'owner', [self.team], False)

    def assert_non_member_cannot_access(self, path):
        # No membership at all is created for this user.
        user = self.create_user(is_superuser=False)
        self.assert_cannot_access(user, path)
class PluginTestCase(TestCase):
    """Registers ``plugin`` for the duration of each test."""
    # Subclasses set this to the plugin instance under test.
    plugin = None

    def setUp(self):
        super(PluginTestCase, self).setUp()
        plugins.register(self.plugin)
        self.addCleanup(plugins.unregister, self.plugin)
class CliTestCase(TestCase):
    """Base class for exercising click CLI commands through CliRunner."""
    runner = fixture(CliRunner)
    # Subclasses set the click command and any arguments appended to
    # every invocation.
    command = None
    default_args = []

    def invoke(self, *args):
        full_args = args + tuple(self.default_args)
        return self.runner.invoke(self.command, full_args, obj={})
@pytest.mark.usefixtures('browser')
class AcceptanceTestCase(TransactionTestCase):
    """Browser-based acceptance tests (driver supplied by the ``browser``
    pytest fixture)."""
    def save_session(self):
        # Persist the session and hand its cookie to the browser instead
        # of Django's test client.
        self.session.save()
        self.browser.save_cookie(
            name=settings.SESSION_COOKIE_NAME,
            value=self.session.session_key,
        )
|
|
'''
Created on 2009/07/17
@author: kamiya
'''
import sys, os
import itertools
import unittest
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from pyrem_torq.treeseq import *
from pyrem_torq.expression import *
def mc4la_to_readable(r):
    """Turn a lookahead-info object into a comparable (nodes, literals, epsilon) tuple."""
    if r is None:
        return None
    nodes = None if r.nodes is ANY_ITEM else sorted(r.nodes)
    literals = None if r.literals is ANY_ITEM else sorted(r.literals)
    return nodes, literals, r.emptyseq
class TestTorqExpression(unittest.TestCase):
def test1st(self):
expr = Literal('ab')
seq = [ 'text', 0, 'ab' ]
posDelta, outSeq = expr.match(seq, 1)
self.assertEqual(1 + posDelta, len(seq))
self.assertEqual(outSeq, [ 0, 'ab' ])
def testSeq(self):
expr = Seq(Literal('a'), Literal('b'))
seq = [ 'text', 0, 'a', 1, 'b' ]
posDelta, outSeq = expr.match(seq, 1)
self.assertEqual(1 + posDelta, len(seq))
self.assertEqual(outSeq, [ 0, 'a', 1, 'b' ])
def testOr(self):
expr = Or(Literal('a'), Literal('b'))
seq = [ 'text', 0, 'a' ]
posDelta, outSeq = expr.match(seq, 1)
self.assertEqual(1 + posDelta, len(seq))
self.assertEqual(outSeq, [ 0, 'a' ])
seq = [ 'text', 0, 'b' ]
posDelta, outSeq = expr.match(seq, 1)
self.assertEqual(1 + posDelta, len(seq))
self.assertEqual(outSeq, [ 0, 'b' ])
    def testRepeat(self):
        # Exact count: Repeat(expr, 3, 3) requires exactly three matches.
        expr = Repeat(Literal('a'), 3, 3)
        seq = ['text', 0, 'a', 1, 'a', 2, 'a']
        posDelta, outSeq = expr.match(seq, 1)
        self.assertEqual(1 + posDelta, len(seq))
        self.assertEqual(outSeq, [0, 'a', 1, 'a', 2, 'a'])

        # Only two 'a's available -> no match, zero progress.
        seq = ['text', 0, 'a', 1, 'a']
        posDelta, outSeq = expr.match(seq, 1)
        self.assertEqual(posDelta, 0)

        # Bounded repeat: at most three matches consumed even when more input
        # is available. (NOTE: xrange -> this module targets Python 2.)
        expr = Repeat(Literal('a'), 0, 3)
        seq = ['text']
        for i in xrange(10): seq.extend((i, 'a'))
        posDelta, outSeq = expr.match(seq, 1)
        self.assertEqual(posDelta, 6)
        self.assertEqual(outSeq, [0, 'a', 1, 'a', 2, 'a'])

        seq = ['text', 0, 'a', 1, 'a']
        posDelta, outSeq = expr.match(seq, 1)
        self.assertEqual(posDelta, 4)
        self.assertEqual(outSeq, [0, 'a', 1, 'a'])

        # Unbounded repeat (upper bound None) also matches the empty sequence.
        expr = Repeat(Literal('a'), 0, None)
        seq = ['text']
        posDelta, outSeq = expr.match(seq, 1)
        self.assertEqual(posDelta, 0)
        self.assertFalse(outSeq)

        seq = ['text', 0, 'a', 1, 'a']
        posDelta, outSeq = expr.match(seq, 1)
        self.assertEqual(posDelta, 4)
        self.assertEqual(outSeq, [0, 'a', 1, 'a'])
def testNodeMatch(self):
expr = NodeMatch('A', Literal("a"))
seq = [ 'text', [ 'A', 0, 'a' ] ]
posDelta, outSeq = expr.match(seq, 1)
self.assertEqual(posDelta, 1)
self.assertEqual(outSeq, [ [ 'A', 0, 'a' ] ])
def testNode(self):
expr = Node('A')
seq = [ 'text', [ 'A', 0, 'p', 1, 'a', 2, 'b', 3, 'q' ] ]
posDelta, outSeq = expr.match(seq, 1)
self.assertEqual(posDelta, 1)
self.assertEqual(outSeq, [ [ 'A', 0, 'p', 1, 'a', 2, 'b', 3, 'q' ] ])
def testAny(self):
    """Any consumes a single item: a whole node, or one (pos, literal) pair."""
    pattern = Any()
    node = ['A', 0, 'p', 1, 'a', 2, 'b', 3, 'q']
    delta, result = pattern.match(['text', node], 1)
    self.assertEqual(delta, 1)
    self.assertEqual(result, [node])
    delta, result = pattern.match(['text', 0, 'a'], 1)
    self.assertEqual(delta, 2)
    self.assertEqual(result, [0, 'a'])
# def testLiteralClass(self):
# expr = LiteralClass(("a", "b", "c"))
# seq = [ 'text', 0, 'a' ]
#
# posDelta, outSeq = expr.match(seq, 1)
# self.assertEqual(posDelta, 2)
# self.assertEqual(outSeq, [ 0, 'a' ])
#
# seq = [ 'text', 0, 'b' ]
#
# posDelta, outSeq = expr.match(seq, 1)
# self.assertEqual(posDelta, 2)
# self.assertEqual(outSeq, [ 0, 'b' ])
#
# seq = [ 'text', 0, 'p' ]
#
# posDelta, outSeq = expr.match(seq, 1)
# self.assertEqual(posDelta, 0)
def testRex(self):
    """Rex matches a literal against a regular expression."""
    pattern = Rex(r"^[a-c]$")
    delta, result = pattern.match(['text', 0, 'a'], 1)
    self.assertEqual(delta, 2)
    self.assertEqual(result, [0, 'a'])
    # A literal outside the character class must not match.
    delta, result = pattern.match(['text', 0, 'd'], 1)
    self.assertFalse(delta)
    # Unicode literals are accepted as well (this module targets Python 2).
    delta, result = pattern.match(['text', 0, u'a'], 1)
    self.assertEqual(delta, 2)
    self.assertEqual(result, [0, u'a'])
def testEpsilonConcatination(self):
    """Epsilon + expr builds a composite that is distinct from the bare expr."""
    combined = Epsilon() + Literal("fuga")
    self.assertNotEqual(combined, Literal("fuga"))
def testSelection(self):
    # The | operator on expressions builds an Or of the operands.
    # NOTE: relies on Python 2 map() returning an indexable list; under
    # Python 3 this would need list(map(...)).
    exprs = map(Literal, [ "a", "b", "c" ])
    expr = exprs[0] | exprs[1] | exprs[2]
    self.assertEqual(expr, Or(Literal("a"), Literal("b"), Literal("c")))
def testConcatLiteal(self):
    # The + operator on expressions builds a Seq of the operands.
    # NOTE(review): method name has a typo ("Liteal"); left unchanged because
    # renaming would alter the test id used by runners and suites.
    exprs = map(Literal, [ "a", "b", "c" ])
    expr = exprs[0] + exprs[1] + exprs[2]
    self.assertEqual(expr, Seq(Literal("a"), Literal("b"), Literal("c")))
def testIdentifier(self):
    # [0,]*expr appears to be this DSL's operator-overloaded spelling of
    # "zero or more repetitions" of expr (cf. Repeat(expr, 0, None)) --
    # TODO confirm against the Expression class's __rmul__.
    idExpr = Literal('_') + [0,]*(Literal('_') | \
            Rex(r"^[a-zA-Z]") | \
            Rex(r"^[0-9]")) | \
        Rex(r"^[a-zA-Z]") + [0,]*(Literal('_') | Rex(r"^[a-zA-Z]") | Rex(r"^[0-9]"))
    # The whole identifier arrives as one literal token, so one pair is consumed.
    seq = [ 'text', 0, 'abc' ]
    posDelta, outSeq = idExpr.match(seq, 1)
    self.assertEqual(posDelta, 2)
    self.assertEqual(outSeq, [ 0, 'abc' ])
def testRepeat2(self):
    """Repeating InsertNode emits nodes without consuming any input."""
    pattern = Repeat(InsertNode('hoge'), 3, 3)
    delta, result = pattern.match(['text'], 1)
    self.assertEqual(delta, 0)
    self.assertEqual(result, [['hoge'], ['hoge'], ['hoge']])
    pattern = Repeat(InsertNode('hoge'), 3, 3) + Literal('a')
    delta, result = pattern.match(['text', 0, 'a'], 1)
    self.assertEqual(delta, 2)
    self.assertEqual(result, [['hoge'], ['hoge'], ['hoge'], 0, 'a'])
def testAnyLiteral(self):
    """AnyLiteral consumes (pos, string) pairs."""
    pattern = Repeat(AnyLiteral(), 0, None)
    tokens = ['text', 0, "a", 1, "b", 2, "c"]
    delta, result = pattern.match(tokens, 1)
    self.assertEqual(delta, 6)
    self.assertEqual(result, [0, "a", 1, "b", 2, "c"])
def testAnyNode(self):
    """AnyNode consumes nodes only; literal pairs stop the repetition."""
    pattern = Repeat(AnyNode(), 0, None)
    delta, result = pattern.match(['text', ['a'], ['b'], ['c']], 1)
    self.assertEqual(delta, 3)
    self.assertEqual(result, [['a'], ['b'], ['c']])
    delta, result = pattern.match(['code', 0, 'a', 1, 'b', 2, 'c'], 1)
    self.assertEqual(delta, 0)
def testAnyNodeMatch(self):
    """AnyNodeMatch accepts any node label but checks the node's contents."""
    pattern = Repeat(AnyNodeMatch(Literal('X')), 0, None)
    tokens = ['text', ['a', 0, 'X'], ['b', 1, 'X'], ['c', 2, 'Y']]
    delta, result = pattern.match(tokens, 1)
    self.assertEqual(delta, 2)
    self.assertEqual(result, [['a', 0, 'X'], ['b', 1, 'X']])
def testReqs(self):
    # mc4la_to_readable() renders a look-ahead candidate as the triple
    # (node labels, literals, can-match-empty).
    expr = Require(Node("a"))
    self.assertEqual(mc4la_to_readable(expr.getMatchCandidateForLookAhead()),
        ( [ 'a' ], [], False ))
    expr = Require(Literal("a"))
    self.assertEqual(mc4la_to_readable(expr.getMatchCandidateForLookAhead()),
        ( [], [ 'a' ], False ))
    # An optional prefix contributes its literals alongside the mandatory one.
    expr = Seq(Repeat(Literal("a"), 0, 1), Literal("b"))
    self.assertEqual(mc4la_to_readable(expr.getMatchCandidateForLookAhead()),
        ( [], [ 'a', 'b' ], False ))
    expr = Or(Literal('a'), Literal('b'))
    self.assertEqual(mc4la_to_readable(expr.getMatchCandidateForLookAhead()),
        ( [], [ 'a', 'b' ], False ))
    # Alternatives are merged recursively.
    expr = Or(
        Seq(
            Repeat(Literal("a"), 0, 1),
            Literal("b")),
        Literal("c"))
    self.assertEqual(mc4la_to_readable(expr.getMatchCandidateForLookAhead()),
        ( [], [ 'a', 'b', 'c' ], False ))
def testSearch(self):
    """Search(expr) consumes like Repeat(Or(expr, Any()), 0, None)."""
    tokens = ['text', 0, "a", 1, "b", 2, "c", 3, "a"]
    expected = [["here"], 0, "a", 1, "b", 2, "c", ["here"], 3, "a"]
    pattern = Search(Seq(InsertNode("here"), Literal("a")))
    delta, result = pattern.match(tokens, 1)
    self.assertEqual(delta, 8)
    self.assertEqual(result, expected)
    pattern = Repeat(Or(Seq(InsertNode("here"), Literal("a")), Any()), 0, None)
    delta, result = pattern.match(tokens, 1)
    self.assertEqual(delta, 8)
    self.assertEqual(result, expected)
def testSearch2(self):
    # Search with a pure-insertion expression inserts at every gap,
    # including before the first item and after the last one.
    expr = Search(InsertNode("here"))
    seq = [ 'text', 0, "a", 1, "b", 2, "c" ]
    posDelta, outSeq = expr.match(seq, 1)
    self.assertEqual(posDelta, 6)
    self.assertEqual(outSeq, [ [ "here" ], 0, "a", [ "here" ], 1, "b", [ "here" ], 2, "c", [ "here" ] ])
    # By contrast Repeat(Or(InsertNode, Any)) matches nothing here --
    # presumably the repeat machinery refuses to loop on an alternative
    # that consumes no input; TODO confirm against Repeat.match.
    e1 = Or(InsertNode("here"), Any())
    expr = Repeat(e1, 0, None)
    posDelta, outSeq = expr.match(seq, 1)
    self.assertEqual(posDelta, 0)
    self.assertEqual(outSeq, [ ])
def testNodeInsideNode(self):
    """NodeMatch can require a node whose sole content is another node."""
    pattern = NodeMatch("expr", Node("expr") | Node("literal"))
    delta, result = pattern.match(['code', ['expr', ['expr']]], 1)
    self.assertEqual(delta, 1)
def testRemoveRedundantParen(self):
    # Rewrites expr(expr(X)) and expr(literal) down to the innermost node,
    # recursing through a Holder so the rule applies at any depth.
    expr0 = Holder()
    expr0.expr = Or(
        Require(NodeMatch("expr", Node("expr") | Node("literal"))) + Flattened(NodeMatch("expr", expr0)),
        NodeMatch("expr", Search(expr0)),
        Any())
    expr = Search(expr0)
    seq = [ 'code', [ 'expr', [ 'literal', 0, 'a' ] ] ]
    posDelta, outSeq = expr.match(seq, 1)
    self.assertEqual(posDelta, 1)
    outSeq2 = [ seq[0] ] + outSeq
    self.assertEqual(outSeq2, [ 'code', [ 'literal', 0, 'a' ] ])
    # Doubly-wrapped expressions collapse all the way down.
    seq = [ 'code', [ 'expr', [ 'expr', [ 'literal', 0, 'a' ] ] ] ]
    posDelta, outSeq = expr.match(seq, 1)
    self.assertEqual(posDelta, 1)
    outSeq2 = [ seq[0] ] + outSeq
    self.assertEqual(outSeq2, [ 'code', [ 'literal', 0, 'a' ] ])
def testCheckingLeftRecursion(self):
    # expr1/expr2 form a direct left recursion through the Holder.
    expr2 = Holder()
    expr1 = Or(expr2, Literal('a'))
    expr2.expr = expr1
    self.assertTrue(expr2.isLeftRecursive())
    self.assertTrue(expr1.isLeftRecursive())
    expr3 = Or(Literal('a'), expr1)
    self.assertFalse(expr3.isLeftRecursive()) # expr3 includes a left-recursion expr, but expr3 itself is not left recursive.
    includesLeftRecursiveExpressions = any(e.isLeftRecursive() for e in expr3.extract_exprs())
    self.assertTrue(includesLeftRecursiveExpressions)
    expr = Or(Literal('b'), Literal('a'))
    self.assertFalse(expr.isLeftRecursive())
    expr = Holder()
    with self.assertRaises(LeftRecursionUndecided):
        expr.isLeftRecursive() # expr's inner expression is not assigned yet, thus undecidable.
def testFlattening(self):
    """Flattened splices a node's children out; BuildToNode rewraps them."""
    pattern = BuildToNode("lc", Flattened(Node("c")))
    tokens = ['code', ['c', 0, 'a']]
    delta, result = pattern.match(tokens, 1)
    self.assertEqual(delta, 1)
    self.assertEqual([tokens[0]] + result, ['code', ['lc', 0, 'a']])
def testInsertNode(self):
    """InsertNode injects a fresh node without consuming input."""
    pattern = Seq(Node("c"), InsertNode("X"), Node("a"))
    tokens = ['code', ['c'], ['a']]
    delta, result = pattern.match(tokens, 1)
    self.assertEqual(delta, 2)
    self.assertEqual([tokens[0]] + result, ['code', ['c'], ['X'], ['a']])
def testInnerExprProperties(self):
    """Wrapper expressions expose expr/exprs plus extract_exprs()."""
    inner_a, inner_b = Node('a'), Node('b')
    wrappers = [Repeat(inner_a, 0, 1), Search(inner_a), Holder(),
                Relabeled('c', inner_a), Flattened(inner_a),
                NodeMatch('c', inner_a), AnyNodeMatch(inner_a),
                BuildToNode('c', inner_a)]
    combinators = [Or(inner_a, inner_b), Seq(inner_a, inner_b)]
    for e in wrappers + combinators:
        self.assertTrue(hasattr(e, "extract_exprs"))
    for e in wrappers:
        self.assertTrue(hasattr(e, "expr"))
    for e in combinators:
        self.assertTrue(hasattr(e, "exprs"))
def testNodeExprProperties(self):
    """Label-bearing expressions expose label/newLabel accessors."""
    for e in (Node('a'), NodeMatch('a', Epsilon())):
        self.assertTrue(hasattr(e, "label"))
        self.assertTrue(hasattr(e, "extract_labels"))
    for e in (Relabeled('newa', Node('a')), InsertNode('a'),
              BuildToNode('a', Epsilon())):
        self.assertTrue(hasattr(e, "newLabel"))
def testUpdateMC4LAWithRecursion(self):
    """Look-ahead candidates propagate through a recursive Holder."""
    placeholder = Holder()
    recursive = Or(Seq(Literal('a'), placeholder), Literal('b'))
    placeholder.expr = recursive
    for e in (recursive, placeholder):
        self.assertEqual(mc4la_to_readable(e.getMatchCandidateForLookAhead()),
                         ([], ['a', 'b'], False))
def testEquality(self):
    ea = Literal('a')
    eb = Literal('b')
    self.assertEqual(ea, ea)
    self.assertNotEqual(ea, eb)
    # Structural equality: differing operands, order or bounds must compare
    # unequal across every combinator.
    exprs = [ Or(ea, ea), Or(ea, eb), Or(eb, ea),
        Seq(ea, ea), Seq(ea, eb), Seq(eb, ea),
        Repeat(ea, 0, 1), Repeat(eb, 0, 1), Repeat(ea, 0, 2),
        Epsilon(), Any(), Never() ]
    # NOTE(review): combinations() never pairs an element with itself, so the
    # `e is f` branch is dead while `exprs` holds distinct objects.
    for e, f in itertools.combinations(exprs, 2):
        if e is f:
            self.assertEqual(e, f)
        else:
            self.assertNotEqual(e, f)
def testEqualityWithRecursion(self):
    # Holder equality must follow the held expression and still terminate
    # when the expressions are recursive.
    h = Holder()
    ea = Or(Literal('a'), Seq(Literal('b'), h))
    h2 = Holder()
    ea2 = Or(Literal('a'), Seq(Literal('b'), h2))
    h3 = Holder()
    ea3 = Or(Literal('a'), Seq(Literal('a'), h3))
    # NOTE(review): h and h3 are both still empty here yet compare unequal --
    # presumably empty-Holder equality is identity-based; confirm in Holder.
    self.assertEqual(h, h2)
    self.assertNotEqual(h, h3)
    # Filling only one holder breaks the equality until the peer is filled
    # with a structurally identical expression.
    h.expr = ea
    self.assertNotEqual(h, h2)
    h2.expr = ea2
    h3.expr = ea3
    self.assertEqual(h, h2)
    self.assertNotEqual(h, h3)
def testEqualityOfSearchAndRepeat(self):
    """Search(e) consumes the same span as Repeat(Or(e, Any()), 0, None).

    Fixes a copy-paste bug: the original matched `s` twice and asserted
    `sd` twice, so the Repeat expression `r` was never exercised at all.
    """
    s = Search(Literal('a'))
    r = Repeat(Or(Literal('a'), Any()), 0, None)
    seq = [ 'code', 0, 'a', 1, 'a', 2, 'a' ]
    sd, outSeq = s.match(seq, 1)
    self.assertEqual(sd, 6)
    rd, outSeq = r.match(seq, 1)  # was s.match(...): `r` went untested
    self.assertEqual(rd, 6)       # was asserting `sd` a second time
def testSubapply(self):
    """SubApply post-processes each submatch with a user callback."""
    def upcase(pair):
        assert len(pair) == 2
        return pair[0], pair[1].upper()
    pattern = Search(SubApply(upcase, Rex('^[a-z]')))
    tokens = ['code', 0, 'a', 1, 'b', 2, 'c', 3, 'Abc', 6, 'dEF']
    self.assertEqual(pattern.parse(tokens),
                     ['code', 0, 'A', 1, 'B', 2, 'C', 3, 'Abc', 6, 'DEF'])
def testSubapply2(self):
    # A SubApply callback returning [] deletes the matched items from the
    # output sequence...
    def removeNodeB(seq):
        assert len(seq) == 1
        node = seq[0]
        if node[0] == 'b':
            return []
        else:
            return seq
    expr = Search(SubApply(removeNodeB, AnyNode()))
    seq = [ 'code', [ 'a' ], [ 'b' ], [ 'c' ] ]
    outSeq = expr.parse(seq)
    self.assertEqual(outSeq, [ 'code', [ 'a' ], [ 'c' ] ])
    # ...while returning None makes the submatch fail, and here the whole
    # parse yields None.
    def failNodeB(seq):
        assert len(seq) == 1
        node = seq[0]
        if node[0] == 'b':
            return None
        else:
            return seq
    expr = Repeat(SubApply(failNodeB, AnyNode()), 0, None)
    seq = [ 'code', [ 'a' ], [ 'b' ], [ 'c' ] ]
    self.assertEqual(expr.parse(seq), None)
def testJoin(self):
    """Join matches items separated by a delimiter expression."""
    pattern = Join(Node('comma'), Node('hoge'), 2, 2)
    tokens = ['text', ['hoge'], ['comma'], ['hoge']]
    delta, result = pattern.match(tokens, 1)
    self.assertEqual(delta, 3)
    self.assertEqual(result, [['hoge'], ['comma'], ['hoge']])
    # Inserted items count toward the repetition but consume no input.
    pattern = Join(Node('comma'), InsertNode('hoge'), 3, 3) + Literal('a')
    tokens = ['text', ['comma'], ['comma'], 0, 'a']
    delta, result = pattern.match(tokens, 1)
    self.assertEqual(delta, 4)
    self.assertEqual(result, [['hoge'], ['comma'], ['hoge'], ['comma'],
                              ['hoge'], 0, 'a'])
    # Fully synthetic join: nothing consumed, everything inserted.
    pattern = Join(InsertNode('comma'), InsertNode('hoge'), 3, 3)
    delta, result = pattern.match(['text'], 1)
    self.assertEqual(delta, 0)
    self.assertEqual(result, [['hoge'], ['comma'], ['hoge'], ['comma'],
                              ['hoge']])
def TestSuite(TestTorqExpression):
    # NOTE(review): the parameter shadows the test-case class name; the usual
    # unittest idiom is a zero-argument factory that references the class
    # directly.  Callers must currently pass the class in explicitly --
    # confirm call sites before changing the signature.
    return unittest.makeSuite(TestTorqExpression)
# Allow running this module's tests directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RGBD viewer."""
import threading
import time
from typing import List, Optional, Tuple
import cv2 # type: ignore # type: ignore
import numpy as np
import google3.third_party.open3d.open3d as o3d # type: ignore
# Multiplier applied to raw depth-image values before use as metric depth.
# Presumably converts the sensor's storage unit to the viewer's working unit
# (e.g. mm -> cm) -- TODO confirm against the capture pipeline.
_DEPTH_SCALE = 0.1
# Fast conversion of rgbd images to point cloud.
# TODO(hirak): Include depthDistortions.
# TODO(hirak): Maybe this should be moved to transform_utils.py.
def pgm_to_pointcloud(
    depth_image: np.ndarray, color_image: Optional[np.ndarray],
    intrinsics: Tuple[float, float, float, float],
    distortion: List[float]) -> Tuple[np.ndarray, Optional[np.ndarray]]:
  """Fast conversion of opencv images to pointcloud.

  Takes ~7 ms per 1280x720 RGBD on my corp laptop (hirak).

  Args:
    depth_image: OpenCV depth image; NaN entries mark invalid pixels.
    color_image: Corresponding color image, if colors for each point is
      desired.
    intrinsics: fx, fy, cx, cy.
    distortion: Standard distortion params k1, k2, p1, p2, [k3, [k4, k5, k6]].

  Returns:
    points: Nx3 array of points in space.
    colors: Nx3 array of colors, each row an RGB. None if color_image is None.
  """
  # Everything below is vectorized numpy/OpenCV, optimized for speed.
  grid_x, grid_y = np.mgrid[0:depth_image.shape[1], 0:depth_image.shape[0]]
  valid = ~np.isnan(depth_image)
  pixel_x = grid_x.T[valid]
  pixel_y = grid_y.T[valid]
  depth = depth_image[valid] * _DEPTH_SCALE
  pixels = np.vstack([pixel_x, pixel_y]).astype(float)
  fx, fy, cx, cy = intrinsics
  camera_matrix = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
  # Undistort to normalized image coordinates, then scale by depth for 3D.
  pixels = cv2.undistortPoints(pixels, camera_matrix, np.array(distortion))
  pixels = pixels.T.reshape(2, -1)
  points = np.vstack([pixels * depth, depth]).T
  if color_image is None:
    return points, None
  colors = color_image[valid]
  if len(colors.shape) > 1 and colors.shape[1] == 3:
    # OpenCV uses BGR; point cloud libraries like to use RGB, so swap.
    colors[:, [0, 2]] = colors[:, [2, 0]]
  else:
    # Grayscale: replicate the single channel into R, G and B.
    colors = np.vstack([colors, colors, colors]).T
  return points, colors
# TODO(hirak): Correct for extrinsics (useful to test multiple depth cams).
def _pgm_to_pcd(depth_raw: np.ndarray, color_raw: np.ndarray,
                intrinsics: Tuple[float, float, float, float],
                distortion: List[float]) -> o3d.geometry.PointCloud:
  """Obtain an Open3d pointcloud from RGBD data."""
  points, colors = pgm_to_pointcloud(depth_raw, color_raw, intrinsics,
                                     distortion)
  pcd = o3d.geometry.PointCloud()
  pcd.points = o3d.utility.Vector3dVector(points)
  # This is to appease pytype.
  # colors must be populated, since we passed color to pgm_to_pointcloud.
  assert colors is not None
  # Scale 8-bit channels into [0, 1) as Open3D expects float colors.
  pcd.colors = o3d.utility.Vector3dVector(colors / 256)
  # Flip it, otherwise the pointcloud will be upside down.
  pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
  return pcd
class RgbdViewer:
  """Allows viewing 3d images.

  Feed frames via update_image() (filenames) or update_image_frames()
  (in-memory arrays); the first frame lazily creates the Open3D window and a
  daemon thread that keeps it rendering.
  """

  def __init__(self) -> None:
    """Initialize the visualizer."""
    self._vis = o3d.visualization.Visualizer()
    # Serializes frame updates; update calls are skipped while held.
    self._lock = threading.Lock()
    # Serializes geometry changes against the render loop.
    self._refresh_lock = threading.Lock()
    self._frame_count = 0
    self._intrinsics: Optional[Tuple[float, float, float, float]] = None
    self._distortion: Optional[List[float]] = None
    self._thread: Optional[threading.Thread] = None
    self._thread_created = threading.Event()

  def _setup_vis(self) -> None:
    """Creates the window and starts the background render loop."""
    self._vis.create_window()
    opt = self._vis.get_render_option()
    opt.background_color = np.asarray([0, 0, 0])
    opt.point_size = 2

    def o3d_viz() -> None:
      # Render loop: runs until the user closes the window
      # (poll_events() returns False).
      while True:
        with self._refresh_lock:
          if not self._vis.poll_events():
            print("Stopped")
            self._vis.close()
            self._vis.destroy_window()
            break
          self._vis.update_renderer()
        time.sleep(0.05)

    self._thread = threading.Thread(target=o3d_viz, daemon=True)
    self._thread.start()
    self._thread_created.set()

  def set_calibration(self, intrinsics: Tuple[float, float, float, float],
                      distortion: List[float]) -> None:
    """Updates the calibration parameters.

    Args:
      intrinsics: the camera intrinsics.
      distortion: the camera distortion.
    """
    self._intrinsics = intrinsics
    self._distortion = distortion

  def _render_point_cloud(self, depth_raw: np.ndarray,
                          color_raw: np.ndarray) -> None:
    """Converts one RGBD frame to a point cloud and shows it.

    Shared tail of update_image()/update_image_frames(); must be called with
    self._lock held and calibration set.
    """
    pcd = _pgm_to_pcd(depth_raw, color_raw, self._intrinsics,
                      self._distortion)
    if self._frame_count == 0:
      self._setup_vis()
    with self._refresh_lock:
      self._vis.clear_geometries()
      # Only fit the camera to the very first frame; later frames keep the
      # user's viewpoint.
      self._vis.add_geometry(pcd, reset_bounding_box=self._frame_count == 0)
    self._frame_count += 1

  def update_image(self, depth_fname: str, color_fname: str) -> None:
    """Update image updates the image.

    Args:
      depth_fname: the depth filename.
      color_fname: the color filename.
    """
    if self._intrinsics is None or self._distortion is None:
      print("Invalid intrinsics or distortion")
      return
    # Drop frames rather than queue them when a previous update is running.
    if self._lock.locked():
      print("Skipping image...")
      return
    with self._lock:
      depth_raw = cv2.imread(depth_fname, cv2.IMREAD_ANYDEPTH)
      if depth_raw is None:
        return
      color_raw = cv2.imread(color_fname, cv2.IMREAD_ANYCOLOR)
      self._render_point_cloud(depth_raw, color_raw)

  def update_image_frames(self, depth_raw: np.ndarray,
                          color_raw: np.ndarray) -> None:
    """Update image updates the image frames.

    Args:
      depth_raw: the depth image.
      color_raw: the color image.
    """
    if self._intrinsics is None or self._distortion is None:
      print("Invalid intrinsics or distortion")
      return
    if self._lock.locked():
      print("Skipping image...")
      return
    with self._lock:
      self._render_point_cloud(depth_raw, color_raw)

  def is_alive(self) -> bool:
    """Returns True until the user closes the UI."""
    if self._frame_count == 0:
      return True
    return self._thread is not None and self._thread.is_alive()

  def wait_until_closed(self) -> None:
    """Blocks the thread and wait till the user closes the UI."""
    self._thread_created.wait()
    assert self._thread is not None
    self._thread.join()
|
|
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2011, James Vega
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import gc
import os
import re
import sys
import time
import shutil
import unittest
import threading
import supybot.log as log
import supybot.conf as conf
import supybot.utils as utils
import supybot.ircdb as ircdb
import supybot.world as world
import supybot.irclib as irclib
import supybot.plugin as plugin
import supybot.drivers as drivers
import supybot.ircmsgs as ircmsgs
import supybot.registry as registry
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
# Presumably gates network-dependent tests -- confirm where it is consumed.
network = True

# This is the global list of suites that are to be run.
suites = []

# Seconds to wait for a bot reply before a feed times out.
timeout = 10

# Wrap callbacks.getHelp so assertions can check whether the last reply was a
# help string: lastGetHelp always holds the most recently generated help.
# The initial value is an improbable sentinel that matches nothing.
originalCallbacksGetHelp = callbacks.getHelp
lastGetHelp = 'x' * 1000
def cachingGetHelp(method, name=None, doc=None):
    global lastGetHelp
    lastGetHelp = originalCallbacksGetHelp(method, name, doc)
    return lastGetHelp
callbacks.getHelp = cachingGetHelp
def getTestIrc():
    """Return a fresh Irc object with its connect messages drained."""
    testIrc = irclib.Irc('test')
    # Gotta clear the connect messages (USER, NICK, etc.) so tests start
    # from an empty outgoing queue.
    while testIrc.takeMsg():
        pass
    return testIrc
class TimeoutError(AssertionError):
    """Raised when a fed message produces no reply within the timeout."""
    def __str__(self):
        return '{0!r} timed out'.format(self.args[0])
class TestPlugin(callbacks.Plugin):
    # Test-harness-only plugin; see the module-level comment where it is
    # instantiated.  The eval of raw test input below is deliberate and must
    # never ship to users.
    def eval(self, irc, msg, args):
        """<text>

        This is the help for eval.  Since Owner doesn't have an eval command
        anymore, we needed to add this so as not to invalidate any of the tests
        that depended on that eval command.
        """
        try:
            irc.reply(repr(eval(' '.join(args))))
        except callbacks.ArgumentError:
            # Let the command framework report usage errors itself.
            raise
        except Exception, e:
            irc.reply(utils.exnToString(e))
# Since we know we don't now need the Irc object, we just give None.  This
# might break if callbacks.Privmsg ever *requires* the Irc object.
TestInstance = TestPlugin(None)
# Register the harness plugin so its configuration group exists (non-public).
conf.registerPlugin('TestPlugin', True, public=False)
class SupyTestCase(unittest.TestCase):
    """This class exists simply for extra logging.  It's come in useful in the
    past."""
    def setUp(self):
        # Log the test id plus the live thread names; %L is supybot's
        # list-formatting log specifier (see supybot.log).
        log.critical('Beginning test case %s', self.id())
        threads = [t.getName() for t in threading.enumerate()]
        log.critical('Threads: %L', threads)
        unittest.TestCase.setUp(self)
    def tearDown(self):
        # Kill any Irc objects a test left behind so state can't leak
        # between test cases.
        for irc in world.ircs[:]:
            irc._reallyDie()
class PluginTestCase(SupyTestCase):
"""Subclass this to write a test case for a plugin. See
plugins/Plugin/test.py for an example.
"""
plugins = None
cleanConfDir = True
cleanDataDir = True
config = {}
def __init__(self, methodName='runTest'):
self.timeout = timeout
originalRunTest = getattr(self, methodName)
def runTest(self):
run = True
if hasattr(self, 'irc') and self.irc:
for cb in self.irc.callbacks:
cbModule = sys.modules[cb.__class__.__module__]
if hasattr(cbModule, 'deprecated') and cbModule.deprecated:
print
print 'Ignored, %s is deprecated.' % cb.name()
run = False
if run:
originalRunTest()
runTest = utils.python.changeFunctionName(runTest, methodName)
setattr(self.__class__, methodName, runTest)
SupyTestCase.__init__(self, methodName=methodName)
self.originals = {}
def setUp(self, nick='test'):
if self.__class__ in (PluginTestCase, ChannelPluginTestCase):
# Necessary because there's a test in here that shouldn\'t run.
return
SupyTestCase.setUp(self)
# Just in case, let's do this. Too many people forget to call their
# super methods.
for irc in world.ircs[:]:
irc._reallyDie()
# Set conf variables appropriately.
conf.supybot.reply.whenAddressedBy.chars.setValue('@')
conf.supybot.reply.error.detailed.setValue(True)
conf.supybot.reply.whenNotCommand.setValue(True)
self.myVerbose = world.myVerbose
def rmFiles(dir):
for filename in os.listdir(dir):
file = os.path.join(dir, filename)
if os.path.isfile(file):
os.remove(file)
else:
shutil.rmtree(file)
if self.cleanConfDir:
rmFiles(conf.supybot.directories.conf())
if self.cleanDataDir:
rmFiles(conf.supybot.directories.data())
ircdb.users.reload()
ircdb.ignores.reload()
ircdb.channels.reload()
if self.plugins is None:
raise ValueError, 'PluginTestCase must have a "plugins" attribute.'
self.nick = nick
self.prefix = ircutils.joinHostmask(nick, 'user', 'host.domain.tld')
self.irc = getTestIrc()
MiscModule = plugin.loadPluginModule('Misc')
OwnerModule = plugin.loadPluginModule('Owner')
ConfigModule = plugin.loadPluginModule('Config')
_ = plugin.loadPluginClass(self.irc, MiscModule)
_ = plugin.loadPluginClass(self.irc, OwnerModule)
_ = plugin.loadPluginClass(self.irc, ConfigModule)
if isinstance(self.plugins, str):
self.plugins = [self.plugins]
else:
for name in self.plugins:
if name not in ('Owner', 'Misc', 'Config'):
module = plugin.loadPluginModule(name,
ignoreDeprecation=True)
cb = plugin.loadPluginClass(self.irc, module)
self.irc.addCallback(TestInstance)
for (name, value) in self.config.iteritems():
group = conf.supybot
parts = registry.split(name)
if parts[0] == 'supybot':
parts.pop(0)
for part in parts:
group = group.get(part)
self.originals[group] = group()
group.setValue(value)
def tearDown(self):
if self.__class__ in (PluginTestCase, ChannelPluginTestCase):
# Necessary because there's a test in here that shouldn\'t run.
return
for (group, original) in self.originals.iteritems():
group.setValue(original)
ircdb.users.close()
ircdb.ignores.close()
ircdb.channels.close()
SupyTestCase.tearDown(self)
self.irc = None
gc.collect()
def _feedMsg(self, query, timeout=None, to=None, frm=None,
usePrefixChar=True):
if to is None:
to = self.irc.nick
if frm is None:
frm = self.prefix
if timeout is None:
timeout = self.timeout
if self.myVerbose:
print # Extra newline, so it's pretty.
prefixChars = conf.supybot.reply.whenAddressedBy.chars()
if not usePrefixChar and query[0] in prefixChars:
query = query[1:]
msg = ircmsgs.privmsg(to, query, prefix=frm)
if self.myVerbose:
print 'Feeding: %r' % msg
self.irc.feedMsg(msg)
fed = time.time()
response = self.irc.takeMsg()
while response is None and time.time() - fed < timeout:
time.sleep(0.1) # So it doesn't suck up 100% cpu.
drivers.run()
response = self.irc.takeMsg()
if self.myVerbose:
print 'Response: %r' % response
return response
def getMsg(self, query, **kwargs):
return self._feedMsg(query, **kwargs)
def feedMsg(self, query, to=None, frm=None):
"""Just feeds it a message, that's all."""
if to is None:
to = self.irc.nick
if frm is None:
frm = self.prefix
self.irc.feedMsg(ircmsgs.privmsg(to, query, prefix=frm))
# These assertError/assertNoError are somewhat fragile. The proper way to
# do them would be to use a proxy for the irc object and intercept .error.
# But that would be hard, so I don't bother. When this breaks, it'll get
# fixed, but not until then.
def assertError(self, query, **kwargs):
m = self._feedMsg(query, **kwargs)
if m is None:
raise TimeoutError, query
if lastGetHelp not in m.args[1]:
self.failUnless(m.args[1].startswith('Error:'),
'%r did not error: %s' % (query, m.args[1]))
return m
def assertSnarfError(self, query, **kwargs):
return self.assertError(query, usePrefixChar=False, **kwargs)
def assertNotError(self, query, **kwargs):
m = self._feedMsg(query, **kwargs)
if m is None:
raise TimeoutError, query
self.failIf(m.args[1].startswith('Error:'),
'%r errored: %s' % (query, m.args[1]))
self.failIf(lastGetHelp in m.args[1],
'%r returned the help string.' % query)
return m
def assertSnarfNotError(self, query, **kwargs):
return self.assertNotError(query, usePrefixChar=False, **kwargs)
def assertHelp(self, query, **kwargs):
m = self._feedMsg(query, **kwargs)
if m is None:
raise TimeoutError, query
self.failUnless(lastGetHelp in m.args[1],
'%s is not the help (%s)' % (m.args[1], lastGetHelp))
return m
def assertNoResponse(self, query, timeout=0, **kwargs):
m = self._feedMsg(query, timeout=timeout, **kwargs)
self.failIf(m, 'Unexpected response: %r' % m)
return m
def assertSnarfNoResponse(self, query, timeout=0, **kwargs):
return self.assertNoResponse(query, timeout=timeout,
usePrefixChar=False, **kwargs)
def assertResponse(self, query, expectedResponse, **kwargs):
m = self._feedMsg(query, **kwargs)
if m is None:
raise TimeoutError, query
self.assertEqual(m.args[1], expectedResponse,
'%r != %r' % (expectedResponse, m.args[1]))
return m
def assertSnarfResponse(self, query, expectedResponse, **kwargs):
return self.assertResponse(query, expectedResponse,
usePrefixChar=False, **kwargs)
def assertRegexp(self, query, regexp, flags=re.I, **kwargs):
m = self._feedMsg(query, **kwargs)
if m is None:
raise TimeoutError, query
self.failUnless(re.search(regexp, m.args[1], flags),
'%r does not match %r' % (m.args[1], regexp))
return m
def assertSnarfRegexp(self, query, regexp, flags=re.I, **kwargs):
return self.assertRegexp(query, regexp, flags=re.I,
usePrefixChar=False, **kwargs)
def assertNotRegexp(self, query, regexp, flags=re.I, **kwargs):
m = self._feedMsg(query, **kwargs)
if m is None:
raise TimeoutError, query
self.failUnless(re.search(regexp, m.args[1], flags) is None,
'%r matched %r' % (m.args[1], regexp))
return m
def assertSnarfNotRegexp(self, query, regexp, flags=re.I, **kwargs):
return self.assertNotRegexp(query, regexp, flags=re.I,
usePrefixChar=False, **kwargs)
def assertAction(self, query, expectedResponse=None, **kwargs):
m = self._feedMsg(query, **kwargs)
if m is None:
raise TimeoutError, query
self.failUnless(ircmsgs.isAction(m), '%r is not an action.' % m)
if expectedResponse is not None:
s = ircmsgs.unAction(m)
self.assertEqual(s, expectedResponse,
'%r != %r' % (s, expectedResponse))
return m
def assertSnarfAction(self, query, expectedResponse=None, **kwargs):
return self.assertAction(query, expectedResponse=None,
usePrefixChar=False, **kwargs)
def assertActionRegexp(self, query, regexp, flags=re.I, **kwargs):
m = self._feedMsg(query, **kwargs)
if m is None:
raise TimeoutError, query
self.failUnless(ircmsgs.isAction(m))
s = ircmsgs.unAction(m)
self.failUnless(re.search(regexp, s, flags),
'%r does not match %r' % (s, regexp))
def assertSnarfActionRegexp(self, query, regexp, flags=re.I, **kwargs):
return self.assertActionRegexp(query, regexp, flags=re.I,
usePrefixChar=False, **kwargs)
_noTestDoc = ('Admin', 'Channel', 'Config',
'Misc', 'Owner', 'User', 'TestPlugin')
def testDocumentation(self):
if self.__class__ in (PluginTestCase, ChannelPluginTestCase):
return
for cb in self.irc.callbacks:
name = cb.name()
if ((name in self._noTestDoc) and \
not name.lower() in self.__class__.__name__.lower()):
continue
self.failUnless(sys.modules[cb.__class__.__name__].__doc__,
'%s has no module documentation.' % name)
if hasattr(cb, 'isCommandMethod'):
for attr in dir(cb):
if cb.isCommandMethod(attr) and \
attr == callbacks.canonicalName(attr):
self.failUnless(getattr(cb, attr, None).__doc__,
'%s.%s has no help.' % (name, attr))
class ChannelPluginTestCase(PluginTestCase):
    """PluginTestCase variant that feeds messages in a channel context."""
    channel = '#test'
    def setUp(self):
        if self.__class__ in (PluginTestCase, ChannelPluginTestCase):
            return
        PluginTestCase.setUp(self)
        # Join the test channel and consume the expected MODE/WHO replies so
        # they don't pollute the first assertion of a test.
        self.irc.feedMsg(ircmsgs.join(self.channel, prefix=self.prefix))
        m = self.irc.takeMsg()
        self.failIf(m is None, 'No message back from joining channel.')
        self.assertEqual(m.command, 'MODE')
        m = self.irc.takeMsg()
        self.failIf(m is None, 'No message back from joining channel.')
        self.assertEqual(m.command, 'WHO')
    def _feedMsg(self, query, timeout=None, to=None, frm=None, private=False,
                 usePrefixChar=True):
        # Channel messages must be addressed to the bot: prepend the prefix
        # char unless the caller is testing snarfing (usePrefixChar=False).
        if to is None:
            if private:
                to = self.irc.nick
            else:
                to = self.channel
        if frm is None:
            frm = self.prefix
        if timeout is None:
            timeout = self.timeout
        if self.myVerbose:
            print # Newline, just like PluginTestCase.
        prefixChars = conf.supybot.reply.whenAddressedBy.chars()
        if query[0] not in prefixChars and usePrefixChar:
            query = prefixChars[0] + query
        msg = ircmsgs.privmsg(to, query, prefix=frm)
        if self.myVerbose:
            print 'Feeding: %r' % msg
        self.irc.feedMsg(msg)
        fed = time.time()
        response = self.irc.takeMsg()
        while response is None and time.time() - fed < timeout:
            time.sleep(0.1)
            drivers.run()
            response = self.irc.takeMsg()
        if response is not None:
            if response.command == 'PRIVMSG':
                args = list(response.args)
                # Strip off nick: at beginning of response.
                if args[1].startswith(self.nick) or \
                   args[1].startswith(ircutils.nickFromHostmask(self.prefix)):
                    try:
                        args[1] = args[1].split(' ', 1)[1]
                    except IndexError:
                        # Odd.  We'll skip this.
                        pass
                ret = ircmsgs.privmsg(*args)
            else:
                ret = response
        else:
            ret = None
        if self.myVerbose:
            print 'Returning: %r' % ret
        return ret
    def feedMsg(self, query, to=None, frm=None, private=False):
        """Just feeds it a message, that's all."""
        if to is None:
            if private:
                to = self.irc.nick
            else:
                to = self.channel
        if frm is None:
            frm = self.prefix
        self.irc.feedMsg(ircmsgs.privmsg(to, query, prefix=frm))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
|
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
from django.http import HttpResponseRedirect, HttpResponseForbidden, Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.utils.http import base36_to_int
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib import messages
from django.contrib.auth import authenticate
from django.contrib.auth import login as auth_login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from emailconfirmation.models import EmailAddress, EmailConfirmation
# django_openid is an optional dependency: only import its association model
# when the app is installed (get_model returns None otherwise).
association_model = models.get_model("django_openid", "Association")
if association_model is not None:
    from django_openid.models import UserOpenidAssociation
from account.utils import get_default_redirect, user_display
from account.models import OtherServiceInfo
from account.forms import AddEmailForm, ChangeLanguageForm, ChangePasswordForm
from account.forms import ChangeTimezoneForm, LoginForm, ResetPasswordKeyForm
from account.forms import ResetPasswordForm, SetPasswordForm, SignupForm
from account.forms import TwitterForm
def group_and_bridge(kwargs):
    """
    Given kwargs from the view (with view specific keys popped) pull out the
    bridge and fetch group from database.
    """
    group = None
    bridge = kwargs.pop("bridge", None)
    if bridge:
        try:
            group = bridge.get_group(**kwargs)
        except ObjectDoesNotExist:
            raise Http404
    return group, bridge
def group_context(group, bridge):
    """Base template context for group-aware views."""
    # @@@ use bridge
    return dict(group=group)
def login(request, **kwargs):
    """Render and process the login form, optionally associating pending
    session OpenIDs with the freshly logged-in user."""
    # Pop view-specific options before group_and_bridge() consumes the rest.
    form_class = kwargs.pop("form_class", LoginForm)
    template_name = kwargs.pop("template_name", "account/login.html")
    success_url = kwargs.pop("success_url", None)
    associate_openid = kwargs.pop("associate_openid", False)
    openid_success_url = kwargs.pop("openid_success_url", None)
    url_required = kwargs.pop("url_required", False)
    extra_context = kwargs.pop("extra_context", {})
    redirect_field_name = kwargs.pop("redirect_field_name", "next")
    group, bridge = group_and_bridge(kwargs)
    if extra_context is None:
        extra_context = {}
    if success_url is None:
        success_url = get_default_redirect(request, redirect_field_name)
    if request.method == "POST" and not url_required:
        form = form_class(request.POST, group=group)
        if form.login(request):
            if associate_openid and association_model is not None:
                for openid in request.session.get("openids", []):
                    UserOpenidAssociation.objects.get_or_create(
                        user=form.user, openid=openid.openid
                    )
                success_url = openid_success_url or success_url
            messages.add_message(request, messages.SUCCESS,
                ugettext(u"Successfully logged in as %(user)s.") % {
                    "user": user_display(form.user)
                }
            )
            return HttpResponseRedirect(success_url)
    else:
        form = form_class(group=group)
    context = group_context(group, bridge)
    context.update({
        "form": form,
        "url_required": url_required,
        "redirect_field_name": redirect_field_name,
        "redirect_field_value": request.GET.get(redirect_field_name),
    })
    context.update(extra_context)
    return render_to_response(template_name, RequestContext(request, context))
def signup(request, **kwargs):
    """Render and process the signup form; logs the new user in immediately
    unless email verification is enabled."""
    form_class = kwargs.pop("form_class", SignupForm)
    template_name = kwargs.pop("template_name", "account/signup.html")
    redirect_field_name = kwargs.pop("redirect_field_name", "next")
    success_url = kwargs.pop("success_url", None)
    group, bridge = group_and_bridge(kwargs)
    if success_url is None:
        success_url = get_default_redirect(request, redirect_field_name)
    if request.method == "POST":
        form = form_class(request.POST, group=group)
        if form.is_valid():
            credentials = form.save(request=request)
            if settings.ACCOUNT_EMAIL_VERIFICATION:
                # User must confirm their address before logging in.
                return render_to_response("account/verification_sent.html", {
                    "email": form.cleaned_data["email"],
                }, context_instance=RequestContext(request))
            user = authenticate(**credentials)
            auth_login(request, user)
            messages.add_message(request, messages.SUCCESS,
                ugettext("Successfully logged in as %(user)s.") % {
                    "user": user_display(user)
                }
            )
            return HttpResponseRedirect(success_url)
    else:
        form = form_class(group=group)
    context = group_context(group, bridge)
    context.update({
        "form": form,
        "redirect_field_name": redirect_field_name,
        "redirect_field_value": request.GET.get(redirect_field_name),
    })
    return render_to_response(template_name, RequestContext(request, context))
@login_required
def email(request, **kwargs):
    """Manage the logged-in user's email addresses.

    POST "action" values: "add" (new address + confirmation mail),
    "send" (re-send confirmation), "remove", "primary".
    """
    form_class = kwargs.pop("form_class", AddEmailForm)
    template_name = kwargs.pop("template_name", "account/email.html")
    group, bridge = group_and_bridge(kwargs)
    if request.method == "POST" and request.user.is_authenticated():
        if request.POST["action"] == "add":
            add_email_form = form_class(request.user, request.POST)
            if add_email_form.is_valid():
                add_email_form.save()
                messages.add_message(request, messages.INFO,
                    ugettext(u"Confirmation email sent to %(email)s") % {
                        "email": add_email_form.cleaned_data["email"]
                    }
                )
                add_email_form = form_class() # @@@
            # on invalid input the bound form falls through so errors render
        else:
            # Non-"add" actions render a fresh, unbound form.
            add_email_form = form_class()
            if request.POST["action"] == "send":
                email = request.POST["email"]
                try:
                    email_address = EmailAddress.objects.get(
                        user=request.user,
                        email=email,
                    )
                    # NOTE(review): the "sent" message is queued before the
                    # confirmation is actually dispatched below.
                    messages.add_message(request, messages.INFO,
                        ugettext("Confirmation email sent to %(email)s") % {
                            "email": email,
                        }
                    )
                    EmailConfirmation.objects.send_confirmation(email_address)
                except EmailAddress.DoesNotExist:
                    pass
            elif request.POST["action"] == "remove":
                email = request.POST["email"]
                try:
                    email_address = EmailAddress.objects.get(
                        user=request.user,
                        email=email
                    )
                    email_address.delete()
                    messages.add_message(request, messages.SUCCESS,
                        ugettext("Removed email address %(email)s") % {
                            "email": email,
                        }
                    )
                except EmailAddress.DoesNotExist:
                    pass
            elif request.POST["action"] == "primary":
                # NOTE(review): unlike "send"/"remove", a missing address here
                # raises DoesNotExist uncaught -- confirm intended.
                email = request.POST["email"]
                email_address = EmailAddress.objects.get(
                    user=request.user,
                    email=email,
                )
                email_address.set_as_primary()
    else:
        add_email_form = form_class()
    ctx = group_context(group, bridge)
    ctx.update({
        "add_email_form": add_email_form,
    })
    return render_to_response(template_name, RequestContext(request, ctx))
@login_required
def password_change(request, **kwargs):
    """Let a logged-in user change an existing password."""
    form_class = kwargs.pop("form_class", ChangePasswordForm)
    template_name = kwargs.pop("template_name", "account/password_change.html")
    group, bridge = group_and_bridge(kwargs)
    # Accounts without a password belong on the set-password view.
    if not request.user.password:
        return HttpResponseRedirect(reverse("acct_passwd_set"))
    if request.method != "POST":
        password_change_form = form_class(request.user)
    else:
        password_change_form = form_class(request.user, request.POST)
        if password_change_form.is_valid():
            password_change_form.save()
            messages.add_message(request, messages.SUCCESS,
                ugettext(u"Password successfully changed.")
            )
            password_change_form = form_class(request.user)
    context = group_context(group, bridge)
    context.update({
        "password_change_form": password_change_form,
    })
    return render_to_response(template_name, RequestContext(request, context))
@login_required
def password_set(request, **kwargs):
    """Let a logged-in user who has no password set one."""
    form_class = kwargs.pop("form_class", SetPasswordForm)
    template_name = kwargs.pop("template_name", "account/password_set.html")
    group, bridge = group_and_bridge(kwargs)
    # Accounts that already have a password use the change-password view.
    if request.user.password:
        return HttpResponseRedirect(reverse("acct_passwd"))
    if request.method != "POST":
        password_set_form = form_class(request.user)
    else:
        password_set_form = form_class(request.user, request.POST)
        if password_set_form.is_valid():
            password_set_form.save()
            messages.add_message(request, messages.SUCCESS,
                ugettext(u"Password successfully set.")
            )
            return HttpResponseRedirect(reverse("acct_passwd"))
    context = group_context(group, bridge)
    context.update({
        "password_set_form": password_set_form,
    })
    return render_to_response(template_name, RequestContext(request, context))
@login_required
def password_delete(request, **kwargs):
    """Let an OpenID-backed user drop their password entirely."""
    template_name = kwargs.pop("template_name", "account/password_delete.html")
    # Only allowed when the account has a password AND at least one OpenID to
    # fall back on; otherwise the user would lock themselves out.
    has_openids = bool(getattr(request, "openids", None))
    if not request.user.password or not has_openids:
        return HttpResponseForbidden()
    group, bridge = group_and_bridge(kwargs)
    if request.method == "POST":
        request.user.password = u""
        request.user.save()
        return HttpResponseRedirect(reverse("acct_passwd_delete_done"))
    context = group_context(group, bridge)
    return render_to_response(template_name, RequestContext(request, context))
def password_reset(request, **kwargs):
    """Email the user a password-reset link."""
    form_class = kwargs.pop("form_class", ResetPasswordForm)
    template_name = kwargs.pop("template_name", "account/password_reset.html")
    group, bridge = group_and_bridge(kwargs)
    ctx = group_context(group, bridge)
    if request.method == "POST":
        reset_form = form_class(request.POST)
        if reset_form.is_valid():
            email = reset_form.save()
            redirect_to = (
                bridge.reverse("acct_passwd_reset_done", group)
                if group else reverse("acct_passwd_reset_done")
            )
            return HttpResponseRedirect(redirect_to)
    else:
        reset_form = form_class()
    ctx.update({
        "password_reset_form": reset_form,
    })
    return render_to_response(template_name, RequestContext(request, ctx))
def password_reset_done(request, **kwargs):
    """Confirmation page shown after a reset email has been sent."""
    template_name = kwargs.pop("template_name", "account/password_reset_done.html")
    group, bridge = group_and_bridge(kwargs)
    return render_to_response(
        template_name,
        RequestContext(request, group_context(group, bridge)),
    )
def password_reset_from_key(request, uidb36, key, **kwargs):
    """
    Let a user pick a new password from an emailed reset link.

    ``uidb36`` is the user's primary key in base 36; ``key`` is the token
    produced by ``token_generator``. An invalid token renders the template
    with ``token_fail`` set instead of the form.
    """
    # BUG FIX: these must be pop()'d (not get()'d) like every other view in
    # this module, otherwise they stay in kwargs and leak into
    # bridge.get_group(**kwargs) inside group_and_bridge().
    form_class = kwargs.pop("form_class", ResetPasswordKeyForm)
    template_name = kwargs.pop("template_name", "account/password_reset_from_key.html")
    token_generator = kwargs.pop("token_generator", default_token_generator)
    group, bridge = group_and_bridge(kwargs)
    ctx = group_context(group, bridge)
    # pull out user
    try:
        uid_int = base36_to_int(uidb36)
    except ValueError:
        raise Http404
    user = get_object_or_404(User, id=uid_int)
    if token_generator.check_token(user, key):
        if request.method == "POST":
            password_reset_key_form = form_class(request.POST, user=user, temp_key=key)
            if password_reset_key_form.is_valid():
                password_reset_key_form.save()
                messages.add_message(request, messages.SUCCESS,
                    ugettext(u"Password successfully changed.")
                )
                # Hide the form once the password has been changed.
                password_reset_key_form = None
        else:
            password_reset_key_form = form_class()
        ctx.update({
            "form": password_reset_key_form,
        })
    else:
        ctx.update({
            "token_fail": True,
        })
    return render_to_response(template_name, RequestContext(request, ctx))
@login_required
def timezone_change(request, **kwargs):
    """Let the user update their timezone preference."""
    form_class = kwargs.pop("form_class", ChangeTimezoneForm)
    template_name = kwargs.pop("template_name", "account/timezone_change.html")
    group, bridge = group_and_bridge(kwargs)
    if request.method != "POST":
        form = form_class(request.user)
    else:
        form = form_class(request.user, request.POST)
        if form.is_valid():
            form.save()
            messages.add_message(request, messages.SUCCESS,
                ugettext(u"Timezone successfully updated.")
            )
    context = group_context(group, bridge)
    context.update({
        "form": form,
    })
    return render_to_response(template_name, RequestContext(request, context))
@login_required
def language_change(request, **kwargs):
    """Let the user switch their preferred language, then return them to the
    page they came from."""
    form_class = kwargs.pop("form_class", ChangeLanguageForm)
    template_name = kwargs.pop("template_name", "account/language_change.html")
    group, bridge = group_and_bridge(kwargs)
    if request.method != "POST":
        form = form_class(request.user)
    else:
        form = form_class(request.user, request.POST)
        if form.is_valid():
            form.save()
            messages.add_message(request, messages.SUCCESS,
                ugettext(u"Language successfully updated.")
            )
            # NOTE(review): HTTP_REFERER may be absent, making this a
            # redirect to None -- same as the original behavior; confirm.
            referer = request.META.get("HTTP_REFERER", None)
            return HttpResponseRedirect(referer)
    context = group_context(group, bridge)
    context.update({
        "form": form,
    })
    return render_to_response(template_name, RequestContext(request, context))
@login_required
def other_services(request, **kwargs):
    """Manage linked third-party (Twitter) credentials for the user."""
    from microblogging.utils import twitter_verify_credentials
    template_name = kwargs.pop("template_name", "account/other_services.html")
    group, bridge = group_and_bridge(kwargs)
    twitter_form = TwitterForm(request.user)
    twitter_authorized = False
    if request.method != "POST":
        # Display-only: check whether stored credentials still verify.
        from microblogging.utils import twitter_account_for_user
        twitter_account = twitter_account_for_user(request.user)
        twitter_authorized = twitter_verify_credentials(twitter_account)
        twitter_form = TwitterForm(request.user)
    else:
        twitter_form = TwitterForm(request.user, request.POST)
        if request.POST["actionType"] == "saveTwitter":
            if twitter_form.is_valid():
                from microblogging.utils import twitter_account_raw
                twitter_account = twitter_account_raw(
                    request.POST["username"], request.POST["password"])
                twitter_authorized = twitter_verify_credentials(
                    twitter_account)
                if twitter_authorized:
                    twitter_form.save()
                    messages.add_message(request, messages.SUCCESS,
                        ugettext(u"Successfully authenticated.")
                    )
                else:
                    messages.add_message(request, messages.ERROR,
                        ugettext("Twitter authentication failed")
                    )
    context = group_context(group, bridge)
    context.update({
        "twitter_form": twitter_form,
        "twitter_authorized": twitter_authorized,
    })
    return render_to_response(template_name, RequestContext(request, context))
@login_required
def other_services_remove(request, **kwargs):
    """
    Delete the user's stored Twitter credentials and redirect back to the
    other-services page.
    """
    # BUG FIX: the original signature was (request) only, but the body
    # referenced `kwargs`, raising NameError at runtime. Accept **kwargs
    # like every other view so group_and_bridge() works.
    group, bridge = group_and_bridge(kwargs)
    # @@@ this is a bit coupled
    OtherServiceInfo.objects.filter(user=request.user).filter(
        Q(key="twitter_user") | Q(key="twitter_password")
    ).delete()
    messages.add_message(request, messages.SUCCESS,
        ugettext("Removed twitter account information successfully.")
    )
    return HttpResponseRedirect(reverse("acct_other_services"))
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.rebol
~~~~~~~~~~~~~~~~~~~~~
Lexers for the REBOL and related languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Generic, Whitespace
__all__ = ['RebolLexer', 'RedLexer']
class RebolLexer(RegexLexer):
    """
    A `REBOL <http://www.rebol.com/>`_ lexer.

    .. versionadded:: 1.1
    """
    name = 'REBOL'
    aliases = ['rebol']
    filenames = ['*.r', '*.r3', '*.reb']
    mimetypes = ['text/x-rebol']

    flags = re.IGNORECASE | re.MULTILINE

    # REBOL caret escape, e.g. ^(00FF)
    escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)'

    def word_callback(lexer, match):
        # Classify a bare word by matching it against keyword groups;
        # the order of the checks below is significant.
        word = match.group()

        if re.match(".*:$", word):
            yield match.start(), Generic.Subheading, word
        elif re.match(
            r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|'
            r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|'
            r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|'
            r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|'
            r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|'
            r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|'
            r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|'
            r'while|compress|decompress|secure|open|close|read|read-io|'
            r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|'
            r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|'
            r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|'
            r'browse|launch|stats|get-modes|set-modes|to-local-file|'
            r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|'
            r'hide|draw|show|size-text|textinfo|offset-to-caret|'
            r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|'
            r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|'
            r'dsa-make-key|dsa-generate-key|dsa-make-signature|'
            r'dsa-verify-signature|rsa-make-key|rsa-generate-key|'
            r'rsa-encrypt)$', word):
            yield match.start(), Name.Builtin, word
        elif re.match(
            r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|'
            r'minimum|maximum|negate|complement|absolute|random|head|tail|'
            r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|'
            r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|'
            r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|'
            r'copy)$', word):
            yield match.start(), Name.Function, word
        elif re.match(
            r'(error|source|input|license|help|install|echo|Usage|with|func|'
            r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|'
            r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|'
            r'remold|charset|array|replace|move|extract|forskip|forall|alter|'
            r'first+|also|take|for|forever|dispatch|attempt|what-dir|'
            r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|'
            r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|'
            r'build-tag|process-source|build-markup|decode-cgi|read-cgi|'
            r'write-user|save-user|set-user-name|protect-system|parse-xml|'
            r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|'
            r'scroll-para|get-face|alert|set-face|uninstall|unfocus|'
            r'request-dir|center-face|do-events|net-error|decode-url|'
            r'parse-header|parse-header-date|parse-email-addrs|import-email|'
            r'send|build-attach-body|resend|show-popup|hide-popup|open-events|'
            r'find-key-face|do-face|viewtop|confine|find-window|'
            r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|'
            r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|'
            r'read-thru|load-thru|do-thru|launch-thru|load-image|'
            r'request-download|do-face-alt|set-font|set-para|get-style|'
            r'set-style|make-face|stylize|choose|hilight-text|hilight-all|'
            r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|'
            r'resize-face|load-stock|load-stock-block|notify|request|flash|'
            r'request-color|request-pass|request-text|request-list|'
            r'request-date|request-file|dbug|editor|link-relative-path|'
            r'emailer|parse-error)$', word):
            yield match.start(), Keyword.Namespace, word
        elif re.match(
            r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|'
            r'return|exit|break)$', word):
            yield match.start(), Name.Exception, word
        elif re.match('REBOL$', word):
            yield match.start(), Generic.Heading, word
        elif re.match("to-.*", word):
            yield match.start(), Keyword, word
        elif re.match('(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$',
                      word):
            yield match.start(), Operator, word
        elif re.match(".*\?$", word):
            yield match.start(), Keyword, word
        elif re.match(".*\!$", word):
            yield match.start(), Keyword.Type, word
        elif re.match("'.*", word):
            yield match.start(), Name.Variable.Instance, word # lit-word
        elif re.match("#.*", word):
            yield match.start(), Name.Label, word # issue
        elif re.match("%.*", word):
            yield match.start(), Name.Decorator, word # file
        else:
            yield match.start(), Name.Variable, word

    tokens = {
        'root': [
            (r'[^R]+', Comment),
            (r'REBOL\s+\[', Generic.Strong, 'script'),
            (r'R', Comment)
        ],
        'script': [
            (r'\s+', Text),
            (r'#"', String.Char, 'char'),
            (r'#\{[0-9a-f]*\}', Number.Hex),
            (r'2#\{', Number.Hex, 'bin2'),
            (r'64#\{[0-9a-z+/=\s]*\}', Number.Hex),
            (r'"', String, 'string'),
            (r'\{', String, 'string2'),
            (r';#+.*\n', Comment.Special),
            (r';\*+.*\n', Comment.Preproc),
            (r';.*\n', Comment),
            (r'%"', Name.Decorator, 'stringFile'),
            (r'%[^(^{")\s\[\]]+', Name.Decorator),
            (r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money
            (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time
            (r'\d+[\-/][0-9a-z]+[\-/]\d+(\/\d+\:\d+((\:\d+)?'
             r'([.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date
            (r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple
            (r'\d+X\d+', Keyword.Constant), # pair
            (r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float),
            (r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float),
            (r'[+-]?\d+(\'\d+)?', Number),
            (r'[\[\]()]', Generic.Strong),
            (r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url
            (r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url
            (r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email
            (r'comment\s"', Comment, 'commentString1'),
            (r'comment\s\{', Comment, 'commentString2'),
            (r'comment\s\[', Comment, 'commentBlock'),
            (r'comment\s[^(\s{"\[]+', Comment),
            (r'/[^(^{")\s/[\]]*', Name.Attribute),
            (r'([^(^{")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
            (r'<[\w:.-]*>', Name.Tag),
            (r'<[^(<>\s")]+', Name.Tag, 'tag'),
            (r'([^(^{")\s]+)', Text),
        ],
        'string': [
            (r'[^(^")]+', String),
            (escape_re, String.Escape),
            (r'[(|)]+', String),
            (r'\^.', String.Escape),
            (r'"', String, '#pop'),
        ],
        'string2': [
            (r'[^(^{})]+', String),
            (escape_re, String.Escape),
            (r'[(|)]+', String),
            (r'\^.', String.Escape),
            (r'\{', String, '#push'),
            (r'\}', String, '#pop'),
        ],
        'stringFile': [
            (r'[^(^")]+', Name.Decorator),
            (escape_re, Name.Decorator),
            (r'\^.', Name.Decorator),
            (r'"', Name.Decorator, '#pop'),
        ],
        'char': [
            (escape_re + '"', String.Char, '#pop'),
            (r'\^."', String.Char, '#pop'),
            (r'."', String.Char, '#pop'),
        ],
        'tag': [
            (escape_re, Name.Tag),
            (r'"', Name.Tag, 'tagString'),
            (r'[^(<>\r\n")]+', Name.Tag),
            (r'>', Name.Tag, '#pop'),
        ],
        'tagString': [
            (r'[^(^")]+', Name.Tag),
            (escape_re, Name.Tag),
            (r'[(|)]+', Name.Tag),
            (r'\^.', Name.Tag),
            (r'"', Name.Tag, '#pop'),
        ],
        'tuple': [
            (r'(\d+\.)+', Keyword.Constant),
            (r'\d+', Keyword.Constant, '#pop'),
        ],
        'bin2': [
            (r'\s+', Number.Hex),
            (r'([01]\s*){8}', Number.Hex),
            (r'\}', Number.Hex, '#pop'),
        ],
        'commentString1': [
            (r'[^(^")]+', Comment),
            (escape_re, Comment),
            (r'[(|)]+', Comment),
            (r'\^.', Comment),
            (r'"', Comment, '#pop'),
        ],
        'commentString2': [
            (r'[^(^{})]+', Comment),
            (escape_re, Comment),
            (r'[(|)]+', Comment),
            (r'\^.', Comment),
            (r'\{', Comment, '#push'),
            (r'\}', Comment, '#pop'),
        ],
        'commentBlock': [
            (r'\[', Comment, '#push'),
            (r'\]', Comment, '#pop'),
            (r'"', Comment, "commentString1"),
            (r'\{', Comment, "commentString2"),
            (r'[^(\[\]"{)]+', Comment),
        ],
    }

    def analyse_text(text):
        """
        Check if code contains REBOL header and so it is probably not R code
        """
        if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
            # The code starts with REBOL header
            return 1.0
        # BUG FIX: the '[' below was unescaped (r'\s*REBOL\s*['), which is an
        # unterminated character class and made re.search raise re.error
        # whenever this branch was reached.
        elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE):
            # The code contains REBOL header but also some text before it
            return 0.5
class RedLexer(RegexLexer):
    """
    A `Red-language <http://www.red-lang.org/>`_ lexer.

    .. versionadded:: 2.0
    """
    name = 'Red'
    aliases = ['red', 'red/system']
    filenames = ['*.red', '*.reds']
    mimetypes = ['text/x-red', 'text/x-red-system']

    flags = re.IGNORECASE | re.MULTILINE

    # Red caret escape, e.g. ^(00FF)
    escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)'

    def word_callback(lexer, match):
        # Classify a bare word against keyword groups; check order matters.
        word = match.group()

        if re.match(".*:$", word):
            yield match.start(), Generic.Subheading, word
        elif re.match(r'(if|unless|either|any|all|while|until|loop|repeat|'
                      r'foreach|forall|func|function|does|has|switch|'
                      r'case|reduce|compose|get|set|print|prin|equal\?|'
                      r'not-equal\?|strict-equal\?|lesser\?|greater\?|lesser-or-equal\?|'
                      r'greater-or-equal\?|same\?|not|type\?|stats|'
                      r'bind|union|replace|charset|routine)$', word):
            yield match.start(), Name.Builtin, word
        elif re.match(r'(make|random|reflect|to|form|mold|absolute|add|divide|multiply|negate|'
                      r'power|remainder|round|subtract|even\?|odd\?|and~|complement|or~|xor~|'
                      r'append|at|back|change|clear|copy|find|head|head\?|index\?|insert|'
                      r'length\?|next|pick|poke|remove|reverse|select|sort|skip|swap|tail|tail\?|'
                      r'take|trim|create|close|delete|modify|open|open\?|query|read|rename|'
                      r'update|write)$', word):
            yield match.start(), Name.Function, word
        elif re.match(r'(yes|on|no|off|true|false|tab|cr|lf|newline|escape|slash|sp|space|null|'
                      r'none|crlf|dot|null-byte)$', word):
            yield match.start(), Name.Builtin.Pseudo, word
        elif re.match(r'(#system-global|#include|#enum|#define|#either|#if|#import|#export|'
                      r'#switch|#default|#get-definition)$', word):
            yield match.start(), Keyword.Namespace, word
        elif re.match(r'(system|halt|quit|quit-return|do|load|q|recycle|call|run|ask|parse|'
                      r'raise-error|return|exit|break|alias|push|pop|probe|\?\?|spec-of|body-of|'
                      r'quote|forever)$', word):
            yield match.start(), Name.Exception, word
        elif re.match(r'(action\?|block\?|char\?|datatype\?|file\?|function\?|get-path\?|zero\?|'
                      r'get-word\?|integer\?|issue\?|lit-path\?|lit-word\?|logic\?|native\?|'
                      r'op\?|paren\?|path\?|refinement\?|set-path\?|set-word\?|string\?|unset\?|'
                      r'any-struct\?|none\?|word\?|any-series\?)$', word):
            yield match.start(), Keyword, word
        elif re.match(r'(JNICALL|stdcall|cdecl|infix)$', word):
            yield match.start(), Keyword.Namespace, word
        elif re.match("to-.*", word):
            yield match.start(), Keyword, word
        elif re.match('(\+|-\*\*|-|\*\*|//|/|\*|and|or|xor|=\?|===|==|=|<>|<=|>=|'
                      '<<<|>>>|<<|>>|<|>%)$', word):
            yield match.start(), Operator, word
        elif re.match(".*\!$", word):
            yield match.start(), Keyword.Type, word
        elif re.match("'.*", word):
            yield match.start(), Name.Variable.Instance, word # lit-word
        elif re.match("#.*", word):
            yield match.start(), Name.Label, word # issue
        elif re.match("%.*", word):
            yield match.start(), Name.Decorator, word # file
        elif re.match(":.*", word):
            yield match.start(), Generic.Subheading, word # get-word
        else:
            yield match.start(), Name.Variable, word

    # State machine: everything outside a Red [...] / Red/System [...] header
    # is treated as comment; 'script' handles the actual source.
    tokens = {
        'root': [
            (r'[^R]+', Comment),
            (r'Red/System\s+\[', Generic.Strong, 'script'),
            (r'Red\s+\[', Generic.Strong, 'script'),
            (r'R', Comment)
        ],
        'script': [
            (r'\s+', Text),
            (r'#"', String.Char, 'char'),
            (r'#\{[0-9a-f\s]*\}', Number.Hex),
            (r'2#\{', Number.Hex, 'bin2'),
            (r'64#\{[0-9a-z+/=\s]*\}', Number.Hex),
            (r'([0-9a-f]+)(h)((\s)|(?=[\[\]{}"()]))',
             bygroups(Number.Hex, Name.Variable, Whitespace)),
            (r'"', String, 'string'),
            (r'\{', String, 'string2'),
            (r';#+.*\n', Comment.Special),
            (r';\*+.*\n', Comment.Preproc),
            (r';.*\n', Comment),
            (r'%"', Name.Decorator, 'stringFile'),
            (r'%[^(^{")\s\[\]]+', Name.Decorator),
            (r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money
            (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time
            (r'\d+[\-/][0-9a-z]+[\-/]\d+(/\d+:\d+((:\d+)?'
             r'([\.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date
            (r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple
            (r'\d+X\d+', Keyword.Constant), # pair
            (r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float),
            (r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float),
            (r'[+-]?\d+(\'\d+)?', Number),
            (r'[\[\]()]', Generic.Strong),
            (r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url
            (r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url
            (r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email
            (r'comment\s"', Comment, 'commentString1'),
            (r'comment\s\{', Comment, 'commentString2'),
            (r'comment\s\[', Comment, 'commentBlock'),
            (r'comment\s[^(\s{"\[]+', Comment),
            (r'/[^(^{^")\s/[\]]*', Name.Attribute),
            (r'([^(^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
            (r'<[\w:.-]*>', Name.Tag),
            (r'<[^(<>\s")]+', Name.Tag, 'tag'),
            (r'([^(^{")\s]+)', Text),
        ],
        'string': [
            (r'[^(^")]+', String),
            (escape_re, String.Escape),
            (r'[(|)]+', String),
            (r'\^.', String.Escape),
            (r'"', String, '#pop'),
        ],
        'string2': [
            (r'[^(^{})]+', String),
            (escape_re, String.Escape),
            (r'[(|)]+', String),
            (r'\^.', String.Escape),
            (r'\{', String, '#push'),
            (r'\}', String, '#pop'),
        ],
        'stringFile': [
            (r'[^(^")]+', Name.Decorator),
            (escape_re, Name.Decorator),
            (r'\^.', Name.Decorator),
            (r'"', Name.Decorator, '#pop'),
        ],
        'char': [
            (escape_re + '"', String.Char, '#pop'),
            (r'\^."', String.Char, '#pop'),
            (r'."', String.Char, '#pop'),
        ],
        'tag': [
            (escape_re, Name.Tag),
            (r'"', Name.Tag, 'tagString'),
            (r'[^(<>\r\n")]+', Name.Tag),
            (r'>', Name.Tag, '#pop'),
        ],
        'tagString': [
            (r'[^(^")]+', Name.Tag),
            (escape_re, Name.Tag),
            (r'[(|)]+', Name.Tag),
            (r'\^.', Name.Tag),
            (r'"', Name.Tag, '#pop'),
        ],
        'tuple': [
            (r'(\d+\.)+', Keyword.Constant),
            (r'\d+', Keyword.Constant, '#pop'),
        ],
        'bin2': [
            (r'\s+', Number.Hex),
            (r'([01]\s*){8}', Number.Hex),
            (r'\}', Number.Hex, '#pop'),
        ],
        'commentString1': [
            (r'[^(^")]+', Comment),
            (escape_re, Comment),
            (r'[(|)]+', Comment),
            (r'\^.', Comment),
            (r'"', Comment, '#pop'),
        ],
        'commentString2': [
            (r'[^(^{})]+', Comment),
            (escape_re, Comment),
            (r'[(|)]+', Comment),
            (r'\^.', Comment),
            (r'\{', Comment, '#push'),
            (r'\}', Comment, '#pop'),
        ],
        'commentBlock': [
            (r'\[', Comment, '#push'),
            (r'\]', Comment, '#pop'),
            (r'"', Comment, "commentString1"),
            (r'\{', Comment, "commentString2"),
            (r'[^(\[\]"{)]+', Comment),
        ],
    }
|
|
'''
PIPELINE
BETA 1.0.0
Project manager for Maya
Author: Lior Ben Horin
All rights reserved (c) 2016
liorbenhorin.ghost.io
liorbenhorin@gmail.com
---------------------------------------------------------------------------------------------
install:
Place the pipeline folder in your maya scripts folder. Run these lines in a python tab in the script editor:
from pipeline import pipeline
reload(pipeline)
pipeline.show()
---------------------------------------------------------------------------------------------
You are using PIPELINE at your own risk.
Things can always go wrong, and under no circumstances will the author
be responsible for any damages caused by the use of this software.
By using this beta program you hereby agree to allow it to collect
and send usage data to the author.
---------------------------------------------------------------------------------------------
The coded instructions, statements, computer programs, and/or related
material (collectively the "Data") in these files are subject to the terms
and conditions defined by
Creative Commons Attribution-NonCommercial-NoDerivs 4.0 Unported License:
http://creativecommons.org/licenses/by-nc-nd/4.0/
http://creativecommons.org/licenses/by-nc-nd/4.0/legalcode
http://creativecommons.org/licenses/by-nc-nd/4.0/legalcode.txt
---------------------------------------------------------------------------------------------
'''
import maya.cmds as cmds
import pymel.core as pm
import os
import maya.mel as mel
import string
import random
import logging
import pipeline.dialogue as dlg
reload(dlg)
import pipeline.modules.files as files
reload(files)
def new_scene():
    """Offer to save pending changes, then force-open a fresh empty scene."""
    checkState()
    return cmds.file(f=True, new=True)
def rewind():
    """Jump the timeline to frame 1 and pin the playback start there."""
    start_frame = 1
    cmds.currentTime(start_frame)
    cmds.playbackOptions(minTime=start_frame)
def save_scene_as(path = None, file_name = None):
    """Rename the scene to path/file_name and save it as mayaAscii.

    Returns the saved file path, or None when *path* does not exist or
    *file_name* is empty.
    """
    if os.path.exists(path) and file_name:
        cmds.file(rename=os.path.join(path, file_name))
        return cmds.file(s=True, type="mayaAscii")
def userPrefDir():
    """Return the path of Maya's user preferences directory."""
    prefs_dir = cmds.internalVar(userPrefDir=True)
    return prefs_dir
def open_scene(path = None):
    """Open *path* if it exists, prompting to save unsaved work first and
    recording the file in Maya's recent-files list."""
    if not os.path.exists(path):
        return None
    checkState()
    insert_recent_file(path)
    opened = cmds.file(path, o=True, f=True, esn=True)
    logging.info("{}".format(opened))
    return opened
def insert_recent_file(path):
    """Append *path* to Maya's RecentFilesList optionVar."""
    entry = ('RecentFilesList', path)
    cmds.optionVar(stringValueAppend=entry)
def current_open_file():
    """Return the full path (scene name) of the currently open file."""
    return cmds.file(sn=True, q=True)
def checkState():
    """If the scene has unsaved changes, offer to save before proceeding."""
    if not cmds.file(q=True, modified=True):
        return
    # Maya's native save entry point (with its own dialogs) -- no need to
    # write our own.
    if dlg.warning("warning", "Scene Not Saved", "Scene Not Saved, Do you want to save it first?"):
        cmds.SaveScene()
def reference_scene(path = None):
    """Reference *path* into the scene under a namespace derived from its
    file name. Returns None when the file does not exist."""
    if os.path.exists(path):
        namespace = files.file_name_no_extension(files.file_name(path))
        return cmds.file(path, r=True, f=True, ns=namespace, esn=False)
def import_scene(path = None):
    """Import *path* into the scene under a namespace derived from its file
    name. Returns None when the file does not exist."""
    if os.path.exists(path):
        namespace = files.file_name_no_extension(files.file_name(path))
        return cmds.file(path, i=True, f=True, ns=namespace, esn=False)
def list_referenced_files():
    """Return [[file_path, node_type], ...] for every external file entry the
    file path editor reports with status "1" (resolved on disk)."""
    results = []
    links = cmds.filePathEditor(query=True, listDirectories="")
    # NOTE(review): filePathEditor may return None when there is nothing to
    # list -- the loop below would then raise; confirm against Maya docs.
    for link in links:
        pairs = cmds.filePathEditor(query=True, listFiles=link, withAttribute=True, status=True)
        '''
        paris: list of strings ["file_name node status ...", "file_name node status ...",...]
        we need to make this large list of ugly strings (good inforamtion seperated by white space) into a dictionry we can use
        '''
        l = len(pairs)
        # pairs is a flat [name, node, status, ...] triplet list; this is
        # Python 2 integer division (use // if ever ported to Python 3).
        items = l/3
        order = {}
        index = 0
        '''
        order: dict of {node: [file_name, status],...}
        '''
        for i in range(0,items):
            order[pairs[index+1]] = [os.path.join(link,pairs[index]),pairs[index+1],pairs[index+2]]
            index = index + 3
        for key in order:
            # Collect entries whose status is "1" (file found on disk),
            # recording the full path and the node's type.
            if order[key][2] == "1":
                results.append([order[key][0],cmds.nodeType(order[key][1])])
    return results
def relink_pathes(project_path = None):
    """Repath every broken (status "0") external file reference by searching
    for a matching file under *project_path*; returns the repathed nodes."""
    results = []
    links = cmds.filePathEditor(query=True, listDirectories="")
    for link in links:
        pairs = cmds.filePathEditor(query=True, listFiles=link, withAttribute=True, status=True)
        '''
        paris: list of strings ["file_name node status ...", "file_name node status ...",...]
        we need to make this large list of ugly strings (good inforamtion seperated by white space) into a dictionry we can use
        '''
        l = len(pairs)
        # Triplets of [name, node, status]; Python 2 integer division.
        items = l/3
        order = {}
        index = 0
        '''
        order: dict of {node: [file_name, status],...}
        '''
        for i in range(0,items):
            order[pairs[index+1]] = [pairs[index],pairs[index+2]]
            index = index + 3
        for key in order:
            # for each item in the dict, if the status is 0 (missing), repath it
            if order[key][1] == "0":
                if repath(key,order[key][0],project_path):
                    results.append(key)
    return results
def repath(node, file, project_path):
    """Search *project_path* recursively for *file* and repath *node* to it.

    Matches either the exact file name, or — to support image sequences
    that use "#" frame padding — any file that shares both the base name
    (text before the first ".") and the extension.

    Args:
        node: The dependency node to repath.
        file: The file name currently assigned to the node.
        project_path: Root directory to search recursively.

    Returns:
        The result of ``cmds.filePathEditor`` when a match is found,
        otherwise ``None``.
    """
    # Hoist the loop-invariant pieces of the target name out of the walk.
    target_base = file.split(".")[0]
    target_ext = file.split(".")[-1]
    matches = []
    for root, dirnames, filenames in os.walk(project_path):
        for candidate in filenames:
            if candidate == file:
                matches.append([root, os.path.join(root, candidate)])
            # Frame-padded sequences will not match by full name, so
            # fall back to matching base name + extension.
            elif candidate.split(".")[0] == target_base:
                if candidate.split(".")[-1] == target_ext:
                    matches.append([root, os.path.join(root, candidate)])
    if len(matches) > 0:
        # Repath to the directory of the first match found.
        return cmds.filePathEditor(node, repath=matches[0][0])
    return None
def snapshot(path = None, width = 96, height = 96):
    """Write a single-frame PNG snapshot of the current frame to *path*.

    Temporarily switches the render globals image format to PNG,
    playblasts the current frame, then restores the previous format.

    Args:
        path: Destination image file.
        width: Snapshot width in pixels.
        height: Snapshot height in pixels.

    Returns:
        The image path on success, ``False`` if no file was written.
    """
    current_image_format = cmds.getAttr("defaultRenderGlobals.imageFormat")
    cmds.setAttr("defaultRenderGlobals.imageFormat", 32)  # 32 == *.png
    try:
        cmds.playblast(cf=path, fmt="image",
                       frame=cmds.currentTime(query=True),
                       orn=False, wh=[width, height], p=100, v=False)
    finally:
        # Restore the user's image format even if the playblast fails
        # (the original left PNG active on error).
        cmds.setAttr("defaultRenderGlobals.imageFormat", current_image_format)
    if os.path.isfile(path):
        return path
    else:
        return False
def playblast_snapshot(path = None,format = None, compression = None, hud = None, offscreen = None, range=None, scale = None):
    """Playblast a single mid-range frame to *path* as a PNG image.

    The frame written is the middle of *range*; when *range* is None it
    falls back to the timeline selection, then to the full playback range.

    Args:
        path: Destination image file.
        format: Unused here; kept for signature parity with playblast().
        compression: Unused here; kept for signature parity.
        hud: Show the heads-up display in the capture.
        offscreen: Render the capture offscreen.
        range: Optional [start, end] frame pair.
        scale: Percentage scale of the captured image.
    """
    current_image_format = cmds.getAttr("defaultRenderGlobals.imageFormat")
    cmds.setAttr("defaultRenderGlobals.imageFormat", 32)  # 32 == *.png
    if range is None:
        range = playback_selection_range()
        # print() call form is valid on both Python 2 and 3; the bare
        # "print range" statement broke Python 3.
        print(range)
        if range is None:
            start = cmds.playbackOptions( q=True,min=True )
            end = cmds.playbackOptions( q=True,max=True )
            range = [start, end]
    try:
        cmds.playblast(frame=int((range[0] + range[1]) / 2), cf=path,
                       fmt="image", orn=hud, os=offscreen,
                       wh=scene_resolution(), p=scale, v=False)
    finally:
        # Restore the user's image format even if the playblast fails.
        cmds.setAttr("defaultRenderGlobals.imageFormat", current_image_format)
def playblast(path = None,format = None, compression = None, hud = None, offscreen = None, range=None, scale = None):
    """Playblast the scene to a movie file at *path*.

    When *range* is None the timeline selection is used, falling back to
    the full playback range.  Audio comes from the active sound node.

    Args:
        path: Destination movie file.
        format: Playblast format (see getPlayblastOptions()).
        compression: Codec/compression name for the chosen format.
        hud: Show the heads-up display in the capture.
        offscreen: Render the capture offscreen.
        range: Optional [start, end] frame pair.
        scale: Percentage scale of the captured image.
    """
    if range is None:
        range = playback_selection_range()
        # print() call form is valid on both Python 2 and 3; the bare
        # "print range" statement broke Python 3.
        print(range)
        if range is None:
            start = cmds.playbackOptions( q=True,min=True )
            end = cmds.playbackOptions( q=True,max=True )
            range = [start, end]
    cmds.playblast(startTime=range[0], endTime=range[1], f=path, fmt=format,
                   orn=hud, os=offscreen, wh=scene_resolution(), p=scale,
                   qlt=90, c=compression, v=True, s=qeury_active_sound_node())
def qeury_active_sound_node():
    """Return the sound node attached to the playback slider, or None.

    NOTE(review): the misspelled name is part of the public interface
    (called by playblast()) and is kept as-is.
    """
    slider = mel.eval('$tmpVar=$gPlayBackSlider')
    sound = cmds.timeControl(slider, q=1, s=1)
    return sound if sound else None
def playback_selection_range():
    """Return the [start, end] frames selected on the timeline.

    Returns None when the selection spans a single frame (reported by
    the time control as start:start+1), i.e. no real selection.
    """
    slider = mel.eval('$tmpVar=$gPlayBackSlider')
    # Strip the surrounding quotes/brackets from the "start:end" string.
    raw = cmds.timeControl(slider, q=True, rng=True)[1:-1]
    parts = raw.split(":")
    start = round(float(parts[0]))
    end = round(float(parts[1]))
    if start + 1 == end:
        return None
    return [start, end]
def getPlayblastOptions():
    """Query the playblast formats and compressions Maya supports."""
    return {
        "format": cmds.playblast(q=True, fmt=True),
        "compression": cmds.playblast(q=True, c=True),
    }
def maya_api_version():
    """Return Maya's API version number as an int."""
    api_version = cmds.about(api=True)
    return int(api_version)
def scene_resolution():
    """Return the scene's default render resolution as [width, height]."""
    width = cmds.getAttr("defaultResolution.width")
    height = cmds.getAttr("defaultResolution.height")
    return [width, height]
def create_scriptjob(parent = None, event = None, script = None):
    """Register *script* to run on *event*, parented to a UI element.

    Returns:
        The new scriptJob id, or None when event or script is missing.
    """
    # Guard clause: both an event name and a callback are required.
    if not (event and script):
        return None
    return cmds.scriptJob(e=[event, script], ro=False, p=parent)
def kill_scriptjob(job = None):
    """Force-kill the scriptJob with id *job*; no-op when job is falsy."""
    if not job:
        return None
    return cmds.scriptJob(kill=job, f=True)
def new_scene_script(parent = None, script = None):
    """Run *script* whenever a new scene is created (NewSceneOpened)."""
    return create_scriptjob(event="NewSceneOpened", script=script, parent=parent)
def open_scene_script(parent = None, script = None):
    """Run *script* whenever a scene is opened (SceneOpened)."""
    return create_scriptjob(event="SceneOpened", script=script, parent=parent)
def new_scene_from_selection(project_path = None, mode = "include"):
    """Export the current selection to a temp scene file and open it.

    Args:
        project_path: Project root; the temp file is written to its
            "scenes" directory with a random name.
        mode: "include" exports with expressions/constraints/channels/
            history; "exclude" strips them.

    Returns:
        The exported file path on success, or None when nothing is
        selected, the export failed, or *mode* is unrecognized.
    """
    temp_file = os.path.join(project_path, "scenes", "temp_%s.ma"%(id_generator()))
    logging.info(temp_file)
    sel = cmds.ls(sl=True)
    if len(sel) > 0:
        # Initialize so an unrecognized mode falls through cleanly (the
        # original raised NameError on "saved_file" in that case).
        saved_file = None
        if mode == "include":
            saved_file = cmds.file(temp_file, type='mayaAscii', exportSelected=True,
                                   expressions=True, constraints=True, channels=True,
                                   constructionHistory=True, shader=True)
        if mode == "exclude":
            saved_file = cmds.file(temp_file, type='mayaAscii', exportSelected=True,
                                   expressions=False, constraints=False, channels=False,
                                   constructionHistory=False, shader=True)
        if saved_file:
            open_scene(saved_file)
            return saved_file
    return None
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*."""
    picks = [random.choice(chars) for _ in range(size)]
    return "".join(picks)
def maya_version():
    """Return the Maya application version string."""
    version = cmds.about(version=True)
    return version
def set_fps(fps = None):
    """Set the scene time unit from a numeric fps value.

    Recognizes 25 -> "pal", 24 -> "film", 30 -> "ntsc"; any other value
    (including None) falls back to "pal", matching the original chain.
    """
    unit_by_fps = {25: "pal", 24: "film", 30: "ntsc"}
    cmds.currentUnit(t=unit_by_fps.get(fps, "pal"))
def clean_up_file():
    """Placeholder for scene clean-up; currently a no-op.

    The triple-quoted block below is disabled legacy code (an inert
    string expression, not executed) that imported all references,
    collapsed namespaces, removed ngSkinTools custom nodes, and deleted
    RRM proxy rigs.  It is kept for reference only.
    """
    pass
    # import references
    """
    refs = cmds.ls(type='reference')
    for i in refs:
        rFile = cmds.referenceQuery(i, f=True)
        cmds.file(rFile, importReference=True, mnr=True)
    defaults = ['UI', 'shared']
    # Used as a sort key, this will sort namespaces by how many children they have.
    def num_children(ns):
        return ns.count(':')
    namespaces = [ns for ns in cmds.namespaceInfo(lon=True, r=True) if ns not in defaults]
    # We want to reverse the list, so that namespaces with more children are at the front of the list.
    namespaces.sort(key=num_children, reverse=True)
    for ns in namespaces:
        if namespaces.index(ns)+1 < len(namespaces):
            parent_ns = namespaces[namespaces.index(ns)+1]
            cmds.namespace(mv=[ns,parent_ns], f=True)
            cmds.namespace(rm=ns)
        else:
            cmds.namespace(mv=[ns,":"], f=True)
            cmds.namespace(rm=ns)
    # remove ngSkinTools custom nodes
    from ngSkinTools.layerUtils import LayerUtils
    LayerUtils.deleteCustomNodes()
    # remove RRM proxies
    if cmds.objExists("RRM_MAIN"):
        cmds.select("RRM_MAIN",hi=True)
        proxies = cmds.ls(sl=True)
        cmds.lockNode(proxies,lock=False)
        cmds.delete(proxies)
    if cmds.objExists("RRM_ProxiesLayer"):
        cmds.delete("RRM_ProxiesLayer")"""
def viewMassage(text = None):
    """Flash "Pipeline: <text>" as a fading in-view message, top center.

    NOTE(review): the misspelled name ("Massage") is the public
    interface and is kept as-is.
    """
    message = "Pipeline: " + text
    cmds.inViewMessage(amg=message, pos='topCenter', fade=True, fst=3000)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""List of renames to apply when converting from TF 1.0 to TF 2.0.
THIS FILE IS AUTOGENERATED: To update, please run:
bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
pyformat --in_place third_party/tensorflow/tools/compatibility/renames_v2.py
This file should be updated whenever endpoints are deprecated.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
renames = {
'tf.AUTO_REUSE':
'tf.compat.v1.AUTO_REUSE',
'tf.AttrValue':
'tf.compat.v1.AttrValue',
'tf.COMPILER_VERSION':
'tf.version.COMPILER_VERSION',
'tf.CXX11_ABI_FLAG':
'tf.sysconfig.CXX11_ABI_FLAG',
'tf.ConditionalAccumulator':
'tf.compat.v1.ConditionalAccumulator',
'tf.ConditionalAccumulatorBase':
'tf.compat.v1.ConditionalAccumulatorBase',
'tf.ConfigProto':
'tf.compat.v1.ConfigProto',
'tf.Dimension':
'tf.compat.v1.Dimension',
'tf.Event':
'tf.compat.v1.Event',
'tf.FIFOQueue':
'tf.queue.FIFOQueue',
'tf.FixedLenFeature':
'tf.io.FixedLenFeature',
'tf.FixedLenSequenceFeature':
'tf.io.FixedLenSequenceFeature',
'tf.FixedLengthRecordReader':
'tf.compat.v1.FixedLengthRecordReader',
'tf.GIT_VERSION':
'tf.version.GIT_VERSION',
'tf.GPUOptions':
'tf.compat.v1.GPUOptions',
'tf.GRAPH_DEF_VERSION':
'tf.version.GRAPH_DEF_VERSION',
'tf.GRAPH_DEF_VERSION_MIN_CONSUMER':
'tf.version.GRAPH_DEF_VERSION_MIN_CONSUMER',
'tf.GRAPH_DEF_VERSION_MIN_PRODUCER':
'tf.version.GRAPH_DEF_VERSION_MIN_PRODUCER',
'tf.GraphDef':
'tf.compat.v1.GraphDef',
'tf.GraphKeys':
'tf.compat.v1.GraphKeys',
'tf.GraphOptions':
'tf.compat.v1.GraphOptions',
'tf.HistogramProto':
'tf.compat.v1.HistogramProto',
'tf.IdentityReader':
'tf.compat.v1.IdentityReader',
'tf.InteractiveSession':
'tf.compat.v1.InteractiveSession',
'tf.LMDBReader':
'tf.compat.v1.LMDBReader',
'tf.LogMessage':
'tf.compat.v1.LogMessage',
'tf.MONOLITHIC_BUILD':
'tf.sysconfig.MONOLITHIC_BUILD',
'tf.MetaGraphDef':
'tf.compat.v1.MetaGraphDef',
'tf.NameAttrList':
'tf.compat.v1.NameAttrList',
'tf.NoGradient':
'tf.no_gradient',
'tf.NodeDef':
'tf.compat.v1.NodeDef',
'tf.NotDifferentiable':
'tf.no_gradient',
'tf.OpError':
'tf.errors.OpError',
'tf.OptimizerOptions':
'tf.compat.v1.OptimizerOptions',
'tf.PaddingFIFOQueue':
'tf.queue.PaddingFIFOQueue',
'tf.Print':
'tf.compat.v1.Print',
'tf.PriorityQueue':
'tf.queue.PriorityQueue',
'tf.QUANTIZED_DTYPES':
'tf.dtypes.QUANTIZED_DTYPES',
'tf.QueueBase':
'tf.queue.QueueBase',
'tf.RandomShuffleQueue':
'tf.queue.RandomShuffleQueue',
'tf.ReaderBase':
'tf.compat.v1.ReaderBase',
'tf.RunMetadata':
'tf.compat.v1.RunMetadata',
'tf.RunOptions':
'tf.compat.v1.RunOptions',
'tf.Session':
'tf.compat.v1.Session',
'tf.SessionLog':
'tf.compat.v1.SessionLog',
'tf.SparseConditionalAccumulator':
'tf.compat.v1.SparseConditionalAccumulator',
'tf.SparseFeature':
'tf.io.SparseFeature',
'tf.SparseTensorValue':
'tf.compat.v1.SparseTensorValue',
'tf.Summary':
'tf.compat.v1.Summary',
'tf.SummaryMetadata':
'tf.compat.v1.SummaryMetadata',
'tf.TFRecordReader':
'tf.compat.v1.TFRecordReader',
'tf.TensorInfo':
'tf.compat.v1.TensorInfo',
'tf.TextLineReader':
'tf.compat.v1.TextLineReader',
'tf.VERSION':
'tf.version.VERSION',
'tf.VarLenFeature':
'tf.io.VarLenFeature',
'tf.VariableScope':
'tf.compat.v1.VariableScope',
'tf.WholeFileReader':
'tf.compat.v1.WholeFileReader',
'tf.accumulate_n':
'tf.math.accumulate_n',
'tf.add_check_numerics_ops':
'tf.compat.v1.add_check_numerics_ops',
'tf.add_to_collection':
'tf.compat.v1.add_to_collection',
'tf.add_to_collections':
'tf.compat.v1.add_to_collections',
'tf.all_variables':
'tf.compat.v1.all_variables',
'tf.angle':
'tf.math.angle',
'tf.app.run':
'tf.compat.v1.app.run',
'tf.assert_greater_equal':
'tf.compat.v1.assert_greater_equal',
'tf.assert_integer':
'tf.compat.v1.assert_integer',
'tf.assert_less_equal':
'tf.compat.v1.assert_less_equal',
'tf.assert_near':
'tf.compat.v1.assert_near',
'tf.assert_negative':
'tf.compat.v1.assert_negative',
'tf.assert_non_negative':
'tf.compat.v1.assert_non_negative',
'tf.assert_non_positive':
'tf.compat.v1.assert_non_positive',
'tf.assert_none_equal':
'tf.compat.v1.assert_none_equal',
'tf.assert_positive':
'tf.compat.v1.assert_positive',
'tf.assert_proper_iterable':
'tf.debugging.assert_proper_iterable',
'tf.assert_rank_at_least':
'tf.compat.v1.assert_rank_at_least',
'tf.assert_rank_in':
'tf.compat.v1.assert_rank_in',
'tf.assert_same_float_dtype':
'tf.debugging.assert_same_float_dtype',
'tf.assert_scalar':
'tf.compat.v1.assert_scalar',
'tf.assert_type':
'tf.compat.v1.assert_type',
'tf.assert_variables_initialized':
'tf.compat.v1.assert_variables_initialized',
'tf.assign':
'tf.compat.v1.assign',
'tf.assign_add':
'tf.compat.v1.assign_add',
'tf.assign_sub':
'tf.compat.v1.assign_sub',
'tf.batch_scatter_update':
'tf.compat.v1.batch_scatter_update',
'tf.betainc':
'tf.math.betainc',
'tf.ceil':
'tf.math.ceil',
'tf.check_numerics':
'tf.debugging.check_numerics',
'tf.cholesky':
'tf.linalg.cholesky',
'tf.cholesky_solve':
'tf.linalg.cholesky_solve',
'tf.clip_by_average_norm':
'tf.compat.v1.clip_by_average_norm',
'tf.colocate_with':
'tf.compat.v1.colocate_with',
'tf.conj':
'tf.math.conj',
'tf.container':
'tf.compat.v1.container',
'tf.control_flow_v2_enabled':
'tf.compat.v1.control_flow_v2_enabled',
'tf.convert_to_tensor_or_indexed_slices':
'tf.compat.v1.convert_to_tensor_or_indexed_slices',
'tf.convert_to_tensor_or_sparse_tensor':
'tf.compat.v1.convert_to_tensor_or_sparse_tensor',
'tf.count_up_to':
'tf.compat.v1.count_up_to',
'tf.create_partitioned_variables':
'tf.compat.v1.create_partitioned_variables',
'tf.cross':
'tf.linalg.cross',
'tf.cumprod':
'tf.math.cumprod',
'tf.data.get_output_classes':
'tf.compat.v1.data.get_output_classes',
'tf.data.get_output_shapes':
'tf.compat.v1.data.get_output_shapes',
'tf.data.get_output_types':
'tf.compat.v1.data.get_output_types',
'tf.data.make_initializable_iterator':
'tf.compat.v1.data.make_initializable_iterator',
'tf.data.make_one_shot_iterator':
'tf.compat.v1.data.make_one_shot_iterator',
'tf.debugging.is_finite':
'tf.math.is_finite',
'tf.debugging.is_inf':
'tf.math.is_inf',
'tf.debugging.is_nan':
'tf.math.is_nan',
'tf.debugging.is_non_decreasing':
'tf.math.is_non_decreasing',
'tf.debugging.is_strictly_increasing':
'tf.math.is_strictly_increasing',
'tf.decode_base64':
'tf.io.decode_base64',
'tf.decode_compressed':
'tf.io.decode_compressed',
'tf.decode_json_example':
'tf.io.decode_json_example',
'tf.delete_session_tensor':
'tf.compat.v1.delete_session_tensor',
'tf.depth_to_space':
'tf.compat.v1.depth_to_space',
'tf.dequantize':
'tf.quantization.dequantize',
'tf.deserialize_many_sparse':
'tf.io.deserialize_many_sparse',
'tf.diag':
'tf.linalg.tensor_diag',
'tf.diag_part':
'tf.linalg.tensor_diag_part',
'tf.digamma':
'tf.math.digamma',
'tf.dimension_at_index':
'tf.compat.dimension_at_index',
'tf.dimension_value':
'tf.compat.dimension_value',
'tf.disable_control_flow_v2':
'tf.compat.v1.disable_control_flow_v2',
'tf.disable_eager_execution':
'tf.compat.v1.disable_eager_execution',
'tf.disable_resource_variables':
'tf.compat.v1.disable_resource_variables',
'tf.disable_tensor_equality':
'tf.compat.v1.disable_tensor_equality',
'tf.disable_v2_behavior':
'tf.compat.v1.disable_v2_behavior',
'tf.disable_v2_tensorshape':
'tf.compat.v1.disable_v2_tensorshape',
'tf.distribute.get_loss_reduction':
'tf.compat.v1.distribute.get_loss_reduction',
'tf.distributions.Bernoulli':
'tf.compat.v1.distributions.Bernoulli',
'tf.distributions.Beta':
'tf.compat.v1.distributions.Beta',
'tf.distributions.Categorical':
'tf.compat.v1.distributions.Categorical',
'tf.distributions.Dirichlet':
'tf.compat.v1.distributions.Dirichlet',
'tf.distributions.DirichletMultinomial':
'tf.compat.v1.distributions.DirichletMultinomial',
'tf.distributions.Distribution':
'tf.compat.v1.distributions.Distribution',
'tf.distributions.Exponential':
'tf.compat.v1.distributions.Exponential',
'tf.distributions.FULLY_REPARAMETERIZED':
'tf.compat.v1.distributions.FULLY_REPARAMETERIZED',
'tf.distributions.Gamma':
'tf.compat.v1.distributions.Gamma',
'tf.distributions.Laplace':
'tf.compat.v1.distributions.Laplace',
'tf.distributions.Multinomial':
'tf.compat.v1.distributions.Multinomial',
'tf.distributions.NOT_REPARAMETERIZED':
'tf.compat.v1.distributions.NOT_REPARAMETERIZED',
'tf.distributions.Normal':
'tf.compat.v1.distributions.Normal',
'tf.distributions.RegisterKL':
'tf.compat.v1.distributions.RegisterKL',
'tf.distributions.ReparameterizationType':
'tf.compat.v1.distributions.ReparameterizationType',
'tf.distributions.StudentT':
'tf.compat.v1.distributions.StudentT',
'tf.distributions.Uniform':
'tf.compat.v1.distributions.Uniform',
'tf.distributions.kl_divergence':
'tf.compat.v1.distributions.kl_divergence',
'tf.div':
'tf.compat.v1.div',
'tf.div_no_nan':
'tf.math.divide_no_nan',
'tf.dtypes.as_string':
'tf.strings.as_string',
'tf.enable_control_flow_v2':
'tf.compat.v1.enable_control_flow_v2',
'tf.enable_eager_execution':
'tf.compat.v1.enable_eager_execution',
'tf.enable_resource_variables':
'tf.compat.v1.enable_resource_variables',
'tf.enable_tensor_equality':
'tf.compat.v1.enable_tensor_equality',
'tf.enable_v2_behavior':
'tf.compat.v1.enable_v2_behavior',
'tf.enable_v2_tensorshape':
'tf.compat.v1.enable_v2_tensorshape',
'tf.encode_base64':
'tf.io.encode_base64',
'tf.erf':
'tf.math.erf',
'tf.erfc':
'tf.math.erfc',
'tf.estimator.experimental.KMeans':
'tf.compat.v1.estimator.experimental.KMeans',
'tf.estimator.experimental.dnn_logit_fn_builder':
'tf.compat.v1.estimator.experimental.dnn_logit_fn_builder',
'tf.estimator.experimental.linear_logit_fn_builder':
'tf.compat.v1.estimator.experimental.linear_logit_fn_builder',
'tf.estimator.inputs.numpy_input_fn':
'tf.compat.v1.estimator.inputs.numpy_input_fn',
'tf.estimator.inputs.pandas_input_fn':
'tf.compat.v1.estimator.inputs.pandas_input_fn',
'tf.estimator.tpu.InputPipelineConfig':
'tf.compat.v1.estimator.tpu.InputPipelineConfig',
'tf.estimator.tpu.RunConfig':
'tf.compat.v1.estimator.tpu.RunConfig',
'tf.estimator.tpu.TPUConfig':
'tf.compat.v1.estimator.tpu.TPUConfig',
'tf.estimator.tpu.TPUEstimator':
'tf.compat.v1.estimator.tpu.TPUEstimator',
'tf.estimator.tpu.TPUEstimatorSpec':
'tf.compat.v1.estimator.tpu.TPUEstimatorSpec',
'tf.estimator.tpu.experimental.EmbeddingConfigSpec':
'tf.compat.v1.estimator.tpu.experimental.EmbeddingConfigSpec',
'tf.executing_eagerly_outside_functions':
'tf.compat.v1.executing_eagerly_outside_functions',
'tf.experimental.output_all_intermediates':
'tf.compat.v1.experimental.output_all_intermediates',
'tf.expm1':
'tf.math.expm1',
'tf.fake_quant_with_min_max_args':
'tf.quantization.fake_quant_with_min_max_args',
'tf.fake_quant_with_min_max_args_gradient':
'tf.quantization.fake_quant_with_min_max_args_gradient',
'tf.fake_quant_with_min_max_vars':
'tf.quantization.fake_quant_with_min_max_vars',
'tf.fake_quant_with_min_max_vars_gradient':
'tf.quantization.fake_quant_with_min_max_vars_gradient',
'tf.fake_quant_with_min_max_vars_per_channel':
'tf.quantization.fake_quant_with_min_max_vars_per_channel',
'tf.fake_quant_with_min_max_vars_per_channel_gradient':
'tf.quantization.fake_quant_with_min_max_vars_per_channel_gradient',
'tf.feature_column.input_layer':
'tf.compat.v1.feature_column.input_layer',
'tf.feature_column.linear_model':
'tf.compat.v1.feature_column.linear_model',
'tf.feature_column.shared_embedding_columns':
'tf.compat.v1.feature_column.shared_embedding_columns',
'tf.fft':
'tf.signal.fft',
'tf.fft2d':
'tf.signal.fft2d',
'tf.fft3d':
'tf.signal.fft3d',
'tf.fixed_size_partitioner':
'tf.compat.v1.fixed_size_partitioner',
'tf.floordiv':
'tf.math.floordiv',
'tf.floormod':
'tf.math.floormod',
'tf.get_collection':
'tf.compat.v1.get_collection',
'tf.get_collection_ref':
'tf.compat.v1.get_collection_ref',
'tf.get_default_graph':
'tf.compat.v1.get_default_graph',
'tf.get_default_session':
'tf.compat.v1.get_default_session',
'tf.get_local_variable':
'tf.compat.v1.get_local_variable',
'tf.get_seed':
'tf.compat.v1.get_seed',
'tf.get_session_handle':
'tf.compat.v1.get_session_handle',
'tf.get_session_tensor':
'tf.compat.v1.get_session_tensor',
'tf.get_variable':
'tf.compat.v1.get_variable',
'tf.get_variable_scope':
'tf.compat.v1.get_variable_scope',
'tf.gfile.FastGFile':
'tf.compat.v1.gfile.FastGFile',
'tf.global_norm':
'tf.linalg.global_norm',
'tf.global_variables':
'tf.compat.v1.global_variables',
'tf.global_variables_initializer':
'tf.compat.v1.global_variables_initializer',
'tf.graph_util.convert_variables_to_constants':
'tf.compat.v1.graph_util.convert_variables_to_constants',
'tf.graph_util.extract_sub_graph':
'tf.compat.v1.graph_util.extract_sub_graph',
'tf.graph_util.must_run_on_cpu':
'tf.compat.v1.graph_util.must_run_on_cpu',
'tf.graph_util.remove_training_nodes':
'tf.compat.v1.graph_util.remove_training_nodes',
'tf.graph_util.tensor_shape_from_node_def_name':
'tf.compat.v1.graph_util.tensor_shape_from_node_def_name',
'tf.ifft':
'tf.signal.ifft',
'tf.ifft2d':
'tf.signal.ifft2d',
'tf.ifft3d':
'tf.signal.ifft3d',
'tf.igamma':
'tf.math.igamma',
'tf.igammac':
'tf.math.igammac',
'tf.imag':
'tf.math.imag',
'tf.image.resize_area':
'tf.compat.v1.image.resize_area',
'tf.image.resize_bicubic':
'tf.compat.v1.image.resize_bicubic',
'tf.image.resize_bilinear':
'tf.compat.v1.image.resize_bilinear',
'tf.image.resize_image_with_crop_or_pad':
'tf.image.resize_with_crop_or_pad',
'tf.image.resize_image_with_pad':
'tf.compat.v1.image.resize_image_with_pad',
'tf.image.resize_nearest_neighbor':
'tf.compat.v1.image.resize_nearest_neighbor',
'tf.image.transpose_image':
'tf.image.transpose',
'tf.initialize_all_tables':
'tf.compat.v1.initialize_all_tables',
'tf.initialize_all_variables':
'tf.compat.v1.initialize_all_variables',
'tf.initialize_local_variables':
'tf.compat.v1.initialize_local_variables',
'tf.initialize_variables':
'tf.compat.v1.initialize_variables',
'tf.initializers.global_variables':
'tf.compat.v1.initializers.global_variables',
'tf.initializers.local_variables':
'tf.compat.v1.initializers.local_variables',
'tf.initializers.tables_initializer':
'tf.compat.v1.initializers.tables_initializer',
'tf.initializers.uniform_unit_scaling':
'tf.compat.v1.initializers.uniform_unit_scaling',
'tf.initializers.variables':
'tf.compat.v1.initializers.variables',
'tf.invert_permutation':
'tf.math.invert_permutation',
'tf.io.PaddingFIFOQueue':
'tf.queue.PaddingFIFOQueue',
'tf.io.PriorityQueue':
'tf.queue.PriorityQueue',
'tf.io.QueueBase':
'tf.queue.QueueBase',
'tf.io.RandomShuffleQueue':
'tf.queue.RandomShuffleQueue',
'tf.io.TFRecordCompressionType':
'tf.compat.v1.io.TFRecordCompressionType',
'tf.io.tf_record_iterator':
'tf.compat.v1.io.tf_record_iterator',
'tf.is_finite':
'tf.math.is_finite',
'tf.is_inf':
'tf.math.is_inf',
'tf.is_nan':
'tf.math.is_nan',
'tf.is_non_decreasing':
'tf.math.is_non_decreasing',
'tf.is_numeric_tensor':
'tf.debugging.is_numeric_tensor',
'tf.is_strictly_increasing':
'tf.math.is_strictly_increasing',
'tf.is_variable_initialized':
'tf.compat.v1.is_variable_initialized',
'tf.keras.backend.get_session':
'tf.compat.v1.keras.backend.get_session',
'tf.keras.backend.set_session':
'tf.compat.v1.keras.backend.set_session',
'tf.keras.experimental.export_saved_model':
'tf.compat.v1.keras.experimental.export_saved_model',
'tf.keras.experimental.load_from_saved_model':
'tf.compat.v1.keras.experimental.load_from_saved_model',
'tf.keras.layers.CuDNNGRU':
'tf.compat.v1.keras.layers.CuDNNGRU',
'tf.keras.layers.CuDNNLSTM':
'tf.compat.v1.keras.layers.CuDNNLSTM',
'tf.keras.layers.disable_v2_dtype_behavior':
'tf.compat.v1.keras.layers.disable_v2_dtype_behavior',
'tf.keras.layers.enable_v2_dtype_behavior':
'tf.compat.v1.keras.layers.enable_v2_dtype_behavior',
'tf.keras.losses.cosine':
'tf.keras.losses.cosine_similarity',
'tf.keras.losses.cosine_proximity':
'tf.keras.losses.cosine_similarity',
'tf.keras.metrics.cosine':
'tf.keras.losses.cosine_similarity',
'tf.keras.metrics.cosine_proximity':
'tf.keras.losses.cosine_similarity',
'tf.layers.AveragePooling1D':
'tf.compat.v1.layers.AveragePooling1D',
'tf.layers.AveragePooling2D':
'tf.compat.v1.layers.AveragePooling2D',
'tf.layers.AveragePooling3D':
'tf.compat.v1.layers.AveragePooling3D',
'tf.layers.BatchNormalization':
'tf.compat.v1.layers.BatchNormalization',
'tf.layers.Conv1D':
'tf.compat.v1.layers.Conv1D',
'tf.layers.Conv2D':
'tf.compat.v1.layers.Conv2D',
'tf.layers.Conv2DTranspose':
'tf.compat.v1.layers.Conv2DTranspose',
'tf.layers.Conv3D':
'tf.compat.v1.layers.Conv3D',
'tf.layers.Conv3DTranspose':
'tf.compat.v1.layers.Conv3DTranspose',
'tf.layers.Dense':
'tf.compat.v1.layers.Dense',
'tf.layers.Dropout':
'tf.compat.v1.layers.Dropout',
'tf.layers.Flatten':
'tf.compat.v1.layers.Flatten',
'tf.layers.InputSpec':
'tf.keras.layers.InputSpec',
'tf.layers.Layer':
'tf.compat.v1.layers.Layer',
'tf.layers.MaxPooling1D':
'tf.compat.v1.layers.MaxPooling1D',
'tf.layers.MaxPooling2D':
'tf.compat.v1.layers.MaxPooling2D',
'tf.layers.MaxPooling3D':
'tf.compat.v1.layers.MaxPooling3D',
'tf.layers.SeparableConv1D':
'tf.compat.v1.layers.SeparableConv1D',
'tf.layers.SeparableConv2D':
'tf.compat.v1.layers.SeparableConv2D',
'tf.layers.average_pooling1d':
'tf.compat.v1.layers.average_pooling1d',
'tf.layers.average_pooling2d':
'tf.compat.v1.layers.average_pooling2d',
'tf.layers.average_pooling3d':
'tf.compat.v1.layers.average_pooling3d',
'tf.layers.batch_normalization':
'tf.compat.v1.layers.batch_normalization',
'tf.layers.conv1d':
'tf.compat.v1.layers.conv1d',
'tf.layers.conv2d':
'tf.compat.v1.layers.conv2d',
'tf.layers.conv2d_transpose':
'tf.compat.v1.layers.conv2d_transpose',
'tf.layers.conv3d':
'tf.compat.v1.layers.conv3d',
'tf.layers.conv3d_transpose':
'tf.compat.v1.layers.conv3d_transpose',
'tf.layers.dense':
'tf.compat.v1.layers.dense',
'tf.layers.dropout':
'tf.compat.v1.layers.dropout',
'tf.layers.experimental.keras_style_scope':
'tf.compat.v1.layers.experimental.keras_style_scope',
'tf.layers.experimental.set_keras_style':
'tf.compat.v1.layers.experimental.set_keras_style',
'tf.layers.flatten':
'tf.compat.v1.layers.flatten',
'tf.layers.max_pooling1d':
'tf.compat.v1.layers.max_pooling1d',
'tf.layers.max_pooling2d':
'tf.compat.v1.layers.max_pooling2d',
'tf.layers.max_pooling3d':
'tf.compat.v1.layers.max_pooling3d',
'tf.layers.separable_conv1d':
'tf.compat.v1.layers.separable_conv1d',
'tf.layers.separable_conv2d':
'tf.compat.v1.layers.separable_conv2d',
'tf.lbeta':
'tf.math.lbeta',
'tf.lgamma':
'tf.math.lgamma',
'tf.lin_space':
'tf.linspace',
'tf.linalg.transpose':
'tf.linalg.matrix_transpose',
'tf.lite.OpHint':
'tf.compat.v1.lite.OpHint',
'tf.lite.TocoConverter':
'tf.compat.v1.lite.TocoConverter',
'tf.lite.constants.FLOAT16':
'tf.compat.v1.lite.constants.FLOAT16',
'tf.lite.constants.GRAPHVIZ_DOT':
'tf.compat.v1.lite.constants.GRAPHVIZ_DOT',
'tf.lite.constants.INT8':
'tf.compat.v1.lite.constants.INT8',
'tf.lite.constants.TFLITE':
'tf.compat.v1.lite.constants.TFLITE',
'tf.lite.experimental.convert_op_hints_to_stubs':
'tf.compat.v1.lite.experimental.convert_op_hints_to_stubs',
'tf.lite.experimental.get_potentially_supported_ops':
'tf.compat.v1.lite.experimental.get_potentially_supported_ops',
'tf.lite.experimental.nn.TFLiteLSTMCell':
'tf.compat.v1.lite.experimental.nn.TFLiteLSTMCell',
'tf.lite.experimental.nn.TfLiteRNNCell':
'tf.compat.v1.lite.experimental.nn.TfLiteRNNCell',
'tf.lite.experimental.nn.dynamic_rnn':
'tf.compat.v1.lite.experimental.nn.dynamic_rnn',
'tf.lite.toco_convert':
'tf.compat.v1.lite.toco_convert',
'tf.local_variables':
'tf.compat.v1.local_variables',
'tf.local_variables_initializer':
'tf.compat.v1.local_variables_initializer',
'tf.log':
'tf.math.log',
'tf.log1p':
'tf.math.log1p',
'tf.log_sigmoid':
'tf.math.log_sigmoid',
'tf.logging.DEBUG':
'tf.compat.v1.logging.DEBUG',
'tf.logging.ERROR':
'tf.compat.v1.logging.ERROR',
'tf.logging.FATAL':
'tf.compat.v1.logging.FATAL',
'tf.logging.INFO':
'tf.compat.v1.logging.INFO',
'tf.logging.TaskLevelStatusMessage':
'tf.compat.v1.logging.TaskLevelStatusMessage',
'tf.logging.WARN':
'tf.compat.v1.logging.WARN',
'tf.logging.debug':
'tf.compat.v1.logging.debug',
'tf.logging.error':
'tf.compat.v1.logging.error',
'tf.logging.fatal':
'tf.compat.v1.logging.fatal',
'tf.logging.flush':
'tf.compat.v1.logging.flush',
'tf.logging.get_verbosity':
'tf.compat.v1.logging.get_verbosity',
'tf.logging.info':
'tf.compat.v1.logging.info',
'tf.logging.log':
'tf.compat.v1.logging.log',
'tf.logging.log_every_n':
'tf.compat.v1.logging.log_every_n',
'tf.logging.log_first_n':
'tf.compat.v1.logging.log_first_n',
'tf.logging.log_if':
'tf.compat.v1.logging.log_if',
'tf.logging.set_verbosity':
'tf.compat.v1.logging.set_verbosity',
'tf.logging.vlog':
'tf.compat.v1.logging.vlog',
'tf.logging.warn':
'tf.compat.v1.logging.warn',
'tf.logging.warning':
'tf.compat.v1.logging.warning',
'tf.logical_xor':
'tf.math.logical_xor',
'tf.losses.Reduction':
'tf.compat.v1.losses.Reduction',
'tf.losses.absolute_difference':
'tf.compat.v1.losses.absolute_difference',
'tf.losses.add_loss':
'tf.compat.v1.losses.add_loss',
'tf.losses.compute_weighted_loss':
'tf.compat.v1.losses.compute_weighted_loss',
'tf.losses.cosine_distance':
'tf.compat.v1.losses.cosine_distance',
'tf.losses.get_losses':
'tf.compat.v1.losses.get_losses',
'tf.losses.get_regularization_loss':
'tf.compat.v1.losses.get_regularization_loss',
'tf.losses.get_regularization_losses':
'tf.compat.v1.losses.get_regularization_losses',
'tf.losses.get_total_loss':
'tf.compat.v1.losses.get_total_loss',
'tf.losses.hinge_loss':
'tf.compat.v1.losses.hinge_loss',
'tf.losses.huber_loss':
'tf.compat.v1.losses.huber_loss',
'tf.losses.log_loss':
'tf.compat.v1.losses.log_loss',
'tf.losses.mean_pairwise_squared_error':
'tf.compat.v1.losses.mean_pairwise_squared_error',
'tf.losses.mean_squared_error':
'tf.compat.v1.losses.mean_squared_error',
'tf.losses.sigmoid_cross_entropy':
'tf.compat.v1.losses.sigmoid_cross_entropy',
'tf.losses.softmax_cross_entropy':
'tf.compat.v1.losses.softmax_cross_entropy',
'tf.losses.sparse_softmax_cross_entropy':
'tf.compat.v1.losses.sparse_softmax_cross_entropy',
'tf.make_template':
'tf.compat.v1.make_template',
'tf.manip.gather_nd':
'tf.compat.v1.manip.gather_nd',
'tf.manip.reshape':
'tf.reshape',
'tf.manip.reverse':
'tf.reverse',
'tf.manip.roll':
'tf.roll',
'tf.manip.scatter_nd':
'tf.scatter_nd',
'tf.manip.space_to_batch_nd':
'tf.space_to_batch_nd',
'tf.manip.tile':
'tf.tile',
'tf.matching_files':
'tf.io.matching_files',
'tf.matrix_band_part':
'tf.linalg.band_part',
'tf.matrix_determinant':
'tf.linalg.det',
'tf.matrix_diag':
'tf.linalg.diag',
'tf.matrix_diag_part':
'tf.linalg.diag_part',
'tf.matrix_inverse':
'tf.linalg.inv',
'tf.matrix_set_diag':
'tf.linalg.set_diag',
'tf.matrix_solve':
'tf.linalg.solve',
'tf.matrix_solve_ls':
'tf.linalg.lstsq',
'tf.matrix_transpose':
'tf.linalg.matrix_transpose',
'tf.matrix_triangular_solve':
'tf.linalg.triangular_solve',
'tf.metrics.accuracy':
'tf.compat.v1.metrics.accuracy',
'tf.metrics.auc':
'tf.compat.v1.metrics.auc',
'tf.metrics.average_precision_at_k':
'tf.compat.v1.metrics.average_precision_at_k',
'tf.metrics.false_negatives':
'tf.compat.v1.metrics.false_negatives',
'tf.metrics.false_negatives_at_thresholds':
'tf.compat.v1.metrics.false_negatives_at_thresholds',
'tf.metrics.false_positives':
'tf.compat.v1.metrics.false_positives',
'tf.metrics.false_positives_at_thresholds':
'tf.compat.v1.metrics.false_positives_at_thresholds',
'tf.metrics.mean':
'tf.compat.v1.metrics.mean',
'tf.metrics.mean_absolute_error':
'tf.compat.v1.metrics.mean_absolute_error',
'tf.metrics.mean_cosine_distance':
'tf.compat.v1.metrics.mean_cosine_distance',
'tf.metrics.mean_iou':
'tf.compat.v1.metrics.mean_iou',
'tf.metrics.mean_per_class_accuracy':
'tf.compat.v1.metrics.mean_per_class_accuracy',
'tf.metrics.mean_relative_error':
'tf.compat.v1.metrics.mean_relative_error',
'tf.metrics.mean_squared_error':
'tf.compat.v1.metrics.mean_squared_error',
'tf.metrics.mean_tensor':
'tf.compat.v1.metrics.mean_tensor',
'tf.metrics.percentage_below':
'tf.compat.v1.metrics.percentage_below',
'tf.metrics.precision':
'tf.compat.v1.metrics.precision',
'tf.metrics.precision_at_k':
'tf.compat.v1.metrics.precision_at_k',
'tf.metrics.precision_at_thresholds':
'tf.compat.v1.metrics.precision_at_thresholds',
'tf.metrics.precision_at_top_k':
'tf.compat.v1.metrics.precision_at_top_k',
'tf.metrics.recall':
'tf.compat.v1.metrics.recall',
'tf.metrics.recall_at_k':
'tf.compat.v1.metrics.recall_at_k',
'tf.metrics.recall_at_thresholds':
'tf.compat.v1.metrics.recall_at_thresholds',
'tf.metrics.recall_at_top_k':
'tf.compat.v1.metrics.recall_at_top_k',
'tf.metrics.root_mean_squared_error':
'tf.compat.v1.metrics.root_mean_squared_error',
'tf.metrics.sensitivity_at_specificity':
'tf.compat.v1.metrics.sensitivity_at_specificity',
'tf.metrics.sparse_average_precision_at_k':
'tf.compat.v1.metrics.sparse_average_precision_at_k',
'tf.metrics.sparse_precision_at_k':
'tf.compat.v1.metrics.sparse_precision_at_k',
'tf.metrics.specificity_at_sensitivity':
'tf.compat.v1.metrics.specificity_at_sensitivity',
'tf.metrics.true_negatives':
'tf.compat.v1.metrics.true_negatives',
'tf.metrics.true_negatives_at_thresholds':
'tf.compat.v1.metrics.true_negatives_at_thresholds',
'tf.metrics.true_positives':
'tf.compat.v1.metrics.true_positives',
'tf.metrics.true_positives_at_thresholds':
'tf.compat.v1.metrics.true_positives_at_thresholds',
'tf.min_max_variable_partitioner':
'tf.compat.v1.min_max_variable_partitioner',
'tf.mod':
'tf.math.floormod',
'tf.model_variables':
'tf.compat.v1.model_variables',
'tf.moving_average_variables':
'tf.compat.v1.moving_average_variables',
'tf.nn.avg_pool_v2':
'tf.nn.avg_pool',
'tf.nn.bidirectional_dynamic_rnn':
'tf.compat.v1.nn.bidirectional_dynamic_rnn',
'tf.nn.conv2d_backprop_filter':
'tf.compat.v1.nn.conv2d_backprop_filter',
'tf.nn.conv3d_backprop_filter':
'tf.compat.v1.nn.conv3d_backprop_filter',
'tf.nn.conv3d_backprop_filter_v2':
'tf.compat.v1.nn.conv3d_backprop_filter_v2',
'tf.nn.ctc_beam_search_decoder_v2':
'tf.nn.ctc_beam_search_decoder',
'tf.nn.ctc_loss_v2':
'tf.compat.v1.nn.ctc_loss_v2',
'tf.nn.depthwise_conv2d_native':
'tf.compat.v1.nn.depthwise_conv2d_native',
'tf.nn.depthwise_conv2d_native_backprop_filter':
'tf.nn.depthwise_conv2d_backprop_filter',
'tf.nn.depthwise_conv2d_native_backprop_input':
'tf.nn.depthwise_conv2d_backprop_input',
'tf.nn.dynamic_rnn':
'tf.compat.v1.nn.dynamic_rnn',
'tf.nn.log_uniform_candidate_sampler':
'tf.random.log_uniform_candidate_sampler',
'tf.nn.max_pool_v2':
'tf.nn.max_pool',
'tf.nn.quantized_avg_pool':
'tf.compat.v1.nn.quantized_avg_pool',
'tf.nn.quantized_conv2d':
'tf.compat.v1.nn.quantized_conv2d',
'tf.nn.quantized_max_pool':
'tf.compat.v1.nn.quantized_max_pool',
'tf.nn.quantized_relu_x':
'tf.compat.v1.nn.quantized_relu_x',
'tf.nn.raw_rnn':
'tf.compat.v1.nn.raw_rnn',
'tf.nn.relu_layer':
'tf.compat.v1.nn.relu_layer',
'tf.nn.rnn_cell.BasicLSTMCell':
'tf.compat.v1.nn.rnn_cell.BasicLSTMCell',
'tf.nn.rnn_cell.BasicRNNCell':
'tf.compat.v1.nn.rnn_cell.BasicRNNCell',
'tf.nn.rnn_cell.DeviceWrapper':
'tf.compat.v1.nn.rnn_cell.DeviceWrapper',
'tf.nn.rnn_cell.DropoutWrapper':
'tf.compat.v1.nn.rnn_cell.DropoutWrapper',
'tf.nn.rnn_cell.GRUCell':
'tf.compat.v1.nn.rnn_cell.GRUCell',
'tf.nn.rnn_cell.LSTMCell':
'tf.compat.v1.nn.rnn_cell.LSTMCell',
'tf.nn.rnn_cell.LSTMStateTuple':
'tf.compat.v1.nn.rnn_cell.LSTMStateTuple',
'tf.nn.rnn_cell.MultiRNNCell':
'tf.compat.v1.nn.rnn_cell.MultiRNNCell',
'tf.nn.rnn_cell.RNNCell':
'tf.compat.v1.nn.rnn_cell.RNNCell',
'tf.nn.rnn_cell.ResidualWrapper':
'tf.compat.v1.nn.rnn_cell.ResidualWrapper',
'tf.nn.static_bidirectional_rnn':
'tf.compat.v1.nn.static_bidirectional_rnn',
'tf.nn.static_rnn':
'tf.compat.v1.nn.static_rnn',
'tf.nn.static_state_saving_rnn':
'tf.compat.v1.nn.static_state_saving_rnn',
'tf.nn.uniform_candidate_sampler':
'tf.random.uniform_candidate_sampler',
'tf.nn.xw_plus_b':
'tf.compat.v1.nn.xw_plus_b',
'tf.no_regularizer':
'tf.compat.v1.no_regularizer',
'tf.op_scope':
'tf.compat.v1.op_scope',
'tf.parse_single_sequence_example':
'tf.io.parse_single_sequence_example',
'tf.parse_tensor':
'tf.io.parse_tensor',
'tf.placeholder':
'tf.compat.v1.placeholder',
'tf.placeholder_with_default':
'tf.compat.v1.placeholder_with_default',
'tf.polygamma':
'tf.math.polygamma',
'tf.profiler.AdviceProto':
'tf.compat.v1.profiler.AdviceProto',
'tf.profiler.GraphNodeProto':
'tf.compat.v1.profiler.GraphNodeProto',
'tf.profiler.MultiGraphNodeProto':
'tf.compat.v1.profiler.MultiGraphNodeProto',
'tf.profiler.OpLogProto':
'tf.compat.v1.profiler.OpLogProto',
'tf.profiler.ProfileOptionBuilder':
'tf.compat.v1.profiler.ProfileOptionBuilder',
'tf.profiler.Profiler':
'tf.compat.v1.profiler.Profiler',
'tf.profiler.advise':
'tf.compat.v1.profiler.advise',
'tf.profiler.profile':
'tf.compat.v1.profiler.profile',
'tf.profiler.write_op_log':
'tf.compat.v1.profiler.write_op_log',
'tf.py_func':
'tf.compat.v1.py_func',
'tf.python_io.TFRecordCompressionType':
'tf.compat.v1.python_io.TFRecordCompressionType',
'tf.python_io.TFRecordOptions':
'tf.io.TFRecordOptions',
'tf.python_io.TFRecordWriter':
'tf.io.TFRecordWriter',
'tf.python_io.tf_record_iterator':
'tf.compat.v1.python_io.tf_record_iterator',
'tf.qr':
'tf.linalg.qr',
'tf.quantize':
'tf.quantization.quantize',
'tf.quantized_concat':
'tf.quantization.quantized_concat',
'tf.ragged.RaggedTensorValue':
'tf.compat.v1.ragged.RaggedTensorValue',
'tf.ragged.constant_value':
'tf.compat.v1.ragged.constant_value',
'tf.ragged.placeholder':
'tf.compat.v1.ragged.placeholder',
'tf.random.get_seed':
'tf.compat.v1.random.get_seed',
'tf.random.set_random_seed':
'tf.compat.v1.random.set_random_seed',
'tf.random_crop':
'tf.image.random_crop',
'tf.random_gamma':
'tf.random.gamma',
'tf.random_normal':
'tf.random.normal',
'tf.random_shuffle':
'tf.random.shuffle',
'tf.random_uniform':
'tf.random.uniform',
'tf.read_file':
'tf.io.read_file',
'tf.real':
'tf.math.real',
'tf.reciprocal':
'tf.math.reciprocal',
'tf.regex_replace':
'tf.strings.regex_replace',
'tf.report_uninitialized_variables':
'tf.compat.v1.report_uninitialized_variables',
'tf.reset_default_graph':
'tf.compat.v1.reset_default_graph',
'tf.resource_loader.get_data_files_path':
'tf.compat.v1.resource_loader.get_data_files_path',
'tf.resource_loader.get_path_to_datafile':
'tf.compat.v1.resource_loader.get_path_to_datafile',
'tf.resource_loader.get_root_dir_with_all_resources':
'tf.compat.v1.resource_loader.get_root_dir_with_all_resources',
'tf.resource_loader.load_resource':
'tf.compat.v1.resource_loader.load_resource',
'tf.resource_loader.readahead_file_path':
'tf.compat.v1.resource_loader.readahead_file_path',
'tf.resource_variables_enabled':
'tf.compat.v1.resource_variables_enabled',
'tf.reverse_v2':
'tf.reverse',
'tf.rint':
'tf.math.rint',
'tf.rsqrt':
'tf.math.rsqrt',
'tf.saved_model.Builder':
'tf.compat.v1.saved_model.Builder',
'tf.saved_model.LEGACY_INIT_OP_KEY':
'tf.compat.v1.saved_model.LEGACY_INIT_OP_KEY',
'tf.saved_model.MAIN_OP_KEY':
'tf.compat.v1.saved_model.MAIN_OP_KEY',
'tf.saved_model.build_signature_def':
'tf.compat.v1.saved_model.build_signature_def',
'tf.saved_model.build_tensor_info':
'tf.compat.v1.saved_model.build_tensor_info',
'tf.saved_model.builder.SavedModelBuilder':
'tf.compat.v1.saved_model.builder.SavedModelBuilder',
'tf.saved_model.classification_signature_def':
'tf.compat.v1.saved_model.classification_signature_def',
'tf.saved_model.constants.ASSETS_DIRECTORY':
'tf.saved_model.ASSETS_DIRECTORY',
'tf.saved_model.constants.ASSETS_KEY':
'tf.saved_model.ASSETS_KEY',
'tf.saved_model.constants.DEBUG_DIRECTORY':
'tf.saved_model.DEBUG_DIRECTORY',
'tf.saved_model.constants.DEBUG_INFO_FILENAME_PB':
'tf.saved_model.DEBUG_INFO_FILENAME_PB',
'tf.saved_model.constants.LEGACY_INIT_OP_KEY':
'tf.compat.v1.saved_model.constants.LEGACY_INIT_OP_KEY',
'tf.saved_model.constants.MAIN_OP_KEY':
'tf.compat.v1.saved_model.constants.MAIN_OP_KEY',
'tf.saved_model.constants.SAVED_MODEL_FILENAME_PB':
'tf.saved_model.SAVED_MODEL_FILENAME_PB',
'tf.saved_model.constants.SAVED_MODEL_FILENAME_PBTXT':
'tf.saved_model.SAVED_MODEL_FILENAME_PBTXT',
'tf.saved_model.constants.SAVED_MODEL_SCHEMA_VERSION':
'tf.saved_model.SAVED_MODEL_SCHEMA_VERSION',
'tf.saved_model.constants.VARIABLES_DIRECTORY':
'tf.saved_model.VARIABLES_DIRECTORY',
'tf.saved_model.constants.VARIABLES_FILENAME':
'tf.saved_model.VARIABLES_FILENAME',
'tf.saved_model.experimental.save':
'tf.saved_model.save',
'tf.saved_model.get_tensor_from_tensor_info':
'tf.compat.v1.saved_model.get_tensor_from_tensor_info',
'tf.saved_model.is_valid_signature':
'tf.compat.v1.saved_model.is_valid_signature',
'tf.saved_model.loader.load':
'tf.compat.v1.saved_model.loader.load',
'tf.saved_model.loader.maybe_saved_model_directory':
'tf.compat.v1.saved_model.loader.maybe_saved_model_directory',
'tf.saved_model.main_op.main_op':
'tf.compat.v1.saved_model.main_op.main_op',
'tf.saved_model.main_op.main_op_with_restore':
'tf.compat.v1.saved_model.main_op.main_op_with_restore',
'tf.saved_model.main_op_with_restore':
'tf.compat.v1.saved_model.main_op_with_restore',
'tf.saved_model.maybe_saved_model_directory':
'tf.compat.v1.saved_model.maybe_saved_model_directory',
'tf.saved_model.predict_signature_def':
'tf.compat.v1.saved_model.predict_signature_def',
'tf.saved_model.regression_signature_def':
'tf.compat.v1.saved_model.regression_signature_def',
'tf.saved_model.signature_constants.CLASSIFY_INPUTS':
'tf.saved_model.CLASSIFY_INPUTS',
'tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME':
'tf.saved_model.CLASSIFY_METHOD_NAME',
'tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES':
'tf.saved_model.CLASSIFY_OUTPUT_CLASSES',
'tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES':
'tf.saved_model.CLASSIFY_OUTPUT_SCORES',
'tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY':
'tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY',
'tf.saved_model.signature_constants.PREDICT_INPUTS':
'tf.saved_model.PREDICT_INPUTS',
'tf.saved_model.signature_constants.PREDICT_METHOD_NAME':
'tf.saved_model.PREDICT_METHOD_NAME',
'tf.saved_model.signature_constants.PREDICT_OUTPUTS':
'tf.saved_model.PREDICT_OUTPUTS',
'tf.saved_model.signature_constants.REGRESS_INPUTS':
'tf.saved_model.REGRESS_INPUTS',
'tf.saved_model.signature_constants.REGRESS_METHOD_NAME':
'tf.saved_model.REGRESS_METHOD_NAME',
'tf.saved_model.signature_constants.REGRESS_OUTPUTS':
'tf.saved_model.REGRESS_OUTPUTS',
'tf.saved_model.signature_def_utils.MethodNameUpdater':
'tf.compat.v1.saved_model.signature_def_utils.MethodNameUpdater',
'tf.saved_model.signature_def_utils.build_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.build_signature_def',
'tf.saved_model.signature_def_utils.classification_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.classification_signature_def',
'tf.saved_model.signature_def_utils.is_valid_signature':
'tf.compat.v1.saved_model.signature_def_utils.is_valid_signature',
'tf.saved_model.signature_def_utils.predict_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.predict_signature_def',
'tf.saved_model.signature_def_utils.regression_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.regression_signature_def',
'tf.saved_model.simple_save':
'tf.compat.v1.saved_model.simple_save',
'tf.saved_model.tag_constants.GPU':
'tf.saved_model.GPU',
'tf.saved_model.tag_constants.SERVING':
'tf.saved_model.SERVING',
'tf.saved_model.tag_constants.TPU':
'tf.saved_model.TPU',
'tf.saved_model.tag_constants.TRAINING':
'tf.saved_model.TRAINING',
'tf.saved_model.utils.build_tensor_info':
'tf.compat.v1.saved_model.utils.build_tensor_info',
'tf.saved_model.utils.get_tensor_from_tensor_info':
'tf.compat.v1.saved_model.utils.get_tensor_from_tensor_info',
'tf.scatter_add':
'tf.compat.v1.scatter_add',
'tf.scatter_div':
'tf.compat.v1.scatter_div',
'tf.scatter_max':
'tf.compat.v1.scatter_max',
'tf.scatter_min':
'tf.compat.v1.scatter_min',
'tf.scatter_mul':
'tf.compat.v1.scatter_mul',
'tf.scatter_nd_add':
'tf.compat.v1.scatter_nd_add',
'tf.scatter_nd_sub':
'tf.compat.v1.scatter_nd_sub',
'tf.scatter_nd_max':
'tf.compat.v1.scatter_nd_max',
'tf.scatter_nd_min':
'tf.compat.v1.scatter_nd_min',
'tf.scatter_nd_update':
'tf.compat.v1.scatter_nd_update',
'tf.scatter_sub':
'tf.compat.v1.scatter_sub',
'tf.scatter_update':
'tf.compat.v1.scatter_update',
'tf.segment_max':
'tf.math.segment_max',
'tf.segment_mean':
'tf.math.segment_mean',
'tf.segment_min':
'tf.math.segment_min',
'tf.segment_prod':
'tf.math.segment_prod',
'tf.segment_sum':
'tf.math.segment_sum',
'tf.self_adjoint_eig':
'tf.linalg.eigh',
'tf.self_adjoint_eigvals':
'tf.linalg.eigvalsh',
'tf.serialize_many_sparse':
'tf.compat.v1.serialize_many_sparse',
'tf.serialize_sparse':
'tf.compat.v1.serialize_sparse',
'tf.serialize_tensor':
'tf.io.serialize_tensor',
'tf.set_random_seed':
'tf.compat.v1.set_random_seed',
'tf.setdiff1d':
'tf.compat.v1.setdiff1d',
'tf.sets.set_difference':
'tf.sets.difference',
'tf.sets.set_intersection':
'tf.sets.intersection',
'tf.sets.set_size':
'tf.sets.size',
'tf.sets.set_union':
'tf.sets.union',
'tf.space_to_depth':
'tf.compat.v1.space_to_depth',
'tf.sparse.SparseConditionalAccumulator':
'tf.compat.v1.sparse.SparseConditionalAccumulator',
'tf.sparse.matmul':
'tf.sparse.sparse_dense_matmul',
'tf.sparse.merge':
'tf.compat.v1.sparse.merge',
'tf.sparse.placeholder':
'tf.compat.v1.sparse.placeholder',
'tf.sparse.reduce_max_sparse':
'tf.compat.v1.sparse.reduce_max_sparse',
'tf.sparse.reduce_sum_sparse':
'tf.compat.v1.sparse.reduce_sum_sparse',
'tf.sparse_fill_empty_rows':
'tf.sparse.fill_empty_rows',
'tf.sparse_mask':
'tf.sparse.mask',
'tf.sparse_maximum':
'tf.sparse.maximum',
'tf.sparse_merge':
'tf.compat.v1.sparse_merge',
'tf.sparse_minimum':
'tf.sparse.minimum',
'tf.sparse_placeholder':
'tf.compat.v1.sparse_placeholder',
'tf.sparse_reduce_max_sparse':
'tf.compat.v1.sparse_reduce_max_sparse',
'tf.sparse_reduce_sum_sparse':
'tf.compat.v1.sparse_reduce_sum_sparse',
'tf.sparse_reorder':
'tf.sparse.reorder',
'tf.sparse_reset_shape':
'tf.sparse.reset_shape',
'tf.sparse_reshape':
'tf.sparse.reshape',
'tf.sparse_retain':
'tf.sparse.retain',
'tf.sparse_segment_mean':
'tf.compat.v1.sparse_segment_mean',
'tf.sparse_segment_sqrt_n':
'tf.compat.v1.sparse_segment_sqrt_n',
'tf.sparse_segment_sum':
'tf.compat.v1.sparse_segment_sum',
'tf.sparse_slice':
'tf.sparse.slice',
'tf.sparse_softmax':
'tf.sparse.softmax',
'tf.sparse_tensor_dense_matmul':
'tf.sparse.sparse_dense_matmul',
'tf.sparse_tensor_to_dense':
'tf.sparse.to_dense',
'tf.sparse_to_dense':
'tf.compat.v1.sparse_to_dense',
'tf.sparse_to_indicator':
'tf.sparse.to_indicator',
'tf.sparse_transpose':
'tf.sparse.transpose',
'tf.spectral.dct':
'tf.signal.dct',
'tf.spectral.fft':
'tf.signal.fft',
'tf.spectral.fft2d':
'tf.signal.fft2d',
'tf.spectral.fft3d':
'tf.signal.fft3d',
'tf.spectral.idct':
'tf.signal.idct',
'tf.spectral.ifft':
'tf.signal.ifft',
'tf.spectral.ifft2d':
'tf.signal.ifft2d',
'tf.spectral.ifft3d':
'tf.signal.ifft3d',
'tf.spectral.irfft':
'tf.signal.irfft',
'tf.spectral.irfft2d':
'tf.signal.irfft2d',
'tf.spectral.irfft3d':
'tf.signal.irfft3d',
'tf.spectral.rfft':
'tf.signal.rfft',
'tf.spectral.rfft2d':
'tf.signal.rfft2d',
'tf.spectral.rfft3d':
'tf.signal.rfft3d',
'tf.squared_difference':
'tf.math.squared_difference',
'tf.string_join':
'tf.strings.join',
'tf.string_strip':
'tf.strings.strip',
'tf.string_to_hash_bucket_fast':
'tf.strings.to_hash_bucket_fast',
'tf.string_to_hash_bucket_strong':
'tf.strings.to_hash_bucket_strong',
'tf.summary.Event':
'tf.compat.v1.summary.Event',
'tf.summary.FileWriter':
'tf.compat.v1.summary.FileWriter',
'tf.summary.FileWriterCache':
'tf.compat.v1.summary.FileWriterCache',
'tf.summary.SessionLog':
'tf.compat.v1.summary.SessionLog',
'tf.summary.Summary':
'tf.compat.v1.summary.Summary',
'tf.summary.SummaryDescription':
'tf.compat.v1.summary.SummaryDescription',
'tf.summary.TaggedRunMetadata':
'tf.compat.v1.summary.TaggedRunMetadata',
'tf.summary.all_v2_summary_ops':
'tf.compat.v1.summary.all_v2_summary_ops',
'tf.summary.audio':
'tf.compat.v1.summary.audio',
'tf.summary.get_summary_description':
'tf.compat.v1.summary.get_summary_description',
'tf.summary.histogram':
'tf.compat.v1.summary.histogram',
'tf.summary.image':
'tf.compat.v1.summary.image',
'tf.summary.initialize':
'tf.compat.v1.summary.initialize',
'tf.summary.merge':
'tf.compat.v1.summary.merge',
'tf.summary.merge_all':
'tf.compat.v1.summary.merge_all',
'tf.summary.scalar':
'tf.compat.v1.summary.scalar',
'tf.summary.tensor_summary':
'tf.compat.v1.summary.tensor_summary',
'tf.summary.text':
'tf.compat.v1.summary.text',
'tf.svd':
'tf.linalg.svd',
'tf.tables_initializer':
'tf.compat.v1.tables_initializer',
'tf.tensor_scatter_add':
'tf.tensor_scatter_nd_add',
'tf.tensor_scatter_sub':
'tf.tensor_scatter_nd_sub',
'tf.tensor_scatter_update':
'tf.tensor_scatter_nd_update',
'tf.test.StubOutForTesting':
'tf.compat.v1.test.StubOutForTesting',
'tf.test.compute_gradient_error':
'tf.compat.v1.test.compute_gradient_error',
'tf.test.get_temp_dir':
'tf.compat.v1.test.get_temp_dir',
'tf.test.mock':
'tf.compat.v1.test.mock',
'tf.test.test_src_dir_path':
'tf.compat.v1.test.test_src_dir_path',
'tf.to_bfloat16':
'tf.compat.v1.to_bfloat16',
'tf.to_complex128':
'tf.compat.v1.to_complex128',
'tf.to_complex64':
'tf.compat.v1.to_complex64',
'tf.to_double':
'tf.compat.v1.to_double',
'tf.to_float':
'tf.compat.v1.to_float',
'tf.to_int32':
'tf.compat.v1.to_int32',
'tf.to_int64':
'tf.compat.v1.to_int64',
'tf.tpu.CrossShardOptimizer':
'tf.compat.v1.tpu.CrossShardOptimizer',
'tf.tpu.PaddingSpec':
'tf.compat.v1.tpu.PaddingSpec',
'tf.tpu.batch_parallel':
'tf.compat.v1.tpu.batch_parallel',
'tf.tpu.bfloat16_scope':
'tf.compat.v1.tpu.bfloat16_scope',
'tf.tpu.core':
'tf.compat.v1.tpu.core',
'tf.tpu.cross_replica_sum':
'tf.compat.v1.tpu.cross_replica_sum',
'tf.tpu.experimental.AdagradParameters':
'tf.compat.v1.tpu.experimental.AdagradParameters',
'tf.tpu.experimental.AdamParameters':
'tf.compat.v1.tpu.experimental.AdamParameters',
'tf.tpu.experimental.FtrlParameters':
'tf.compat.v1.tpu.experimental.FtrlParameters',
'tf.tpu.experimental.StochasticGradientDescentParameters':
'tf.compat.v1.tpu.experimental.StochasticGradientDescentParameters',
'tf.tpu.experimental.embedding_column':
'tf.compat.v1.tpu.experimental.embedding_column',
'tf.tpu.experimental.shared_embedding_columns':
'tf.compat.v1.tpu.experimental.shared_embedding_columns',
'tf.tpu.initialize_system':
'tf.compat.v1.tpu.initialize_system',
'tf.tpu.outside_compilation':
'tf.compat.v1.tpu.outside_compilation',
'tf.tpu.replicate':
'tf.compat.v1.tpu.replicate',
'tf.tpu.rewrite':
'tf.compat.v1.tpu.rewrite',
'tf.tpu.shard':
'tf.compat.v1.tpu.shard',
'tf.tpu.shutdown_system':
'tf.compat.v1.tpu.shutdown_system',
'tf.tpu.XLAOptions':
'tf.compat.v1.tpu.XLAOptions',
'tf.trace':
'tf.linalg.trace',
'tf.train.AdadeltaOptimizer':
'tf.compat.v1.train.AdadeltaOptimizer',
'tf.train.AdagradDAOptimizer':
'tf.compat.v1.train.AdagradDAOptimizer',
'tf.train.AdagradOptimizer':
'tf.compat.v1.train.AdagradOptimizer',
'tf.train.AdamOptimizer':
'tf.compat.v1.train.AdamOptimizer',
'tf.train.CheckpointSaverHook':
'tf.estimator.CheckpointSaverHook',
'tf.train.CheckpointSaverListener':
'tf.estimator.CheckpointSaverListener',
'tf.train.ChiefSessionCreator':
'tf.compat.v1.train.ChiefSessionCreator',
'tf.train.FeedFnHook':
'tf.estimator.FeedFnHook',
'tf.train.FinalOpsHook':
'tf.estimator.FinalOpsHook',
'tf.train.FtrlOptimizer':
'tf.compat.v1.train.FtrlOptimizer',
'tf.train.GlobalStepWaiterHook':
'tf.estimator.GlobalStepWaiterHook',
'tf.train.GradientDescentOptimizer':
'tf.compat.v1.train.GradientDescentOptimizer',
'tf.train.LoggingTensorHook':
'tf.estimator.LoggingTensorHook',
'tf.train.LooperThread':
'tf.compat.v1.train.LooperThread',
'tf.train.MomentumOptimizer':
'tf.compat.v1.train.MomentumOptimizer',
'tf.train.MonitoredSession':
'tf.compat.v1.train.MonitoredSession',
'tf.train.MonitoredTrainingSession':
'tf.compat.v1.train.MonitoredTrainingSession',
'tf.train.NanLossDuringTrainingError':
'tf.estimator.NanLossDuringTrainingError',
'tf.train.NanTensorHook':
'tf.estimator.NanTensorHook',
'tf.train.NewCheckpointReader':
'tf.compat.v1.train.NewCheckpointReader',
'tf.train.Optimizer':
'tf.compat.v1.train.Optimizer',
'tf.train.ProfilerHook':
'tf.estimator.ProfilerHook',
'tf.train.ProximalAdagradOptimizer':
'tf.compat.v1.train.ProximalAdagradOptimizer',
'tf.train.ProximalGradientDescentOptimizer':
'tf.compat.v1.train.ProximalGradientDescentOptimizer',
'tf.train.QueueRunner':
'tf.compat.v1.train.QueueRunner',
'tf.train.RMSPropOptimizer':
'tf.compat.v1.train.RMSPropOptimizer',
'tf.train.Saver':
'tf.compat.v1.train.Saver',
'tf.train.SaverDef':
'tf.compat.v1.train.SaverDef',
'tf.train.Scaffold':
'tf.compat.v1.train.Scaffold',
'tf.train.SecondOrStepTimer':
'tf.estimator.SecondOrStepTimer',
'tf.train.Server':
'tf.distribute.Server',
'tf.train.SessionCreator':
'tf.compat.v1.train.SessionCreator',
'tf.train.SessionManager':
'tf.compat.v1.train.SessionManager',
'tf.train.SessionRunArgs':
'tf.estimator.SessionRunArgs',
'tf.train.SessionRunContext':
'tf.estimator.SessionRunContext',
'tf.train.SessionRunHook':
'tf.estimator.SessionRunHook',
'tf.train.SessionRunValues':
'tf.estimator.SessionRunValues',
'tf.train.SingularMonitoredSession':
'tf.compat.v1.train.SingularMonitoredSession',
'tf.train.StepCounterHook':
'tf.estimator.StepCounterHook',
'tf.train.StopAtStepHook':
'tf.estimator.StopAtStepHook',
'tf.train.SummarySaverHook':
'tf.estimator.SummarySaverHook',
'tf.train.Supervisor':
'tf.compat.v1.train.Supervisor',
'tf.train.SyncReplicasOptimizer':
'tf.compat.v1.train.SyncReplicasOptimizer',
'tf.train.VocabInfo':
'tf.estimator.VocabInfo',
'tf.train.WorkerSessionCreator':
'tf.compat.v1.train.WorkerSessionCreator',
'tf.train.add_queue_runner':
'tf.compat.v1.train.add_queue_runner',
'tf.train.assert_global_step':
'tf.compat.v1.train.assert_global_step',
'tf.train.basic_train_loop':
'tf.compat.v1.train.basic_train_loop',
'tf.train.batch':
'tf.compat.v1.train.batch',
'tf.train.batch_join':
'tf.compat.v1.train.batch_join',
'tf.train.checkpoint_exists':
'tf.compat.v1.train.checkpoint_exists',
'tf.train.cosine_decay':
'tf.compat.v1.train.cosine_decay',
'tf.train.cosine_decay_restarts':
'tf.compat.v1.train.cosine_decay_restarts',
'tf.train.create_global_step':
'tf.compat.v1.train.create_global_step',
'tf.train.do_quantize_training_on_graphdef':
'tf.compat.v1.train.do_quantize_training_on_graphdef',
'tf.train.experimental.MixedPrecisionLossScaleOptimizer':
'tf.compat.v1.train.experimental.MixedPrecisionLossScaleOptimizer',
'tf.train.exponential_decay':
'tf.compat.v1.train.exponential_decay',
'tf.train.export_meta_graph':
'tf.compat.v1.train.export_meta_graph',
'tf.train.generate_checkpoint_state_proto':
'tf.compat.v1.train.generate_checkpoint_state_proto',
'tf.train.get_checkpoint_mtimes':
'tf.compat.v1.train.get_checkpoint_mtimes',
'tf.train.get_global_step':
'tf.compat.v1.train.get_global_step',
'tf.train.get_or_create_global_step':
'tf.compat.v1.train.get_or_create_global_step',
'tf.train.global_step':
'tf.compat.v1.train.global_step',
'tf.train.import_meta_graph':
'tf.compat.v1.train.import_meta_graph',
'tf.train.init_from_checkpoint':
'tf.compat.v1.train.init_from_checkpoint',
'tf.train.input_producer':
'tf.compat.v1.train.input_producer',
'tf.train.inverse_time_decay':
'tf.compat.v1.train.inverse_time_decay',
'tf.train.limit_epochs':
'tf.compat.v1.train.limit_epochs',
'tf.train.linear_cosine_decay':
'tf.compat.v1.train.linear_cosine_decay',
'tf.train.match_filenames_once':
'tf.io.match_filenames_once',
'tf.train.maybe_batch':
'tf.compat.v1.train.maybe_batch',
'tf.train.maybe_batch_join':
'tf.compat.v1.train.maybe_batch_join',
'tf.train.maybe_shuffle_batch':
'tf.compat.v1.train.maybe_shuffle_batch',
'tf.train.maybe_shuffle_batch_join':
'tf.compat.v1.train.maybe_shuffle_batch_join',
'tf.train.natural_exp_decay':
'tf.compat.v1.train.natural_exp_decay',
'tf.train.noisy_linear_cosine_decay':
'tf.compat.v1.train.noisy_linear_cosine_decay',
'tf.train.piecewise_constant':
'tf.compat.v1.train.piecewise_constant',
'tf.train.piecewise_constant_decay':
'tf.compat.v1.train.piecewise_constant_decay',
'tf.train.polynomial_decay':
'tf.compat.v1.train.polynomial_decay',
'tf.train.queue_runner.QueueRunner':
'tf.compat.v1.train.queue_runner.QueueRunner',
'tf.train.queue_runner.add_queue_runner':
'tf.compat.v1.train.queue_runner.add_queue_runner',
'tf.train.queue_runner.start_queue_runners':
'tf.compat.v1.train.queue_runner.start_queue_runners',
'tf.train.range_input_producer':
'tf.compat.v1.train.range_input_producer',
'tf.train.remove_checkpoint':
'tf.compat.v1.train.remove_checkpoint',
'tf.train.replica_device_setter':
'tf.compat.v1.train.replica_device_setter',
'tf.train.shuffle_batch':
'tf.compat.v1.train.shuffle_batch',
'tf.train.shuffle_batch_join':
'tf.compat.v1.train.shuffle_batch_join',
'tf.train.slice_input_producer':
'tf.compat.v1.train.slice_input_producer',
'tf.train.start_queue_runners':
'tf.compat.v1.train.start_queue_runners',
'tf.train.string_input_producer':
'tf.compat.v1.train.string_input_producer',
'tf.train.summary_iterator':
'tf.compat.v1.train.summary_iterator',
'tf.train.update_checkpoint_state':
'tf.compat.v1.train.update_checkpoint_state',
'tf.train.warm_start':
'tf.compat.v1.train.warm_start',
'tf.train.write_graph':
'tf.io.write_graph',
'tf.trainable_variables':
'tf.compat.v1.trainable_variables',
'tf.truncated_normal':
'tf.random.truncated_normal',
'tf.uniform_unit_scaling_initializer':
'tf.compat.v1.uniform_unit_scaling_initializer',
'tf.unsorted_segment_max':
'tf.math.unsorted_segment_max',
'tf.unsorted_segment_mean':
'tf.math.unsorted_segment_mean',
'tf.unsorted_segment_min':
'tf.math.unsorted_segment_min',
'tf.unsorted_segment_prod':
'tf.math.unsorted_segment_prod',
'tf.unsorted_segment_sqrt_n':
'tf.math.unsorted_segment_sqrt_n',
'tf.unsorted_segment_sum':
'tf.math.unsorted_segment_sum',
'tf.variable_axis_size_partitioner':
'tf.compat.v1.variable_axis_size_partitioner',
'tf.variable_op_scope':
'tf.compat.v1.variable_op_scope',
'tf.variable_scope':
'tf.compat.v1.variable_scope',
'tf.variables_initializer':
'tf.compat.v1.variables_initializer',
'tf.verify_tensor_all_finite':
'tf.compat.v1.verify_tensor_all_finite',
'tf.wrap_function':
'tf.compat.v1.wrap_function',
'tf.write_file':
'tf.io.write_file',
'tf.zeta':
'tf.math.zeta'
}
|
|
# Transliteration table: 256 ASCII strings indexed by the low byte (0x00-0xff)
# of a code point, as the inline hex comments show.  The entries read like
# romanized Hangul syllables in the "jj" (ssang-jieut) onset series --
# presumably one 256-entry page of a Unidecode-style transliteration data
# set; TODO confirm which Unicode code-point block this page covers.
data = (
    'jjwaels', # 0x00
    'jjwaelt', # 0x01
    'jjwaelp', # 0x02
    'jjwaelh', # 0x03
    'jjwaem', # 0x04
    'jjwaeb', # 0x05
    'jjwaebs', # 0x06
    'jjwaes', # 0x07
    'jjwaess', # 0x08
    'jjwaeng', # 0x09
    'jjwaej', # 0x0a
    'jjwaec', # 0x0b
    'jjwaek', # 0x0c
    'jjwaet', # 0x0d
    'jjwaep', # 0x0e
    'jjwaeh', # 0x0f
    'jjoe', # 0x10
    'jjoeg', # 0x11
    'jjoegg', # 0x12
    'jjoegs', # 0x13
    'jjoen', # 0x14
    'jjoenj', # 0x15
    'jjoenh', # 0x16
    'jjoed', # 0x17
    'jjoel', # 0x18
    'jjoelg', # 0x19
    'jjoelm', # 0x1a
    'jjoelb', # 0x1b
    'jjoels', # 0x1c
    'jjoelt', # 0x1d
    'jjoelp', # 0x1e
    'jjoelh', # 0x1f
    'jjoem', # 0x20
    'jjoeb', # 0x21
    'jjoebs', # 0x22
    'jjoes', # 0x23
    'jjoess', # 0x24
    'jjoeng', # 0x25
    'jjoej', # 0x26
    'jjoec', # 0x27
    'jjoek', # 0x28
    'jjoet', # 0x29
    'jjoep', # 0x2a
    'jjoeh', # 0x2b
    'jjyo', # 0x2c
    'jjyog', # 0x2d
    'jjyogg', # 0x2e
    'jjyogs', # 0x2f
    'jjyon', # 0x30
    'jjyonj', # 0x31
    'jjyonh', # 0x32
    'jjyod', # 0x33
    'jjyol', # 0x34
    'jjyolg', # 0x35
    'jjyolm', # 0x36
    'jjyolb', # 0x37
    'jjyols', # 0x38
    'jjyolt', # 0x39
    'jjyolp', # 0x3a
    'jjyolh', # 0x3b
    'jjyom', # 0x3c
    'jjyob', # 0x3d
    'jjyobs', # 0x3e
    'jjyos', # 0x3f
    'jjyoss', # 0x40
    'jjyong', # 0x41
    'jjyoj', # 0x42
    'jjyoc', # 0x43
    'jjyok', # 0x44
    'jjyot', # 0x45
    'jjyop', # 0x46
    'jjyoh', # 0x47
    'jju', # 0x48
    'jjug', # 0x49
    'jjugg', # 0x4a
    'jjugs', # 0x4b
    'jjun', # 0x4c
    'jjunj', # 0x4d
    'jjunh', # 0x4e
    'jjud', # 0x4f
    'jjul', # 0x50
    'jjulg', # 0x51
    'jjulm', # 0x52
    'jjulb', # 0x53
    'jjuls', # 0x54
    'jjult', # 0x55
    'jjulp', # 0x56
    'jjulh', # 0x57
    'jjum', # 0x58
    'jjub', # 0x59
    'jjubs', # 0x5a
    'jjus', # 0x5b
    'jjuss', # 0x5c
    'jjung', # 0x5d
    'jjuj', # 0x5e
    'jjuc', # 0x5f
    'jjuk', # 0x60
    'jjut', # 0x61
    'jjup', # 0x62
    'jjuh', # 0x63
    'jjweo', # 0x64
    'jjweog', # 0x65
    'jjweogg', # 0x66
    'jjweogs', # 0x67
    'jjweon', # 0x68
    'jjweonj', # 0x69
    'jjweonh', # 0x6a
    'jjweod', # 0x6b
    'jjweol', # 0x6c
    'jjweolg', # 0x6d
    'jjweolm', # 0x6e
    'jjweolb', # 0x6f
    'jjweols', # 0x70
    'jjweolt', # 0x71
    'jjweolp', # 0x72
    'jjweolh', # 0x73
    'jjweom', # 0x74
    'jjweob', # 0x75
    'jjweobs', # 0x76
    'jjweos', # 0x77
    'jjweoss', # 0x78
    'jjweong', # 0x79
    'jjweoj', # 0x7a
    'jjweoc', # 0x7b
    'jjweok', # 0x7c
    'jjweot', # 0x7d
    'jjweop', # 0x7e
    'jjweoh', # 0x7f
    'jjwe', # 0x80
    'jjweg', # 0x81
    'jjwegg', # 0x82
    'jjwegs', # 0x83
    'jjwen', # 0x84
    'jjwenj', # 0x85
    'jjwenh', # 0x86
    'jjwed', # 0x87
    'jjwel', # 0x88
    'jjwelg', # 0x89
    'jjwelm', # 0x8a
    'jjwelb', # 0x8b
    'jjwels', # 0x8c
    'jjwelt', # 0x8d
    'jjwelp', # 0x8e
    'jjwelh', # 0x8f
    'jjwem', # 0x90
    'jjweb', # 0x91
    'jjwebs', # 0x92
    'jjwes', # 0x93
    'jjwess', # 0x94
    'jjweng', # 0x95
    'jjwej', # 0x96
    'jjwec', # 0x97
    'jjwek', # 0x98
    'jjwet', # 0x99
    'jjwep', # 0x9a
    'jjweh', # 0x9b
    'jjwi', # 0x9c
    'jjwig', # 0x9d
    'jjwigg', # 0x9e
    'jjwigs', # 0x9f
    'jjwin', # 0xa0
    'jjwinj', # 0xa1
    'jjwinh', # 0xa2
    'jjwid', # 0xa3
    'jjwil', # 0xa4
    'jjwilg', # 0xa5
    'jjwilm', # 0xa6
    'jjwilb', # 0xa7
    'jjwils', # 0xa8
    'jjwilt', # 0xa9
    'jjwilp', # 0xaa
    'jjwilh', # 0xab
    'jjwim', # 0xac
    'jjwib', # 0xad
    'jjwibs', # 0xae
    'jjwis', # 0xaf
    'jjwiss', # 0xb0
    'jjwing', # 0xb1
    'jjwij', # 0xb2
    'jjwic', # 0xb3
    'jjwik', # 0xb4
    'jjwit', # 0xb5
    'jjwip', # 0xb6
    'jjwih', # 0xb7
    'jjyu', # 0xb8
    'jjyug', # 0xb9
    'jjyugg', # 0xba
    'jjyugs', # 0xbb
    'jjyun', # 0xbc
    'jjyunj', # 0xbd
    'jjyunh', # 0xbe
    'jjyud', # 0xbf
    'jjyul', # 0xc0
    'jjyulg', # 0xc1
    'jjyulm', # 0xc2
    'jjyulb', # 0xc3
    'jjyuls', # 0xc4
    'jjyult', # 0xc5
    'jjyulp', # 0xc6
    'jjyulh', # 0xc7
    'jjyum', # 0xc8
    'jjyub', # 0xc9
    'jjyubs', # 0xca
    'jjyus', # 0xcb
    'jjyuss', # 0xcc
    'jjyung', # 0xcd
    'jjyuj', # 0xce
    'jjyuc', # 0xcf
    'jjyuk', # 0xd0
    'jjyut', # 0xd1
    'jjyup', # 0xd2
    'jjyuh', # 0xd3
    'jjeu', # 0xd4
    'jjeug', # 0xd5
    'jjeugg', # 0xd6
    'jjeugs', # 0xd7
    'jjeun', # 0xd8
    'jjeunj', # 0xd9
    'jjeunh', # 0xda
    'jjeud', # 0xdb
    'jjeul', # 0xdc
    'jjeulg', # 0xdd
    'jjeulm', # 0xde
    'jjeulb', # 0xdf
    'jjeuls', # 0xe0
    'jjeult', # 0xe1
    'jjeulp', # 0xe2
    'jjeulh', # 0xe3
    'jjeum', # 0xe4
    'jjeub', # 0xe5
    'jjeubs', # 0xe6
    'jjeus', # 0xe7
    'jjeuss', # 0xe8
    'jjeung', # 0xe9
    'jjeuj', # 0xea
    'jjeuc', # 0xeb
    'jjeuk', # 0xec
    'jjeut', # 0xed
    'jjeup', # 0xee
    'jjeuh', # 0xef
    'jjyi', # 0xf0
    'jjyig', # 0xf1
    'jjyigg', # 0xf2
    'jjyigs', # 0xf3
    'jjyin', # 0xf4
    'jjyinj', # 0xf5
    'jjyinh', # 0xf6
    'jjyid', # 0xf7
    'jjyil', # 0xf8
    'jjyilg', # 0xf9
    'jjyilm', # 0xfa
    'jjyilb', # 0xfb
    'jjyils', # 0xfc
    'jjyilt', # 0xfd
    'jjyilp', # 0xfe
    'jjyilh', # 0xff
)
|
|
"""
Utility classes and methods to pickle parts of symbolic graph.
These pickled graphs can be used, for instance, as cases for
unit tests or regression tests.
"""
import numpy
import os
import pickle
import sys
import tempfile
import zipfile
import warnings
from collections import defaultdict
from contextlib import closing
from pickle import HIGHEST_PROTOCOL
from six import BytesIO
try:
from pickle import DEFAULT_PROTOCOL
except ImportError:
DEFAULT_PROTOCOL = HIGHEST_PROTOCOL
import theano
from theano import config
from theano.compat import PY3
from six import string_types
from theano.compile.sharedvalue import SharedVariable
try:
from theano.sandbox.cuda import cuda_ndarray
except ImportError:
cuda_ndarray = None
__docformat__ = "restructuredtext en"
__authors__ = "Pascal Lamblin"
__copyright__ = "Copyright 2013, Universite de Montreal"
__license__ = "3-clause BSD"

# Pickling recurses through the object graph being serialized; presumably the
# default limit (1000) proved too low for large symbolic graphs -- TODO
# confirm the motivation for the specific value 3000.
sys.setrecursionlimit(3000)

# Module-level alias; StripPickler below subclasses it and calls its methods.
Pickler = pickle.Pickler
class StripPickler(Pickler):
    """
    Pickler subclass that removes unnecessary attributes from Theano
    objects before serializing them.

    .. versionadded:: 0.8

    Example of use::

        fn_args = dict(inputs=inputs,
                       outputs=outputs,
                       updates=updates)
        dest_pkl = 'my_test.pkl'
        f = open(dest_pkl, 'wb')
        strip_pickler = StripPickler(f, protocol=-1)
        strip_pickler.dump(fn_args)
        f.close()
    """

    def __init__(self, file, protocol=0, extra_tag_to_remove=None):
        # Pickler is an old-style class on Python 2, so super() cannot be
        # used here.
        Pickler.__init__(self, file, protocol)
        self.tag_to_remove = ['trace', 'test_value']
        self.tag_to_remove.extend(extra_tag_to_remove or [])

    def save(self, obj):
        if isinstance(obj, theano.gof.utils.scratchpad):
            # Strip debugging tags (tag.trace, test_value, ...) from the
            # scratchpad of Variable and Apply nodes before pickling.
            for attr in self.tag_to_remove:
                if hasattr(obj, attr):
                    del obj.__dict__[attr]
        elif (isinstance(obj, theano.tensor.Elemwise) and
              '__doc__' in obj.__dict__):
            # Manually-added docstrings of Elemwise ops are not worth keeping.
            del obj.__dict__['__doc__']
        return Pickler.save(self, obj)
# Make an unpickler that tries encoding byte streams before raising TypeError.
# This is useful with python 3, in order to unpickle files created with
# python 2.
# This code is taken from Pandas, https://github.com/pydata/pandas,
# under the same 3-clause BSD license.
def load_reduce(self):
    """Variant of ``Unpickler.load_reduce`` that retries with encoded args.

    If calling ``func(*args)`` raises (typically while reading, under
    Python 3, a pickle written by Python 2), every string argument is
    re-encoded with ``self.encoding`` and the call is attempted once more
    before the original error is allowed to propagate.
    """
    stack = self.stack
    args = stack.pop()
    func = stack[-1]
    try:
        value = func(*args)
    except Exception:
        # Retry once with every textual argument re-encoded as bytes.
        if self.encoding is not None:
            retry_args = tuple(
                arg.encode(self.encoding)
                if isinstance(arg, string_types) else arg
                for arg in args)
            try:
                stack[-1] = func(*retry_args)
                return
            except Exception:
                pass
        # Both attempts failed: re-raise the original exception.
        raise
    stack[-1] = value
if PY3:
    class CompatUnpickler(pickle._Unpickler):
        """
        Unpickler that can reload, under Python 3, some numpy ndarrays
        pickled by Python 2 (string arguments are re-encoded on demand by
        the `load_reduce` registered below).

        .. versionadded:: 0.8

        Examples
        --------
        ::

            with open(fname, 'rb') as fp:
                if PY3:
                    u = CompatUnpickler(fp, encoding="latin1")
                else:
                    u = CompatUnpickler(fp)
                mat = u.load()

        """
        pass

    # Register `load_reduce` defined above so REDUCE opcodes go through the
    # retrying implementation.  NOTE(review): `dispatch` is inherited from
    # pickle._Unpickler, so mutating it here also affects other unpicklers
    # unless it is copied first -- presumably acceptable; confirm.
    CompatUnpickler.dispatch[pickle.REDUCE[0]] = load_reduce
else:
    # Python 2 branch: the stock Unpickler already reads these pickles, so
    # no byte-stream re-encoding hook is registered.
    class CompatUnpickler(pickle.Unpickler):
        """
        Python 2 counterpart of the class above; provided only so callers
        can use the same name on both major versions.

        .. versionadded:: 0.8

        Examples
        --------
        ::

            with open(fname, 'rb') as fp:
                if PY3:
                    u = CompatUnpickler(fp, encoding="latin1")
                else:
                    u = CompatUnpickler(fp)
                mat = u.load()

        """
        pass
class PersistentNdarrayID(object):
    """Persist ndarrays in an object by saving them to a zip file.
    :param zip_file: A zip file handle that the NumPy arrays will be saved to.
    :type zip_file: :class:`zipfile.ZipFile`
    .. note:
        The convention for persistent ids given by this class and its derived
        classes is that the name should take the form `type.name` where `type`
        can be used by the persistent loader to determine how to load the
        object, while `name` is human-readable and as descriptive as possible.
    """
    def __init__(self, zip_file):
        self.zip_file = zip_file
        self.count = 0
        self.seen = {}

    def _resolve_name(self, obj):
        """Determine the name the object should be saved under."""
        resolved = 'array_{0}'.format(self.count)
        self.count += 1
        return resolved

    def __call__(self, obj):
        # Only plain ndarrays get a persistent id; everything else is
        # pickled inline (None tells pickle to proceed normally).
        if type(obj) is not numpy.ndarray:
            return None
        key = id(obj)
        if key not in self.seen:
            def write_array(f):
                numpy.lib.format.write_array(f, obj)
            name = self._resolve_name(obj)
            zipadd(write_array, self.zip_file, name)
            self.seen[key] = 'ndarray.{0}'.format(name)
        return self.seen[key]
class PersistentCudaNdarrayID(PersistentNdarrayID):
    """Extend PersistentNdarrayID to also persist CudaNdarrays (as host copies)."""
    def __call__(self, obj):
        is_cuda = (cuda_ndarray is not None and
                   type(obj) is cuda_ndarray.cuda_ndarray.CudaNdarray)
        if is_cuda:
            key = id(obj)
            if key not in self.seen:
                # Copy device memory to host before writing the NPY payload.
                def write_array(f):
                    numpy.lib.format.write_array(f, numpy.asarray(obj))
                name = self._resolve_name(obj)
                zipadd(write_array, self.zip_file, name)
                self.seen[key] = 'cuda_ndarray.{0}'.format(name)
            return self.seen[key]
        # Not a CudaNdarray: defer to the plain-ndarray handler.
        return super(PersistentCudaNdarrayID, self).__call__(obj)
class PersistentSharedVariableID(PersistentCudaNdarrayID):
    """Uses shared variable names when persisting to zip file.
    If a shared variable has a name, this name is used as the name of the
    NPY file inside of the zip file. NumPy arrays that aren't matched to a
    shared variable are persisted as usual (i.e. `array_0`, `array_1`,
    etc.)
    :param allow_unnamed: Allow shared variables without a name to be
        persisted. Defaults to ``True``.
    :type allow_unnamed: bool, optional
    :param allow_duplicates: Allow multiple shared variables to have the same
        name, in which case they will be numbered e.g. `x`, `x_2`, `x_3`, etc.
        Defaults to ``True``.
    :type allow_duplicates: bool, optional
    :raises ValueError
        If an unnamed shared variable is encountered and `allow_unnamed` is
        ``False``, or if two shared variables have the same name, and
        `allow_duplicates` is ``False``.
    """
    def __init__(self, zip_file, allow_unnamed=True, allow_duplicates=True):
        super(PersistentSharedVariableID, self).__init__(zip_file)
        # how many times each shared-variable name has been used
        self.name_counter = defaultdict(int)
        # maps id(underlying ndarray) -> shared-variable name
        self.ndarray_names = {}
        self.allow_unnamed = allow_unnamed
        self.allow_duplicates = allow_duplicates

    def _resolve_name(self, obj):
        """Prefer the shared variable's name, numbering duplicates."""
        if id(obj) in self.ndarray_names:
            name = self.ndarray_names[id(obj)]
            count = self.name_counter[name]
            self.name_counter[name] += 1
            if count:
                if not self.allow_duplicates:
                    raise ValueError("multiple shared variables with the name "
                                     "`{0}` found".format(name))
                name = '{0}_{1}'.format(name, count + 1)
            return name
        return super(PersistentSharedVariableID, self)._resolve_name(obj)

    def __call__(self, obj):
        if isinstance(obj, SharedVariable):
            if obj.name:
                if obj.name == 'pkl':
                    # BUG FIX: the exception object was created but never
                    # raised, so the reserved name silently went through.
                    raise ValueError(
                        "can't pickle shared variable with name `pkl`")
                self.ndarray_names[id(obj.container.storage[0])] = obj.name
            elif not self.allow_unnamed:
                raise ValueError("unnamed shared variable, {0}".format(obj))
        return super(PersistentSharedVariableID, self).__call__(obj)
class PersistentNdarrayLoad(object):
    """Load NumPy arrays that were persisted to a zip file when pickling.
    :param zip_file: The zip file handle in which the NumPy arrays are saved.
    :type zip_file: :class:`zipfile.ZipFile`
    """
    def __init__(self, zip_file):
        self.zip_file = zip_file

    def __call__(self, persid):
        # persistent ids are "<type>.<name>" (see PersistentNdarrayID)
        array_type, name = persid.split('.')
        array = numpy.lib.format.read_array(self.zip_file.open(name))
        if array_type != 'cuda_ndarray':
            return array
        # The array was persisted from the GPU: either demote it to a
        # plain ndarray, or rebuild a CudaNdarray if CUDA is available.
        if config.experimental.unpickle_gpu_on_cpu:
            # directly return numpy array
            warnings.warn("config.experimental.unpickle_gpu_on_cpu is set "
                          "to True. Unpickling CudaNdarray as "
                          "numpy.ndarray")
            return array
        if cuda_ndarray:
            return cuda_ndarray.cuda_ndarray.CudaNdarray(array)
        raise ImportError("Cuda not found. Cannot unpickle "
                          "CudaNdarray")
def dump(obj, file_handler, protocol=DEFAULT_PROTOCOL,
         persistent_id=PersistentSharedVariableID):
    """Pickles an object to a zip file using external persistence.
    :param obj: The object to pickle.
    :type obj: object
    :param file_handler: The file handle to save the object to.
    :type file_handler: file
    :param protocol: The pickling protocol to use. Unlike Python's built-in
        pickle, the default is set to `2` instead of 0 for Python 2. The
        Python 3 default (level 3) is maintained.
    :type protocol: int, optional
    :param persistent_id: The callable that persists certain objects in the
        object hierarchy to separate files inside of the zip file. For example,
        :class:`PersistentNdarrayID` saves any :class:`numpy.ndarray` to a
        separate NPY file inside of the zip file.
    :type persistent_id: callable
    .. versionadded:: 0.8
    .. note::
        The final file is simply a zipped file containing at least one file,
        `pkl`, which contains the pickled object. It can contain any other
        number of external objects. Note that the zip files are compatible with
        NumPy's :func:`numpy.load` function.
    >>> import theano
    >>> foo_1 = theano.shared(0, name='foo')
    >>> foo_2 = theano.shared(1, name='foo')
    >>> with open('model.zip', 'wb') as f:
    ...     dump((foo_1, foo_2, numpy.array(2)), f)
    >>> numpy.load('model.zip').keys()
    ['foo', 'foo_2', 'array_0', 'pkl']
    >>> numpy.load('model.zip')['foo']
    array(0)
    >>> with open('model.zip', 'rb') as f:
    ...     foo_1, foo_2, array = load(f)
    >>> array
    array(2)
    """
    archive = zipfile.ZipFile(file_handler, 'w', zipfile.ZIP_DEFLATED,
                              allowZip64=True)
    with closing(archive) as zip_file:
        # The pickled object itself is stored under the well-known name
        # 'pkl'; arrays get side files via the persistent_id callable.
        def write_pickle(f):
            pickler = pickle.Pickler(f, protocol=protocol)
            pickler.persistent_id = persistent_id(zip_file)
            pickler.dump(obj)
        zipadd(write_pickle, zip_file, 'pkl')
def load(f, persistent_load=PersistentNdarrayLoad):
    """Load a file that was dumped to a zip file.
    :param f: The file handle to the zip file to load the object from.
    :type f: file
    :param persistent_load: The persistent loading function to use for
        unpickling. This must be compatible with the `persisten_id` function
        used when pickling.
    :type persistent_load: callable, optional
    .. versionadded:: 0.8
    """
    with closing(zipfile.ZipFile(f, 'r')) as zip_file:
        # Read the pickled payload fully before unpickling, so the
        # unpickler works from an independent in-memory buffer.
        pickled = zip_file.open('pkl').read()
        unpickler = pickle.Unpickler(BytesIO(pickled))
        unpickler.persistent_load = persistent_load(zip_file)
        return unpickler.load()
def zipadd(func, zip_file, name):
    """Calls a function with a file object, saving it to a zip file.
    :param func: The function to call.
    :type func: callable
    :param zip_file: The zip file that `func` should write its data to.
    :type zip_file: :class:`zipfile.ZipFile`
    :param name: The name of the file inside of the zipped archive that `func`
        should save its data to.
    :type name: str
    """
    # delete=False so the (closed) file can be re-opened by zip_file.write,
    # which is required on platforms such as Windows where an open
    # NamedTemporaryFile cannot be opened a second time.
    temp_file = tempfile.NamedTemporaryFile('wb', delete=False)
    try:
        try:
            func(temp_file)
        finally:
            temp_file.close()
        zip_file.write(temp_file.name, arcname=name)
    finally:
        # BUG FIX: always remove the temporary file, even when `func` or
        # zip_file.write raises (the original leaked it on failure).
        if os.path.isfile(temp_file.name):
            os.remove(temp_file.name)
"""Tools to generate simulated TESS images."""
import numpy as np
import zachopy.utils
import astropy.io.fits
import scipy.ndimage.measurements
import os
import matplotlib.pylab as plt
import matplotlib.gridspec as gridspec
import Cosmics
import Stamper
import logging
from settings import log_file_handler
# module-level logger, also mirrored into the shared log file
logger = logging.getLogger(__name__)
logger.addHandler(log_file_handler)
# setup basic output options for this Python session
np.set_printoptions(threshold=1e6, linewidth=300)
# suffix appended after '.fits' in output filenames (empty here;
# presumably '.gz' when writing pre-compressed images -- TODO confirm)
zipsuffix = ''
# define mapping between CCD number and quadrant
# (sign pair multiplies the CCD-center offset in focal-plane x/y;
#  key 0 means "fake subarray", which has no quadrant)
quadrants = {1: (1, 1), 2: (-1, 1), 3: (-1, -1), 4: (1, -1), 0: None}
class CCD(object):
    def __init__(self, number=1, camera=None, subarray=None, label='', display=False):
        """Turn on a TESS CCD, which can be used to make simulated images.
        camera=None is parent TESS camera for this CCD, required for *any* conversion from (RA, Dec) to (x,y)
        number=1 is the CCD number of the detector is it? (1-4, like mathematical quadrants)
        subarray=None is the size of a square subarray that can optionally be defined for speediness
        label='' is a special label you can add for experimenting
        display=False turns the ds9 display of each exposure on/off
        """
        super(CCD, self).__init__()
        # keep track of where this image is
        self.number = number
        self.camera = camera
        self.subarray = subarray
        # some labeling for the image
        self.label = label
        self.note = ''
        # define the size and location of the detector within the field
        # NOTE(review): both branches below dereference self.camera, so a
        # camera is effectively required despite the camera=None default.
        if subarray is None:
            # image size is the whole detector
            self.npix = 2048
            # image center (in focalxy coordinates) is set by the image size and the camera's CCD gap
            self.center = self.camera.gapinpixels / 2.0 + self.npix / 2.0 * np.array(quadrants[number])
        else:
            # image size is the specified subarray size
            self.npix = subarray
            # image center is the center of the field (for now)
            self.center = np.array([0.0, 0.0])
        # some numbers for the image
        self.xsize = self.npix
        self.ysize = self.npix
        self.xmin = 0
        self.xmax = self.xmin + self.xsize
        self.ymin = 0
        self.ymax = self.ymin + self.ysize
        # pull out the camera's position string, for saving images
        self.pos_string = self.camera.pos_string()
        # a few misc diagnostics
        self.display = display
        self.plot = False
        logger.info('created CCD #{}, of size {}x{}'.format(
            self.number, self.xsize, self.ysize))
        # start populating the image header (seems like we can't do this until we're sure camera has pointed somewhere)
        # self.populateHeader()
    def show(self):
        """Display the current (possibly in-progress image.)

        No-op unless self.display is True; lazily opens a ds9 window on
        first use and shows each exposure in its own frame (capped at 9).
        """
        if self.display:
            # lazily create the ds9 connection on first call
            try:
                self.ds9
            except AttributeError:
                from zachopy.displays.ds9 import ds9
                self.ds9 = ds9(self.name.replace(' ', ''))
            self.ds9.clear()
            # one ds9 frame per exposure, up to a maximum of 9 frames
            frame = self.camera.counter / self.camera.counterstep
            self.maxds9frames = 9
            if frame > self.maxds9frames:
                frame = 9
                note = ' (maxed out at {} frames)'.format(self.maxds9frames)
            else:
                note = ''
            self.ds9.one(self.image, frame=frame)
            logger.info('showing exposure {} of {} in frame {}'.format(
                self.camera.counter, self.name, frame))
            if note != '':
                logger.info(note)
            if self.camera.counter == 0:
                # set the scales, for the first time
                self.ds9.limits = np.percentile(self.image, [0, 99])
                self.ds9.scale(scale='log', limits=self.ds9.limits)
@property
def name(self):
"""Simple name for this CCD, for saving and printing."""
# define a base name, whether this is a subarray or a full ccd
if self.number == 0:
str = 'sub{0:d}x{0:d}'.format(self.subarray)
else:
str = 'ccd{0:d}'.format(self.number)
# add label, if one exists
if self.label != '':
str += '_' + self.label
return str
@property
def directory(self):
"""Directory for saving all images from this CCD."""
d = os.path.join(self.camera.directory, self.name)
zachopy.utils.mkdir(d)
return d
def photons(self, mag):
"""Use magnitude to calculate photons per second that will be recorded on a CCD."""
# this doesn't work great for M dwarfs, need to include multiband information at some point
return self.camera.effective_area * 10 ** (-0.4 * mag) * 1.7e6
def populateHeader(self, ccd=None):
"""Populate the header structure with information about the CCD image, and the inputs."""
# create an empty header
try:
self.camera.header
except:
self.camera.populateHeader() # astropy.io.fits.Header()
self.header = self.camera.header
# fill it with some CCD details
self.header['CCD'] = ''
self.header['CCDNOTE'] = ('', 'Details of this individual image')
self.header['EXPTIME'] = (self.camera.cadence, '[s] exposure time ')
self.header['NREADS'] = (
np.round(self.camera.cadence / self.camera.singleread).astype(np.int), '# of reads summed')
self.header['SUBEXPTI'] = (self.camera.singleread, '[s] exposure in a single subexposure')
self.header['SATURATE'] = (
self.camera.saturation * self.camera.cadence / self.camera.singleread,
'[e-] saturation level in this image')
self.header['READNOIS'] = (self.camera.read_noise, '[e-] read noise (per individual read)')
self.header['READTIME'] = (self.camera.readouttime, '[s] time to transer to frame store')
self.header['CCDNUM'] = (self.number, 'CCD number (1,2,3,4 or 0=fake subarray)')
self.header['CCDSIZE'] = (self.npix, '[pix] size of one CCD')
# fill in the timestamp for this CCD image
self.setTime()
def addInputLabels(self):
try:
self.header['INPUTS']
except KeyError:
# leave space to talk about the inputs to this image
self.header['INPUTS'] = ''
self.header['INPUNOTE'] = ('', 'Ingredients for simulated images.')
def setTime(self):
"""Based on the camera counter, apply a timestamp to this image."""
# add time to the image (in a very simplistic way -- no accounting for the spacecraft orbit)
self.header['COUNTER'] = self.camera.counter, '# of exposures since start, for this field'
self.camera.bjd = self.camera.counterToBJD(self.camera.counter)
self.camera.bjdantisun = self.camera.bjd0 + 13.7
self.header['BJD0'] = self.camera.bjd0, '[day] base time subtracted from all BJD'
self.header['BJD'] = self.camera.bjd - self.camera.bjd0, '[day] mid-exposure time - BJD0'
self.header['ANTISUN'] = self.camera.bjdantisun - self.camera.bjd0, '[day] time of antisun - BJD0'
self.epoch = (self.camera.bjd - 2451544.5) / 365.25 + 2000.0
self.header['EPOCH'] = self.epoch, '[years] epoch of mid-exposure time'
def pixels(self):
"""Give grids of x and y values (2D arrays) of the image pixels."""
# use meshgrid to create (x,y) arrays, using default 'xy' indexing (x increases with column, y increases with row)
x, y = np.meshgrid(np.arange(self.xsize) + self.xmin, np.arange(self.ysize) + self.ymin) # , indexing='ij')
pix = self.camera.cartographer.point(x, y, 'ccdxy')
return pix
def zeros(self):
"""Create an image of zeros, the same size as the CCD."""
return np.zeros((self.xsize, self.ysize))
def ones(self):
"""Create an image of ones, the same size as the CCD."""
return np.ones((self.xsize, self.ysize))
def zodicalBackground(self, elon, elat):
"""Calcaulte the zodiacal background at a given celestial (lat, long)."""
# from Josh and Peter's memo
Vmax = 23.345
DeltaV = 1.148
b = np.abs(elat)
V = Vmax - DeltaV * ((b - 90.0) / 90.0) ** 2
assert ((b < 90).all())
return 10 ** (-0.4 * (V - 22.8)) * (2.56e-3) * self.camera.effective_area * self.camera.pixel_solid_area
def unresolvedBackground(self, glon, glat, complicated=False):
"""Calculate the unresolved stellar background at a given galactic (lat, long)."""
# from Josh and Peter's memo
flip = glon > 180.0
if np.sum(flip):
glon[flip] -= 360.0
if complicated:
L = np.abs(glon / 180.0)
B = np.abs(glat / 90.0)
a1 = 18.7
a2 = 4.3
a3 = 0.52
a4 = 10.2
a5 = 0.46
a6 = -3.74
I_surface_brightness = a1 + a2 * (1.0 - np.exp(-L / a3)) + a4 * (1.0 - np.exp(-B / a5)) + a6 * np.sqrt(
L * B)
else:
a0 = 18.9733
a1 = 8.833
a2 = 4.007
a3 = 0.805
I_surface_brightness = a0 + a1 * (np.abs(glat) / 40.0) + a2 * (np.abs(glon) / 180.0) ** a3
return 10 ** (-0.4 * I_surface_brightness) * 1.7e6 * self.camera.effective_area * self.camera.pixel_solid_area
    def writeToFITS(self, image, path, split=False, savetype=np.float32, cancompress=True):
        """General FITS writer for this CCD.

        :param image: 2D array to save (cast to `savetype`)
        :param path: output filename
        :param split: unused here -- TODO confirm whether callers rely on it
        :param savetype: numpy type the image is cast to before writing
        :param cancompress: allow gzipping when this cadence is compressed
        """
        # print status
        logger.info('saving {0}x{0} image with type {1} to'.format(image.shape[0], savetype.__name__))
        logger.info('    {}'.format(path))
        # place the WCS reference pixel at the focal-plane origin
        cr = self.camera.cartographer.point(0.0, 0.0, 'focalxy')
        x, y = cr.ccdxy.tuple
        # quad = quadrants[self.number]
        # if quad is not None:
        #   offset = self.npix
        #   x += quad[0]*
        # modify the camera's WCS, based on the CCD number
        self.header['CRPIX1'] = x + 0.5
        self.header['CRPIX2'] = y + 0.5
        # write the file to FITS
        # astropy.io.fits.PrimaryHDU(np.transpose(savetype(image)), header=self.header).writeto(filename, clobber=True)
        # NOTE(review): `clobber` is deprecated/removed in newer astropy
        # (renamed `overwrite`) -- confirm the pinned astropy version.
        astropy.io.fits.PrimaryHDU(savetype(image), header=self.header).writeto(path, clobber=True)
        # NOTE(review): self.compress is not assigned anywhere in this class
        # as shown here; presumably set externally per-cadence -- confirm.
        if self.compress[self.camera.cadence] and cancompress:
            os.system('gzip -vf {}'.format(path))
def loadFromFITS(self, path):
"""General FITS loader for this CCD."""
# print status
logger.info('trying to load image from {0}'.format(path))
try:
# can we get by without the transposes?
# image = np.transpose(astropy.io.fits.open(path)[0].data)
image = astropy.io.fits.open(path)[0].data
logger.info(" ...success!")
return image
except IOError:
logger.info(" ...failed")
raise IOError('failed tring to load {}'.format(path))
@property
def fileidentifier(self):
return '{pos_string}_{name}_{counter:06.0f}'.format(pos_string=self.camera.pos_string(), name=self.name,
counter=self.camera.counter)
def writeFinal(self, lean=True):
"""Write the final image from this CCD."""
# self.header.extend(self.camera.header)
# self.header.extend(self.camera.psf.header)
# make filename for this image
self.note = 'simulated_' + self.fileidentifier
finalfilename = os.path.join(self.directory, self.note + '.fits' + zipsuffix)
# write the image to FITS
logger.info('saving simulated exposure {} for {}'.format(
self.camera.counter, self.name))
self.writeToFITS(self.image, finalfilename, savetype=np.int32)
# optionally, write some other outputs too!
if lean == False:
self.note = 'withoutbackground_{0:06.0f}'.format(self.camera.counter)
self.writeToFITS(self.image - self.backgroundimage, os.path.join(self.directory, self.note + '.fits'))
def stampify(self):
if self.stamps is not None:
self.image *= self.stampimage != 0
    def setupCatalog(self, write=True):
        """setup the CCD's catalog, by creating it from the camera and then trimming

        :param write: unused here -- TODO confirm whether callers rely on it
        """
        logger.info("setting up this CCD's starmap")
        # make sure the camera has a catalog defined
        try:
            self.camera.catalog
            logger.info("The camera already had a catalog of {0} elements defined; using it!".format(
                len(self.camera.catalog.ra)))
        except AttributeError:
            logger.info("populating a new catalog for the camera")
            self.camera.populateCatalog()
        # we want to make a trimmed catalog for this CCD.
        # first figure out which ones are on the CCD
        # pull out positions, magnitudes, and temperatures at the time of the first exposure
        logger.info('taking an intial snapshot at {0} = {1}'.format(self.camera.bjd, self.epoch))
        ras, decs, tmag, temperatures = self.camera.catalog.snapshot(self.camera.bjd,
                                                                     exptime=self.camera.cadence / 60.0 / 60.0 / 24.0)
        assert (ras.shape == tmag.shape)
        # assign the cartrographer's CCD to this one
        self.camera.cartographer.ccd = self
        # create coordinate object for the stars
        stars = self.camera.cartographer.point(ras, decs, 'celestial')
        x, y = stars.ccdxy.tuple
        # trim everything down to only those stars that could be relevant for this ccd
        # NOTE(review): `onccd` is computed but never used below; the stamper
        # does its own trimming -- confirm whether this is dead code.
        buffer = 0  # 10
        onccd = (x > -buffer) & (x < self.xsize + buffer) & (y > -buffer) & (y < self.ysize + buffer)
        # trim to postage stamps, if desired
        self.stamper = Stamper.Stamper(specifier=self.camera.stamps[self.camera.cadence], ccd=self)
        # keep track of whether this is a stamp catalog or not
        self.stamps = self.camera.stamps[self.camera.cadence]
        # create the CCD catalog
        self.catalog = self.stamper.trimCatalog(self.camera.catalog)
def writeIngredients(self):
# write the catalog out to a text file
outfile = os.path.join(self.directory,
'catalog_{pos}_{name}_atepoch{epoch:.3f}.txt'.format(pos=self.pos_string,
name=self.name,
epoch=self.epoch))
self.camera.catalog.writeProjected(ccd=self, outfile=outfile)
jitteroutfile = os.path.join(
self.directory, 'jitternudges_{cadence:.0f}s_{name}.txt'.format(cadence=self.camera.cadence,
name=self.name))
self.camera.jitter.writeNudges(jitteroutfile)
focusoutfile = os.path.join(
self.directory, 'focustimeseries_{name}.txt'.format(name=self.name))
self.camera.focus.writeModel(focusoutfile)
    def projectCatalog(self, write=True):
        """Create using the camera's star catalog, and project stars using this CCD.

        Populates self.starx/stary/starmag/startemp/starlc/starbasemag with
        the stars relevant for this CCD at the current camera time.

        :param write: unused here -- TODO confirm whether callers rely on it
        """
        logger.info('projecting the starmap onto CCD')
        # make sure the camera has a catalog defined (and that it matches
        # the current stamp selection); rebuild it otherwise
        try:
            self.catalog
            assert (self.stamps == self.camera.stamps[self.camera.cadence])
        except (AttributeError, AssertionError):
            self.setupCatalog()
        # pull out positions, magnitudes, and temperatures
        logger.info('taking a snapshot at {0} = {1}'.format(self.camera.bjd, self.epoch))
        ras, decs, tmag, temperatures = self.catalog.snapshot(self.camera.bjd,
                                                              exptime=self.camera.cadence / 60.0 / 60.0 / 24.0)
        logger.info('  done!')
        assert (ras.shape == tmag.shape)
        self.camera.cartographer.ccd = self
        # create coordinate object for the stars
        stars = self.camera.cartographer.point(ras, decs, 'celestial')
        x, y = stars.ccdxy.tuple
        # apply differential velocity aberration, based on the time offset from antisun
        if self.camera.aberrate:
            logger.info('applying differental velocity aberration (relative to this camera only)')
            dt = self.camera.bjd - self.camera.bjdantisun
            dx, dy = self.aberrations(stars, dt)
            # NOTE(review): fieldcenter/centerx/centery are computed but
            # unused; the mean nudge is subtracted instead -- confirm.
            fieldcenter = self.camera.cartographer.point(0, 0, 'focalxy')
            centerx, centery = self.aberrations(fieldcenter, dt)
            x += dx - np.mean(dx)
            y += dy - np.mean(dy)
        else:
            logger.info('skipping differental velocity aberration')
        # trim everything down to only those stars that could be relevant for this camera
        buffer = 10
        ok = (x > -buffer) & (x < self.xsize + buffer) & (y > -buffer) & (y < self.ysize + buffer)
        # assign this CCD's stars
        self.starx = x[ok]
        self.stary = y[ok]
        self.starmag = np.array(tmag)[ok]
        self.startemp = np.array(temperatures)[ok]
        self.starlc = np.array(self.camera.catalog.lightcurvecodes)[ok]
        self.starbasemag = np.array(self.camera.catalog.tmag)[ok]
        # keep track of which CCD we projected onto
        self.starsareon = self.name
    def aberrations(self, stars, dt):
        """Compute differential velocity-aberration pixel nudges for stars.

        :param stars: cartographer point object for the star positions
        :param dt: time offset from the antisun crossing, in days
        :returns: (dx, dy) pixel offsets relative to the field-center nudge
        """
        # make sure the cartographer is defined
        # (only AttributeError is caught, so an aberrator built for a
        # different CCD would raise AssertionError -- apparently intended)
        try:
            assert (self.aberrator.ccd == self)
        except AttributeError:
            # first call: build the aberrator and record its polynomial
            # coefficients in the header
            self.aberrator = Aberrator(self.camera.cartographer)
            if self.camera.counter == 0:
                self.aberrator.plotPossibilities()
            self.header['ABERRATE'] = ''
            self.header['AB_NOTE'] = '', 'Velocity aberration ingredients.'
            self.header['AB_DEF'] = 'd?=BETA*cos(L-FCLON+DLON)*AB?FUNC(x,y)-FCD?', "[L=stars' ec. lon.]"
            for pix in ['x', 'y']:
                self.header['ABD{0}FUNC'.format(
                    pix.upper())] = 'C + CX*x + CY*y + CXX*x**2 + CYY*y**2 + CXY*x*y'  # '{0:+.3f}{1:+.3f}*x{2:+.3f}*y{3:+.3f}*x**2{4:+.3f}*y**2{5:+.3f}*x*y'.format(*self.aberrator.coefs[pix])
                self.header['ABD{0}_C'.format(pix.upper())] = self.aberrator.coefs[pix][0]
                self.header['ABD{0}_CX'.format(pix.upper())] = self.aberrator.coefs[pix][1]
                self.header['ABD{0}_CY'.format(pix.upper())] = self.aberrator.coefs[pix][2]
                self.header['ABD{0}_CXX'.format(pix.upper())] = self.aberrator.coefs[pix][3]
                self.header['ABD{0}_CYY'.format(pix.upper())] = self.aberrator.coefs[pix][4]
                self.header['ABD{0}_CXY'.format(pix.upper())] = self.aberrator.coefs[pix][5]
        # sign will definitely be wrong on this
        beta = 29.8 * zachopy.units.km / zachopy.units.c  # unitless (radians)
        if self.camera.warpspaceandtime:
            # testing knob: slow down the effective speed of light
            warp = self.camera.warpspaceandtime
            beta /= warp
            self.header['AB_WARP'] = 'speed of light is {0}X what it should be'.format(warp), '(for testing)'
        self.header['AB_BETA'] = beta, '[radians] v/c (from orbit tangential velocity)'
        # what is the celestial longitude of the field center?
        fieldcenter = self.camera.cartographer.point(0, 0, type='focalxy')
        self.header['AB_FCLON'] = fieldcenter.ecliptic.elon, '[deg] ecliptic lon. of focal plane center'
        # how much is each star offset from antisun, at this time?
        dtheta = 360.0 * dt / 365.25
        theta = stars.ecliptic.elon - fieldcenter.ecliptic.elon + dtheta
        self.header['AB_DLON'] = dtheta, '[deg] motion of Earth (in ecliptic lon.)'
        # calculate the current positions and the nudge of celestial longitude
        x, y = stars.ccdxy.tuple  # pixels
        delon = beta * np.cos(theta * np.pi / 180.0) * 180 / np.pi  # degrees
        # calculate the aberration at the center of the camera FOV
        fcx, fcy = fieldcenter.ccdxy.tuple
        dfcelon = beta * np.cos(dtheta * np.pi / 180.0) * 180 / np.pi
        fcdx, fcdy = self.aberrator.derivatives['x'](fcx, fcy) * dfcelon, self.aberrator.derivatives['y'](fcx,
                                                                                                          fcy) * dfcelon
        self.header['AB_FCDX'] = fcdx, '[pix] dx of FOV center'
        self.header['AB_FCDY'] = fcdy, '[pix] dy of FOV center'
        self.delon = delon  # for testing
        self.fcdx, self.fcdy = fcdx, fcdy
        # calculate the absolute velocity aberration
        # the linear plane model is probably going to break down at the pole!
        vax, vay = self.aberrator.derivatives['x'](x, y) * delon, self.aberrator.derivatives['y'](x, y) * delon
        # subtract the field centers
        dx, dy = vax - fcdx, vay - fcdy
        return dx, dy
    def addStar(self, ccdx, ccdy, mag, temp, verbose=False, plot=False):
        """Add one star to an image, given position, magnitude, and effective temperature.

        Accumulates the star's pixelized PSF (scaled by cadence and flux)
        into self.starimage; verbose/plot are currently unused.
        """
        # logger.info("adding stars at ({0}, {1}) with magnitude of {2}".format(ccdx, ccdy, mag))
        # logger.info("   ({0}/{1})".format(self.starcounter,self.nstars))
        # (this is probably a slow way of doing things -- speed it up!)
        # nudge the position by the camera jitter, converted from arcsec(?)
        # to pixels via the camera pixel scale -- TODO confirm units
        ccdxy = self.camera.cartographer.point(ccdx + self.camera.nudge['x'] / self.camera.pixelscale,
                                               ccdy + self.camera.nudge['y'] / self.camera.pixelscale, 'ccdxy')
        focalx, focaly = ccdxy.focalxy.tuple
        # pixelized, normalized PSF plus the pixel indices it covers
        normalized, xindex, yindex = self.camera.psf.pixelizedPSF(ccdxy, stellartemp=temp, focus=self.currentfocus)
        binned = normalized * self.camera.cadence * self.photons(mag)
        # binned = unnormed*self.camera.cadence*self.photons(mag)/np.sum(unnormed)
        '''if plot:
            try:
                self.ax_prnu.figure.clf()
            except:
                fi, ax = plt.subplots(1,4,figsize=(20,4), sharex=True, sharey=True)
                self.ax_prnu = ax[0]
                self.ax_psf = ax[1]
                self.ax_highres = ax[2]
                self.ax_prf = ax[3]
            extent = [self.psf.xgrid.min(), self.psf.xgrid.max(), self.psf.ygrid.min(), self.psf.ygrid.max()]
            self.ax_prnu.imshow(intrapixel,extent=extent,cmap='gray_r',interpolation='nearest')
            self.ax_psf.imshow(subgrid_psf,extent=extent,cmap='gray_r',interpolation='nearest')
            self.ax_highres.imshow(subgrid_psf*intrapixel,extent=extent,cmap='gray_r',interpolation='nearest')
            self.ax_prf.imshow(binned,extent=extent,cmap='gray_r',interpolation='nearest')
            self.ax_prf.set_xlim(-self.psf.dx_pixels, self.psf.dy_pixels)
            self.ax_prf.set_ylim(-self.psf.dx_pixels, self.psf.dy_pixels)
            self.ax_prf.set_aspect(1)
            self.ax_prf.figure.savefig(os.path.join(settings.plots, 'prnu_demo.pdf'))
            plt.draw()'''
        # clip to the pixels that actually land on this CCD
        ok = (xindex >= self.xmin) * (xindex < self.xsize) * (yindex >= self.ymin) * (yindex < self.ysize)
        self.starimage[yindex[ok], xindex[ok]] += binned[ok]
        # a = self.input('just added {}'.format(ccdxy))
    def addStars(self, remake=True, jitter=False, magnitudethreshold=None):
        """Add all catalog stars to this CCD's image (building self.starimage).

        :param remake: force regeneration instead of loading a cached image
        :param jitter: whether spacecraft jitter nudges are being applied
        :param magnitudethreshold: faint magnitude limit; defaults to the
            maximum of the internal threshold grid
        :returns: the star-only image (also accumulated into self.image)
        """
        # logger.info("adding stars")
        self.starcounter = 0
        self.nstars = 0
        # anything that moves stars between exposures invalidates the cache
        if jitter or self.camera.aberrate or self.camera.variablefocus:
            remake = True
        self.camera.cartographer._pithy = True
        # define a grid of magnitude thresholds, will save an image containing all stars brighter than each
        dthreshold = 1
        magnitude_thresholds = np.arange(6, 18, dthreshold)
        # if the final star image already exists, just load it
        # NOTE(review): the bare `except:` clauses below swallow *all*
        # errors; they appear meant to catch AssertionError/AttributeError
        # and IOError only -- confirm before tightening.
        try:
            assert (remake == False)
            self.note = 'starsbrighterthan{0}'.format(np.max(magnitude_thresholds))
            starsfilename = os.path.join(self.directory, self.note + '.fits')
            try:
                self.starimage
            except:
                self.starimage = self.loadFromFITS(starsfilename)
        # otherwise loop through thresholds, adding stars at each
        except:
            self.starimage = self.zeros()
            # propagate proper motions and project onto the detector
            self.projectCatalog()
            if True:
                if magnitudethreshold is None:
                    magnitudethreshold = np.max(magnitude_thresholds)
                # define a filename for this magnitude range
                self.note = 'starsbrighterthan{0:02d}'.format(magnitudethreshold)
                starsfilename = os.path.join(self.directory, self.note + '.fits')
                # load the existing stellar image, if possible
                try:
                    assert (remake == False)
                    self.starimage = self.loadFromFITS(starsfilename)
                except (IOError, AssertionError):
                    # if this is the smallest threshold, include all the stars brighter than it
                    # if threshold == np.min(magnitude_thresholds):
                    minimum = -100
                    # else:
                    #     minimum = threshold - dthreshold
                    # pick the stars to add to the image on this pass through
                    # (star must overlap the CCD after allowing for PSF extent)
                    ok = (self.starx + self.camera.psf.dx_pixels_axis[-1] >= self.xmin) * \
                         (self.starx + self.camera.psf.dx_pixels_axis[0] <= self.xmax) * \
                         (self.stary + self.camera.psf.dy_pixels_axis[-1] >= self.ymin) * \
                         (self.stary + self.camera.psf.dy_pixels_axis[0] <= self.ymax) * \
                         (self.starmag < magnitudethreshold) * (self.starmag >= minimum)
                    x = self.starx[ok]
                    y = self.stary[ok]
                    mag = self.starmag[ok]
                    temp = self.startemp[ok]
                    logger.info('adding {0} stars between {1:.1f} and {2:.1f} magnitudes'.format(
                        len(x), np.min(mag), np.max(mag)))
                    # the focus setting is shared by every star in this frame
                    self.currentfocus = self.camera.focus.model(self.camera.counter)
                    logger.info("the camera's focus is set to {}".format(self.currentfocus))
                    self.header['FOCUS'] = (self.currentfocus, 'distance from optimal focus (microns)')
                    if np.sum(ok) > 0:
                        self.nstars += np.sum(ok)
                        for i in range(len(x)):
                            self.addStar(x[i], y[i], mag[i], temp[i])
                            self.starcounter += 1
                    # if jitter == False:
                    #     self.writeToFITS(self.starimage, starsfilename)
        # accumulate the stars into the running image and record provenance
        self.image += self.starimage
        self.addInputLabels()
        if self.camera.testpattern:
            self.header['ISTARS'] = ('True', 'stars from a test pattern')
        else:
            self.header['ISTARS'] = ('True', 'stars from UCAC4')
        if jitter:
            self.header['IJITTER'] = ('True', 'spacecraft jitter, motion between images')
        else:
            self.header['IJITTER'] = ('False', 'no spacecraft jitter apply')
        if self.camera.aberrate:
            self.header['IVELABER'] = ('True'), 'differential velocity aberration applied'
        else:
            self.header['IVELABER'] = ('False'), 'no differential velocity aberration'
        if self.camera.variablefocus:
            self.header['IVARFOCU'] = ('True'), 'camera focus allowed to vary'
        else:
            # NOTE(review): this 'False' branch reuses the 'True' comment
            # text; it probably should say the focus was held fixed -- confirm.
            self.header['IVARFOCU'] = ('False'), 'camera focus allowed to vary'
        return self.starimage
def addGalaxies(self):
pass
# looks like I should use http://vizier.cfa.harvard.edu/viz-bin/Cat?VII/155 for a source catalog?
    def addCosmics(self, gradient=False, version='fancy', diffusion=False, write=False, rate=5.0, correctcosmics=True):
        """Add cosmic rays to image.

        :param gradient: pass a flux gradient to the cosmic-ray generator
        :param version: unused here -- TODO confirm whether callers rely on it
        :param diffusion: whether the generator applies charge diffusion
        :param write: also save the cosmic-ray-only image to FITS
        :param rate: cosmic-ray rate passed to the generator
        :param correctcosmics: if True, only short cadences (<= 2 s) actually
            receive the rays; longer cadences assume on-board correction
        :returns: the cosmic-ray image (whether or not it was injected)
        """
        # print update
        logger.info('adding cosmic rays')
        # filenames, in case saving is required
        # use Al's code to generate cosmic ray image of the correct size
        image = Cosmics.cosmicImage(exptime=self.camera.cadence, size=self.npix,
                                    gradient=gradient, diffusion=diffusion, rate=rate)
        # (optionally), write cosmic ray image
        if write:
            self.note = 'cosmics_' + self.fileidentifier
            cosmicsfilename = os.path.join(self.directory, self.note + '.fits' + zipsuffix)
            self.writeToFITS(image, cosmicsfilename)
        # add the cosmics into the running image
        self.addInputLabels()
        if (correctcosmics == False) or self.camera.cadence <= 2:
            self.image += image
            self.header['ICOSMICS'] = ('True', 'cosmic rays injected')
        else:
            # NOTE(review): header comment says 'injected' in both branches;
            # this one records that they were NOT injected -- confirm wording.
            self.header['ICOSMICS'] = ('False', 'cosmic rays injected')
        return image
def bleedSaturated(self, plot=False):
"""Bleed saturated pixels in the image."""
logger.info('bleeding saturated pixels')
# keep track of the original image
untouched = self.image + 0.0
original = np.sum(self.image)
# set the saturation limit based on the number of individual reads included
saturation_limit = self.camera.saturation * self.camera.cadence / self.camera.singleread
stilloversaturated = True
# keep looping until all saturation problems are gone
count = 0
while stilloversaturated:
# keep track of iterations, to prevent infinite loops!
count += 1
# identify saturated pixels
oversaturated = self.image > saturation_limit
saturated = self.image >= saturation_limit
# loop over columns, treating each separately
for x in range(self.image.shape[1]):
# identify continuous saturated regions
regions, nregions = scipy.ndimage.measurements.label(saturated[:, x])
if nregions > 0:
for i in np.arange(nregions) + 1:
y = (regions == i).nonzero()[0]
if oversaturated[y, x].any():
# logger.info('')
# logger.info('in column {0}'.format(x))
# in this saturated region, how much flux needs to be redistributed?
fluxtodistribute = np.sum(self.image[y, x])
# logger.info('must distribute {0} electrons'.format(fluxtodistribute))
# how many pixels would this correspond to? (including the original pixels)
npixels = (fluxtodistribute / saturation_limit)
# logger.info('which we could do over {0} pixels'.format(npixels))
# find the center of the saturated region
center = np.mean(y)
# find how far we away from center we can totally saturate pixel
grow = (npixels - 1.0) / 2.0
# noinspection PyTypeChecker
indices = np.arange(np.maximum(np.ceil(center - grow).astype(np.int), 0),
np.minimum(np.floor(center + grow).astype(np.int),
self.image.shape[0] - 1) + 1)
# logger.info('with indices of {0}'.format(indices))
assert (y[0] in indices)
# record the flux we're starting with in this region
existingflux = np.sum(self.image[indices, x])
# saturate the pixels needed
self.image[indices, x] = saturation_limit
leftoverflux = existingflux - indices.shape[0] * saturation_limit
# logger.info('leaving {0} behind'.format(leftoverflux))
'''if leftoverflux > 0:
leftedge = indices.min() -1
rightedge = indices.max() +1
else:'''
leftedge = indices.min() - 1
rightedge = indices.max() + 1
try:
try:
self.image[leftedge, x] += leftoverflux / 2.0
except:
self.image[rightedge, x] += leftoverflux / 2.0
try:
self.image[rightedge, x] += leftoverflux / 2.0
except:
self.image[leftedge, x] += leftoverflux / 2.0
except:
logger.info("this star seems to saturate the entire detector!")
logger.info(" on pass #{0} through saturation filter:".format(count))
logger.info(
" the max saturation fraction is {1:.2f}; flux change over entire image is {2:.2f} electrons".format(
count, np.max(self.image) / saturation_limit, np.sum(self.image) - original))
# KLUDGE to prevent endless loops
stilloversaturated = (self.image > saturation_limit).any() and count < 10
self.note = 'saturation_{0}K'.format(self.camera.saturation).replace('.', 'p')
saturationfilename = os.path.join(self.directory, self.note + '.fits')
if not os.path.exists(saturationfilename):
self.writeToFITS(self.image - untouched, saturationfilename)
# update image header
self.addInputLabels()
self.header['ISATURAT'] = ('True', 'bleed trails for saturated pixels')
    def addBackgrounds(self):
        """Add smooth backgrounds (zodiacal light and unresolved stars) to background."""
        # set up filenames for saving background, if need be
        self.note = 'backgrounds'
        backgroundsfilename = os.path.join(self.directory, self.note + '.fits')
        logger.info("adding backgrounds")
        # if the background image already exists (and this isn't the first
        # exposure), reuse the cached copy
        try:
            self.backgroundimage
            assert (self.camera.counter != 0)
        except (AttributeError, AssertionError):
            try:
                self.backgroundimage = self.loadFromFITS(backgroundsfilename)
            # otherwise compute the backgrounds from scratch, and save them for next time
            except IOError:
                # define a blank background image
                self.backgroundimage = self.zeros()
                # define coordinates (equatorial, Galactic, celestial) at every pixel in the image
                ra, dec = self.pixels().celestial.tuple
                # NOTE(review): these "ecliptic" longitudes/latitudes are drawn
                # from .celestial, identical to ra/dec above -- confirm whether
                # this should be self.pixels().ecliptic.tuple.
                elon, elat = self.pixels().celestial.tuple
                glon, glat = self.pixels().galactic.tuple
                # add the zodiacal light, using the simple model from Josh and Peter on the TESS wiki
                logger.info(" including smooth model for zodiacal light")
                # NOTE(review): the next line is a no-op expression (dead code)
                elon, elat
                self.backgroundimage += self.zodicalBackground(elon, elat) * self.camera.cadence
                self.addInputLabels()
                self.header['IZODIACA'] = ('True', 'zodiacal light, treated as smooth')
                # add unresolved background light, using the simple model from Josh and Peter on the TESS wiki
                logger.info(" including smooth model for unresolved stars in the Galaxy")
                self.backgroundimage += self.unresolvedBackground(glon, glat) * self.camera.cadence
                self.header['IUNRESOL'] = ('True', 'unresolved stars, treated as smooth background')
                # write the image, so it can just be loaded easily next time
                self.writeToFITS(self.backgroundimage, backgroundsfilename, cancompress=False)
        # add the background image to the total image
        self.image += self.backgroundimage
def addPhotonNoise(self):
"""Add photon noise into an image."""
self.noiseimage = self.zeros()
logger.info("adding photon noise [sqrt(photons from stars and various backgrounds)]")
noise_variance = self.image
ok = noise_variance > 0
noise = np.zeros_like(self.image)
noise[ok] = np.sqrt(noise_variance[ok])
assert (np.isfinite(noise).all())
self.image += noise * np.random.randn(self.xsize, self.ysize)
self.noiseimage = noise
self.note = 'photonnoise'
noisefilename = os.path.join(self.directory, self.note + '.fits')
if not os.path.exists(noisefilename):
self.writeToFITS(noise, noisefilename)
self.addInputLabels()
self.header['IPHOTNOI'] = ('True', 'photon noise')
def addReadNoise(self):
"""Add read noise to image."""
logger.info("adding read noise")
logger.info(" = quadrature sum of {2:.0f} reads with {3} e- each.".format(self.camera.cadence,
self.camera.singleread,
self.camera.cadence / self.camera.singleread,
self.camera.read_noise))
# calculate the variance due to read noise
noise_variance = self.camera.cadence / self.camera.singleread * self.camera.read_noise ** 2
# add noise into image
self.image += np.sqrt(noise_variance) * np.random.randn(self.xsize, self.ysize)
try:
self.noiseimage = np.sqrt(self.noiseimage ** 2 + noise_variance)
except:
self.noiseimage = np.sqrt(noise_variance)
# update image header
self.addInputLabels()
self.header['IREADNOI'] = ('True', 'read noise')
def addSmear(self):
"""Smear the image along the readout direction."""
logger.info("adding readout smear")
logger.info(" assuming {0} second readout times on {1} second exposures.".format(self.camera.readouttime,
self.camera.singleread))
untouched = self.image + 0.0
mean = np.mean(self.image, 0).reshape(1, self.image.shape[0]) * self.ones()
self.image += mean * self.camera.readouttime / self.camera.singleread
self.note = 'readoutsmear'
smearfilename = os.path.join(self.directory, self.note + '.fits')
if not os.path.exists(smearfilename):
self.writeToFITS(self.image - untouched, smearfilename)
# update header
self.addInputLabels()
self.header['ISMEAR'] = ('True', 'smearing during transer to frame store')
    def expose(self,
               plot=False,  # should we make plots with this exposure?
               jitter=False,  # should this exposure be jittered?
               writesimulated=False,  # should this exposure write to file?
               compress={2: True, 120: True, 1800: False},  # NOTE(review): mutable default dict shared across calls -- confirm it is never mutated in place
               remake=True,  # should we remake stars (might try not to)?
               smear=True,  # should readout smear be included?
               cosmicsversion='fancy',  # what kind of cosmics should be included?
               cosmicsdiffusion=False,  # should diffusion of cosmics be done?
               skipcosmics=False,  # should we skip cosmic injection?
               correctcosmics=False,  # should we pretend cosmics don't exist?
               writecosmics=False,  # should the cosmics image write to file?
               writenoiseless=False,  # should we write an image with no noise?
               jitterscale=1.0,  # should we rescale the jitter?
               display=False,  # should we display this image in ds9?,
               magnitudethreshold=999,  # faintest magnitude of stars to include
               advancecounter=True,  # advance the camera counter after this CCD?
               **kwargs):
        """Expose an image on this CCD.

        Orchestrates the full simulation of one exposure: stars, galaxies,
        backgrounds, photon noise, cosmics, smear, saturation bleeds, and
        read noise, in that order. Returns (image, cosmics, stars) when
        `writesimulated` is False; otherwise writes the image and returns None.
        """
        self.plot = plot
        self.display = display
        self.compress = compress
        # temp kludge
        cosmics, stars = None, None
        # create a blank image
        self.image = self.zeros()
        # populate the basics of the header
        self.populateHeader()
        # write out the ingredients, if this is the first exposure
        if self.camera.counter == 0:
            self.writeIngredients()
        # jitter the camera, or at least update the
        if jitter:
            self.camera.jitter.applyNudge(self.camera.counter, header=self.header)
        # add stars to the image
        self.addStars(jitter=jitter, remake=remake, magnitudethreshold=magnitudethreshold)
        # add galaxies to the image
        self.addGalaxies()
        # add background to the image
        self.addBackgrounds()
        if writesimulated == False:
            # keep a noiseless copy of the star+background image to return
            stars = self.image + 0.0
        if writenoiseless:
            # make filename for this image
            self.note = 'noiseless_' + self.fileidentifier
            noiselessfilename = os.path.join(self.directory, self.note + '.fits' + zipsuffix)
            # write the image to FITS
            logger.info('saving noiseless TESS image')
            self.writeToFITS(self.image, noiselessfilename, savetype=np.int32)
        # add the photon noise from stars, galaxies, and backgrounds
        self.addPhotonNoise()
        if skipcosmics == False:
            # add cosmic rays to the image (after noise, because the *sub-Poisson* noise is already modeled with the Fano factor)
            cosmics = self.addCosmics(write=writecosmics, version=cosmicsversion, diffusion=cosmicsdiffusion,
                                      correctcosmics=correctcosmics)
        # add smear from the finite frame transfer time
        if smear:
            self.addSmear()
        # create saturation bleed trails
        self.bleedSaturated()
        # add read noise, constant across detector
        self.addReadNoise()
        # finally, update the header for the image
        # self.populateHeader()
        try:
            self.stampify()
        except AttributeError:
            logger.info('no stamps found; skipping stampify!')
        # write the image
        if writesimulated:
            self.writeFinal()
        logger.info("created image #{counter:07d} of {pos_string} with {cadence:.0f}s cadence".format(
            counter=self.camera.counter, pos_string=self.pos_string, cadence=self.camera.cadence))
        # advance the camera's counter (and therefore timestep) if this is the last of the CCDs
        if self == self.camera.ccds[-1] and advancecounter:
            self.camera.advanceCounter()
        self.show()
        if writesimulated == False:
            return self.image, cosmics, stars
class Aberrator(object):
    """object to keep track of how to apply velocity abberation; must be reset for each CCD"""

    def __init__(self, cartographer):
        """Fit quadratic models for d(ccdx)/d(elon) and d(ccdy)/d(elon)
        over a grid of positions spanning the CCD, using a numerical
        derivative from the supplied cartographer, and save a diagnostic
        plot of the fit and its residuals.
        """
        super(Aberrator, self).__init__()
        # create a grid of stars spanning the CCD
        ccd = cartographer.ccd
        self.ccd = ccd
        ngrid = 20
        xgrid, ygrid = np.meshgrid(np.linspace(ccd.xmin, ccd.xmax, ngrid),
                                   np.linspace(ccd.ymin, ccd.ymax, ngrid))
        x, y = xgrid.flatten(), ygrid.flatten()
        # estimate dx/delon and dy/delon
        self.strings, self.derivatives, self.coefs, self.inputs = {}, {}, {}, {}
        self.raw = {}
        # NOTE(review): this template formats only the first three of the six
        # fitted coefficients -- confirm whether the string is meant to be a
        # full description of the model.
        template = '{0:+.10f}*ccdx{1:+.10f}*ccdy{2:+.10f}'
        # design matrix for a full 2-D quadratic: [1, x, y, x^2, y^2, xy]
        A = np.vstack([np.ones(len(x)), x, y, x ** 2, y ** 2, x * y]).T
        delta = 1.0 / 60.0 / 60.0  # step, in degrees, for calculating numerical derivative
        notnudged = cartographer.point(x, y, type='ccdxy')
        elon, elat = notnudged.celestial.tuple
        # nudge each grid point by `delta` in longitude and see how far it
        # moves on the CCD; that ratio is the numerical derivative
        nudgedinlon = cartographer.point(elon + delta, elat, type='celestial')
        dxdelon = (nudgedinlon.ccdxy.x - x) / delta
        self.coefs['x'] = np.linalg.lstsq(A, dxdelon)[0]

        def model_dxdelon(x, y):
            # evaluate the fitted quadratic model for dccdx/delon
            return self.coefs['x'][0] + self.coefs['x'][1] * x + self.coefs['x'][2] * y + self.coefs['x'][3] * x ** 2 + \
                self.coefs['x'][4] * y ** 2 + self.coefs['x'][5] * x * y  # pixels/degree

        self.derivatives['x'] = model_dxdelon
        self.strings['x'] = template.format(*self.coefs['x'])
        self.raw['x'] = dxdelon
        self.inputs['x'] = x
        dydelon = (nudgedinlon.ccdxy.y - y) / delta
        self.coefs['y'] = np.linalg.lstsq(A, dydelon)[0]

        def model_dydelon(x, y):
            # evaluate the fitted quadratic model for dccdy/delon
            return self.coefs['y'][0] + self.coefs['y'][1] * x + self.coefs['y'][2] * y + self.coefs['y'][3] * x ** 2 + \
                self.coefs['y'][4] * y ** 2 + self.coefs['y'][5] * x * y  # pixels/degree

        self.derivatives['y'] = model_dydelon
        self.strings['y'] = template.format(*self.coefs['y'])
        self.raw['y'] = dydelon
        self.inputs['y'] = y
        # plt.ion()
        '''
        plt.figure()
        plt.scatter(x, 3600.0/dxdelon, c=y)
        plt.ylabel('arcsec of longitude/xpix')
        plt.figure()
        plt.scatter(x, 3600.0/dxdelat, c=y)
        plt.ylabel('arcsec of longitude/xpix')
        assert(False)
        '''
        # diagnostic plot: raw derivative, model, and residuals for x and y
        plt.figure(figsize=(20, 10))
        gs = gridspec.GridSpec(2, 3, hspace=0.3, bottom=.2)
        for i, k in enumerate(['x', 'y']):
            plt.subplot(gs[i, 0])
            plt.scatter(self.inputs[k], self.raw[k], c=y, edgecolor='none')
            plt.ylabel('dccd{0}/delon (pixels/degrees)'.format(k))
            if i == 1:
                plt.xlabel('directly\ncalculated')
            plt.subplot(gs[i, 1])
            plt.scatter(self.inputs[k], self.derivatives[k](x, y), c=y, edgecolor='none')
            if i == 1:
                plt.xlabel('model')
            plt.subplot(gs[i, 2])
            plt.scatter(self.inputs[k], self.raw[k] - self.derivatives[k](x, y), c=y, edgecolor='none')
            if i == 1:
                plt.xlabel('residuals')
        plt.savefig(os.path.join(self.ccd.directory, 'aberrationgeometry.pdf'))

    def plotPossibilities(self, n=100):
        """Plot the aberration nudges of `n` random stars over one year,
        both with and without the field-center offset removed, and save the
        figure to the CCD directory.
        """
        x, y = np.random.uniform(0, self.ccd.xsize, n), np.random.uniform(0, self.ccd.ysize, n)
        stars = self.ccd.camera.cartographer.point(x, y, type='ccdxy')
        dx, dy, delon = [], [], []
        fcdx, fcdy = [], []
        # sample the aberration daily-ish for a full year
        bjds = np.linspace(0, 365, 1000) + self.ccd.camera.bjd0
        for bjd in bjds:
            nudges = self.ccd.aberrations(stars, bjd)
            dx.append(nudges[0])
            dy.append(nudges[1])
            fcdx.append(self.ccd.fcdx)
            fcdy.append(self.ccd.fcdy)
            delon.append(self.ccd.delon)
        dx = np.array(dx)
        dy = np.array(dy)
        fcdx = np.array(fcdx)
        fcdy = np.array(fcdy)
        plt.figure(figsize=(8, 8))
        gs = gridspec.GridSpec(2, 2, left=0.15, wspace=0.3)
        bjds -= min(bjds)
        # top row is uncorrected
        ax = plt.subplot(gs[0, 0])
        plt.axvline(27.4, color='gray', alpha=1)
        ax.plot(bjds, dx + fcdx.reshape(len(bjds), 1), alpha=0.3)
        plt.ylabel('velocity\naberration (pixels)')
        plt.title('x')
        ax = plt.subplot(gs[0, 1], sharey=ax, sharex=ax)
        ax.plot(bjds, dy + fcdy.reshape(len(bjds), 1), alpha=0.3)
        plt.axvline(27.4, color='gray', alpha=1)
        plt.xlim(-1 + min(bjds), max(bjds) + 1)
        plt.title('y')
        # bottom row has the field-center motion removed (differential)
        ax = plt.subplot(gs[1, 0])
        ax.plot(bjds, dx, alpha=0.3)
        plt.axvline(27.4, color='gray', alpha=1)
        plt.xlabel('Time (days)')
        plt.ylabel('differential velocity\naberration (pixels)')
        ax = plt.subplot(gs[1, 1], sharey=ax, sharex=ax)
        ax.plot(bjds, dy, alpha=0.3)
        plt.axvline(27.4, color='gray', alpha=1)
        plt.xlim(-1 + min(bjds), max(bjds) + 1)
        plt.xlabel('Time (days)')
        path = os.path.join(self.ccd.directory, 'aberrationoveroneyear.pdf')
        plt.savefig(path)
        logger.info('saved a plot of the aberration over one year to {}'.format(path))
def gauss(x, y, xcenter, ycenter, sigma=1.0):
    """Evaluate a circular, unit-peak 2-D Gaussian.

    Parameters
    ----------
    x, y : float or ndarray
        Position(s) at which to evaluate the profile.
    xcenter, ycenter : float
        Center of the Gaussian.
    sigma : float, optional
        Width of the Gaussian (generalized from the previously
        hard-coded value of 1.0; the default preserves old behavior).

    Returns
    -------
    float or ndarray
        exp(-0.5 * r**2 / sigma**2), where r is the distance from center.
    """
    rsquared = (x - xcenter) ** 2 + (y - ycenter) ** 2
    return np.exp(-0.5 * rsquared / sigma ** 2)
def lorentz(x, y, xcenter, ycenter, sigma=1.0):
    """Evaluate a circular, unit-peak 2-D Lorentzian.

    Parameters
    ----------
    x, y : float or ndarray
        Position(s) at which to evaluate the profile.
    xcenter, ycenter : float
        Center of the Lorentzian.
    sigma : float, optional
        Width scale (generalized from the previously hard-coded value
        of 1.0; the default preserves old behavior).

    Returns
    -------
    float or ndarray
        1 / (r**2 / sigma**2 + 1), where r is the distance from center.
    """
    rsquared = (x - xcenter) ** 2 + (y - ycenter) ** 2
    return 1.0 / (rsquared / sigma ** 2 + 1.0)
def smoothedge(x, y, xcenter, ycenter, edge):
    """Inverted-parabola profile: 1 at the center, falling smoothly to
    zero at radius `edge`, and exactly zero beyond it.
    """
    rsquared = (x - xcenter) ** 2 + (y - ycenter) ** 2
    curvature = -1.0 / edge ** 2
    inside = rsquared <= edge ** 2
    return (curvature * rsquared + 1.0) * inside
|
|
"""SIESTA calculator interface."""
# Copyright (C) 2015 Henrique Pereira Coutada Miranda
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
import sys
import numpy as np
from phonopy.file_IO import iter_collect_forces
from phonopy.interface.vasp import check_forces, get_drift_forces
from phonopy.structure.atoms import PhonopyAtoms as Atoms
from phonopy.units import Bohr
def parse_set_of_forces(num_atoms, forces_filenames, verbose=True):
    """Parse forces from output files."""
    hook = ""  # empty hook: only the first line of each file is skipped
    all_parsed = True
    force_sets = []
    for count, fname in enumerate(forces_filenames, start=1):
        if verbose:
            sys.stdout.write("%d. " % count)
        forces = iter_collect_forces(fname, num_atoms, hook, [1, 2, 3], word="")
        if not check_forces(forces, num_atoms, fname, verbose=verbose):
            # remember the failure but keep parsing the remaining files,
            # so every bad file gets reported
            all_parsed = False
            continue
        drift = get_drift_forces(forces, filename=fname, verbose=verbose)
        force_sets.append(np.array(forces) - drift)
    # an empty list signals that at least one file failed to parse
    return force_sets if all_parsed else []
def read_siesta(filename):
    """Read a crystal structure from a SIESTA input (.fdf) file.

    Parameters
    ----------
    filename : str
        Path to the SIESTA input file.

    Returns
    -------
    tuple
        (cell, atypes): the PhonopyAtoms cell (lengths in Bohr) and the
        species-label -> species-number mapping.
    """
    # bug fix: the file handle used to be opened without ever being closed
    with open(filename) as fp:
        siesta_in = SiestaIn(fp.read())
    numbers = siesta_in._tags["atomicnumbers"]
    alat = siesta_in._tags["latticeconstant"]
    lattice = siesta_in._tags["latticevectors"]
    positions = siesta_in._tags["atomiccoordinates"]
    atypes = siesta_in._tags["chemicalspecieslabel"]
    cell = Atoms(numbers=numbers, cell=lattice, scaled_positions=positions)

    # reinterpret the raw positions according to the declared format
    coordformat = siesta_in._tags["atomiccoordinatesformat"]
    if coordformat == "fractional" or coordformat == "scaledbylatticevectors":
        cell.set_scaled_positions(positions)
    elif coordformat == "scaledcartesian":
        # Cartesian, in units of the lattice constant
        cell.set_positions(np.array(positions) * alat)
    elif coordformat == "notscaledcartesianang" or coordformat == "ang":
        # Cartesian Angstroms; convert to Bohr
        cell.set_positions(np.array(positions) / Bohr)
    elif coordformat == "notscaledcartesianbohr" or coordformat == "bohr":
        cell.set_positions(np.array(positions))
    else:
        print(
            "The format %s for the AtomicCoordinatesFormat is not "
            "implemented." % coordformat
        )
        sys.exit(1)
    return cell, atypes
def write_siesta(filename, cell, atypes):
    """Write cell to file."""
    text = get_siesta_structure(cell, atypes)
    with open(filename, "w") as fp:
        fp.write(text)
def write_supercells_with_displacements(
    supercell, cells_with_displacements, ids, atypes, pre_filename="supercell", width=3
):
    """Write supercells with displacements to files."""
    # the perfect supercell first, then one numbered file per displacement
    write_siesta("%s.fdf" % pre_filename, supercell, atypes)
    for disp_id, disp_cell in zip(ids, cells_with_displacements):
        fname = "{pre_filename}-{0:0{width}}.fdf".format(
            disp_id, pre_filename=pre_filename, width=width
        )
        write_siesta(fname, disp_cell, atypes)
def get_siesta_structure(cell, atypes):
    """Return SIESTA structure in text."""
    lattice = cell.get_cell()
    positions = cell.get_scaled_positions()
    symbols = cell.get_chemical_symbols()
    # assemble the fdf text piecewise, joining once at the end
    chunks = []
    chunks.append("NumberOfAtoms %d\n\n" % len(positions))
    chunks.append("%block LatticeVectors\n")
    chunks.append(((" %21.16f" * 3 + "\n") * 3) % tuple(lattice.ravel()))
    chunks.append("%endblock LatticeVectors\n\n")
    chunks.append("AtomicCoordinatesFormat Fractional\n\n")
    chunks.append("LatticeConstant 1.0 Bohr\n\n")
    chunks.append("%block AtomicCoordinatesAndAtomicSpecies\n")
    for pos, sym in zip(positions, symbols):
        chunks.append(("%21.16lf" * 3 + " %d\n") % tuple(pos.tolist() + [atypes[sym]]))
    chunks.append("%endblock AtomicCoordinatesAndAtomicSpecies\n")
    return "".join(chunks)
class SiestaIn:
    """Parser for a SIESTA input (.fdf) file.

    Collects the lattice constant, lattice vectors, atomic coordinates and
    chemical species into the ``_tags`` dictionary.
    """

    # regex fragment matching a (possibly signed / exponential) number
    _num_regex = r"([+-]?\d+(?:\.\d*)?(?:[eE][-+]?\d+)?)"
    # default values for every recognized tag
    _tags = {
        "latticeconstant": 1.0,
        "latticeconstantunit": None,
        "chemicalspecieslabel": None,
        "atomiccoordinatesformat": None,
        "atomicnumbers": None,
        "atomicspecies": None,
        "atomiccoordinates": None,
    }

    def __init__(self, lines):
        """Parse ``lines`` (the full text of an fdf file)."""
        # bug fix: _tags is a class attribute, so results parsed by one
        # instance would leak into every other instance; give each
        # instance its own copy before filling it in.
        self._tags = dict(self._tags)
        self._collect(lines)

    def _collect(self, lines):
        """Collect values.

        This routine reads the following from the Siesta file:
        - atomic positions
        - cell_parameters
        - atomic_species
        """
        # scalar "tag value [unit]" lines
        # NOTE(review): `unit` is None when no unit word follows the number,
        # in which case unit.lower() raises -- confirm inputs always supply
        # a unit for matched tags.
        for tag, value, unit in re.findall(
            r"([\.A-Za-z]+)\s+%s\s+([A-Za-z]+)?" % self._num_regex, lines
        ):
            tag = tag.lower()
            unit = unit.lower()
            if tag == "latticeconstant":
                self._tags["latticeconstantunit"] = unit.capitalize()
                # store the lattice constant internally in Bohr
                if unit == "ang":
                    self._tags[tag] = float(value) / Bohr
                elif unit == "bohr":
                    self._tags[tag] = float(value)
                else:
                    raise ValueError("Unknown LatticeConstant unit: {}".format(unit))
        # word-valued "tag value" lines (e.g. AtomicCoordinatesFormat)
        for tag, value in re.findall(r"([\.A-Za-z]+)[ \t]+([a-zA-Z]+)", lines):
            tag = tag.replace("_", "").lower()
            if tag == "atomiccoordinatesformat":
                self._tags[tag] = value.strip().lower()

        # check if the necessary tags are present
        self._check_present("atomiccoordinatesformat")
        acell = self._tags["latticeconstant"]

        # capture the %block ... %endblock sections
        blocks = re.findall(
            r"%block\s+([A-Za-z_]+)\s*\n((?:.+\n)+?(?=(?:\s+)?%endblock))",
            lines,
            re.MULTILINE,
        )
        for tag, block in blocks:
            tag = tag.replace("_", "").lower()
            if tag == "chemicalspecieslabel":
                block_array = block.split("\n")[:-1]
                # species number -> atomic number
                self._tags["atomicnumbers"] = dict(
                    [map(int, species.split()[:2]) for species in block_array]
                )
                # species label -> species number
                self._tags[tag] = dict(
                    [
                        (lambda x: (x[2], int(x[0])))(species.split())
                        for species in block_array
                    ]
                )
            elif tag == "latticevectors":
                # scale the (unitless) lattice vectors by the lattice constant
                self._tags[tag] = [
                    [float(v) * acell for v in vector.split()]
                    for vector in block.split("\n")[:3]
                ]
            elif tag == "atomiccoordinatesandatomicspecies":
                block_array = block.split("\n")[:-1]
                self._tags["atomiccoordinates"] = [
                    [float(x) for x in atom.split()[:3]] for atom in block_array
                ]
                self._tags["atomicspecies"] = [
                    int(atom.split()[3]) for atom in block_array
                ]

        # check if the blocks are present
        self._check_present("atomicspecies")
        self._check_present("atomiccoordinates")
        self._check_present("latticevectors")
        self._check_present("chemicalspecieslabel")

        # translate the atomicspecies to atomic numbers
        self._tags["atomicnumbers"] = [
            self._tags["atomicnumbers"][atype] for atype in self._tags["atomicspecies"]
        ]

    def _check_present(self, tag):
        """Exit with an error message if ``tag`` was not found in the input."""
        if not self._tags[tag]:
            print("%s not present" % tag)
            sys.exit(1)

    def __str__(self):
        """Return a printable representation of the parsed tags."""
        # bug fix: __str__ must return a str; returning the dict itself
        # made str(instance) raise TypeError
        return str(self._tags)
if __name__ == "__main__":
    # Command-line entry point: read a SIESTA input file (path given as the
    # first argument), report its space group, and echo the structure back
    # in SIESTA format.
    from phonopy.structure.symmetry import Symmetry

    cell, atypes = read_siesta(sys.argv[1])
    symmetry = Symmetry(cell)
    print("# %s" % symmetry.get_international_table())
    print(get_siesta_structure(cell, atypes))
|
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Raw data collector for coverage.py."""
import os, sys
from coverage import env
from coverage.backward import iitems
from coverage.files import abs_file
from coverage.misc import CoverageException
from coverage.pytracer import PyTracer
try:
# Use the C extension code when we can, for speed.
from coverage.tracer import CTracer, CFileDisposition # pylint: disable=no-name-in-module
except ImportError:
# Couldn't import the C extension, maybe it isn't built.
if os.getenv('COVERAGE_TEST_TRACER') == 'c':
# During testing, we use the COVERAGE_TEST_TRACER environment variable
# to indicate that we've fiddled with the environment to test this
# fallback code. If we thought we had a C tracer, but couldn't import
# it, then exit quickly and clearly instead of dribbling confusing
# errors. I'm using sys.exit here instead of an exception because an
# exception here causes all sorts of other noise in unittest.
sys.stderr.write("*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n")
sys.exit(1)
CTracer = None
class FileDisposition(object):
    """A simple value type for recording what to do with a file.

    Attributes are attached dynamically by the code that creates these
    objects (see `Collector.__init__`'s `should_trace` contract); this
    class only provides an attribute namespace.
    """
    pass
class Collector(object):
    """Collects trace data.

    Creates a Tracer object for each thread, since they track stack
    information.  Each Tracer points to the same shared data, contributing
    traced data points.

    When the Collector is started, it creates a Tracer for the current thread,
    and installs a function to create Tracers for each new thread started.
    When the Collector is stopped, all active Tracers are stopped.

    Threads started while the Collector is stopped will never have Tracers
    associated with them.

    """

    # The stack of active Collectors.  Collectors are added here when started,
    # and popped when stopped.  Collectors on the stack are paused when not
    # the top, and resumed when they become the top again.
    _collectors = []

    def __init__(self, should_trace, check_include, timid, branch, warn, concurrency):
        """Create a collector.

        `should_trace` is a function, taking a file name, and returning a
        `coverage.FileDisposition object`.

        `check_include` is a function taking a file name and a frame. It returns
        a boolean: True if the file should be traced, False if not.

        If `timid` is true, then a slower simpler trace function will be
        used.  This is important for some environments where manipulation of
        tracing functions make the faster more sophisticated trace function not
        operate properly.

        If `branch` is true, then branches will be measured.  This involves
        collecting data on which statements followed each other (arcs).  Use
        `get_arc_data` to get the arc data.

        `warn` is a warning function, taking a single string message argument,
        to be used if a warning needs to be issued.

        `concurrency` is a string indicating the concurrency library in use.
        Valid values are "greenlet", "eventlet", "gevent", or "thread" (the
        default).

        """
        self.should_trace = should_trace
        self.check_include = check_include
        self.warn = warn
        self.branch = branch
        self.threading = None
        self.concurrency = concurrency

        # concur_id_func identifies the current coroutine/greenlet, so data
        # can be attributed correctly under cooperative concurrency.
        self.concur_id_func = None

        try:
            if concurrency == "greenlet":
                import greenlet
                self.concur_id_func = greenlet.getcurrent
            elif concurrency == "eventlet":
                import eventlet.greenthread     # pylint: disable=import-error,useless-suppression
                self.concur_id_func = eventlet.greenthread.getcurrent
            elif concurrency == "gevent":
                import gevent                   # pylint: disable=import-error,useless-suppression
                self.concur_id_func = gevent.getcurrent
            elif concurrency == "thread" or not concurrency:
                # It's important to import threading only if we need it.  If
                # it's imported early, and the program being measured uses
                # gevent, then gevent's monkey-patching won't work properly.
                import threading
                self.threading = threading
            else:
                raise CoverageException("Don't understand concurrency=%s" % concurrency)
        except ImportError:
            raise CoverageException(
                "Couldn't trace with concurrency=%s, the module isn't installed." % concurrency
            )

        self.reset()

        if timid:
            # Being timid: use the simple Python trace function.
            self._trace_class = PyTracer
        else:
            # Being fast: use the C Tracer if it is available, else the Python
            # trace function.
            self._trace_class = CTracer or PyTracer

        # Only the C tracer supports plugins and has its own disposition type.
        if self._trace_class is CTracer:
            self.file_disposition_class = CFileDisposition
            self.supports_plugins = True
        else:
            self.file_disposition_class = FileDisposition
            self.supports_plugins = False

    def __repr__(self):
        """Return a debug representation including the tracer class in use."""
        return "<Collector at 0x%x: %s>" % (id(self), self.tracer_name())

    def tracer_name(self):
        """Return the class name of the tracer we're using."""
        return self._trace_class.__name__

    def reset(self):
        """Clear collected data, and prepare to collect more."""
        # A dictionary mapping file names to dicts with line number keys (if not
        # branch coverage), or mapping file names to dicts with line number
        # pairs as keys (if branch coverage).
        self.data = {}

        # A dictionary mapping file names to file tracer plugin names that will
        # handle them.
        self.file_tracers = {}

        # The .should_trace_cache attribute is a cache from file names to
        # coverage.FileDisposition objects, or None.  When a file is first
        # considered for tracing, a FileDisposition is obtained from
        # Coverage.should_trace.  Its .trace attribute indicates whether the
        # file should be traced or not.  If it should be, a plugin with dynamic
        # file names can decide not to trace it based on the dynamic file name
        # being excluded by the inclusion rules, in which case the
        # FileDisposition will be replaced by None in the cache.
        if env.PYPY:
            import __pypy__                     # pylint: disable=import-error
            # Alex Gaynor said:
            # should_trace_cache is a strictly growing key: once a key is in
            # it, it never changes.  Further, the keys used to access it are
            # generally constant, given sufficient context. That is to say, at
            # any given point _trace() is called, pypy is able to know the key.
            # This is because the key is determined by the physical source code
            # line, and that's invariant with the call site.
            #
            # This property of a dict with immutable keys, combined with
            # call-site-constant keys is a match for PyPy's module dict,
            # which is optimized for such workloads.
            #
            # This gives a 20% benefit on the workload described at
            # https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage
            self.should_trace_cache = __pypy__.newdict("module")
        else:
            self.should_trace_cache = {}

        # Our active Tracers.
        self.tracers = []

    def _start_tracer(self):
        """Start a new Tracer object, and store it in self.tracers."""
        tracer = self._trace_class()
        tracer.data = self.data
        tracer.trace_arcs = self.branch
        tracer.should_trace = self.should_trace
        tracer.should_trace_cache = self.should_trace_cache
        tracer.warn = self.warn

        # Not every tracer class supports every feature; configure only the
        # attributes this tracer actually has, and complain if a required
        # feature (coroutine identification) is unsupported.
        if hasattr(tracer, 'concur_id_func'):
            tracer.concur_id_func = self.concur_id_func
        elif self.concur_id_func:
            raise CoverageException(
                "Can't support concurrency=%s with %s, only threads are supported" % (
                    self.concurrency, self.tracer_name(),
                )
            )

        if hasattr(tracer, 'file_tracers'):
            tracer.file_tracers = self.file_tracers
        if hasattr(tracer, 'threading'):
            tracer.threading = self.threading
        if hasattr(tracer, 'check_include'):
            tracer.check_include = self.check_include

        fn = tracer.start()
        self.tracers.append(tracer)

        return fn

    # The trace function has to be set individually on each thread before
    # execution begins.  Ironically, the only support the threading module has
    # for running code before the thread main is the tracing function.  So we
    # install this as a trace function, and the first time it's called, it does
    # the real trace installation.

    def _installation_trace(self, frame, event, arg):
        """Called on new threads, installs the real tracer."""
        # Remove ourselves as the trace function.
        sys.settrace(None)
        # Install the real tracer.
        fn = self._start_tracer()
        # Invoke the real trace function with the current event, to be sure
        # not to lose an event.
        if fn:
            fn = fn(frame, event, arg)
        # Return the new trace function to continue tracing in this scope.
        return fn

    def start(self):
        """Start collecting trace information."""
        # Only one collector traces at a time; pause whoever was active.
        if self._collectors:
            self._collectors[-1].pause()

        # Check to see whether we had a fullcoverage tracer installed. If so,
        # get the stack frames it stashed away for us.
        traces0 = []
        fn0 = sys.gettrace()
        if fn0:
            tracer0 = getattr(fn0, '__self__', None)
            if tracer0:
                traces0 = getattr(tracer0, 'traces', [])

        try:
            # Install the tracer on this thread.
            fn = self._start_tracer()
        except:
            if self._collectors:
                self._collectors[-1].resume()
            raise

        # If _start_tracer succeeded, then we add ourselves to the global
        # stack of collectors.
        self._collectors.append(self)

        # Replay all the events from fullcoverage into the new trace function.
        for args in traces0:
            (frame, event, arg), lineno = args
            try:
                fn(frame, event, arg, lineno=lineno)
            except TypeError:
                raise Exception("fullcoverage must be run with the C trace function.")

        # Install our installation tracer in threading, to jump start other
        # threads.
        if self.threading:
            self.threading.settrace(self._installation_trace)

    def stop(self):
        """Stop collecting trace information."""
        assert self._collectors
        assert self._collectors[-1] is self, (
            "Expected current collector to be %r, but it's %r" % (self, self._collectors[-1])
        )

        self.pause()
        self.tracers = []

        # Remove this Collector from the stack, and resume the one underneath
        # (if any).
        self._collectors.pop()
        if self._collectors:
            self._collectors[-1].resume()

    def pause(self):
        """Pause tracing, but be prepared to `resume`."""
        for tracer in self.tracers:
            tracer.stop()
            stats = tracer.get_stats()
            if stats:
                print("\nCoverage.py tracer stats:")
                for k in sorted(stats.keys()):
                    print("%16s: %s" % (k, stats[k]))
        if self.threading:
            self.threading.settrace(None)

    def resume(self):
        """Resume tracing after a `pause`."""
        for tracer in self.tracers:
            tracer.start()
        if self.threading:
            self.threading.settrace(self._installation_trace)
        else:
            self._start_tracer()

    def save_data(self, covdata):
        """Save the collected data to a `CoverageData`.

        Also resets the collector.

        """
        def abs_file_dict(d):
            """Return a dict like d, but with keys modified by `abs_file`."""
            return dict((abs_file(k), v) for k, v in iitems(d))

        if self.branch:
            covdata.add_arcs(abs_file_dict(self.data))
        else:
            covdata.add_lines(abs_file_dict(self.data))
        covdata.add_file_tracers(abs_file_dict(self.file_tracers))

        self.reset()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2014, Ocean Systems Laboratory, Heriot-Watt University, UK.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Heriot-Watt University nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Original authors:
# Valerio De Carolis, Marian Andrecki, Corina Barbalata, Gordon Frost
"""Lawnmower pattern generator module for Autonomous Underwater Vehicle.
This module ...
"""
from __future__ import division
import numpy as np
np.set_printoptions(precision=3, suppress=True)
from vehicle_core.path import trajectory_tools as tt
def pattern_from_rect(width, height, delta=0.0, start=0):
    """Generate a lawnmower pattern covering a width x height rectangle.

    The pattern runs columns of alternating direction spaced `delta`
    metres apart, expressed in the rectangle's local frame with corner A
    at the origin:

        A ------------------- B        1     4 ----- 5
        |                     |        |     |       |
        |                     |        |     |       |
        D ------------------- C        2 --- 3       6 ...

    :param width: extent of the rectangle along AB
    :param height: extent of the rectangle along AD
    :param delta: spacing between adjacent columns (must be > 0)
    :param start: corner to start from (0=A, 1=B, 2=C, 3=D)
    :return: (trajectory, n_cols) where trajectory is an (n, 6) array of
        [north, east, depth, roll, pitch, yaw] rows and n_cols is the
        number of columns (int, including the final closing column)
    :raises ValueError: if delta is not a positive spacing
    """
    if delta <= 0:
        # The previous code divided by delta directly, so the (unusable)
        # default of 0.0 died with a bare ZeroDivisionError.
        raise ValueError('delta must be a positive column spacing')

    # Cast to int: np.ceil returns a float, which cannot be used as an
    # array shape or row index under modern numpy.
    n_cols = int(np.ceil(width / delta)) + 1    # number of columns (+ final)
    n_npc = 2                                   # number of points per column
    n_points = n_npc * n_cols                   # number of matrix rows

    # init empty trajectory
    trajectory = np.zeros((n_points, 6))
    depth = 0
    east = 0
    idx = 0

    # trajectory loop
    for n in range(n_cols):
        idx = n_npc * n
        if n % 2 == 0:
            # even column: run from the top edge down (yaw = pi)
            trajectory[idx, :] = np.array([0, east, depth, 0, 0, np.pi])
            trajectory[idx + 1, :] = np.array([-height, east, depth, 0, 0, np.pi])
        else:
            # odd column: run from the bottom edge back up (yaw = 0)
            trajectory[idx, :] = np.array([-height, east, depth, 0, 0, 0])
            trajectory[idx + 1, :] = np.array([0, east, depth, 0, 0, 0])
        # update east for next pair of legs
        east += delta

    # Clamp the final column onto the rectangle edge if it overshot.
    if trajectory[idx, 1] > width:
        trajectory[idx, 1] = width
        trajectory[idx + 1, 1] = width

    # Mirror the pattern so it begins at the requested corner (A needs no
    # transformation).  NOTE(review): yaw values are not mirrored here --
    # confirm downstream consumers handle heading for start != 0.
    if start == 1:
        # starting point is B
        trajectory[:, 1] = (-trajectory[:, 1]) + width
    elif start == 2:
        # starting point is C
        trajectory[:, 0] = (-trajectory[:, 0]) - height
        trajectory[:, 1] = (-trajectory[:, 1]) + width
    elif start == 3:
        # starting point is D
        trajectory[:, 0] = (-trajectory[:, 0]) - height

    return (trajectory, n_cols)
def pattern_from_ned(area, start=0, spacing=2.0, overlap=0.0):
    """Build a lawnmower pattern over a rectangular area in NED coordinates.

    :param area: 4x3 array of corners A, B, C, D as [north, east, depth]
    :param start: corner index to start from (0=A, 1=B, 2=C, 3=D)
    :param spacing: sensor track spacing
    :param overlap: fractional overlap between adjacent tracks (0 to 1)
    :return: (fixes, n_cols) -- waypoints in global NED plus column count
    :raises ValueError: if the four corners do not form a rectangle
    """
    # TODO: insert another condition like ab == cd and bc == da
    # Side lengths of the quadrilateral projected on the NE plane.
    side_ab = np.linalg.norm(area[0, 0:2] - area[1, 0:2])
    side_bc = np.linalg.norm(area[1, 0:2] - area[2, 0:2])
    side_cd = np.linalg.norm(area[2, 0:2] - area[3, 0:2])
    side_da = np.linalg.norm(area[3, 0:2] - area[0, 0:2])

    # For a rectangle both squared diagonals (Pythagoras) must agree.
    diag_sq_one = (side_ab ** 2 + side_bc ** 2)
    diag_sq_two = (side_cd ** 2 + side_da ** 2)
    if not np.abs(diag_sq_two - diag_sq_one) < 1e-6:
        raise ValueError('area is not rectangular shaped!')

    # Bounding-box dimensions from the corner vectors anchored at A.
    ab_vec = area[1, :2] - area[0, :2]      # B - A (xy)
    ad_vec = area[3, :2] - area[0, :2]      # D - A (xy)
    width = np.sqrt(np.dot(ab_vec, ab_vec))
    height = np.sqrt(np.dot(ad_vec, ad_vec))
    depth = np.abs(area[0, 2])

    # Track spacing shrinks with the requested overlap.
    delta = float(spacing - (spacing * overlap))

    # Pattern in the rectangle's local frame.
    (fixes, n_cols) = pattern_from_rect(width, height, delta, start)

    # All waypoints share the area's depth.
    fixes[:, 2] = depth

    # Translate so corner A sits at its true NED position.
    fixes[:, 0] += area[0, 0]
    fixes[:, 1] += area[0, 1]

    # Heading of edge AB with respect to the NED reference.
    delta_east = area[1, 1] - area[0, 1]
    alpha = np.arccos(delta_east / width)
    fixes[:, 5] += alpha        # rotate the yaw setpoints too

    # NOTE(review): this rotation is applied after the translation above, so
    # waypoints rotate about the NED origin rather than about corner A --
    # confirm this matches the intended frame convention.
    rot = np.eye(6)
    cos_a = np.cos(alpha)
    sin_a = np.sin(alpha)
    rot[0:2, 0:2] = [
        [cos_a, sin_a],
        [-sin_a, cos_a]
    ]
    for row in range(fixes.shape[0]):
        fixes[row, :] = rot.dot(fixes[row, :])

    return (fixes, n_cols)
if __name__ == '__main__':
    # Demo/driver: generate a pattern over a sample area, plot it, and dump
    # the waypoints as JSON.
    import matplotlib.pyplot as plt

    np.set_printoptions(precision=3, suppress=True)     # reduce numpy output length

    # Survey area corners in NED coordinates [north, east, depth].
    area = np.array([
        [4.4, -9.8, 1.0],       # A
        [4.4, -1.8, 1.0],       # B
        [1.0, -1.8, 1.0],       # C
        [1.0, -9.8, 1.0]        # D
    ])

    # # rotation case
    # area = np.array([
    #     [0,0,2],                    # A
    #     [10,10,2],                  # B
    #     [0, np.sqrt(2) * 10,2],     # C
    #     [-10,10,2]                  # D
    # ])

    # sonar parameters
    sonar_field = 1         # meters
    sonar_overlap = 0       # 0 to 1

    # get global points
    (fixes_a, n_cols) = pattern_from_ned(area, start=0, spacing=sonar_field, overlap=sonar_overlap)
    # print(fixes_d)

    # (fixes_b, n_cols) = pattern_from_ned(area, start=1, spacing=sonar_field, overlap=sonar_overlap)
    # print(fixes_d)

    #(fixes_c, n_cols) = pattern_from_ned(area, start=2, spacing=sonar_field, overlap=sonar_overlap)
    #print(fixes_c)

    #(fixes_d, n_cols) = pattern_from_ned(area, start=3, spacing=sonar_field, overlap=sonar_overlap)
    #print(fixes_d)

    # Visualize the generated trajectory.
    tt.plot_trajectory(fixes_a)
    plt.show()

    # Dump the waypoints as JSON for downstream consumers.
    import json
    data = dict()
    data['points'] = fixes_a.tolist()
    print(json.dumps(data))

    # import matplotlib.pyplot as plt
    #
    # fig = plt.figure()
    # #plt.plot(fixes_a[:,1], fixes_a[:,0], 'or-')
    # #plt.plot(fixes_b[:,1], fixes_b[:,0], '*g--')
    # #plt.plot(fixes_c[:,1], fixes_c[:,0], 'ob-')
    # plt.plot(fixes_d[:,1], fixes_d[:,0], 'oy-')
    # plt.grid()
    # plt.show()
|
|
from collections import namedtuple
from games import (Game)
from queue import PriorityQueue
from copy import deepcopy
class GameState:
    """Minimal game state: the player to move, the board, an optional
    human-readable label, and a search-depth hint."""

    def __init__(self, to_move, board, label=None, depth=8):
        self.to_move = to_move      # player whose turn it is ('R' or 'B')
        self.board = board          # dict mapping cell -> player mark
        self.label = label          # optional display name for this state
        self.maxDepth = depth       # search-depth hint carried by the state

    def __str__(self):
        # Fall back to the default object repr when no label was given.
        # (Was `== None`; identity comparison is the correct idiom.)
        if self.label is None:
            return super(GameState, self).__str__()
        return self.label
class FlagrantCopy(Game):
    """A flagrant copy of Connect4, from game.py

    It's simplified, so that moves and utility are calculated as needed.
    Play Connect4 on an h x v board, with Max (first player) playing 'R'.
    A state has the player to move and a board, in the form of
    a dict of {(x, y): Player} entries, where Player is 'R' or 'B'.
    """

    def __init__(self, h=4, v=4, k=4):
        self.h = h      # number of rows
        self.v = v      # number of columns
        self.k = k      # how many in a row are needed to win
        self.initial = GameState(to_move='R', board={})

    def actions(self, state):
        """Return legal moves: the lowest empty cell of each column.

        The move list is cached on the state the first time it is computed.
        """
        try:
            return state.moves
        except AttributeError:
            pass    # not cached yet -- compute below
        moves = []
        for x in range(1, self.v + 1):
            # Scan each column from the bottom (y == h) upward and take the
            # first empty cell -- pieces "fall" to the bottom.
            for y in range(self.h, 0, -1):
                if (y, x) not in state.board.keys():
                    moves.append((y, x))
                    break
        state.moves = moves
        return moves

    # defines the order of play
    def opponent(self, player):
        if player == 'R':
            return 'B'
        if player == 'B':
            return 'R'
        return None

    def result(self, state, move):
        """Return the state reached by playing `move` in `state`."""
        if move not in self.actions(state):
            return state  # Illegal move has no effect
        board = state.board.copy()
        player = state.to_move
        board[move] = player
        next_mover = self.opponent(player)
        return GameState(to_move=next_mover, board=board)

    def utility(self, state, player):
        "Return the value to player; 1 for win, -1 for loss, 0 otherwise."
        try:
            return state.utility if player == 'R' else -state.utility
        except AttributeError:
            pass    # not cached yet -- compute below
        board = state.board
        util = self.check_win(board, 'R')
        if util == 0:
            util = -self.check_win(board, 'B')
        state.utility = util
        return util if player == 'R' else -util

    # Did I win?
    def check_win(self, board, player):
        """Return 1 if `player` has k in a row anywhere, else 0.

        NOTE(review): the diagonal checks are hard-coded for the default
        4x4 board with k == 4; other h/v/k values are not fully covered.
        """
        # check rows
        for y in range(1, self.v + 1):
            if self.k_in_row(board, (1, y), player, (1, 0)):
                return 1
        # check columns
        for x in range(1, self.h + 1):
            if self.k_in_row(board, (x, 1), player, (0, 1)):
                return 1
        # check \ diagonal
        if self.k_in_row(board, (1, 1), player, (1, 1)):
            return 1
        # check / diagonal.  Start from (4,1): the anti-diagonal through the
        # old start (3,1) only has 3 in-board cells on a 4x4 board, so the
        # real winning anti-diagonal {(4,1),(3,2),(2,3),(1,4)} was never found.
        if self.k_in_row(board, (4, 1), player, (-1, 1)):
            return 1
        return 0

    # does player have K in a row? return 1 if so, 0 if not
    def k_in_row(self, board, start, player, direction):
        "Return true if there is a line through start on board for player."
        (delta_x, delta_y) = direction
        x, y = start
        n = 0  # n is number of moves in row
        while board.get((x, y)) == player:
            n += 1
            x, y = x + delta_x, y + delta_y
        x, y = start
        while board.get((x, y)) == player:
            n += 1
            x, y = x - delta_x, y - delta_y
        n -= 1  # Because we counted start itself twice
        return n >= self.k

    def terminal_test(self, state):
        "A state is terminal if it is won or there are no empty squares."
        return self.utility(state, 'R') != 0 or len(self.actions(state)) == 0

    def display(self, state):
        """Pretty-print the board, '_' marking empty cells."""
        board = state.board
        for x in range(1, self.h + 1):
            for y in range(1, self.v + 1):
                print(board.get((x, y), '_'), end=' ')
            print()
# A default 4x4 game instance (k = 4 in a row to win).
myGame = FlagrantCopy()

# Terminal state: R already has four in a row (cells (1,1)..(4,1)).
won = GameState(
    to_move = 'B',
    board = {(1,1): 'R', (2,1): 'R', (3,1): 'R', (4,1): 'R',
             (2,2): 'B', (3,2): 'B', (4,2): 'B',
             },
    label = 'won'
)

# R to move; playing (1,1) completes R's four in a row immediately.
winin1 = GameState(
    to_move = 'R',
    board = {(2,1): 'R', (3,1): 'R', (4,1): 'R',
             (2,2): 'B', (3,2): 'B', (4,2): 'B',
             },
    label = 'winin1'
)

# B to move; playing (1,2) completes B's four in a row, so R loses in one.
losein1 = GameState(
    to_move = 'B',
    board = {(4,1): 'R', (3,1): 'R', (2,1): 'R', (4,3): 'R',
             (2,2): 'B', (3,2): 'B', (4,2): 'B',
             },
    label = 'losein1'
)

# Per its label: R (to move) can force a win within 3 plies -- TODO confirm.
winin3 = GameState(
    to_move = 'R',
    board = {(4,1): 'R', (3,1): 'R',
             (3,2): 'B', (4,2): 'B',
             },
    label = 'winin3'
)

# Per its label: B (to move) can force R's loss within 3 plies -- TODO confirm.
losein3 = GameState(
    to_move = 'B',
    board = {(4,1): 'R', (4,3): 'R',
             (3,2): 'B', (4,2): 'B',
             },
    label = 'losein3'
)

# Per its label: R (to move) can force a win within 5 plies -- TODO confirm.
winin5 = GameState(
    to_move = 'R',
    board = {(4,1): 'R', (3,1): 'R',
             (4,2): 'B',
             },
    label = 'winin5'
)

# Terminal state: B has four in a row ((1,2)..(4,2)); R has lost.
lost = GameState(
    to_move = 'R',
    board = {(4,1): 'R', (3,1): 'R', (2,1): 'R', (4,3): 'R',
             (4,2): 'B', (3,2): 'B', (2,2): 'B', (1,2): 'B',
             },
    label = 'lost'
)
#
# class TemplateState: # one way to define the state of a minimal game.
#
# def __init__(self, player): # add parameters as needed.
# self.to_move = player
# self.label = str(id(self)) # change this to something easier to read
# # add code and self.variables as needed.
#
# def __str__(self): # use this exact signature
# return self.label
#
# # class TemplateAction:
# # '''
# # It is not necessary to define an action.
# # Start with actions as simple as a label (e.g., 'Down')
# # or a pair of coordinates (e.g., (1,2)).
# #
# # Don't un-comment this until you already have a working game,
# # and want to play smarter.
# # '''
# # def __lt__(self, other): # use this exact signature
# # # return True when self is a better move than other.
# # return False
#
# class TemplateGame(Game):
# '''
# This is a minimal Game definition,
# the shortest implementation I could run without errors.
# '''
#
# def __init__(self, initial): # add parameters if needed.
# self.initial = initial
# # add code and self.variables if needed.
#
# def actions(self, state): # use this exact signature.
# acts = []
# # append all moves, which are legal in this state,
# # to the list of acts.
# return acts
#
# def result(self, state, move): # use this exact signature.
# newState = deepcopy(state)
# # use the move to modify the newState
# return newState
#
# def terminal_test(self, state): # use this exact signature.
# # return True only when the state of the game is over.
# return True
#
# def utility(self, state, player): # use this exact signature.
# ''' return:
# >0 if the player is winning,
# <0 if the player is losing,
# 0 if the state is a tie.
# '''
# return 0
#
# def display(self, state): # use this exact signature.
# # pretty-print the game state, using ASCII art,
# # to help a human player understand his options.
# print(state)
#
# tg = TemplateGame(TemplateState('A')) # this is the game we play interactively.
# Map each game instance to the list of named states exercised when testing
# the search algorithms against it.
myGames = {
    myGame: [
        won,
        winin1, losein1, winin3, losein3, winin5,
        lost,
    ],
    # tg: [
    #     # these are the states we tabulate when we test AB(1), AB(2), etc.
    #     TemplateState('B'),
    #     TemplateState('C'),
    # ]
}
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based gated feedforward layer."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import gin
import tensorflow as tf
@tf.keras.utils.register_keras_serializable(package="Text")
@gin.configurable
class GatedFeedforward(tf.keras.layers.Layer):
  """Gated linear feedforward layer.

  This layer follows the paper "GLU Variants Improve Transformer"
  (https://arxiv.org/abs/2002.05202). In addition, it allows to stack
  multiple feedforward blocks and specify the position of dropout layer.

  Arguments:
    intermediate_size: Size of the intermediate layer.
    intermediate_activation: Activation for the intermediate layer.
    dropout: Dropout probability for the output dropout.
    use_gate: Whether to use gated linear units. If True, assuming `GELU` as
      the activation and omitting bias, will apply
      `GEGLU(x, W, V, W_2) = (GEGLU(xW) * xV)W2`; if False, will follow
      "Attention Is All You Need" (https://arxiv.org/abs/1706.03762) paper
      and apply `FFN(x, W, W_2) = GELU(xW_1)W_2.`
    num_blocks: The number of feedforward blocks to stack. Each block contains
      a (gated) linear layer and a fully connected layer followed by dropout,
      layer norm and residual.
    dropout_position: Where to apply the dropout, the value can be either
      `before_residual` or `after_residual`. If `before_residual`, will apply
      `layer_output = layer_norm(dropout(layer_output) + layer_input)`;
      if `after residual`, will apply
      `layer_output = dropout(layer_norm(layer_output + layer_input))`.
    kernel_initializer: Initializer for dense layer kernels.
    bias_initializer: Initializer for dense layer biases.
    kernel_regularizer: Regularizer for dense layer kernels.
    bias_regularizer: Regularizer for dense layer biases.
    activity_regularizer: Regularizer for dense layer activity.
    kernel_constraint: Constraint for dense layer kernels.
    bias_constraint: Constraint for dense layer kernels.
  """

  def __init__(self,
               intermediate_size,
               intermediate_activation,
               dropout,
               use_gate=True,
               num_blocks=1,
               dropout_position="before_residual",
               kernel_initializer="glorot_uniform",
               bias_initializer="zeros",
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    super(GatedFeedforward, self).__init__(**kwargs)
    self._intermediate_size = intermediate_size
    self._intermediate_activation = intermediate_activation
    self._dropout = dropout
    self._use_gate = use_gate
    self._num_blocks = num_blocks
    self._dropout_position = dropout_position
    if self._dropout_position not in ("before_residual", "after_residual"):
      # Fixed: the concatenated message previously read "or`after_residual`"
      # (missing space between the two string literals).
      raise ValueError(
          "The dropout_position should be either `before_residual` or "
          "`after_residual`, got: %s" % self._dropout_position)

    self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
    self._bias_initializer = tf.keras.initializers.get(bias_initializer)
    self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
    self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
    self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
    self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
    self._bias_constraint = tf.keras.constraints.get(bias_constraint)

  def build(self, input_shape):
    """Create the per-block dense/gate/dropout/layer-norm sublayers."""
    hidden_size = input_shape.as_list()[-1]

    common_kwargs = dict(
        kernel_initializer=self._kernel_initializer,
        bias_initializer=self._bias_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer,
        activity_regularizer=self._activity_regularizer,
        kernel_constraint=self._kernel_constraint,
        bias_constraint=self._bias_constraint)
    self._intermediate_dense = []
    self._intermediate_activation_layers = []
    self._gate_dense = []
    self._output_dense = []
    self._output_dropout = []
    self._output_layer_norm = []
    activation_policy = tf.keras.mixed_precision.experimental.global_policy()
    if activation_policy.name == "mixed_bfloat16":
      # bfloat16 causes BERT with the LAMB optimizer to not converge
      # as well, so we use float32.
      # TODO(b/154538392): Investigate this.
      activation_policy = tf.float32
    for i in range(self._num_blocks):
      self._intermediate_dense.append(
          tf.keras.layers.experimental.EinsumDense(
              "abc,cd->abd",
              output_shape=(None, self._intermediate_size),
              bias_axes="d",
              name="intermediate_%d" % i,
              **common_kwargs))
      self._intermediate_activation_layers.append(tf.keras.layers.Activation(
          self._intermediate_activation, dtype=activation_policy))
      if self._use_gate:
        self._gate_dense.append(
            tf.keras.layers.experimental.EinsumDense(
                "abc,cd->abd",
                output_shape=(None, self._intermediate_size),
                bias_axes="d",
                name="gate_%d" % i,
                **common_kwargs))
      self._output_dense.append(
          tf.keras.layers.experimental.EinsumDense(
              "abc,cd->abd",
              output_shape=(None, hidden_size),
              bias_axes="d",
              name="output_%d" % i,
              **common_kwargs))
      self._output_dropout.append(
          tf.keras.layers.Dropout(rate=self._dropout))
      # Use float32 in layernorm for numeric stability.
      self._output_layer_norm.append(
          tf.keras.layers.LayerNormalization(
              name="output_layer_norm_%d" % i,
              axis=-1,
              epsilon=1e-12,
              dtype=tf.float32))

  def get_config(self):
    """Return the config needed to reconstruct this layer (serialization)."""
    config = {
        "intermediate_size":
            self._intermediate_size,
        "intermediate_activation":
            self._intermediate_activation,
        "dropout":
            self._dropout,
        "use_gate":
            self._use_gate,
        "num_blocks":
            self._num_blocks,
        "dropout_position":
            self._dropout_position,
        "kernel_initializer":
            tf.keras.initializers.serialize(self._kernel_initializer),
        "bias_initializer":
            tf.keras.initializers.serialize(self._bias_initializer),
        "kernel_regularizer":
            tf.keras.regularizers.serialize(self._kernel_regularizer),
        "bias_regularizer":
            tf.keras.regularizers.serialize(self._bias_regularizer),
        "activity_regularizer":
            tf.keras.regularizers.serialize(self._activity_regularizer),
        "kernel_constraint":
            tf.keras.constraints.serialize(self._kernel_constraint),
        "bias_constraint":
            tf.keras.constraints.serialize(self._bias_constraint)
    }
    base_config = super(GatedFeedforward, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self, inputs):
    """Apply `num_blocks` (gated) feedforward blocks with residuals."""
    layer_output = inputs
    for i in range(self._num_blocks):
      layer_input = layer_output
      intermediate_output = self._intermediate_dense[i](layer_input)
      intermediate_output = self._intermediate_activation_layers[i](
          intermediate_output)
      if self._use_gate:
        gated_linear = self._gate_dense[i](layer_input)
        intermediate_output = intermediate_output * gated_linear
      layer_output = self._output_dense[i](intermediate_output)
      if self._dropout_position == "before_residual":
        layer_output = self._output_dropout[i](layer_output)

      # During mixed precision training, `layer_input` may be from layer norm.
      # If so, it is always fp32. Cast layer_output to fp32 for the subsequent
      # add.
      if layer_input.dtype == tf.float32:
        layer_output = tf.cast(layer_output, tf.float32)
      layer_output = self._output_layer_norm[i](layer_output + layer_input)
      if self._dropout_position == "after_residual":
        layer_output = self._output_dropout[i](layer_output)

    return layer_output
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, IO, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._cloud_service_role_instances_operations import build_delete_request_initial, build_get_instance_view_request, build_get_remote_desktop_file_request, build_get_request, build_list_request, build_rebuild_request_initial, build_reimage_request_initial, build_restart_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CloudServiceRoleInstancesOperations:
"""CloudServiceRoleInstancesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_10_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client used to execute HTTP requests.
        self._client = client
        # Serializer/deserializer for request and response models.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service client configuration (subscription id, polling interval, ...).
        self._config = config
async def _delete_initial(
self,
role_instance_name: str,
resource_group_name: str,
cloud_service_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
role_instance_name=role_instance_name,
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
subscription_id=self._config.subscription_id,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}'} # type: ignore
    @distributed_trace_async
    async def begin_delete(
        self,
        role_instance_name: str,
        resource_group_name: str,
        cloud_service_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes a role instance from a cloud service.

        :param role_instance_name: Name of the role instance.
        :type role_instance_name: str
        :param resource_group_name: Name of the resource group.
        :type resource_group_name: str
        :param cloud_service_name: Name of the cloud service.
        :type cloud_service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # When resuming from a continuation token, the initial DELETE request
        # has already been sent -- skip it and rebuild the poller below.
        if cont_token is None:
            raw_result = await self._delete_initial(
                role_instance_name=role_instance_name,
                resource_group_name=resource_group_name,
                cloud_service_name=cloud_service_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # The operation returns no body; only invoke the custom `cls` hook.
            if cls:
                return cls(pipeline_response, None, {})

        # Select the polling strategy: default ARM polling, no polling, or a
        # caller-supplied polling object.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}'}  # type: ignore
@distributed_trace_async
async def get(
self,
role_instance_name: str,
resource_group_name: str,
cloud_service_name: str,
expand: Optional[str] = "instanceView",
**kwargs: Any
) -> "_models.RoleInstance":
"""Gets a role instance from a cloud service.
:param role_instance_name: Name of the role instance.
:type role_instance_name: str
:param resource_group_name:
:type resource_group_name: str
:param cloud_service_name:
:type cloud_service_name: str
:param expand: The expand expression to apply to the operation. The default value is
"instanceView".
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RoleInstance, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_10_01_preview.models.RoleInstance
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RoleInstance"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
role_instance_name=role_instance_name,
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
subscription_id=self._config.subscription_id,
expand=expand,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RoleInstance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}'} # type: ignore
@distributed_trace_async
async def get_instance_view(
self,
role_instance_name: str,
resource_group_name: str,
cloud_service_name: str,
**kwargs: Any
) -> "_models.RoleInstanceView":
"""Retrieves information about the run-time state of a role instance in a cloud service.
:param role_instance_name: Name of the role instance.
:type role_instance_name: str
:param resource_group_name:
:type resource_group_name: str
:param cloud_service_name:
:type cloud_service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RoleInstanceView, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_10_01_preview.models.RoleInstanceView
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RoleInstanceView"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_instance_view_request(
role_instance_name=role_instance_name,
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
subscription_id=self._config.subscription_id,
template_url=self.get_instance_view.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RoleInstanceView', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_instance_view.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/instanceView'} # type: ignore
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        cloud_service_name: str,
        expand: Optional[str] = "instanceView",
        **kwargs: Any
    ) -> AsyncIterable["_models.RoleInstanceListResult"]:
        """Gets the list of all role instances in a cloud service. Use nextLink property in the response
        to get the next page of role instances. Do this till nextLink is null to fetch all the role
        instances.

        :param resource_group_name: Name of the resource group.
        :type resource_group_name: str
        :param cloud_service_name: Name of the cloud service.
        :type cloud_service_name: str
        :param expand: The expand expression to apply to the operation. The default value is
         "instanceView".
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either RoleInstanceListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_10_01_preview.models.RoleInstanceListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RoleInstanceListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page: build the request from the operation's URL template.
            if not next_link:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    cloud_service_name=cloud_service_name,
                    subscription_id=self._config.subscription_id,
                    expand=expand,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            # Subsequent pages: the service-supplied nextLink is used verbatim
            # as the URL; the method is forced back to GET.
            else:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    cloud_service_name=cloud_service_name,
                    subscription_id=self._config.subscription_id,
                    expand=expand,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand (nextLink, items) back to the pager.
            deserialized = self._deserialize("RoleInstanceListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page, raising mapped ARM errors on non-200 responses.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances'}  # type: ignore
    async def _restart_initial(
        self,
        role_instance_name: str,
        resource_group_name: str,
        cloud_service_name: str,
        **kwargs: Any
    ) -> None:
        """Issue the initial Restart POST of the long-running operation.

        Returns once the service has accepted the request (200 or 202);
        polling to completion is handled by :meth:`begin_restart`.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_restart_request_initial(
            role_instance_name=role_instance_name,
            resource_group_name=resource_group_name,
            cloud_service_name=cloud_service_name,
            subscription_id=self._config.subscription_id,
            template_url=self._restart_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Restart returns no body; 'cls' may still capture the raw response.
        if cls:
            return cls(pipeline_response, None, {})

    _restart_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/restart'}  # type: ignore
    @distributed_trace_async
    async def begin_restart(
        self,
        role_instance_name: str,
        resource_group_name: str,
        cloud_service_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """The Reboot Role Instance asynchronous operation requests a reboot of a role instance in the
        cloud service.

        :param role_instance_name: Name of the role instance.
        :type role_instance_name: str
        :param resource_group_name: Name of the resource group.
        :type resource_group_name: str
        :param cloud_service_name: Name of the cloud service.
        :type cloud_service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved state.
        if cont_token is None:
            raw_result = await self._restart_initial(
                role_instance_name=role_instance_name,
                resource_group_name=resource_group_name,
                cloud_service_name=cloud_service_name,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Final-result callback: this operation returns no body.
            if cls:
                return cls(pipeline_response, None, {})

        # Resolve the polling strategy: True -> ARM polling, False -> no
        # polling, otherwise a caller-supplied polling method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/restart'}  # type: ignore
    async def _reimage_initial(
        self,
        role_instance_name: str,
        resource_group_name: str,
        cloud_service_name: str,
        **kwargs: Any
    ) -> None:
        """Issue the initial Reimage POST of the long-running operation.

        Returns once the service has accepted the request (200 or 202);
        polling to completion is handled by :meth:`begin_reimage`.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_reimage_request_initial(
            role_instance_name=role_instance_name,
            resource_group_name=resource_group_name,
            cloud_service_name=cloud_service_name,
            subscription_id=self._config.subscription_id,
            template_url=self._reimage_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Reimage returns no body; 'cls' may still capture the raw response.
        if cls:
            return cls(pipeline_response, None, {})

    _reimage_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/reimage'}  # type: ignore
    @distributed_trace_async
    async def begin_reimage(
        self,
        role_instance_name: str,
        resource_group_name: str,
        cloud_service_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """The Reimage Role Instance asynchronous operation reinstalls the operating system on instances
        of web roles or worker roles.

        :param role_instance_name: Name of the role instance.
        :type role_instance_name: str
        :param resource_group_name: Name of the resource group.
        :type resource_group_name: str
        :param cloud_service_name: Name of the cloud service.
        :type cloud_service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved state.
        if cont_token is None:
            raw_result = await self._reimage_initial(
                role_instance_name=role_instance_name,
                resource_group_name=resource_group_name,
                cloud_service_name=cloud_service_name,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Final-result callback: this operation returns no body.
            if cls:
                return cls(pipeline_response, None, {})

        # Resolve the polling strategy: True -> ARM polling, False -> no
        # polling, otherwise a caller-supplied polling method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reimage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/reimage'}  # type: ignore
    async def _rebuild_initial(
        self,
        role_instance_name: str,
        resource_group_name: str,
        cloud_service_name: str,
        **kwargs: Any
    ) -> None:
        """Issue the initial Rebuild POST of the long-running operation.

        Returns once the service has accepted the request (200 or 202);
        polling to completion is handled by :meth:`begin_rebuild`.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_rebuild_request_initial(
            role_instance_name=role_instance_name,
            resource_group_name=resource_group_name,
            cloud_service_name=cloud_service_name,
            subscription_id=self._config.subscription_id,
            template_url=self._rebuild_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Rebuild returns no body; 'cls' may still capture the raw response.
        if cls:
            return cls(pipeline_response, None, {})

    _rebuild_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/rebuild'}  # type: ignore
    @distributed_trace_async
    async def begin_rebuild(
        self,
        role_instance_name: str,
        resource_group_name: str,
        cloud_service_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """The Rebuild Role Instance asynchronous operation reinstalls the operating system on instances
        of web roles or worker roles and initializes the storage resources that are used by them. If
        you do not want to initialize storage resources, you can use Reimage Role Instance.

        :param role_instance_name: Name of the role instance.
        :type role_instance_name: str
        :param resource_group_name: Name of the resource group.
        :type resource_group_name: str
        :param cloud_service_name: Name of the cloud service.
        :type cloud_service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved state.
        if cont_token is None:
            raw_result = await self._rebuild_initial(
                role_instance_name=role_instance_name,
                resource_group_name=resource_group_name,
                cloud_service_name=cloud_service_name,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Final-result callback: this operation returns no body.
            if cls:
                return cls(pipeline_response, None, {})

        # Resolve the polling strategy: True -> ARM polling, False -> no
        # polling, otherwise a caller-supplied polling method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_rebuild.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/rebuild'}  # type: ignore
    @distributed_trace_async
    async def get_remote_desktop_file(
        self,
        role_instance_name: str,
        resource_group_name: str,
        cloud_service_name: str,
        **kwargs: Any
    ) -> IO:
        """Gets a remote desktop file for a role instance in a cloud service.

        :param role_instance_name: Name of the role instance.
        :type role_instance_name: str
        :param resource_group_name: Name of the resource group.
        :type resource_group_name: str
        :param cloud_service_name: Name of the cloud service.
        :type cloud_service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: IO, or the result of cls(response)
        :rtype: IO
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[IO]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_remote_desktop_file_request(
            role_instance_name=role_instance_name,
            resource_group_name=resource_group_name,
            cloud_service_name=cloud_service_name,
            subscription_id=self._config.subscription_id,
            template_url=self.get_remote_desktop_file.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        # stream=True: the RDP file body is not buffered into memory by the
        # pipeline; it is handed back as a download stream below.
        pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = response.stream_download(self._client._pipeline)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get_remote_desktop_file.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/remoteDesktopFile'}  # type: ignore
|
|
import serial
# import time
# import sys
import platform
class sms_motor(object):
    """Driver for one SMS motor controller on a shared serial bus.

    Wire frames have the layout
        [0x55, 0xAA, node id, 0x01, command id, byte count, data..., LRC]
    where the LRC is the XOR of the command id, the byte count and all data
    bytes. All instances talk over the module-global ``ser`` port opened by
    :func:`init`.

    Bug fixes vs. the previous revision:
      * frames are now built from integers only -- the old code put str
        characters ('\\x01', chr(cmd_id), h_0/h_1) into ``bytearray([...])``,
        which raises TypeError;
      * ``sendByte`` passed a bare int to ``checksum``, which indexes its
        third argument -- the value is now wrapped in a list;
      * response parsing goes through :meth:`_byte` so it works whether
        indexing the read buffer yields str (Python 2) or int (Python 3).
    """

    # Frame header bytes, matching the values the broadcast* helpers hard-code.
    HEADER_0 = 0x55
    HEADER_1 = 0xAA

    def __init__(self, motorId, resolution_bits, limits, homing_position_ticks):
        self.motorId = motorId  # node id on the serial bus
        self.resolution_bits = resolution_bits  # encoder resolution
        self.limits = limits  # allowed position range, in ticks
        self.homing_position_ticks = homing_position_ticks  # homing offset, in ticks

    @staticmethod
    def checksum(cmd, bc, x):
        """Return the XOR of *cmd*, *bc* and the first *bc* entries of *x*."""
        ret = cmd ^ bc
        for i in range(bc):
            ret ^= x[i]
        return ret

    @staticmethod
    def _byte(v):
        """Normalize one element of a read buffer to an int (py2 str / py3 int)."""
        return v if isinstance(v, int) else ord(v)

    # ------------------------------------------------------------------
    # Response readers
    # ------------------------------------------------------------------
    @staticmethod
    def getAnalogInResponse():
        """Read the reply to the analog-in query: four 16-bit LE values.

        Returns ``(success, [ain0, ain1, ain2, ain3])``.
        """
        success = False
        # The first 6 bytes are framing: 2 header bytes + addressed and owned
        # id + command id and byte count.
        b = ser.read(6)
        ain = [0, 0, 0, 0]
        # NOTE(review): the byte-count check is 4 although 8 data bytes are
        # read below -- preserved from the original; confirm against the
        # controller protocol.
        if sms_motor._byte(b[5]) != 4:
            return success, ain
        success = True
        for i in range(4):
            shift = 0
            for j in range(2):
                b += ser.read()
                ain[i] += (sms_motor._byte(b[2 * i + j + 6]) & 0xFF) << shift
                shift += 8
        # Consume the trailing LRC byte so no garbage is left on the port.
        b += ser.read()
        return success, ain

    @staticmethod
    def getDigitalIOResponse():
        """Read the reply carrying one bitmask byte for the three digital IOs.

        Returns ``(success, dio1, dio2, dio3)`` with each dio the raw masked bit.
        """
        success = False
        # 2 header bytes + addressed and owned id + command id + byte count.
        b = ser.read(6)
        dio1 = dio2 = dio3 = 0
        if sms_motor._byte(b[5]) != 1:
            return success, dio1, dio2, dio3
        success = True
        b += ser.read()
        bits = sms_motor._byte(b[6])
        dio1 = bits & 0x01
        dio2 = bits & 0x02
        dio3 = bits & 0x04
        # Consume the trailing LRC byte so no garbage is left on the port.
        b += ser.read()
        return success, dio1, dio2, dio3

    @staticmethod
    def getResponse():
        """Read a generic reply; returns ``(carrying_data, little-endian value)``.

        ``carrying_data`` is False when the reply has no data payload, which is
        the normal success reply for the set/start/stop command family.
        """
        carrying_data = False
        # 2 header bytes + addressed and owned id + command id + byte count.
        b = ser.read(6)
        num = 0
        print("Read 6 bytes and byte count is:", sms_motor._byte(b[5]))
        for i in range(sms_motor._byte(b[5])):
            carrying_data = True
            b += ser.read()
            num += (sms_motor._byte(b[i + 6]) & 0xFF) << (i * 8)
        # Consume the trailing LRC byte so no garbage is left on the port.
        b += ser.read()
        return carrying_data, num

    # ------------------------------------------------------------------
    # Frame construction / transmission
    # ------------------------------------------------------------------
    @staticmethod
    def _le_bytes(value, n):
        """Split *value* into *n* little-endian bytes."""
        return [(value >> (8 * i)) & 0xFF for i in range(n)]

    def _send_frame(self, cmd_id, data):
        """Checksum and write one frame addressed to this motor.

        *data* is a sequence of ints, already split into bytes.
        """
        data = [d & 0xFF for d in data]
        lrc = self.checksum(cmd_id, len(data), data)
        frame = bytearray([self.HEADER_0, self.HEADER_1, self.motorId, 0x01,
                           cmd_id, len(data)] + data + [lrc])
        ser.write(frame)

    def send8Bytes(self, cmd_id, value):
        """Send *value* as 8 little-endian data bytes."""
        self._send_frame(cmd_id, self._le_bytes(value, 8))

    def send4Bytes(self, cmd_id, value):
        """Send *value* as 4 little-endian data bytes."""
        self._send_frame(cmd_id, self._le_bytes(value, 4))

    def send2Bytes(self, cmd_id, value):
        """Send *value* as 2 little-endian data bytes."""
        self._send_frame(cmd_id, self._le_bytes(value, 2))

    def sendByte(self, cmd_id, value):
        """Send *value* as a single data byte.

        Fixed: the old code passed the bare int to checksum(), which indexes
        its third argument and therefore raised TypeError.
        """
        self._send_frame(cmd_id, [value])

    def sendCommand(self, cmd_id):
        """Send a data-less command; its LRC degenerates to the command id."""
        self._send_frame(cmd_id, [])

    ###########################################################################
    # Set commands reply without a data payload, so getResponse()[0]
    # (carrying_data) is False on success; each setter therefore returns
    # ``not carrying_data`` as its success flag.
    ###########################################################################
    def setPIDgainP(self, val):
        self.send2Bytes(0, val)
        return not self.getResponse()[0]

    def setPIDgainI(self, val):
        self.send2Bytes(1, val)
        return not self.getResponse()[0]

    def setPIDgainD(self, val):
        self.send2Bytes(2, val)
        return not self.getResponse()[0]

    def setProfileAcceleration(self, val):
        self.send4Bytes(3, val)
        return not self.getResponse()[0]

    def setProfileConstantVelocity(self, val):
        self.send4Bytes(4, val)
        return not self.getResponse()[0]

    def setCurrentLimit(self, val):
        self.send2Bytes(5, val)
        return not self.getResponse()[0]

    def setDurationForCurrentLimit(self, val):
        self.send2Bytes(6, val)
        return not self.getResponse()[0]

    def moveWithVelocity(self, val):
        self.send4Bytes(7, val)
        return not self.getResponse()[0]

    def moveToAbsolutePosition(self, pos):
        self.send8Bytes(8, pos)
        return not self.getResponse()[0]

    def moveToRelativePosition(self, pos):
        self.send8Bytes(9, pos)
        return not self.getResponse()[0]

    def profiledMoveWithVelocity(self, val):
        self.send4Bytes(10, val)
        return not self.getResponse()[0]

    def profiledMoveToAbsolutePosition(self, pos):
        self.send8Bytes(11, pos)
        return not self.getResponse()[0]

    def profiledMoveToRelativePosition(self, pos):
        self.send8Bytes(12, pos)
        return not self.getResponse()[0]

    def setVelocitySetpoint(self, val):
        self.send4Bytes(13, val)
        return not self.getResponse()[0]

    def setAbsolutePositionSetpoint(self, pos):
        self.send8Bytes(14, pos)
        return not self.getResponse()[0]

    def setRelativePositionSetpoint(self, pos):
        self.send8Bytes(15, pos)
        return not self.getResponse()[0]

    def setProfiledVelocitySetpoint(self, val):
        self.send4Bytes(16, val)
        return not self.getResponse()[0]

    def setProfiledAbsolutePositionSetpoint(self, pos):
        self.send8Bytes(17, pos)
        return not self.getResponse()[0]

    def setProfiledRelativePositionSetpoint(self, pos):
        self.send8Bytes(18, pos)
        return not self.getResponse()[0]

    def configureDigitalIOs(self, dio1, dio2, dio3):
        """Configure the three digital IO lines (command 19, one bitmask byte)."""
        mask = (0x01 if dio1 else 0) | (0x02 if dio2 else 0) | (0x04 if dio3 else 0)
        self.sendByte(19, mask)
        return not self.getResponse()[0]

    def setDigitalOutputs(self, dio1, dio2, dio3):
        """Drive the three digital outputs (command 20, one bitmask byte)."""
        mask = (0x01 if dio1 else 0) | (0x02 if dio2 else 0) | (0x04 if dio3 else 0)
        self.sendByte(20, mask)
        return not self.getResponse()[0]

    def setNodeID(self, oldNodeId, newNodeId):
        """Reassign the node id of the controller addressed as *oldNodeId* (cmd 0x15)."""
        data = [newNodeId & 0xFF]
        lrc = self.checksum(0x15, 1, data)
        ser.write(bytearray([self.HEADER_0, self.HEADER_1, oldNodeId, 0x01,
                             0x15, 0x01, data[0], lrc]))
        return not self.getResponse()[0]

    def resetIncrementalPosition(self):
        self.sendCommand(24)
        return not self.getResponse()[0]

    def start(self):
        self.sendCommand(25)
        return not self.getResponse()[0]

    def halt(self):
        self.sendCommand(26)
        return not self.getResponse()[0]

    def stop(self):
        self.sendCommand(27)
        return not self.getResponse()[0]

    def resetErrors(self):
        self.sendCommand(30)
        return not self.getResponse()[0]

    ###########################################################################
    # Query commands (100..114): each sends a data-less command and decodes
    # the reply with the matching response reader.
    ###########################################################################
    def getPIDgainP(self):
        self.sendCommand(100)
        return self.getResponse()

    def getPIDgainI(self):
        self.sendCommand(101)
        return self.getResponse()

    def getPIDgainD(self):
        self.sendCommand(102)
        return self.getResponse()

    def getProfileAcceleration(self):
        self.sendCommand(103)
        return self.getResponse()

    def getProfileConstantVelocity(self):
        self.sendCommand(104)
        return self.getResponse()

    def getCurrentLimit(self):
        self.sendCommand(105)
        return self.getResponse()

    def getCurrentLimitDuration(self):
        self.sendCommand(106)
        return self.getResponse()

    def getDigitalIOConfiguration(self, dio):
        # 'dio' is accepted for interface compatibility but not transmitted.
        self.sendCommand(107)
        return self.getDigitalIOResponse()

    def getDigitalIn(self, din):
        # 'din' is accepted for interface compatibility but not transmitted.
        self.sendCommand(109)
        return self.getDigitalIOResponse()

    def getAnalogIn(self, ain):
        # 'ain' is accepted for interface compatibility but not transmitted.
        self.sendCommand(110)
        return self.getAnalogInResponse()

    def getPosition(self):
        self.sendCommand(111)
        return self.getResponse()

    def getAbsolutePosition(self):
        self.sendCommand(112)
        return self.getResponse()

    def getVelocity(self):
        self.sendCommand(113)
        return self.getResponse()

    def getCurrent(self):
        self.sendCommand(114)
        return self.getResponse()
def broadcastDoMove():
    """Broadcast the 'do move' command (0xC8) to every node on the bus."""
    ser.write(bytearray([0x55, 0xAA, 0x00, 0x01, 0xC8, 0x00, 0xC8]))
    return True
def broadcastStart():
    """Broadcast the 'start' command (0xC9) to every node on the bus."""
    ser.write(bytearray([0x55, 0xAA, 0x00, 0x01, 0xC9, 0x00, 0xC9]))
    return True
def broadcastHalt():
    """Broadcast the 'halt' command (0xCA) to every node on the bus."""
    ser.write(bytearray([0x55, 0xAA, 0x00, 0x01, 0xCA, 0x00, 0xCA]))
    return True
def broadcastStop():
    """Broadcast the 'stop' command (0xCB) to every node on the bus."""
    ser.write(bytearray([0x55, 0xAA, 0x00, 0x01, 0xCB, 0x00, 0xCB]))
    return True
def init(port):
    """Open serial port number *port* at 57600 baud, 8N1, and publish it as
    the module-global ``ser`` used by all sms_motor instances.

    Also publishes the legacy frame-header globals ``h_0``/``h_1``.

    :param port: numeric port index (COM<n> on Windows, /dev/ttyUSB<n> on Linux).
    :raises RuntimeError: on any other platform. Previously an unsupported
        platform silently fell through and tried to open an empty port name.
    """
    global ser, h_0, h_1
    h_0 = '\x55'
    h_1 = '\xAA'
    system = platform.system()
    if system == "Windows":
        port_string = 'COM%d' % port
    elif system == "Linux":
        port_string = '/dev/ttyUSB%d' % port
    else:
        raise RuntimeError("Unsupported platform for SMS serial port: %s" % system)
    ser = serial.Serial(port_string, 57600, serial.EIGHTBITS, serial.PARITY_NONE, serial.STOPBITS_ONE)
def shut_down(port):
    """Close the module-global serial port if it is open, then terminate.

    Never returns: ends with exit(0). The *port* argument is unused and kept
    only for call-site symmetry with init().
    """
    global ser
    if ser.isOpen():  # isOpen() is the legacy pySerial spelling of is_open
        ser.close()
    exit(0)
# def startall(startId, stopId):
# for i in range(startId, stopId + 1):
# start(i)
# def stopall(startId, stopId):
# for i in range(startId, stopId + 1):
# stop(i)
# def resetall(startId, stopId):
# for i in range(startId, stopId + 1):
# print(resetErrors(i))
# def main():
# if (len(sys.argv) < 3):
# print("Too few input arguments.\n")
# print_message()
# if(len(sys.argv) < 2):
# exit(0)
# else:
# shut_down(int(sys.argv[1]))
# elif (len(sys.argv) > 5):
# print("Too many input arguments.\n")
# print_message()
# shut_down(int(sys.argv[1]))
# init(int(sys.argv[1]))
# if (sys.argv[2] == "getvel"):
# ret = getVelocity(int(sys.argv[3]))
# for b in ret:
# print(ord(b))
# shut_down(int(sys.argv[1]))
# if (sys.argv[2] == "getpos"):
# mid = int(sys.argv[3])
# pos = getPosition(mid)
# print(pos[0], float(pos[1]))
# shut_down(int(sys.argv[1]))
# if (sys.argv[2] == "getacc"):
# ret = getProfileAcceleration(int(sys.argv[3]))
# for b in ret:
# print(ord(b))
# shut_down(int(sys.argv[1]))
# if (sys.argv[2] == "setvel"):
# setProfileConstantVelocity(int(sys.argv[3]), int(sys.argv[4]))
# shut_down(int(sys.argv[1]))
# if (sys.argv[2] == "setacc"):
# setProfileAcceleration(int(sys.argv[3]), int(sys.argv[4]))
# shut_down(int(sys.argv[1]))
# if (sys.argv[2] == "startall"):
# broadcastStart()
# shut_down(int(sys.argv[1]))
# if (sys.argv[2] == "start"):
# start(int(sys.argv[3]))
# shut_down(int(sys.argv[1]))
# if (sys.argv[2] == "stopall"):
# broadcastStop()
# shut_down(int(sys.argv[1]))
# if (sys.argv[2] == "stop"):
# stop(int(sys.argv[3]))
# shut_down(int(sys.argv[1]))
# if (sys.argv[2] == "resetall"):
# resetall(4, 6)
# shut_down(int(sys.argv[1]))
# if (sys.argv[2] == "reset"):
# resetErrors(int(sys.argv[3]))
# shut_down(int(sys.argv[1]))
# if (sys.argv[2] == "move"):
# profiledMoveToAbsolutePosition(int(sys.argv[3]), int(sys.argv[4]))
# time.sleep(0.02)
# shut_down(int(sys.argv[1]))
# if (sys.argv[2] == "test"):
# angle = 0
# direction = 1
# step = 10
# while 1:
# # move(5, 1722*angle)
# # time.sleep(0.01)
# profiledMoveToAbsolutePosition(int(sys.argv[3]), angle)
# print(angle)
# angle = angle + direction * step
# if int(angle) == 0:
# direction = 1
# if int(angle) == 15000:
# direction = -1
# time.sleep(0.03)
# if __name__ == '__main__':
# main()
|
|
#
# Copyright (c) 1996-2005, SR Research Ltd., All Rights Reserved
#
#
# For use by SR Research licencees only. Redistribution and use in source
# and binary forms, with or without modification, are NOT permitted.
#
#
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the distribution.
#
# Neither name of SR Research Ltd nor the name of contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS
# IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# $Date: 2007/08/29 18:48:21 $
#
#
#########################################
# 2017-02-12 Michael Eickenberg:
# There was an issue with the pylink27 package as stored in path append below, namely
# that it was a 32 bit version, whereas the python environment (anaconda install) was
# 64 bit. This led to a dll error referring to the fact that "this dll is not a valid win32 app blabla"
# The solution was either to downgrade anaconda to 32 bits or to use a different pylink package:
# Note that the folder C:\\Users\\Public\\Documents\\EyeLink\\SampleExperiments contains many different
# pylink versions for many different python versions. We chose "python27-amd64" to make it work.
# ****CRUCIAL****: The package contains absolute imports! The package thinks it is called "pylink", so you
# have to give it that name, otherwise internal imports will attempt to load something from somewhere else.
# While this is obviously poor programming style, we have to make do with this and work around it.
# The solution in place at the moment is a copy of pylink27-amd64 in the site-packages of anaconda.
# Upon anaconda reinstall this will have to be redone.
# Alternatively, the below commented code can be uncommented and then one has to make sure that the "pylink"
# folder contains the right version of the package (not the case by default, but is the case right now.
# At eyelink reinstall this will have to be redone.)
## ad hoc addition of path to eyelink lib
## comment out if something doesn't work
## Michael Eickenberg 25/9/2015
#import sys
#sys.path.append(u'C:\\Users\\Public\\Documents\\EyeLink\\SampleExperiments\\Python')
#########################################
from pylink import *
#from eyelink import EyeLink # I don't know why this was here. The class Eyelink is in pylink. (Michael 12/02/17)
import pygame
import time
import gc
import sys
import gcwindow_movie_trials
import os
import numpy as np
##### INITITALIZE VIDEO STUFF from PLAY.PY
# --- Movie/session configuration -----------------------------------------
# Maps run type -> (stimulus folder %-pattern, frame-index file %-pattern).
cwd = os.path.abspath(os.path.split(__file__)[0])
#cwd = "C:\Data\agrilopi\movies\"
types = dict(val=("val%03d_3min", "valseq3minby10_%02d.index"), trn=("trn%03d", "trnseq.index"),
             eyetrack=("val%03d_3min", "val_1min.index"))
# Fixed ordering of (run type, index) pairs; selected below via the `run` argv.
seq = [("trn", 1), ("val", 1), ("trn", 2), ("val", 2), ("trn", 3), ("val", 3), ("trn", 4), ("val",5), ("eyetrack", 3)]
### added ("val",5) for a 10 x 1-min repeatability test run
### 9/3/2012 SN
# Command line: <script> <session> <run> <subject> (pname is the script path).
pname, session, run, subject = sys.argv
t, r = seq[int(run)-1]
impath, idxfile = types[t]
# Fill in the %-patterns depending on run type.
if t == "val":
    impath = impath%int(session)
    idxfile = idxfile % r
elif t == "trn":
    # Training runs are numbered consecutively across sessions (4 per session).
    impath = impath%((int(session)-1)*4+r)
else:
    impath = impath % int(session)
impath = os.path.join(cwd, impath)+'/'
idxfile = os.path.join(cwd, idxfile)
print impath
print idxfile
# Presentation parameters consumed by the gcwindow_movie_trials module.
gcwindow_movie_trials.fixationcolor = ((255, 80, 80), (80,255,80), (80, 80, 255), (255, 80, 80), (80, 255, 80), (80, 80, 255), (255, 255, 80))
gcwindow_movie_trials.fcchange = 2
gcwindow_movie_trials.show_hz = 15
gcwindow_movie_trials.tfactor = 1.0000 #1.03365
# gcwindow_movie_trials.show(impath, idxfile)
############## END play INIT
## try showing the movie here, without eyetracker. This works, so it is commented
# pygame.init()
# surface = pygame.display.set_mode((800, 600), pygame.FULLSCREEN | pygame.RLEACCEL, 32)
surface = pygame.display.set_mode((800, 600), pygame.RLEACCEL, 32)
# gcwindow_movie_trials.do_trial(impath, idxfile, surface)
# sys.exit()
## end trying to show the movie
# Run relative to the script's own directory so relative data paths resolve.
spath = os.path.dirname(sys.argv[0])
if len(spath) !=0: os.chdir(spath)
eyelinktracker = EyeLink()
#Here is the starting point of the experiment
#Initializes the graphics
pygame.init()
pygame.display.init()
# pygame.display.set_mode((800, 600), pygame.FULLSCREEN |pygame.DOUBLEBUF |pygame.RLEACCEL|pygame.HWSURFACE ,32)
pygame.display.set_mode((800, 600), pygame.FULLSCREEN | pygame.RLEACCEL, 32)
pylink.openGraphics()
#Opens the EDF file.
edfFileName = "TEST.EDF";  # NOTE(review): placeholder, immediately overwritten below
# EDF name = first 5 chars of subject id + zero-padded counter; the counter is
# bumped until an unused filename is found, so reruns never overwrite data.
edfFileName = subject[:5] + '%03d.edf'
edffilepath = os.path.join(edfFileName)
filecounter = 0
while os.path.exists(edffilepath % filecounter):
    filecounter += 1
edffilepath = edffilepath % filecounter
print edffilepath
getEYELINK().openDataFile(edffilepath)
pylink.flushGetkeyQueue();
getEYELINK().setOfflineMode();
#Gets the display surface and sends a mesage to EDF file;
surf = pygame.display.get_surface()
getEYELINK().sendCommand("screen_pixel_coords = 0 0 %d %d" %(surf.get_rect().w, surf.get_rect().h))
getEYELINK().sendMessage("DISPLAY_COORDS 0 0 %d %d" %(surf.get_rect().w, surf.get_rect().h))
# Query the tracker version to pick the right parser/file settings below.
tracker_software_ver = 0
eyelink_ver = getEYELINK().getTrackerVersion()
if eyelink_ver == 3:
    tvstr = getEYELINK().getTrackerVersionString()
    vindex = tvstr.find("EYELINK CL")
    tracker_software_ver = int(float(tvstr[(vindex + len("EYELINK CL")):].strip()))
if eyelink_ver>=2:
    getEYELINK().sendCommand("select_parser_configuration 0")
    if eyelink_ver == 2: #turn off scenelink camera stuff
        getEYELINK().sendCommand("scene_camera_gazemap = NO")
else:
    getEYELINK().sendCommand("saccade_velocity_threshold = 35")
    getEYELINK().sendCommand("saccade_acceleration_threshold = 9500")
# set EDF file contents
getEYELINK().sendCommand("file_event_filter = LEFT,RIGHT,FIXATION,SACCADE,BLINK,MESSAGE,BUTTON")
if tracker_software_ver>=4:
    getEYELINK().sendCommand("file_sample_data = LEFT,RIGHT,GAZE,AREA,GAZERES,STATUS,HTARGET")
else:
    getEYELINK().sendCommand("file_sample_data = LEFT,RIGHT,GAZE,AREA,GAZERES,STATUS")
# set link data (used for gaze cursor)
getEYELINK().sendCommand("link_event_filter = LEFT,RIGHT,FIXATION,SACCADE,BLINK,BUTTON")
if tracker_software_ver>=4:
    getEYELINK().sendCommand("link_sample_data = LEFT,RIGHT,GAZE,GAZERES,AREA,STATUS,HTARGET")
else:
    getEYELINK().sendCommand("link_sample_data = LEFT,RIGHT,GAZE,GAZERES,AREA,STATUS")
# pylink.setCalibrationColors( (0, 0, 0),(255, 255, 255)); #Sets the calibration target and background color
pylink.setCalibrationColors( (0, 0, 0),(140, 140, 140)); #Sets the calibration target and background color
pylink.setTargetSize(int(surf.get_rect().w/70), int(surf.get_rect().w/300)); #select best size for calibration target
pylink.setCalibrationSounds("", "", "");
pylink.setDriftCorrectSounds("", "off", "off");
out_file = "measures.npz"
if(getEYELINK().isConnected() and not getEYELINK().breakPressed()):
    # gcwindow_movie_trials.run_trials(surf)
    # Run the trials; `measures` is whatever the trial module collects
    # (opaque here -- see gcwindow_movie_trials.start_trials).
    measures = gcwindow_movie_trials.start_trials(impath, idxfile, surf,
            fixationcross=(0, 1, 1, 1), eyetrackercross=(1, 1, 1, 0))
    # Merge this run's measures into measures.npz keyed by the EDF base name,
    # preserving the measures from earlier runs already stored there.
    contents = {}
    if os.path.exists(out_file):
        f = np.load(out_file)
        for key in f.files:
            contents[key] = f[key]
    contents[edffilepath[:-4]] = measures
    np.savez(out_file, **contents)
if getEYELINK() != None:
    # File transfer and cleanup!
    getEYELINK().setOfflineMode();
    msecDelay(500);
    #Close the file and transfer it to Display PC
    getEYELINK().closeDataFile()
    getEYELINK().receiveDataFile(edffilepath, edffilepath)
    getEYELINK().close();
#Close the experiment graphics
pylink.closeGraphics()
pygame.display.quit()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""cond_v2 and gradient.
This is a version of cond that emits a single If op, as well as the gradient
function for If ops produced by cond_v2. This will eventually replace the
current tf.cond implementation once it reaches feature and performance parity.
NOTE: most users of cond_v2 should import cond_v2, not this module! This module
does not contain all the necessary imports to prevent circular dependencies,
while cond_v2 does.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import gen_functional_ops
# The following modules cannot be imported directly because they cause circular
# dependencies. These are set in each corresponding module.
_function = None
_function_def_to_graph = None
_gradients_impl = None
# NOTE(skyewm): TensorFlow uses protected class methods and fields to signify
# that they aren't part of the official public API. These protected members
# often need to be used by implementation code however. Rather than litter the
# code with pylint comments, we ignore protected access violations for
# readability.
# pylint: disable=protected-access
def cond_v2(pred, true_fn, false_fn, name="cond"):
  """Like tf.cond, except emits a single If op.

  Args:
    pred: A boolean scalar Tensor deciding which branch runs.
    true_fn: Python callable building the true-branch subgraph.
    false_fn: Python callable building the false-branch subgraph.
    name: Optional name scope for the op.

  Returns:
    A tuple of output Tensors of the If op (intermediates excluded).
  """
  if not name:
    name = "cond"
  with ops.name_scope(name) as scope:
    with ops.name_scope(None):
      # Find the outer most graph for uniquing function names.
      # TODO(jpienaar): Make this work in eager mode.
      graph = ops.get_default_graph()
      while isinstance(graph, _function.FuncGraph):
        graph = graph.outer_graph
      true_name = graph.unique_name(("%strue" % scope).replace("/", "_"))
      false_name = graph.unique_name(("%sfalse" % scope).replace("/", "_"))
    # Trace each branch callable into its own FuncGraph.
    true_graph = _function.func_graph_from_py_func(
        true_name, true_fn, [], {})
    false_graph = _function.func_graph_from_py_func(
        false_name, false_fn, [], {})
    # Both branches must produce the same number/dtypes of outputs.
    _check_same_outputs(true_graph, false_graph)
    # Add inputs to true_graph and false_graph to make them match. Note that
    # this modifies true_graph and false_graph.
    cond_inputs = _make_inputs_match(true_graph, false_graph,
                                     true_graph.external_captures,
                                     false_graph.external_captures)
    # Add all intermediate tensors as function outputs so they're available for
    # the gradient computation.
    true_intermediates = _get_intermediates(true_graph)
    false_intermediates = _get_intermediates(false_graph)
    # Save the original number of outputs to return to the caller.
    num_cond_outputs = len(true_graph.outputs)
    # Make the number/type of new intermediate outputs match.
    extra_true_outputs, extra_false_outputs = _pad_params(
        true_graph, false_graph, true_intermediates, false_intermediates)
    true_graph.outputs.extend(extra_true_outputs)
    false_graph.outputs.extend(extra_false_outputs)
    # Create the If op.
    tensors = gen_functional_ops._if(  # pylint: disable=protected-access
        pred, cond_inputs, [t.dtype for t in true_graph.outputs],
        _create_new_tf_function(true_graph),
        _create_new_tf_function(false_graph),
        name=scope)
    # Set the flag to enable lowering on the `if` op if necessary
    # Lowering allows cond_v2 to avoid some of the limitations of Functions,
    # allowing users to specify devices & colocation inside of cond_v2 branches,
    # and enabling non-strict evaluation & partial pruning of cond_v2 branches.
    # This brings cond_v2 closer to feature parity with tf.cond.
    #
    # However, we do not lower `If` in the XLA context because it is easier for
    # XLA to apply its own optimizations when dealing with un-lowered `If`
    # operators than with lowered switch/merge control flow.
    #
    # TODO(b/110167197) this approach requires cond_v2 to have at least 1 output
    if_op = tensors[0].op
    if not control_flow_util.IsInXLAContext(if_op):
      # pylint: disable=protected-access
      if_op._set_attr("_lower_using_switch_merge",
                      attr_value_pb2.AttrValue(b=True))
      # pylint: enable=protected-access
    # Drop the intermediate outputs that were only exported for gradients.
    return tuple(tensors[:num_cond_outputs])
@ops.RegisterGradient("If")
def _IfGrad(op, *grads):  # pylint: disable=invalid-name
  """The gradient of an If op produced by cond_v2.

  Args:
    op: The forward-pass If Operation.
    *grads: Gradients with respect to each output of `op`.

  Returns:
    A list of gradient Tensors: None for the predicate input, followed by the
    gradients with respect to the remaining inputs.
  """
  true_graph, false_graph = _get_func_graphs(op)
  # Note: op.graph != ops.get_default_graph() when we are computing the gradient
  # of a nested cond.
  assert true_graph.outer_graph == op.graph
  assert false_graph.outer_graph == op.graph
  # Create grad functions that compute the gradient of the true/false forward
  # graphs. These functions will capture tensors from the forward pass
  # functions.
  true_grad_graph = _create_grad_func(
      true_graph, grads, _get_grad_fn_name(true_graph))
  false_grad_graph = _create_grad_func(
      false_graph, grads, _get_grad_fn_name(false_graph))
  # Both gradient branches must agree on output dtypes for the If op below.
  assert ([t.dtype for t in true_grad_graph.outputs] ==
          [t.dtype for t in false_grad_graph.outputs])
  # Resolve references to forward graph tensors in grad graphs and ensure
  # they are in-scope, i.e., belong to one of outer graphs of the grad graph.
  true_grad_inputs = _resolve_grad_inputs(true_graph, true_grad_graph)
  false_grad_inputs = _resolve_grad_inputs(false_graph, false_grad_graph)
  # Make the inputs to true_grad_graph and false_grad_graph match. Note that
  # this modifies true_grad_graph and false_grad_graph.
  grad_inputs = _make_inputs_match(true_grad_graph, false_grad_graph,
                                   true_grad_inputs, false_grad_inputs)
  # Add all intermediate tensors as function outputs so they're available for
  # higher-order gradient computations.
  true_grad_intermediates = _get_intermediates(true_grad_graph)
  false_grad_intermediates = _get_intermediates(false_grad_graph)
  # Save the original number of gradient outputs to return.
  num_grad_outputs = len(true_grad_graph.outputs)
  # Make the number/type of new intermediate outputs match.
  extra_true_grad_outputs, extra_false_grad_outputs = _pad_params(
      true_grad_graph, false_grad_graph,
      true_grad_intermediates, false_grad_intermediates)
  true_grad_graph.outputs.extend(extra_true_grad_outputs)
  false_grad_graph.outputs.extend(extra_false_grad_outputs)
  # Create the gradient If op, driven by the same predicate as the forward op.
  tensors = gen_functional_ops._if(
      op.inputs[0], grad_inputs, [t.dtype for t in true_grad_graph.outputs],
      _create_new_tf_function(true_grad_graph),
      _create_new_tf_function(false_grad_graph))
  # The predicate has no gradient.
  return [None] + tensors[:num_grad_outputs]
def _get_func_graphs(if_op):
  """Returns `FuncGraph`s for the input op branches.

  Args:
    if_op: The _If Operation.

  Returns:
    A 2-tuple of the `FuncGraph`s of the then_branch and else_branch.
  """
  def _get_func_graph_for_branch(branch_name):
    """Generates and returns a FuncGraph for the given branch."""
    inputs = if_op.inputs[1:]  # First input is pred.
    input_shapes = [t.shape for t in inputs]
    # Look up the branch's FunctionDef by the name stored in the op attr.
    func_name = if_op.get_attr(branch_name).name
    fdef = if_op.graph._get_function(func_name).definition
    # `if_op.graph` may not be the same as `ops.get_default_graph()` e.g.
    # in the case of nested if ops or when the gradient is being computed
    # from inside a Defun. We build the `func_graph` with `if_op.graph` as its
    # `outer_graph`. This resembles how the `FuncGraph` was built in the
    # forward pass. We need this so that we can resolve references to tensors
    # in `func_graph` from its gradient graph in `_resolve_grad_inputs`.
    with if_op.graph.as_default():
      func_graph = _function_def_to_graph.function_def_to_graph(
          fdef, input_shapes)
    # Pair the op's actual inputs with the function-body placeholders so the
    # captures look the same as they did in the forward pass.
    func_graph.captures = collections.OrderedDict(zip(inputs,
                                                      func_graph.inputs))
    # Set the if op so that the gradient code can use it.
    func_graph._if = if_op
    return func_graph

  return (_get_func_graph_for_branch("then_branch"),
          _get_func_graph_for_branch("else_branch"))
def _grad_fn(func_graph, grads):
  """The gradient function for each conditional branch.

  This function builds the gradient graph of the corresponding forward-pass
  conditional branch in `func_graph`. This is done by differentiating
  func_graph's outputs w.r.t. its inputs.

  Args:
    func_graph: function.FuncGraph. The corresponding forward-pass function.
    grads: The list of input gradient Tensors, aligned with
      `func_graph.outputs`.

  Returns:
    The output gradient Tensors, aligned with `func_graph.inputs`.
  """
  # Filter out untrainable function outputs.
  # NOTE(skyewm): If we don't do this, the untrainable tensors can sometimes
  # cause _GradientsHelper to raise an exception (e.g. the implementation
  # doesn't expect 'ys' to contain boolean tensors).
  assert len(func_graph.outputs) == len(grads)
  ys = []
  grad_ys = []
  # Iterate outputs and incoming gradients in lockstep so the two filtered
  # lists stay aligned.
  for y, grad_y in zip(func_graph.outputs, grads):
    if not _gradients_impl._IsTrainable(y):
      continue
    ys.append(y)
    grad_ys.append(grad_y)
  # Build the gradient graph. Note that this builds the gradient computation of
  # func_graph in the current graph, which requires capturing tensors from
  # func_graph. The captured func_graph tensors are resolved to external tensors
  # in _resolve_grad_inputs.
  result = _gradients_impl._GradientsHelper(
      ys, func_graph.inputs, grad_ys=grad_ys,
      src_graph=func_graph)
  # Functions can't return None; replace Nones with zero tensors.
  # TODO(b/80444525): don't return anything here and make _IfGrad return None if
  # both branches have zero gradient.
  for i in range(len(result)):
    if result[i] is None:
      result[i] = array_ops.zeros_like(func_graph.inputs[i])
  return result
def _create_grad_func(func_graph, grads, name):
  """Returns the FuncGraph representation of _grad_fn."""

  def _build_branch_gradients():
    # Deferred so the gradient graph is built while the new FuncGraph is
    # being traced, exactly as the original lambda did.
    return _grad_fn(func_graph, grads)

  return _function.func_graph_from_py_func(name, _build_branch_gradients, [], {})
def _resolve_grad_inputs(cond_graph, grad_graph):
  """Returns the tensors to pass as inputs to `grad_graph`.

  The `grad_graph` may have external references to
  1. Its outer graph containing the input gradients. These references are kept
     as is.
  2. Tensors in the forward pass graph. These tensors may not be "live"
     when the gradient is being computed. We replace such references by their
     corresponding tensor in the least common ancestor graph of `grad_graph` and
     `cond_graph`. Since we export intermediate tensors for all branch
     functions, this is always possible.

  Args:
    cond_graph: function.FuncGraph. The forward-pass function.
    grad_graph: function.FuncGraph. The gradients function.

  Returns:
    A list of inputs tensors to be passed to grad_graph.
  """
  new_inputs = []
  for t in grad_graph.external_captures:
    if t.graph != grad_graph.outer_graph:
      # `t` is a tensor in `cond_graph` or one of its ancestors. We bubble this
      # tensor to the least common ancestor of the `cond_graph` and
      # `grad_graph` so that it is "in-scope" for `grad_graph`.
      # TODO(srbs): `_is_ancestor` calls may be expensive. Compute the least
      # common ancestor once and re-use.
      assert _is_ancestor(cond_graph, t.graph)
      while not _is_ancestor(grad_graph, t.graph):
        assert isinstance(t.graph, _function.FuncGraph)
        if t in t.graph.internal_captures:
          # `t` was captured by its graph: step out to the corresponding
          # external tensor one level up.
          # TODO(srbs): Consider building a map of internal_captures ->
          # external_captures instead of searching for `t` twice.
          t = t.graph.external_captures[t.graph.internal_captures.index(t)]
        else:
          # `t` was produced inside its graph: use the matching output of the
          # graph's If op instead.
          # Note: All intermediate tensors are output by the If op.
          # TODO(srbs): .index() calls may be expensive. Optimize.
          t = t.graph._if.outputs[t.graph.outputs.index(t)]
    assert _is_ancestor(grad_graph, t.graph)
    new_inputs.append(t)
  return new_inputs
def _create_new_tf_function(func_graph):
  """Converts func_graph to a TF_Function and adds it to the current graph.

  Args:
    func_graph: function.FuncGraph

  Returns:
    The name of the new TF_Function.
  """
  defined_func = _function._EagerDefinedFunction(
      func_graph.name, func_graph, func_graph.inputs, func_graph.outputs, {})
  # Register the function with the enclosing graph so ops (e.g. If) there can
  # reference it by name.
  defined_func.add_to_graph(func_graph.outer_graph)
  return func_graph.name
def _get_intermediates(func_graph):
"""Returns all tensors in `func_graph` that aren't inputs or outputs."""
intermediates = []
for op in func_graph.get_operations():
for t in op.outputs:
if t in func_graph.inputs: continue
if t in func_graph.outputs: continue
intermediates.append(t)
return intermediates
def _separate_unique_inputs(true_inputs, false_inputs):
"""Separates tensors appearing only in true_inputs or false_inputs, or both.
Args:
true_inputs: list of Tensors
false_inputs: list of Tensors
Returns:
Three lists of Tensors:
1. The tensors that appear in both true_inputs and false_inputs
2. The tensors that only appear in true_inputs
3. The tensors that only appear in false_inputs
"""
true_inputs = set(true_inputs)
false_inputs = set(false_inputs)
shared_inputs = true_inputs.intersection(false_inputs)
true_only_inputs = true_inputs - false_inputs
false_only_inputs = false_inputs - true_inputs
return list(shared_inputs), list(true_only_inputs), list(false_only_inputs)
def _pad_params(true_graph, false_graph, true_params, false_params):
  """Returns new param lists that have matching signatures.

  This is done by mirroring each param list in the other using dummy params.
  There is no merging of params.

  Args:
    true_graph: function.FuncGraph
    false_graph: function.FuncGraph
    true_params: a list of Tensors from true_graph
    false_params: a list of Tensors from false_graph

  Returns:
    A new list of Tensors in true_graph and a new list of Tensors in
    false_graph. The two lists have the same number of Tensors, with matching
    types and shapes across the lists.
  """
  # Each branch keeps its own params and mirrors the other branch with
  # dummies, so both signatures become [true params..., false params...].
  true_side_dummies = _create_dummy_params(true_graph, false_params)
  false_side_dummies = _create_dummy_params(false_graph, true_params)
  return true_params + true_side_dummies, false_side_dummies + false_params
def _make_inputs_match(true_graph, false_graph, true_inputs, false_inputs):
  """Modifies true_graph and false_graph so they have the same input signature.

  This method reorders and/or adds parameters to true_graph and false_graph so
  they have the same input signature, and updates the 'inputs' and 'captured'
  fields of both graphs accordingly. It uses the input tensors from the outer
  graph to avoid duplicating shared arguments.

  Args:
    true_graph: function.FuncGraph
    false_graph: function.FuncGraph
    true_inputs: a list of Tensors in the outer graph. The inputs for
      true_graph.
    false_inputs: a list of Tensors in the outer graph. The inputs for
      false_graph.

  Returns:
    A new list of Tensors from the outer graph that are the new inputs for both
    true_graph and false_graph. This is a deduped version of true_inputs +
    false_inputs.
  """
  shared_inputs, true_only_inputs, false_only_inputs = _separate_unique_inputs(
      true_inputs, false_inputs)
  # Unified signature: [shared..., true-only..., false-only...].
  new_inputs = shared_inputs + true_only_inputs + false_only_inputs
  # Map each outer-graph tensor to its placeholder inside each branch so the
  # existing placeholders can be reordered into the unified signature.
  true_input_to_param = dict(zip(true_inputs, true_graph.inputs))
  false_input_to_param = dict(zip(false_inputs, false_graph.inputs))
  true_graph.inputs = (
      [true_input_to_param[t] for t in shared_inputs] +
      [true_input_to_param[t] for t in true_only_inputs] +
      _create_dummy_params(true_graph, false_only_inputs))
  false_graph.inputs = (
      [false_input_to_param[t] for t in shared_inputs] +
      _create_dummy_params(false_graph, true_only_inputs) +
      [false_input_to_param[t] for t in false_only_inputs])
  # Rewrite the FuncGraphs' state to reflect the new inputs.
  true_graph.captures = collections.OrderedDict(zip(new_inputs,
                                                    true_graph.inputs))
  false_graph.captures = collections.OrderedDict(zip(new_inputs,
                                                     false_graph.inputs))
  return new_inputs
def _create_dummy_params(func_graph, template_tensors):
  """Creates tensors in func_graph to represent template_tensors.

  Args:
    func_graph: function.FuncGraph.
    template_tensors: a list of tensors in the outer graph.

  Returns:
    A list of tensors in func_graph.
  """
  dummies = []
  # FakeParams must be created inside func_graph, hence as_default().
  with func_graph.as_default():
    for template in template_tensors:
      dummies.append(
          gen_functional_ops.fake_param(dtype=template.dtype,
                                        shape=template.shape))
  return dummies
def _get_grad_fn_name(func_graph):
  """Returns a unique name to use for the grad function of `func_graph`.

  Ensures this name is unique in the entire hierarchy.

  Args:
    func_graph: The FuncGraph.

  Returns:
    A string, the name to use for the gradient function.
  """
  # Climb to the outermost (non-function) graph, which owns the name space
  # shared by the whole hierarchy, and unique the name there.
  root_graph = func_graph
  while isinstance(root_graph, _function.FuncGraph):
    root_graph = root_graph.outer_graph
  return root_graph.unique_name("%s_grad" % func_graph.name)
def _check_same_outputs(true_graph, false_graph):
"""Raises an error if true_graph and false_graph have different outputs."""
true_output_types = [t.dtype for t in true_graph.outputs]
false_output_types = [t.dtype for t in false_graph.outputs]
if (len(true_graph.outputs) != len(false_graph.outputs) or
true_output_types != false_output_types):
raise ValueError(
"true_fn() and false_fn() must return the same number and type of "
"arguments, got:\n"
" true_fn: %s\n"
" false_fn: %s" % (true_output_types, false_output_types))
def _is_ancestor(graph, maybe_ancestor):
  """Returns True if `maybe_ancestor` is `graph` or one of its outer graphs."""
  # Iterative form of the ancestor walk: climb the outer_graph links until we
  # either hit `maybe_ancestor` or step out of the FuncGraph hierarchy.
  current = graph
  while True:
    if maybe_ancestor == current:
      return True
    if not isinstance(current, _function.FuncGraph):
      return False
    current = current.outer_graph
|
|
"""Tests for Climacell weather entity."""
from __future__ import annotations
from datetime import datetime
from typing import Any
from unittest.mock import patch
import pytest
from homeassistant.components.climacell.config_flow import (
_get_config_schema,
_get_unique_id,
)
from homeassistant.components.climacell.const import (
ATTR_CLOUD_COVER,
ATTR_PRECIPITATION_TYPE,
ATTR_WIND_GUST,
ATTRIBUTION,
DOMAIN,
)
from homeassistant.components.weather import (
ATTR_CONDITION_CLOUDY,
ATTR_CONDITION_RAINY,
ATTR_CONDITION_SNOWY,
ATTR_CONDITION_SUNNY,
ATTR_FORECAST,
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION,
ATTR_FORECAST_PRECIPITATION_PROBABILITY,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
ATTR_FORECAST_WIND_BEARING,
ATTR_FORECAST_WIND_SPEED,
ATTR_WEATHER_HUMIDITY,
ATTR_WEATHER_OZONE,
ATTR_WEATHER_PRESSURE,
ATTR_WEATHER_TEMPERATURE,
ATTR_WEATHER_VISIBILITY,
ATTR_WEATHER_WIND_BEARING,
ATTR_WEATHER_WIND_SPEED,
DOMAIN as WEATHER_DOMAIN,
)
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_FRIENDLY_NAME,
PRESSURE_HPA,
SPEED_KILOMETERS_PER_HOUR,
)
from homeassistant.core import HomeAssistant, State, callback
from homeassistant.helpers.entity_registry import async_get
from homeassistant.util import dt as dt_util
from .const import API_V3_ENTRY_DATA, API_V4_ENTRY_DATA
from tests.common import MockConfigEntry
@callback
def _enable_entity(hass: HomeAssistant, entity_name: str) -> None:
    """Enable disabled entity."""
    registry = async_get(hass)
    registry_entry = registry.async_get(entity_name)
    refreshed_entry = registry.async_update_entity(
        registry_entry.entity_id, disabled_by=None
    )
    # The update must have produced a new, enabled registry entry.
    assert refreshed_entry != registry_entry
    assert refreshed_entry.disabled is False
async def _setup(hass: HomeAssistant, config: dict[str, Any]) -> State:
    """Set up entry and return entity state.

    Freezes ``utcnow`` so the forecast timestamps asserted by the tests are
    deterministic, sets up a mock ClimaCell config entry, enables the
    default-disabled hourly/nowcast entities, and returns the state of the
    daily weather entity.
    """
    with patch(
        "homeassistant.util.dt.utcnow",
        return_value=datetime(2021, 3, 6, 23, 59, 59, tzinfo=dt_util.UTC),
    ):
        data = _get_config_schema(hass)(config)
        config_entry = MockConfigEntry(
            domain=DOMAIN,
            data=data,
            unique_id=_get_unique_id(hass, data),
            version=1,
        )
        config_entry.add_to_hass(hass)
        assert await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
        # The hourly and nowcast weather entities are disabled by default;
        # enable them so all three weather entities exist.
        for entity_name in ("hourly", "nowcast"):
            _enable_entity(hass, f"weather.climacell_{entity_name}")
        await hass.async_block_till_done()
        assert len(hass.states.async_entity_ids(WEATHER_DOMAIN)) == 3
    return hass.states.get("weather.climacell_daily")
async def test_v3_weather(
    hass: HomeAssistant,
    climacell_config_entry_update: pytest.fixture,
) -> None:
    """Test v3 weather data."""
    # Pin the unit system so the converted values asserted below are stable.
    hass.config.units.wind_speed_unit = SPEED_KILOMETERS_PER_HOUR
    hass.config.units.pressure_unit = PRESSURE_HPA
    weather_state = await _setup(hass, API_V3_ENTRY_DATA)
    assert weather_state.state == ATTR_CONDITION_SUNNY
    assert weather_state.attributes[ATTR_ATTRIBUTION] == ATTRIBUTION
    # Daily forecast entries derived from the recorded v3 API fixture data.
    assert weather_state.attributes[ATTR_FORECAST] == [
        {
            ATTR_FORECAST_CONDITION: ATTR_CONDITION_SUNNY,
            ATTR_FORECAST_TIME: "2021-03-07T00:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 0,
            ATTR_FORECAST_TEMP: 7,
            ATTR_FORECAST_TEMP_LOW: -5,
        },
        {
            ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
            ATTR_FORECAST_TIME: "2021-03-08T00:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 0,
            ATTR_FORECAST_TEMP: 10,
            ATTR_FORECAST_TEMP_LOW: -4,
        },
        {
            ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
            ATTR_FORECAST_TIME: "2021-03-09T00:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 0,
            ATTR_FORECAST_TEMP: 19,
            ATTR_FORECAST_TEMP_LOW: 0,
        },
        {
            ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
            ATTR_FORECAST_TIME: "2021-03-10T00:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 0,
            ATTR_FORECAST_TEMP: 18,
            ATTR_FORECAST_TEMP_LOW: 3,
        },
        {
            ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
            ATTR_FORECAST_TIME: "2021-03-11T00:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 5,
            ATTR_FORECAST_TEMP: 20,
            ATTR_FORECAST_TEMP_LOW: 9,
        },
        {
            ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
            ATTR_FORECAST_TIME: "2021-03-12T00:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0.0457, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 25,
            ATTR_FORECAST_TEMP: 20,
            ATTR_FORECAST_TEMP_LOW: 12,
        },
        {
            ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
            ATTR_FORECAST_TIME: "2021-03-13T00:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 25,
            ATTR_FORECAST_TEMP: 16,
            ATTR_FORECAST_TEMP_LOW: 7,
        },
        {
            ATTR_FORECAST_CONDITION: ATTR_CONDITION_RAINY,
            ATTR_FORECAST_TIME: "2021-03-14T00:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(1.0744, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 75,
            ATTR_FORECAST_TEMP: 6,
            ATTR_FORECAST_TEMP_LOW: 3,
        },
        {
            ATTR_FORECAST_CONDITION: ATTR_CONDITION_SNOWY,
            ATTR_FORECAST_TIME: "2021-03-15T00:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(7.3050, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 95,
            ATTR_FORECAST_TEMP: 1,
            ATTR_FORECAST_TEMP_LOW: 0,
        },
        {
            ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
            ATTR_FORECAST_TIME: "2021-03-16T00:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0.0051, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 5,
            ATTR_FORECAST_TEMP: 6,
            ATTR_FORECAST_TEMP_LOW: -2,
        },
        {
            ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
            ATTR_FORECAST_TIME: "2021-03-17T00:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 0,
            ATTR_FORECAST_TEMP: 11,
            ATTR_FORECAST_TEMP_LOW: 1,
        },
        {
            ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
            ATTR_FORECAST_TIME: "2021-03-18T00:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 5,
            ATTR_FORECAST_TEMP: 12,
            ATTR_FORECAST_TEMP_LOW: 6,
        },
        {
            ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
            ATTR_FORECAST_TIME: "2021-03-19T00:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0.1778, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 45,
            ATTR_FORECAST_TEMP: 9,
            ATTR_FORECAST_TEMP_LOW: 5,
        },
        {
            ATTR_FORECAST_CONDITION: ATTR_CONDITION_RAINY,
            ATTR_FORECAST_TIME: "2021-03-20T00:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(1.2319, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 55,
            ATTR_FORECAST_TEMP: 5,
            ATTR_FORECAST_TEMP_LOW: 3,
        },
        {
            ATTR_FORECAST_CONDITION: ATTR_CONDITION_CLOUDY,
            ATTR_FORECAST_TIME: "2021-03-21T00:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0.0432, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 20,
            ATTR_FORECAST_TEMP: 7,
            ATTR_FORECAST_TEMP_LOW: 1,
        },
    ]
    # Current-conditions attributes from the same fixture.
    assert weather_state.attributes[ATTR_FRIENDLY_NAME] == "ClimaCell - Daily"
    assert weather_state.attributes[ATTR_WEATHER_HUMIDITY] == 24
    assert weather_state.attributes[ATTR_WEATHER_OZONE] == 52.625
    assert weather_state.attributes[ATTR_WEATHER_PRESSURE] == 1028.12
    assert weather_state.attributes[ATTR_WEATHER_TEMPERATURE] == 7
    assert weather_state.attributes[ATTR_WEATHER_VISIBILITY] == 9.99
    assert weather_state.attributes[ATTR_WEATHER_WIND_BEARING] == 320.31
    assert weather_state.attributes[ATTR_WEATHER_WIND_SPEED] == 14.63
    assert weather_state.attributes[ATTR_CLOUD_COVER] == 100
    assert weather_state.attributes[ATTR_WIND_GUST] == 24.0758
    assert weather_state.attributes[ATTR_PRECIPITATION_TYPE] == "rain"
async def test_v4_weather(
    hass: HomeAssistant,
    # NOTE(review): 'pytest.fixture' is not a real type — presumably this
    # annotation just documents that the argument is a fixture; confirm.
    climacell_config_entry_update: pytest.fixture,
) -> None:
    """Test v4 weather data.

    Sets metric-style display units, loads the recorded v4 API fixture via
    _setup(), and asserts the resulting weather entity state, daily forecast
    list, and current-condition attributes byte-for-byte.
    """
    # Pin the unit system so the literal expected values below are stable.
    hass.config.units.wind_speed_unit = SPEED_KILOMETERS_PER_HOUR
    hass.config.units.pressure_unit = PRESSURE_HPA
    weather_state = await _setup(hass, API_V4_ENTRY_DATA)
    assert weather_state.state == ATTR_CONDITION_SUNNY
    assert weather_state.attributes[ATTR_ATTRIBUTION] == ATTRIBUTION
    # Daily forecast: one entry per day from the recorded v4 fixture.
    # NOTE(review): the first entry uses the ATTR_CONDITION_SUNNY constant
    # while later entries use bare strings ("cloudy", ...) — values are
    # equal at runtime, but the style is inconsistent; consider unifying.
    assert weather_state.attributes[ATTR_FORECAST] == [
        {
            ATTR_FORECAST_CONDITION: ATTR_CONDITION_SUNNY,
            ATTR_FORECAST_TIME: "2021-03-07T11:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 0,
            ATTR_FORECAST_TEMP: 8,
            ATTR_FORECAST_TEMP_LOW: -3,
            ATTR_FORECAST_WIND_BEARING: 239.6,
            ATTR_FORECAST_WIND_SPEED: pytest.approx(15.2727, abs=0.01),
        },
        {
            ATTR_FORECAST_CONDITION: "cloudy",
            ATTR_FORECAST_TIME: "2021-03-08T11:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 0,
            ATTR_FORECAST_TEMP: 10,
            ATTR_FORECAST_TEMP_LOW: -3,
            ATTR_FORECAST_WIND_BEARING: 262.82,
            ATTR_FORECAST_WIND_SPEED: pytest.approx(11.6517, abs=0.01),
        },
        {
            ATTR_FORECAST_CONDITION: "cloudy",
            ATTR_FORECAST_TIME: "2021-03-09T11:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 0,
            ATTR_FORECAST_TEMP: 19,
            ATTR_FORECAST_TEMP_LOW: 0,
            ATTR_FORECAST_WIND_BEARING: 229.3,
            ATTR_FORECAST_WIND_SPEED: pytest.approx(11.3459, abs=0.01),
        },
        {
            ATTR_FORECAST_CONDITION: "cloudy",
            ATTR_FORECAST_TIME: "2021-03-10T11:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 0,
            ATTR_FORECAST_TEMP: 18,
            ATTR_FORECAST_TEMP_LOW: 3,
            ATTR_FORECAST_WIND_BEARING: 149.91,
            ATTR_FORECAST_WIND_SPEED: pytest.approx(17.1234, abs=0.01),
        },
        {
            ATTR_FORECAST_CONDITION: "cloudy",
            ATTR_FORECAST_TIME: "2021-03-11T11:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 0,
            ATTR_FORECAST_TEMP: 19,
            ATTR_FORECAST_TEMP_LOW: 9,
            ATTR_FORECAST_WIND_BEARING: 210.45,
            ATTR_FORECAST_WIND_SPEED: pytest.approx(25.2506, abs=0.01),
        },
        {
            ATTR_FORECAST_CONDITION: "rainy",
            ATTR_FORECAST_TIME: "2021-03-12T11:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0.1219, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 25,
            ATTR_FORECAST_TEMP: 20,
            ATTR_FORECAST_TEMP_LOW: 12,
            ATTR_FORECAST_WIND_BEARING: 217.98,
            ATTR_FORECAST_WIND_SPEED: pytest.approx(19.7949, abs=0.01),
        },
        {
            ATTR_FORECAST_CONDITION: "cloudy",
            ATTR_FORECAST_TIME: "2021-03-13T11:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 25,
            ATTR_FORECAST_TEMP: 12,
            ATTR_FORECAST_TEMP_LOW: 6,
            ATTR_FORECAST_WIND_BEARING: 58.79,
            ATTR_FORECAST_WIND_SPEED: pytest.approx(15.6428, abs=0.01),
        },
        # NOTE: forecast times switch from 11:00 to 10:00 UTC here,
        # presumably a DST transition in the recorded fixture — confirm.
        {
            ATTR_FORECAST_CONDITION: "snowy",
            ATTR_FORECAST_TIME: "2021-03-14T10:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(23.9573, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 95,
            ATTR_FORECAST_TEMP: 6,
            ATTR_FORECAST_TEMP_LOW: 1,
            ATTR_FORECAST_WIND_BEARING: 70.25,
            ATTR_FORECAST_WIND_SPEED: pytest.approx(26.1518, abs=0.01),
        },
        {
            ATTR_FORECAST_CONDITION: "snowy",
            ATTR_FORECAST_TIME: "2021-03-15T10:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(1.4630, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 55,
            ATTR_FORECAST_TEMP: 6,
            ATTR_FORECAST_TEMP_LOW: -1,
            ATTR_FORECAST_WIND_BEARING: 84.47,
            ATTR_FORECAST_WIND_SPEED: pytest.approx(25.5725, abs=0.01),
        },
        {
            ATTR_FORECAST_CONDITION: "cloudy",
            ATTR_FORECAST_TIME: "2021-03-16T10:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 0,
            ATTR_FORECAST_TEMP: 6,
            ATTR_FORECAST_TEMP_LOW: -2,
            ATTR_FORECAST_WIND_BEARING: 103.85,
            ATTR_FORECAST_WIND_SPEED: pytest.approx(10.7987, abs=0.01),
        },
        {
            ATTR_FORECAST_CONDITION: "cloudy",
            ATTR_FORECAST_TIME: "2021-03-17T10:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 0,
            ATTR_FORECAST_TEMP: 11,
            ATTR_FORECAST_TEMP_LOW: 1,
            ATTR_FORECAST_WIND_BEARING: 145.41,
            ATTR_FORECAST_WIND_SPEED: pytest.approx(11.6999, abs=0.01),
        },
        {
            ATTR_FORECAST_CONDITION: "cloudy",
            ATTR_FORECAST_TIME: "2021-03-18T10:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(0, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 10,
            ATTR_FORECAST_TEMP: 12,
            ATTR_FORECAST_TEMP_LOW: 5,
            ATTR_FORECAST_WIND_BEARING: 62.99,
            ATTR_FORECAST_WIND_SPEED: pytest.approx(10.5895, abs=0.01),
        },
        {
            ATTR_FORECAST_CONDITION: "rainy",
            ATTR_FORECAST_TIME: "2021-03-19T10:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(2.9261, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 55,
            ATTR_FORECAST_TEMP: 9,
            ATTR_FORECAST_TEMP_LOW: 4,
            ATTR_FORECAST_WIND_BEARING: 68.54,
            ATTR_FORECAST_WIND_SPEED: pytest.approx(22.3860, abs=0.01),
        },
        {
            ATTR_FORECAST_CONDITION: "snowy",
            ATTR_FORECAST_TIME: "2021-03-20T10:00:00+00:00",
            ATTR_FORECAST_PRECIPITATION: pytest.approx(1.2192, abs=0.01),
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: 33.3,
            ATTR_FORECAST_TEMP: 5,
            ATTR_FORECAST_TEMP_LOW: 2,
            ATTR_FORECAST_WIND_BEARING: 56.98,
            ATTR_FORECAST_WIND_SPEED: pytest.approx(27.9221, abs=0.01),
        },
    ]
    # Current conditions reported by the entity.
    assert weather_state.attributes[ATTR_FRIENDLY_NAME] == "ClimaCell - Daily"
    assert weather_state.attributes[ATTR_WEATHER_HUMIDITY] == 23
    assert weather_state.attributes[ATTR_WEATHER_OZONE] == 46.53
    assert weather_state.attributes[ATTR_WEATHER_PRESSURE] == 1027.77
    assert weather_state.attributes[ATTR_WEATHER_TEMPERATURE] == 7
    assert weather_state.attributes[ATTR_WEATHER_VISIBILITY] == 13.12
    assert weather_state.attributes[ATTR_WEATHER_WIND_BEARING] == 315.14
    assert weather_state.attributes[ATTR_WEATHER_WIND_SPEED] == 15.02
    assert weather_state.attributes[ATTR_CLOUD_COVER] == 100
    assert weather_state.attributes[ATTR_WIND_GUST] == 20.3421
    assert weather_state.attributes[ATTR_PRECIPITATION_TYPE] == "rain"
|
|
__author__ = 'linus'
def create_query(search_dict):
    """Build the filter part (``WHERE ... GROUP BY ...``) of the search query.

    Args:
        search_dict: Dict of search criteria. Optional ``*_input`` entries hold
            ';'-separated values; each is paired with a ``*_logic`` entry
            ("AND"/"OR") that joins the individual values. Supported groups:
            peptide sequence, MS run, source name, organ, tissue, dignity,
            researcher, source HLA typing, protein accession, and the
            netMHC / SYFPEITHI prediction filters. Required entries:
            "ionscore_input", "q_value_input", "aa_length_start",
            "aa_length_end".

    Returns:
        str: SQL fragment starting with "WHERE" and ending with
        "GROUP BY filename, sequence".

    NOTE(security): values are spliced into the SQL string verbatim, so this
    builder offers no injection protection. Callers must ensure the values
    are trusted, or this module should be migrated to parameterized queries.
    """
    filter_string = "WHERE"
    # True until the first filter group has been emitted; subsequent groups
    # are joined with a leading "AND".
    first = True

    def _append_like_group(query, is_first, input_key, column, logic_key):
        """Append "( <column> LIKE 'v1' <logic>  <column> LIKE 'v2' ... )"."""
        query += " ( " if is_first else " AND ( "
        values = search_dict[input_key].split(";")
        for i, value in enumerate(values):
            query += " " + column + " LIKE '" + value.strip() + "' "
            if i != len(values) - 1:  # the last value carries no joiner
                query += search_dict[logic_key]
        return query + ") "

    def _append_prediction_group(query, is_first, table, info_key, cmp_key,
                                 input_key, logic_key):
        """Append "( <table>.<allele>_affinity<cmp><value> <logic> ... )"."""
        query += " ( " if is_first else " AND ( "
        # Allele names such as "A*02:01" become column-safe "A_02_01".
        alleles = (search_dict[info_key]
                   .replace("*", "_").replace(":", "_").split(";"))
        for i, allele in enumerate(alleles):
            clause = (table + "." + allele + "_affinity"
                      + search_dict[cmp_key] + search_dict[input_key])
            if i != len(alleles) - 1:
                query += " " + clause + " " + search_dict[logic_key] + " "
            else:
                query += clause + " "
        return query + ") "

    # Simple LIKE-based groups, emitted in the original order.
    # (The last entry was previously mislabeled "# Dignity" — it filters on
    # the researcher's last name.)
    for input_key, column, logic_key in (
        ("sequence_input", "sequence", "sequence_logic"),      # peptide
        ("run_name_input", "filename", "run_name_logic"),      # MS run
        ("source_name_input", "name", "source_name_logic"),    # source name
        ("organ_input", "organ", "organ_logic"),               # organ
        ("tissue_input", "tissue", "tissue_logic"),            # tissue
        ("dignity_input", "dignity", "dignity_logic"),         # dignity
        ("researcher_input", "lastname", "researcher_logic"),  # researcher
    ):
        if input_key in search_dict:
            filter_string = _append_like_group(
                filter_string, first, input_key, column, logic_key)
            first = False

    # Source HLA typing: each entry is "group[:protein[:coding[:noncoding]]]",
    # e.g. "A:02:01"; a trailing letter on the last field is the expression
    # suffix.
    if "source_hla_typing_input" in search_dict:
        source_hla_typing_input = search_dict["source_hla_typing_input"].split(";")
        filter_string += " ( " if first else " AND ( "
        first = False
        for i, source_hla_typing in enumerate(source_hla_typing_input):
            hla_type = source_hla_typing.strip().split(':')
            # Basic gene group (2 digits)
            hla_query = "( gene_group = '" + hla_type[0].strip() + "' "
            # Protein (4 digits)
            if len(hla_type) > 1:
                hla_query += " AND " + " specific_protein = " + str(int(hla_type[1].strip()))
            # DNA coding (6 digits)
            if len(hla_type) > 2:
                hla_query += " AND " + " dna_coding = " + str(int(hla_type[2].strip()))
            # DNA non-coding (8 digits), optionally with an expression suffix
            if len(hla_type) > 3:
                if hla_type[3].strip()[-1].isalpha():
                    hla_query += (" AND " + " dna_noncoding = "
                                  + str(int(hla_type[3].strip()[:-1]))
                                  + " AND expression_suffix == '"
                                  + hla_type[3].strip()[-1] + "' ")
                else:
                    hla_query += " AND " + " dna_noncoding = " + str(int(hla_type[3].strip()))
            # Multiple typings are combined with the user-chosen logic.
            if i != len(source_hla_typing_input) - 1:
                filter_string += hla_query + ") " + search_dict["source_hla_typing_logic"]
            else:
                filter_string += hla_query + ") "
        filter_string += ") "

    # Protein accession (emitted after the HLA group, as before).
    if "protein_input" in search_dict:
        filter_string = _append_like_group(
            filter_string, first, "protein_input", "uniprot_accession_pm",
            "protein_logic")
        first = False

    # netMHC 3.4 prediction filter
    if "netMHC_input" in search_dict:
        filter_string = _append_prediction_group(
            filter_string, first, "Prediction_mapping.netMHC_3_4",
            "netMHC_information", "netMHC_comparison", "netMHC_input",
            "netMHC_logic")
        first = False

    # SYFPEITHI prediction filter
    if "syfpeithi_input" in search_dict:
        filter_string = _append_prediction_group(
            filter_string, first, "Prediction_mapping.syfpeithi_170414",
            "syfpeithi_information", "syfpeithi_comparison", "syfpeithi_input",
            "syfpeithi_logic")
        first = False

    # Standard filters.  BUGFIX: the original always prefixed " AND", which
    # produced invalid SQL ("WHERE AND ionscore ...") whenever no optional
    # filter group was present.
    filter_string += (" ionscore >= " if first else " AND ionscore >= ") \
        + str(search_dict["ionscore_input"]) \
        + " AND q_value <= " + str(search_dict["q_value_input"]) \
        + " AND LENGTH(sequence) BETWEEN " + str(search_dict["aa_length_start"]) \
        + " AND " + str(search_dict["aa_length_end"]) \
        + " GROUP BY filename, sequence"
    return filter_string
|
|
# Copyright 2021 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test for Kconfig checker"""
import contextlib
import io
import os
import pathlib
import re
import sys
import tempfile
import unittest
import kconfig_check
# Prefix that we strip from each Kconfig option, when considering whether it is
# equivalent to a CONFIG option with the same name (e.g. the Kconfig option
# PLATFORM_EC_FOO is treated as equivalent to the ad-hoc CONFIG_FOO).
PREFIX = 'PLATFORM_EC_'
@contextlib.contextmanager
def capture_sys_output():
    """Temporarily redirect stdout/stderr into in-memory buffers.

    Use this to suppress (and inspect) output produced inside the block:

        with capture_sys_output() as (stdout, stderr):
            ...do something...

    Yields:
        Tuple of two io.StringIO objects receiving everything written to
        sys.stdout and sys.stderr while the context is active; the real
        streams are restored on exit, even if an exception is raised.
    """
    saved_streams = sys.stdout, sys.stderr
    out_buffer = io.StringIO()
    err_buffer = io.StringIO()
    sys.stdout, sys.stderr = out_buffer, err_buffer
    try:
        yield out_buffer, err_buffer
    finally:
        sys.stdout, sys.stderr = saved_streams
# Use unittest since it produced less verbose output than pytest and can be run
# directly from Python. You can still run this test with 'pytest' if you like.
class KconfigCheck(unittest.TestCase):
    """Tests for the KconfigCheck class"""

    def test_simple_check(self):
        """Check it detected a new ad-hoc CONFIG"""
        checker = kconfig_check.KconfigCheck()
        # NEW_ONE is neither declared in a Kconfig nor in the allowed list,
        # so it is the only option that should be reported.
        self.assertEqual(['NEW_ONE'], checker.find_new_adhoc(
            configs=['NEW_ONE', 'OLD_ONE', 'IN_KCONFIG'],
            kconfigs=['IN_KCONFIG'],
            allowed=['OLD_ONE']))

    def test_sorted_check(self):
        """Check it sorts the results in order"""
        checker = kconfig_check.KconfigCheck()
        self.assertSequenceEqual(
            ['ANOTHER_NEW_ONE', 'NEW_ONE'],
            checker.find_new_adhoc(
                configs=['NEW_ONE', 'ANOTHER_NEW_ONE', 'OLD_ONE', 'IN_KCONFIG'],
                kconfigs=['IN_KCONFIG'],
                allowed=['OLD_ONE']))

    def check_read_configs(self, use_defines):
        """Helper to check KconfigCheck.read_configs() on a generated file

        Args:
            use_defines: True to write the options as '#define CONFIG_X ...'
                lines (header style), False to write 'CONFIG_X=...' lines
        """
        checker = kconfig_check.KconfigCheck()
        with tempfile.NamedTemporaryFile() as configs:
            with open(configs.name, 'w') as out:
                prefix = '#define ' if use_defines else ''
                suffix = ' ' if use_defines else '='
                # Lines inside the literal are deliberately unindented: they
                # are written verbatim to the temporary config file.
                out.write(f'''{prefix}CONFIG_OLD_ONE{suffix}y
{prefix}NOT_A_CONFIG{suffix}
{prefix}CONFIG_STRING{suffix}"something"
{prefix}CONFIG_INT{suffix}123
{prefix}CONFIG_HEX{suffix}45ab
''')
            # NOT_A_CONFIG lacks the CONFIG_ prefix and must be skipped.
            self.assertEqual(['OLD_ONE', 'STRING', 'INT', 'HEX'],
                             checker.read_configs(configs.name, use_defines))

    def test_read_configs(self):
        """Test KconfigCheck.read_configs()"""
        self.check_read_configs(False)

    def test_read_configs_defines(self):
        """Test KconfigCheck.read_configs() containing #defines"""
        self.check_read_configs(True)

    @classmethod
    def setup_srctree(cls, srctree):
        """Set up some Kconfig files in a directory and subdirs

        Args:
            srctree: Directory to write to
        """
        with open(os.path.join(srctree, 'Kconfig'), 'w') as out:
            out.write(f'''config {PREFIX}MY_KCONFIG
\tbool "my kconfig"
rsource "subdir/Kconfig.wibble"
''')
        subdir = os.path.join(srctree, 'subdir')
        os.mkdir(subdir)
        with open(os.path.join(subdir, 'Kconfig.wibble'), 'w') as out:
            out.write('menuconfig %sMENU_KCONFIG\n' % PREFIX)
        # Add a directory which should be ignored
        bad_subdir = os.path.join(subdir, 'Kconfig')
        os.mkdir(bad_subdir)
        with open(os.path.join(bad_subdir, 'Kconfig.bad'), 'w') as out:
            out.write('menuconfig %sBAD_KCONFIG' % PREFIX)

    def test_find_kconfigs(self):
        """Test KconfigCheck.find_kconfigs()"""
        checker = kconfig_check.KconfigCheck()
        with tempfile.TemporaryDirectory() as srctree:
            self.setup_srctree(srctree)
            files = checker.find_kconfigs(srctree)
            # Strip the (random) temporary-directory prefix for comparison.
            fnames = [fname[len(srctree):] for fname in files]
            # Kconfig.bad lives under a directory named 'Kconfig' and must
            # not appear in the results.
            self.assertEqual(['/Kconfig', '/subdir/Kconfig.wibble'], fnames)

    def test_scan_kconfigs(self):
        """Test KconfigCheck.scan_configs()"""
        checker = kconfig_check.KconfigCheck()
        with tempfile.TemporaryDirectory() as srctree:
            self.setup_srctree(srctree)
            self.assertEqual(['MENU_KCONFIG', 'MY_KCONFIG'],
                             checker.scan_kconfigs(srctree, PREFIX))

    @classmethod
    def setup_allowed_and_configs(cls, allowed_fname, configs_fname,
                                  add_new_one=True):
        """Set up the 'allowed' and 'configs' files for tests

        Args:
            allowed_fname: Filename to write allowed CONFIGs to
            configs_fname: Filename to which CONFIGs to check should be written
            add_new_one: True to add CONFIG_NEW_ONE to the configs_fname file
        """
        with open(allowed_fname, 'w') as out:
            out.write('CONFIG_OLD_ONE\n')
            out.write('CONFIG_MENU_KCONFIG\n')
        with open(configs_fname, 'w') as out:
            to_add = ['CONFIG_OLD_ONE', 'CONFIG_MY_KCONFIG']
            if add_new_one:
                to_add.append('CONFIG_NEW_ONE')
            out.write('\n'.join(to_add))

    def test_check_adhoc_configs(self):
        """Test KconfigCheck.check_adhoc_configs()"""
        checker = kconfig_check.KconfigCheck()
        with tempfile.TemporaryDirectory() as srctree:
            self.setup_srctree(srctree)
            with tempfile.NamedTemporaryFile() as allowed:
                with tempfile.NamedTemporaryFile() as configs:
                    self.setup_allowed_and_configs(allowed.name, configs.name)
                    new_adhoc, unneeded_adhoc, updated_adhoc = (
                        checker.check_adhoc_configs(
                            configs.name, srctree, allowed.name, PREFIX))
                    self.assertEqual(['NEW_ONE'], new_adhoc)
                    self.assertEqual(['MENU_KCONFIG'], unneeded_adhoc)
                    self.assertEqual(['OLD_ONE'], updated_adhoc)

    def test_check(self):
        """Test running the 'check' subcommand"""
        with capture_sys_output() as (stdout, stderr):
            with tempfile.TemporaryDirectory() as srctree:
                self.setup_srctree(srctree)
                with tempfile.NamedTemporaryFile() as allowed:
                    with tempfile.NamedTemporaryFile() as configs:
                        self.setup_allowed_and_configs(allowed.name,
                                                       configs.name)
                        ret_code = kconfig_check.main(
                            ['-c', configs.name, '-s', srctree,
                             '-a', allowed.name, '-p', PREFIX, 'check'])
        # The new ad-hoc CONFIG should cause a failure, reported on stderr.
        self.assertEqual(1, ret_code)
        self.assertEqual('', stdout.getvalue())
        found = re.findall('(CONFIG_.*)', stderr.getvalue())
        self.assertEqual(['CONFIG_NEW_ONE'], found)

    def test_real_kconfig(self):
        """Same Kconfig should be returned for kconfiglib / adhoc"""
        if not kconfig_check.USE_KCONFIGLIB:
            self.skipTest('No kconfiglib available')
        zephyr_path = pathlib.Path('../../third_party/zephyr/main').resolve()
        if not zephyr_path.exists():
            self.skipTest('No zephyr tree available')
        checker = kconfig_check.KconfigCheck()
        srcdir = 'zephyr'
        search_paths = [zephyr_path]
        kc_version = checker.scan_kconfigs(
            srcdir, search_paths=search_paths, try_kconfiglib=True)
        adhoc_version = checker.scan_kconfigs(srcdir, try_kconfiglib=False)
        # List of things missing from the Kconfig
        missing = sorted(list(set(adhoc_version) - set(kc_version)))
        # The Kconfig is disjoint in some places, e.g. the boards have their
        # own Kconfig files which are not included from the main Kconfig
        missing = [item for item in missing
                   if not item.startswith('BOARD') and
                   not item.startswith('VARIANT')]
        # Similarly, some other items are defined in files that are not included
        # in all cases, only for particular values of $(ARCH)
        self.assertEqual(
            ['FLASH_LOAD_OFFSET', 'NPCX_HEADER', 'SYS_CLOCK_HW_CYCLES_PER_SEC'],
            missing)

    def test_check_unneeded(self):
        """Test running the 'check' subcommand with unneeded ad-hoc configs"""
        with capture_sys_output() as (stdout, stderr):
            with tempfile.TemporaryDirectory() as srctree:
                self.setup_srctree(srctree)
                with tempfile.NamedTemporaryFile() as allowed:
                    with tempfile.NamedTemporaryFile() as configs:
                        self.setup_allowed_and_configs(allowed.name,
                                                       configs.name, False)
                        ret_code = kconfig_check.main(
                            ['-c', configs.name, '-s', srctree,
                             '-a', allowed.name, '-p', PREFIX, 'check'])
        # Unneeded ad-hoc configs are reported on stdout, not stderr.
        self.assertEqual(1, ret_code)
        self.assertEqual('', stderr.getvalue())
        found = re.findall('(CONFIG_.*)', stdout.getvalue())
        self.assertEqual(['CONFIG_MENU_KCONFIG'], found)
        # The updated allowed-list file should have been written out.
        allowed = kconfig_check.NEW_ALLOWED_FNAME.read_text().splitlines()
        self.assertEqual(['CONFIG_OLD_ONE'], allowed)
# Allow running this test module directly (it also works under pytest).
if __name__ == '__main__':
    unittest.main()
|
|
import logging
import os
import re
from textwrap import dedent
import pytest
from ufo2ft.errors import InvalidFeaturesData
from ufo2ft.featureCompiler import parseLayoutFeatures
from ufo2ft.featureWriters import ast
from ufo2ft.featureWriters.markFeatureWriter import (
MarkFeatureWriter,
NamedAnchor,
parseAnchorName,
)
from . import FeatureWriterTest
@pytest.fixture
def testufo(FontClass):
    """Font with a base glyph 'a', a ligature 'f_i', and two combining marks.

    'acutecomb' is a pure mark; 'tildecomb' carries both a mark anchor
    (_top) and a base anchor (top) so it can take marks itself.
    """
    font = FontClass()
    base = font.newGlyph("a")
    base.appendAnchor({"name": "top", "x": 100, "y": 200})
    ligature = font.newGlyph("f_i")
    # One numbered anchor per ligature component.
    for anchor_name, x_pos in (("top_1", 100), ("top_2", 600)):
        ligature.appendAnchor({"name": anchor_name, "x": x_pos, "y": 500})
    acute = font.newGlyph("acutecomb")
    acute.appendAnchor({"name": "_top", "x": 100, "y": 200})
    tilde = font.newGlyph("tildecomb")
    tilde.appendAnchor({"name": "_top", "x": 100, "y": 200})
    tilde.appendAnchor({"name": "top", "x": 100, "y": 300})
    return font
@pytest.mark.parametrize(
    "input_expected",
    [
        ("top", (False, "top", None)),
        ("top_", (False, "top_", None)),
        ("top1", (False, "top1", None)),
        ("_bottom", (True, "bottom", None)),
        ("bottom_2", (False, "bottom", 2)),
        ("top_right_1", (False, "top_right", 1)),
    ],
)
def test_parseAnchorName(input_expected):
    """parseAnchorName splits a name into (isMark, key, ligature index)."""
    anchor_name, expected_triple = input_expected
    assert parseAnchorName(anchor_name) == expected_triple
def test_parseAnchorName_invalid():
    """Malformed anchor names are rejected with a descriptive ValueError."""
    invalid_cases = (
        ("_top_2", "mark anchor cannot be numbered"),
        ("_", "mark anchor key is nil"),
    )
    for bad_name, expected_message in invalid_cases:
        with pytest.raises(ValueError, match=expected_message):
            parseAnchorName(bad_name)
def test_NamedAnchor_invalid():
    """Ligature anchor indexes are 1-based; index 0 must be rejected."""
    expected_message = "indexes must start from 1"
    with pytest.raises(ValueError, match=expected_message):
        NamedAnchor("top_0", 1, 2)
def test_NamedAnchor_repr():
    """repr() shows the class name with the anchor's name and coordinates."""
    anchor = NamedAnchor("top", 1.0, 2.0)
    assert repr(anchor) == "NamedAnchor(name='top', x=1.0, y=2.0)"
class MarkFeatureWriterTest(FeatureWriterTest):
FeatureWriter = MarkFeatureWriter
def test__makeMarkClassDefinitions_empty(self, FontClass):
ufo = FontClass()
ufo.newGlyph("a").appendAnchor({"name": "top", "x": 250, "y": 500})
ufo.newGlyph("c").appendAnchor({"name": "bottom", "x": 250, "y": -100})
ufo.newGlyph("grave").appendAnchor({"name": "_top", "x": 100, "y": 200})
ufo.newGlyph("cedilla").appendAnchor({"name": "_bottom", "x": 100, "y": 0})
writer = MarkFeatureWriter()
feaFile = ast.FeatureFile()
writer.setContext(ufo, feaFile)
markClassDefs = writer._makeMarkClassDefinitions()
assert len(feaFile.markClasses) == 2
assert [str(mcd) for mcd in markClassDefs] == [
"markClass cedilla <anchor 100 0> @MC_bottom;",
"markClass grave <anchor 100 200> @MC_top;",
]
def test__makeMarkClassDefinitions_non_empty(self, FontClass):
ufo = FontClass()
ufo.newGlyph("a").appendAnchor({"name": "top", "x": 250, "y": 500})
ufo.newGlyph("c").appendAnchor({"name": "bottom", "x": 250, "y": -100})
ufo.newGlyph("grave").appendAnchor({"name": "_top", "x": 100, "y": 200})
ufo.newGlyph("cedilla").appendAnchor({"name": "_bottom", "x": 100, "y": 0})
ufo.features.text = dedent(
"""\
markClass cedilla <anchor 200 0> @MC_bottom;
markClass grave <anchor 100 200> @MC_top;
"""
)
writer = MarkFeatureWriter()
feaFile = parseLayoutFeatures(ufo)
writer.setContext(ufo, feaFile)
markClassDefs = writer._makeMarkClassDefinitions()
assert len(markClassDefs) == 1
assert len(feaFile.markClasses) == 3
assert "MC_bottom" in feaFile.markClasses
assert "MC_top" in feaFile.markClasses
assert [str(mcd) for mcd in markClassDefs] == [
"markClass cedilla <anchor 100 0> @MC_bottom_1;"
]
def test_skip_empty_feature(self, FontClass):
ufo = FontClass()
assert not self.writeFeatures(ufo)
ufo.newGlyph("a").appendAnchor({"name": "top", "x": 100, "y": 200})
ufo.newGlyph("acutecomb").appendAnchor({"name": "_top", "x": 100, "y": 200})
fea = str(self.writeFeatures(ufo))
assert "feature mark" in fea
assert "feature mkmk" not in fea
def test_skip_unnamed_anchors(self, FontClass, caplog):
caplog.set_level(logging.ERROR)
ufo = FontClass()
ufo.newGlyph("a").appendAnchor({"x": 100, "y": 200})
writer = MarkFeatureWriter()
feaFile = ast.FeatureFile()
logger = "ufo2ft.featureWriters.markFeatureWriter.MarkFeatureWriter"
with caplog.at_level(logging.WARNING, logger=logger):
writer.setContext(ufo, feaFile)
assert len(caplog.records) == 1
assert "unnamed anchor discarded in glyph 'a'" in caplog.text
def test_warn_duplicate_anchor_names(self, FontClass, caplog):
caplog.set_level(logging.ERROR)
ufo = FontClass()
ufo.newGlyph("a").anchors = [
{"name": "top", "x": 100, "y": 200},
{"name": "top", "x": 200, "y": 300},
]
writer = MarkFeatureWriter()
feaFile = ast.FeatureFile()
logger = "ufo2ft.featureWriters.markFeatureWriter.MarkFeatureWriter"
with caplog.at_level(logging.WARNING, logger=logger):
writer.setContext(ufo, feaFile)
assert len(caplog.records) == 1
assert "duplicate anchor 'top' in glyph 'a'" in caplog.text
def test_warn_liga_anchor_in_mark_glyph(self, testufo, caplog):
caplog.set_level(logging.ERROR)
testufo.newGlyph("ogonekcomb").anchors = [
{"name": "_top", "x": 200, "y": -40},
{"name": "top_1", "x": 200, "y": 450}, # should not be there!
]
logger = "ufo2ft.featureWriters.markFeatureWriter.MarkFeatureWriter"
with caplog.at_level(logging.WARNING, logger=logger):
_ = self.writeFeatures(testufo)
assert len(caplog.records) == 1
assert "invalid ligature anchor 'top_1' in mark glyph" in caplog.text
def test_ligature_NULL_anchor(self, testufo):
testufo.newGlyph("f_f_foo").anchors = [
{"name": "top_1", "x": 250, "y": 600},
{"name": "top_2", "x": 500, "y": 600},
{"name": "_3", "x": 0, "y": 0}, # this becomes <anchor NULL>
]
generated = self.writeFeatures(testufo)
assert re.search(r"ligComponent\s+<anchor NULL>", str(generated))
def test_skip_existing_feature(self, testufo):
testufo.features.text = dedent(
"""\
markClass acutecomb <anchor 100 200> @MC_top;
feature mark {
lookup mark1 {
pos base a
<anchor 100 200> mark @MC_top;
} mark1;
} mark;
"""
)
generated = self.writeFeatures(testufo)
# only mkmk is generated, mark was already present
assert str(generated) == dedent(
"""\
markClass tildecomb <anchor 100 200> @MC_top;
feature mkmk {
lookup mark2mark_top {
@MFS_mark2mark_top = [acutecomb tildecomb];
lookupflag UseMarkFilteringSet @MFS_mark2mark_top;
pos mark tildecomb
<anchor 100 300> mark @MC_top;
} mark2mark_top;
} mkmk;
"""
)
def test_append_feature(self, testufo):
testufo.features.text = dedent(
"""\
markClass acutecomb <anchor 100 200> @MC_top;
feature mark {
lookup mark1 {
pos base a
<anchor 100 200> mark @MC_top;
} mark1;
} mark;
"""
)
generated = self.writeFeatures(testufo, mode="append")
assert str(generated) == dedent(
"""\
markClass tildecomb <anchor 100 200> @MC_top;
feature mark {
lookup mark2base {
pos base a
<anchor 100 200> mark @MC_top;
} mark2base;
lookup mark2liga {
pos ligature f_i
<anchor 100 500> mark @MC_top
ligComponent
<anchor 600 500> mark @MC_top;
} mark2liga;
} mark;
feature mkmk {
lookup mark2mark_top {
@MFS_mark2mark_top = [acutecomb tildecomb];
lookupflag UseMarkFilteringSet @MFS_mark2mark_top;
pos mark tildecomb
<anchor 100 300> mark @MC_top;
} mark2mark_top;
} mkmk;
"""
)
def test_insert_comment_before(self, testufo):
writer = MarkFeatureWriter()
testufo.features.text = dedent(
"""\
markClass acutecomb <anchor 100 200> @MC_top;
feature mark {
#
# Automatic Code
#
lookup mark1 {
pos base a
<anchor 100 200> mark @MC_top;
} mark1;
} mark;
"""
)
feaFile = parseLayoutFeatures(testufo)
assert writer.write(testufo, feaFile)
assert str(feaFile) == dedent(
"""\
markClass acutecomb <anchor 100 200> @MC_top;
markClass tildecomb <anchor 100 200> @MC_top;
feature mark {
lookup mark2base {
pos base a
<anchor 100 200> mark @MC_top;
} mark2base;
lookup mark2liga {
pos ligature f_i
<anchor 100 500> mark @MC_top
ligComponent
<anchor 600 500> mark @MC_top;
} mark2liga;
} mark;
feature mark {
#
#
lookup mark1 {
pos base a
<anchor 100 200> mark @MC_top;
} mark1;
} mark;
feature mkmk {
lookup mark2mark_top {
@MFS_mark2mark_top = [acutecomb tildecomb];
lookupflag UseMarkFilteringSet @MFS_mark2mark_top;
pos mark tildecomb
<anchor 100 300> mark @MC_top;
} mark2mark_top;
} mkmk;
"""
)
# test append mode ignores insert marker
generated = self.writeFeatures(testufo, mode="append")
assert str(generated) == dedent(
"""\
markClass tildecomb <anchor 100 200> @MC_top;
feature mark {
lookup mark2base {
pos base a
<anchor 100 200> mark @MC_top;
} mark2base;
lookup mark2liga {
pos ligature f_i
<anchor 100 500> mark @MC_top
ligComponent
<anchor 600 500> mark @MC_top;
} mark2liga;
} mark;
feature mkmk {
lookup mark2mark_top {
@MFS_mark2mark_top = [acutecomb tildecomb];
lookupflag UseMarkFilteringSet @MFS_mark2mark_top;
pos mark tildecomb
<anchor 100 300> mark @MC_top;
} mark2mark_top;
} mkmk;
"""
)
def test_insert_comment_after(self, testufo):
writer = MarkFeatureWriter()
testufo.features.text = dedent(
"""\
markClass acutecomb <anchor 100 200> @MC_top;
feature mark {
lookup mark1 {
pos base a
<anchor 100 200> mark @MC_top;
} mark1;
#
# Automatic Code
#
} mark;
"""
)
feaFile = parseLayoutFeatures(testufo)
assert writer.write(testufo, feaFile)
assert str(feaFile) == dedent(
"""\
markClass acutecomb <anchor 100 200> @MC_top;
feature mark {
lookup mark1 {
pos base a
<anchor 100 200> mark @MC_top;
} mark1;
#
#
} mark;
markClass tildecomb <anchor 100 200> @MC_top;
feature mark {
lookup mark2base {
pos base a
<anchor 100 200> mark @MC_top;
} mark2base;
lookup mark2liga {
pos ligature f_i
<anchor 100 500> mark @MC_top
ligComponent
<anchor 600 500> mark @MC_top;
} mark2liga;
} mark;
feature mkmk {
lookup mark2mark_top {
@MFS_mark2mark_top = [acutecomb tildecomb];
lookupflag UseMarkFilteringSet @MFS_mark2mark_top;
pos mark tildecomb
<anchor 100 300> mark @MC_top;
} mark2mark_top;
} mkmk;
"""
)
# test append mode ignores insert marker
generated = self.writeFeatures(testufo, mode="append")
assert str(generated) == dedent(
"""\
markClass tildecomb <anchor 100 200> @MC_top;
feature mark {
lookup mark2base {
pos base a
<anchor 100 200> mark @MC_top;
} mark2base;
lookup mark2liga {
pos ligature f_i
<anchor 100 500> mark @MC_top
ligComponent
<anchor 600 500> mark @MC_top;
} mark2liga;
} mark;
feature mkmk {
lookup mark2mark_top {
@MFS_mark2mark_top = [acutecomb tildecomb];
lookupflag UseMarkFilteringSet @MFS_mark2mark_top;
pos mark tildecomb
<anchor 100 300> mark @MC_top;
} mark2mark_top;
} mkmk;
"""
)
    def test_insert_comment_middle(self, testufo):
        """An '# Automatic Code' insert marker with manual rules both before
        and after it is ambiguous in insert mode and must raise; append mode
        ignores the marker and just appends the generated features."""
        writer = MarkFeatureWriter()
        testufo.features.text = dedent(
            """\
            markClass acutecomb <anchor 100 200> @MC_top;
            feature mark {
                lookup mark1 {
                    pos base a
                        <anchor 100 200> mark @MC_top;
                } mark1;
                #
                # Automatic Code
                #
                lookup mark2 {
                    pos base a
                        <anchor 150 250> mark @MC_top;
                } mark2;
            } mark;
            """
        )
        feaFile = parseLayoutFeatures(testufo)
        with pytest.raises(
            InvalidFeaturesData,
            match="Insert marker has rules before and after, feature mark "
            "cannot be inserted.",
        ):
            writer.write(testufo, feaFile)
        # test append mode ignores insert marker
        generated = self.writeFeatures(testufo, mode="append")
        assert str(generated) == dedent(
            """\
            markClass tildecomb <anchor 100 200> @MC_top;
            feature mark {
                lookup mark2base {
                    pos base a
                        <anchor 100 200> mark @MC_top;
                } mark2base;
                lookup mark2liga {
                    pos ligature f_i
                        <anchor 100 500> mark @MC_top
                        ligComponent
                        <anchor 600 500> mark @MC_top;
                } mark2liga;
            } mark;
            feature mkmk {
                lookup mark2mark_top {
                    @MFS_mark2mark_top = [acutecomb tildecomb];
                    lookupflag UseMarkFilteringSet @MFS_mark2mark_top;
                    pos mark tildecomb
                        <anchor 100 300> mark @MC_top;
                } mark2mark_top;
            } mkmk;
            """
        )
    def test_insert_comment_outside_block(self, testufo):
        """An insert marker that sits outside any feature block is valid,
        both on its own and followed by manual feature code; append mode
        also succeeds on the same file."""
        writer = MarkFeatureWriter()
        testufo.features.text = dedent(
            """\
            #
            # Automatic Code
            #
            """
        )
        feaFile = parseLayoutFeatures(testufo)
        assert writer.write(testufo, feaFile)
        testufo.features.text = dedent(
            """\
            #
            # Automatic Code
            #
            markClass acutecomb <anchor 100 200> @MC_top;
            feature mark {
                lookup mark1 {
                    pos base a
                        <anchor 100 200> mark @MC_top;
                } mark1;
            } mark;
            """
        )
        feaFile = parseLayoutFeatures(testufo)
        assert writer.write(testufo, feaFile)
        # test append mode
        writer = MarkFeatureWriter(mode="append")
        assert writer.write(testufo, feaFile)
    def test_defs_and_lookups_first(self, testufo):
        """Generated markClass definitions and mark/mkmk features are
        inserted before the pre-existing manual mkmk lookup, at the
        '# Automatic Code' marker position."""
        testufo.newGlyph("circumflexcomb")
        writer = MarkFeatureWriter()
        testufo.features.text = dedent(
            """\
            feature mkmk {
                # Automatic Code
                # Move acutecomb down and right if preceded by circumflexcomb
                lookup move_acutecomb {
                    lookupflag UseMarkFilteringSet [acutecomb circumflexcomb];
                    pos circumflexcomb acutecomb' <0 20 0 20>;
                } move_acutecomb;
            } mkmk;
            """
        )
        feaFile = parseLayoutFeatures(testufo)
        assert writer.write(testufo, feaFile)
        assert str(feaFile) == dedent(
            """\
            markClass acutecomb <anchor 100 200> @MC_top;
            markClass tildecomb <anchor 100 200> @MC_top;
            feature mark {
                lookup mark2base {
                    pos base a
                        <anchor 100 200> mark @MC_top;
                } mark2base;
                lookup mark2liga {
                    pos ligature f_i
                        <anchor 100 500> mark @MC_top
                        ligComponent
                        <anchor 600 500> mark @MC_top;
                } mark2liga;
            } mark;
            feature mkmk {
                lookup mark2mark_top {
                    @MFS_mark2mark_top = [acutecomb tildecomb];
                    lookupflag UseMarkFilteringSet @MFS_mark2mark_top;
                    pos mark tildecomb
                        <anchor 100 300> mark @MC_top;
                } mark2mark_top;
            } mkmk;
            feature mkmk {
                # Move acutecomb down and right if preceded by circumflexcomb
                lookup move_acutecomb {
                    lookupflag UseMarkFilteringSet [acutecomb circumflexcomb];
                    pos circumflexcomb acutecomb' <0 20 0 20>;
                } move_acutecomb;
            } mkmk;
            """
        )
    def test_mark_mkmk_features(self, testufo):
        """With default options the writer emits both the mark feature
        (base + ligature lookups) and the mkmk feature."""
        writer = MarkFeatureWriter()  # by default both mark + mkmk are built
        feaFile = ast.FeatureFile()
        assert writer.write(testufo, feaFile)
        assert str(feaFile) == dedent(
            """\
            markClass acutecomb <anchor 100 200> @MC_top;
            markClass tildecomb <anchor 100 200> @MC_top;
            feature mark {
                lookup mark2base {
                    pos base a
                        <anchor 100 200> mark @MC_top;
                } mark2base;
                lookup mark2liga {
                    pos ligature f_i
                        <anchor 100 500> mark @MC_top
                        ligComponent
                        <anchor 600 500> mark @MC_top;
                } mark2liga;
            } mark;
            feature mkmk {
                lookup mark2mark_top {
                    @MFS_mark2mark_top = [acutecomb tildecomb];
                    lookupflag UseMarkFilteringSet @MFS_mark2mark_top;
                    pos mark tildecomb
                        <anchor 100 300> mark @MC_top;
                } mark2mark_top;
            } mkmk;
            """
        )
def test_write_only_one(self, testufo):
writer = MarkFeatureWriter(features=["mkmk"]) # only builds "mkmk"
feaFile = ast.FeatureFile()
assert writer.write(testufo, feaFile)
fea = str(feaFile)
assert "feature mark" not in fea
assert "feature mkmk" in fea
writer = MarkFeatureWriter(features=["mark"]) # only builds "mark"
feaFile = ast.FeatureFile()
assert writer.write(testufo, feaFile)
fea = str(feaFile)
assert "feature mark" in fea
assert "feature mkmk" not in fea
    def test_predefined_anchor_lists(self, FontClass):
        """Roboto uses some weird anchor naming scheme, see:
        https://github.com/google/roboto/blob/
        5700de83856781fa0c097a349e46dbaae5792cb0/
        scripts/lib/fontbuild/markFeature.py#L41-L47
        """

        # Subclassing NamedAnchor overrides the naming conventions: mark
        # anchors use the "_mark" prefix and names matching ignoreRE are
        # excluded from feature generation.
        class RobotoMarkFeatureWriter(MarkFeatureWriter):
            class NamedAnchor(NamedAnchor):
                markPrefix = "_mark"
                ignoreRE = "(^mkmk|_acc$)"

        ufo = FontClass()
        a = ufo.newGlyph("a")
        a.anchors = [
            {"name": "top", "x": 250, "y": 600},
            {"name": "bottom", "x": 250, "y": -100},
        ]
        f_i = ufo.newGlyph("f_i")
        f_i.anchors = [
            {"name": "top_1", "x": 200, "y": 700},
            {"name": "top_2", "x": 500, "y": 700},
        ]
        gravecomb = ufo.newGlyph("gravecomb")
        gravecomb.anchors = [
            {"name": "_marktop", "x": 160, "y": 780},
            {"name": "mkmktop", "x": 150, "y": 800},
            {"name": "mkmkbottom_acc", "x": 150, "y": 600},
        ]
        ufo.newGlyph("cedillacomb").appendAnchor(
            {"name": "_markbottom", "x": 200, "y": 0}
        )
        ufo.newGlyph("ogonekcomb").appendAnchor({"name": "_bottom", "x": 180, "y": -10})
        writer = RobotoMarkFeatureWriter()
        feaFile = ast.FeatureFile()
        writer.write(ufo, feaFile)
        assert str(feaFile) == dedent(
            """\
            markClass cedillacomb <anchor 200 0> @MC_markbottom;
            markClass gravecomb <anchor 160 780> @MC_marktop;
            feature mark {
                lookup mark2base {
                    pos base a
                        <anchor 250 -100> mark @MC_markbottom
                        <anchor 250 600> mark @MC_marktop;
                } mark2base;
                lookup mark2liga {
                    pos ligature f_i
                        <anchor 200 700> mark @MC_marktop
                        ligComponent
                        <anchor 500 700> mark @MC_marktop;
                } mark2liga;
            } mark;
            feature mkmk {
                lookup mark2mark_bottom {
                    @MFS_mark2mark_bottom = [cedillacomb gravecomb];
                    lookupflag UseMarkFilteringSet @MFS_mark2mark_bottom;
                    pos mark gravecomb
                        <anchor 150 600> mark @MC_markbottom;
                } mark2mark_bottom;
                lookup mark2mark_top {
                    @MFS_mark2mark_top = [gravecomb];
                    lookupflag UseMarkFilteringSet @MFS_mark2mark_top;
                    pos mark gravecomb
                        <anchor 150 800> mark @MC_marktop;
                } mark2mark_top;
            } mkmk;
            """  # noqa: B950
        )
    def test_abvm_blwm_features(self, FontClass):
        """Glyphs with Indic codepoints get abvm/blwm features for the
        above/below marks, while the dotted-circle placeholder goes into the
        generic mark feature."""
        ufo = FontClass()
        ufo.info.unitsPerEm = 1000
        dottedCircle = ufo.newGlyph("dottedCircle")
        dottedCircle.unicode = 0x25CC
        dottedCircle.anchors = [
            {"name": "top", "x": 297, "y": 552},
            {"name": "topright", "x": 491, "y": 458},
            {"name": "bottom", "x": 297, "y": 0},
        ]
        nukta = ufo.newGlyph("nukta-kannada")
        nukta.unicode = 0x0CBC
        nukta.appendAnchor({"name": "_bottom", "x": 0, "y": 0})
        nukta = ufo.newGlyph("candrabindu-kannada")
        nukta.unicode = 0x0C81
        nukta.appendAnchor({"name": "_top", "x": 0, "y": 547})
        halant = ufo.newGlyph("halant-kannada")
        halant.unicode = 0x0CCD
        halant.appendAnchor({"name": "_topright", "x": -456, "y": 460})
        ka = ufo.newGlyph("ka-kannada")
        ka.unicode = 0x0C95
        ka.appendAnchor({"name": "bottom", "x": 290, "y": 0})
        # .base variant has no codepoint but is reachable via the psts
        # substitution below, so it must also be included.
        ka_base = ufo.newGlyph("ka-kannada.base")
        ka_base.appendAnchor({"name": "top", "x": 291, "y": 547})
        ka_base.appendAnchor({"name": "topright", "x": 391, "y": 460})
        ka_base.appendAnchor({"name": "bottom", "x": 290, "y": 0})
        ufo.features.text = dedent(
            """\
            languagesystem DFLT dflt;
            languagesystem knda dflt;
            languagesystem knd2 dflt;
            feature psts {
                sub ka-kannada' halant-kannada by ka-kannada.base;
            } psts;
            """
        )
        generated = self.writeFeatures(ufo)
        assert str(generated) == dedent(
            """\
            markClass nukta-kannada <anchor 0 0> @MC_bottom;
            markClass candrabindu-kannada <anchor 0 547> @MC_top;
            markClass halant-kannada <anchor -456 460> @MC_topright;
            feature abvm {
                lookup abvm_mark2base {
                    pos base ka-kannada.base
                        <anchor 291 547> mark @MC_top
                        <anchor 391 460> mark @MC_topright;
                } abvm_mark2base;
            } abvm;
            feature blwm {
                lookup blwm_mark2base {
                    pos base ka-kannada
                        <anchor 290 0> mark @MC_bottom;
                    pos base ka-kannada.base
                        <anchor 290 0> mark @MC_bottom;
                } blwm_mark2base;
            } blwm;
            feature mark {
                lookup mark2base {
                    pos base dottedCircle
                        <anchor 297 0> mark @MC_bottom
                        <anchor 297 552> mark @MC_top
                        <anchor 491 458> mark @MC_topright;
                } mark2base;
            } mark;
            """  # noqa: B950
        )
    def test_all_features(self, testufo):
        """End-to-end check: abvm/blwm (via Tamil codepoints), mark and mkmk
        features are all generated together, including NULL anchors for
        ligature components without a matching anchor."""
        ufo = testufo
        ufo.info.unitsPerEm = 1000
        ufo.newGlyph("cedillacomb").anchors = [
            {"name": "_bottom", "x": 10, "y": -5},
            {"name": "bottom", "x": 20, "y": -309},
        ]
        ufo.newGlyph("c").appendAnchor({"name": "bottom", "x": 240, "y": 0})
        dottedCircle = ufo.newGlyph("dottedCircle")
        dottedCircle.unicode = 0x25CC
        dottedCircle.anchors = [
            {"name": "top", "x": 297, "y": 552},
            {"name": "bottom", "x": 297, "y": 0},
            {"name": "bar", "x": 491, "y": 458},
        ]
        # too lazy, couldn't come up with a real-word example :/
        foocomb = ufo.newGlyph("foocomb")
        foocomb.unicode = 0x0B85
        foocomb.anchors = [
            {"name": "_top", "x": 100, "y": 40},
            {"name": "top", "x": 100, "y": 190},
        ]
        barcomb = ufo.newGlyph("barcomb")
        barcomb.unicode = 0x0B86
        barcomb.anchors = [
            {"name": "_bar", "x": 100, "y": 40},
            {"name": "bar", "x": 100, "y": 440.1},
        ]
        bazcomb = ufo.newGlyph("bazcomb")
        bazcomb.unicode = 0x0B87
        bazcomb.anchors = [
            {"name": "_bottom", "x": 90, "y": 320},
            {"name": "bottom", "x": 100, "y": -34},
        ]
        foo_bar_baz = ufo.newGlyph("foo_bar_baz")
        foo_bar_baz.unicode = 0x0B88
        foo_bar_baz.anchors = [
            {"name": "top_1", "x": 100, "y": 500},
            {"name": "bottom_1", "x": 100, "y": 10},
            {"name": "_2", "x": 600, "y": 500},
            {"name": "top_3", "x": 1000, "y": 500},
            {"name": "bar_3", "x": 1100, "y": 499},  # below half UPEM
        ]
        bar_foo = ufo.newGlyph("bar_foo")
        bar_foo.unicode = 0x0B89
        # sequence doesn't start from 1, the first is implied NULL anchor
        bar_foo.anchors = [{"name": "top_2", "x": 600, "y": 501}]
        testufo.glyphOrder = [
            "a",
            "f_i",
            "acutecomb",
            "tildecomb",
            "cedillacomb",
            "c",
            "dottedCircle",
            "foocomb",
            "barcomb",
            "bazcomb",
            "foo_bar_baz",
            "bar_foo",
        ]
        generated = self.writeFeatures(testufo)
        assert str(generated) == dedent(
            """\
            markClass barcomb <anchor 100 40> @MC_bar;
            markClass cedillacomb <anchor 10 -5> @MC_bottom;
            markClass bazcomb <anchor 90 320> @MC_bottom;
            markClass acutecomb <anchor 100 200> @MC_top;
            markClass tildecomb <anchor 100 200> @MC_top;
            markClass foocomb <anchor 100 40> @MC_top;
            feature abvm {
                lookup abvm_mark2liga {
                    pos ligature foo_bar_baz
                        <anchor 100 500> mark @MC_top
                        ligComponent
                        <anchor NULL>
                        ligComponent
                        <anchor 1100 499> mark @MC_bar
                        <anchor 1000 500> mark @MC_top;
                    pos ligature bar_foo
                        <anchor NULL>
                        ligComponent
                        <anchor 600 501> mark @MC_top;
                } abvm_mark2liga;
                lookup abvm_mark2mark_bar {
                    @MFS_abvm_mark2mark_bar = [barcomb];
                    lookupflag UseMarkFilteringSet @MFS_abvm_mark2mark_bar;
                    pos mark barcomb
                        <anchor 100 440> mark @MC_bar;
                } abvm_mark2mark_bar;
                lookup abvm_mark2mark_top {
                    @MFS_abvm_mark2mark_top = [foocomb];
                    lookupflag UseMarkFilteringSet @MFS_abvm_mark2mark_top;
                    pos mark foocomb
                        <anchor 100 190> mark @MC_top;
                } abvm_mark2mark_top;
            } abvm;
            feature blwm {
                lookup blwm_mark2liga {
                    pos ligature foo_bar_baz
                        <anchor 100 10> mark @MC_bottom
                        ligComponent
                        <anchor NULL>
                        ligComponent
                        <anchor NULL>;
                } blwm_mark2liga;
                lookup blwm_mark2mark_bottom {
                    @MFS_blwm_mark2mark_bottom = [bazcomb];
                    lookupflag UseMarkFilteringSet @MFS_blwm_mark2mark_bottom;
                    pos mark bazcomb
                        <anchor 100 -34> mark @MC_bottom;
                } blwm_mark2mark_bottom;
            } blwm;
            feature mark {
                lookup mark2base {
                    pos base a
                        <anchor 100 200> mark @MC_top;
                    pos base c
                        <anchor 240 0> mark @MC_bottom;
                    pos base dottedCircle
                        <anchor 491 458> mark @MC_bar
                        <anchor 297 0> mark @MC_bottom
                        <anchor 297 552> mark @MC_top;
                } mark2base;
                lookup mark2liga {
                    pos ligature f_i
                        <anchor 100 500> mark @MC_top
                        ligComponent
                        <anchor 600 500> mark @MC_top;
                } mark2liga;
            } mark;
            feature mkmk {
                lookup mark2mark_bottom {
                    @MFS_mark2mark_bottom = [cedillacomb];
                    lookupflag UseMarkFilteringSet @MFS_mark2mark_bottom;
                    pos mark cedillacomb
                        <anchor 20 -309> mark @MC_bottom;
                } mark2mark_bottom;
                lookup mark2mark_top {
                    @MFS_mark2mark_top = [acutecomb tildecomb];
                    lookupflag UseMarkFilteringSet @MFS_mark2mark_top;
                    pos mark tildecomb
                        <anchor 100 300> mark @MC_top;
                } mark2mark_top;
            } mkmk;
            """  # noqa: B950
        )
    def test_mark_mkmk_features_with_GDEF(self, testufo):
        """Only glyphs listed in the GDEF GlyphClassDef take part in the
        features; a mark glyph with two mark anchors yields two separate
        mark2base lookups."""
        D = testufo.newGlyph("D")
        D.anchors = [
            {"name": "top", "x": 300, "y": 700},
            {"name": "center", "x": 320, "y": 360},
        ]
        # these glyphs have compatible anchors but since they not listed in
        # the GDEF groups, they won't be included in the mark/mkmk feature
        testufo.newGlyph("Alpha").appendAnchor({"name": "topleft", "x": -10, "y": 400})
        testufo.newGlyph("psili").appendAnchor({"name": "_topleft", "x": 0, "y": 50})
        dotaccentcomb = testufo.newGlyph("dotaccentcomb")
        # this mark glyph has more than one mark anchor, and both will be
        # generated. Since the two mark anchors cannot cohabit in the same
        # mark lookup, two lookups will be generated.
        dotaccentcomb.anchors = [
            {"name": "_center", "x": 0, "y": 0},
            {"name": "_top", "x": 0, "y": 0},
            {"name": "top", "x": 0, "y": 300},
        ]
        testufo.features.text = dedent(
            """\
            @Bases = [a D];
            @Marks = [acutecomb tildecomb dotaccentcomb];
            table GDEF {
                GlyphClassDef @Bases, [f_i], @Marks, ;
            } GDEF;
            """
        )
        testufo.glyphOrder = [
            "Alpha",
            "D",
            "a",
            "acutecomb",
            "dotaccentcomb",
            "f_i",
            "psili",
            "tildecomb",
        ]
        generated = self.writeFeatures(testufo)
        assert str(generated) == dedent(
            """\
            markClass dotaccentcomb <anchor 0 0> @MC_center;
            markClass acutecomb <anchor 100 200> @MC_top;
            markClass dotaccentcomb <anchor 0 0> @MC_top;
            markClass tildecomb <anchor 100 200> @MC_top;
            feature mark {
                lookup mark2base {
                    pos base D
                        <anchor 320 360> mark @MC_center;
                } mark2base;
                lookup mark2base_1 {
                    pos base D
                        <anchor 300 700> mark @MC_top;
                    pos base a
                        <anchor 100 200> mark @MC_top;
                } mark2base_1;
                lookup mark2liga {
                    pos ligature f_i
                        <anchor 100 500> mark @MC_top
                        ligComponent
                        <anchor 600 500> mark @MC_top;
                } mark2liga;
            } mark;
            feature mkmk {
                lookup mark2mark_top {
                    @MFS_mark2mark_top = [acutecomb dotaccentcomb tildecomb];
                    lookupflag UseMarkFilteringSet @MFS_mark2mark_top;
                    pos mark dotaccentcomb
                        <anchor 0 300> mark @MC_top;
                    pos mark tildecomb
                        <anchor 100 300> mark @MC_top;
                } mark2mark_top;
            } mkmk;
            """
        )
    def test_mark_mkmk_features_with_GDEF_and_openTypeCategories(self, testufo):
        """An explicit GDEF table in the features takes precedence over the
        public.openTypeCategories lib key."""
        # this glyph has compatible anchors and has an openTypeCategories "base"
        # value
        D = testufo.newGlyph("D")
        D.anchors = [
            {"name": "top", "x": 300, "y": 700},
            {"name": "center", "x": 320, "y": 360},
        ]
        # these glyphs have compatible anchors but since they not listed in
        # the GDEF groups, they won't be included in the mark/mkmk feature
        testufo.newGlyph("Alpha").appendAnchor({"name": "topleft", "x": -10, "y": 400})
        testufo.newGlyph("psili").appendAnchor({"name": "_topleft", "x": 0, "y": 50})
        dotaccentcomb = testufo.newGlyph("dotaccentcomb")
        # this mark glyph has more than one mark anchor, and both will be
        # generated. Since the two mark anchors cannot cohabit in the same
        # mark lookup, two lookups will be generated.
        dotaccentcomb.anchors = [
            {"name": "_center", "x": 0, "y": 0},
            {"name": "_top", "x": 0, "y": 0},
            {"name": "top", "x": 0, "y": 300},
        ]
        # will be ignored because in GDEF table below
        testufo.lib["public.openTypeCategories"] = {
            "D": "base",
            "dotaccentcomb": "mark",
            "tildecomb": "base",
        }
        testufo.features.text = dedent(
            """\
            @Bases = [a];
            @Marks = [acutecomb tildecomb];
            table GDEF {
                GlyphClassDef @Bases, [f_i], @Marks, ;
            } GDEF;
            """
        )
        testufo.glyphOrder = [
            "Alpha",
            "D",
            "a",
            "acutecomb",
            "dotaccentcomb",
            "f_i",
            "psili",
            "tildecomb",
        ]
        generated = self.writeFeatures(testufo)
        assert str(generated) == dedent(
            """\
            markClass acutecomb <anchor 100 200> @MC_top;
            markClass tildecomb <anchor 100 200> @MC_top;
            feature mark {
                lookup mark2base {
                    pos base a
                        <anchor 100 200> mark @MC_top;
                } mark2base;
                lookup mark2liga {
                    pos ligature f_i
                        <anchor 100 500> mark @MC_top
                        ligComponent
                        <anchor 600 500> mark @MC_top;
                } mark2liga;
            } mark;
            feature mkmk {
                lookup mark2mark_top {
                    @MFS_mark2mark_top = [acutecomb tildecomb];
                    lookupflag UseMarkFilteringSet @MFS_mark2mark_top;
                    pos mark tildecomb
                        <anchor 100 300> mark @MC_top;
                } mark2mark_top;
            } mkmk;
            """
        )
def test_multiple_anchor_classes_base(self, FontClass):
dirname = os.path.dirname(os.path.dirname(__file__))
fontPath = os.path.join(dirname, "data", "MultipleAnchorClasses.ufo")
testufo = FontClass(fontPath)
generated = self.writeFeatures(testufo)
assert str(generated) == dedent(
"""\
markClass acutecomb <anchor -175 589> @MC_topA;
markClass acutecomb <anchor -175 572> @MC_topE;
feature mark {
lookup mark2base {
pos base a
<anchor 515 581> mark @MC_topA;
} mark2base;
lookup mark2base_1 {
pos base e
<anchor -21 396> mark @MC_topE;
} mark2base_1;
} mark;
"""
)
    def test_multiple_anchor_classes_liga(self, FontClass):
        """Ligatures whose components use different anchor classes get one
        mark2liga lookup per class, with NULL anchors filling the gaps."""
        ufo = FontClass()
        liga = ufo.newGlyph("f_i")
        liga.appendAnchor({"name": "top_1", "x": 100, "y": 500})
        liga.appendAnchor({"name": "top_2", "x": 600, "y": 500})
        ligaOther = ufo.newGlyph("f_f")
        ligaOther.appendAnchor({"name": "topOther_1", "x": 101, "y": 501})
        ligaOther.appendAnchor({"name": "topOther_2", "x": 601, "y": 501})
        # f_l mixes both anchor classes across its two components.
        ligaMix = ufo.newGlyph("f_l")
        ligaMix.appendAnchor({"name": "top_1", "x": 102, "y": 502})
        ligaMix.appendAnchor({"name": "topOther_2", "x": 602, "y": 502})
        acutecomb = ufo.newGlyph("acutecomb")
        acutecomb.appendAnchor({"name": "_top", "x": 100, "y": 200})
        acutecomb.appendAnchor({"name": "_topOther", "x": 150, "y": 250})
        generated = self.writeFeatures(ufo)
        # MC_top should be last thanks to the anchorSortKey
        assert str(generated) == dedent(
            """\
            markClass acutecomb <anchor 100 200> @MC_top;
            markClass acutecomb <anchor 150 250> @MC_topOther;
            feature mark {
                lookup mark2liga {
                    pos ligature f_f
                        <anchor 101 501> mark @MC_topOther
                        ligComponent
                        <anchor 601 501> mark @MC_topOther;
                    pos ligature f_l
                        <anchor NULL>
                        ligComponent
                        <anchor 602 502> mark @MC_topOther;
                } mark2liga;
                lookup mark2liga_1 {
                    pos ligature f_i
                        <anchor 100 500> mark @MC_top
                        ligComponent
                        <anchor 600 500> mark @MC_top;
                    pos ligature f_l
                        <anchor 102 502> mark @MC_top
                        ligComponent
                        <anchor NULL>;
                } mark2liga_1;
            } mark;
            """
        )
    def test_multiple_anchor_classes_conflict_warning(self, FontClass, caplog):
        """Check that when there is an ambiguity in the form of one base glyph
        and one mark glyph being able to be linked through two different
        anchor pairs, the mark feature writer emits a warning about the
        situation but still outputs a valid feature declaration. The last
        lookup in that feature declaration will "win" and determine the outcome
        of mark positioning. See this comment for more information:
        https://github.com/googlefonts/ufo2ft/pull/416#issuecomment-721693266
        """
        caplog.set_level(logging.INFO)
        ufo = FontClass()
        liga = ufo.newGlyph("a")
        liga.appendAnchor({"name": "top", "x": 100, "y": 500})
        liga.appendAnchor({"name": "topOther", "x": 150, "y": 550})
        acutecomb = ufo.newGlyph("acutecomb")
        acutecomb.appendAnchor({"name": "_top", "x": 100, "y": 200})
        acutecomb.appendAnchor({"name": "_topOther", "x": 150, "y": 250})
        generated = self.writeFeatures(ufo)
        assert (
            "The base glyph a and mark glyph acutecomb are ambiguously "
            "connected by several anchor classes: MC_topOther, MC_top. "
            "The last one will prevail." in caplog.text
        )
        # MC_top should be last thanks to the anchorSortKey
        assert str(generated) == dedent(
            """\
            markClass acutecomb <anchor 100 200> @MC_top;
            markClass acutecomb <anchor 150 250> @MC_topOther;
            feature mark {
                lookup mark2base {
                    pos base a
                        <anchor 150 550> mark @MC_topOther;
                } mark2base;
                lookup mark2base_1 {
                    pos base a
                        <anchor 100 500> mark @MC_top;
                } mark2base_1;
            } mark;
            """
        )
def test_skipExportGlyphs(self, testufo):
testufo.lib["public.skipExportGlyphs"] = ["f_i", "tildecomb"]
testufo.glyphOrder = ["a", "f_i", "acutecomb", "tildcomb"]
generated = self.writeFeatures(testufo)
assert str(generated) == dedent(
"""\
markClass acutecomb <anchor 100 200> @MC_top;
feature mark {
lookup mark2base {
pos base a
<anchor 100 200> mark @MC_top;
} mark2base;
} mark;
"""
)
def test_quantize(self, testufo):
testufo.newGlyph("ogonekcomb").anchors = [
{"name": "_top", "x": 236, "y": 188},
]
testufo.lib["public.skipExportGlyphs"] = ["f_i", "tildecomb"]
generated = self.writeFeatures(testufo, quantization=50)
assert str(generated) == dedent(
"""\
markClass acutecomb <anchor 100 200> @MC_top;
markClass ogonekcomb <anchor 250 200> @MC_top;
feature mark {
lookup mark2base {
pos base a
<anchor 100 200> mark @MC_top;
} mark2base;
} mark;
"""
)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    import sys

    sys.exit(pytest.main(sys.argv))
|
|
#! python
# -*- coding: utf-8 -*-
"""
WavyTool is a simple program that allows you to acquire data from input devices,
i.e microphones, and save them as file (csv, png). Also, you can perform
some simple processing as spectral analysis.
:authors: Daniel Cosmo Pizetta, Wesley Daflita
:contact: daniel.pizetta@usp.br, wesley.daflita@usp.br
:since: 2015/02/27
"""
import collections
import json
import logging
import os
import sys
import time
import urllib.request
import numpy as np
# QtPy must be imported before pyqtgraph
from qtpy.QtCore import QTimer
from qtpy.QtGui import QPixmap
from qtpy.QtWidgets import (QApplication, QFileDialog, QMainWindow,
QMessageBox, QSplashScreen)
# Must be set to the same binding here
api_names = {'pyqt5': 'PyQt5', 'pyside2': 'PySide2', 'pyqt4': 'PyQt4', 'pyside': 'PySide'}
os.environ['PYQTGRAPH_QT_LIB'] = api_names[os.environ['QT_API']]
# PyQtGraph must be imported after QtPy
import pyqtgraph as pg
# Then import the own interface
from wavytool import __version__ as version
from wavytool import app_name
from wavytool.core_wavy import AudioRecord
from wavytool.gui_wav2dat import ConvertWave2Data
from wavytool.mw_wavy import Ui_MainWindow
logging.basicConfig(level=logging.DEBUG)
# Informing about used binding
logging.info('Using Qt binding (QtPy/PyQtGraph): %s', (os.environ['QT_API'],
os.environ['PYQTGRAPH_QT_LIB']))
about = ("<h3>{} v.{}</h3>"
"<p>© Daniel C. Pizetta, Wesley Daflita<br/>"
"Sao Carlos Institute of Physics<br/>"
"University of Sao Paulo<br/>"
"<a href='https://github.com/dpizetta/wavy'>WavyTool on GitHub</a><br/>"
"<a href='https://pypi.org/project/wavytool'>WavyTool on PyPI</a><br/>"
"<a href='http://choosealicense.com/licenses/mit'>MIT License</a><br/></p>").format(app_name, version)
def main():
    """Application entry point.

    Creates the Qt application, shows a splash screen for at least one
    second, builds the main window, restores the saved data-folder setting
    (asking the user for one if it cannot be read) and runs the event loop.

    Returns:
        int: the Qt event-loop exit code.
    """
    args = sys.argv[1:]
    wavy = QApplication(args)
    wavy.setApplicationVersion(version)
    wavy.setApplicationName(app_name)
    wavy.setOrganizationName("Sao Carlos Institute of Physics - University of Sao Paulo")
    wavy.setOrganizationDomain("www.ifsc.usp.br")
    # The dark theme is optional; fall back silently to the default style.
    try:
        import qdarkstyle
    except ImportError:
        logging.warning("No dark theme installed, use 'pip install qdarkstyle' to install.")
    else:
        try:
            wavy.setStyleSheet(qdarkstyle.load_stylesheet_from_environment())
        except Exception as err:
            logging.warning("Problems using qdarkstyle.\nError: %s", str(err))
    # Splash screen, kept up for >= 1 s while Qt events are pumped.
    pixmap = QPixmap("wavytool/images/symbol.png")
    splash = QSplashScreen(pixmap)
    start = time.time()
    splash.show()
    splash.repaint()
    splash.showMessage("Loading...")
    wavy.processEvents()
    while time.time() - start < 1:
        time.sleep(0.001)
        wavy.processEvents()
    splash.showMessage("Starting...")
    window = MainWindow()
    window.showMaximized()
    splash.finish(window)
    # BUG FIX: a corrupt config (invalid JSON or missing 'data_folder')
    # previously crashed at startup; now any read/parse/key error falls
    # back to asking the user for a data folder, same as a missing file.
    try:
        with open('wavytool.config', 'r') as json_file:
            data = json.load(json_file)
        window.base_path = data['data_folder']
        logging.info('Data folder is: %s', window.base_path)
    except (IOError, ValueError, KeyError):
        window.getDataFolder()
    return wavy.exec_()
class GlobalBuffer():
    """Allows real-time data transfer between plots.

    Acts as a shared circular buffer: the real-time plotter writes samples
    at ``counter`` while a recording plotter reads them back.

    Attributes:
        recording (bool): True while a recording session is active.
        buffer_size (int): number of slots in the circular data array.
        data (numpy.ndarray): sample storage (uninitialized until written).
        counter (int): current write index; rewound by ``clear``.
        timestamp (float): epoch time when recording started, 0 when stopped.
        time_limit (float): stop-after limit in seconds; 0 means unlimited.
    """

    def __init__(self, buffer_size=1024):
        self.recording = False
        self.buffer_size = buffer_size
        self.data = np.empty(self.buffer_size)
        self.counter = 0
        self.timestamp = 0
        self.time_limit = 0

    def startRecording(self):
        """Mark the start of a recording session."""
        self.timestamp = time.time()
        self.recording = True

    def stopRecording(self):
        """Stop recording and rewind the write index."""
        self.timestamp = 0
        self.recording = False
        self.counter = 0

    def clear(self):
        """Rewind the write index so the buffer is reused from the start.

        Old samples are intentionally left in place and simply overwritten
        as new data arrives. (The previous implementation also assigned
        ``self.data`` onto itself, which was a no-op and has been removed.)
        """
        self.counter = 0
# Single module-level buffer shared by both plot widgets and MainWindow.
global_buffer = GlobalBuffer()
class RecordingPlotter(pg.PlotWidget):
    """Plots sub data from real time plotter.

    Reads recorded samples back out of the shared ``global_buffer`` on a
    QTimer and appends them to a growing curve.

    Parameters:
        sample_interval (float): sample interval. Default 0.02 seconds.
        time_window (float): size (in time) for the main window. Default 20 seconds.
        main_window (MainWindow): main_window.
        parent (QWidget): parent.
    """

    def __init__(self, sample_interval=0.02, time_window=20., main_window=None, parent=None):
        super(RecordingPlotter, self).__init__(parent)
        self.sample_interval = sample_interval
        self.time_window = time_window
        self.showGrid(x=True, y=True)
        self.setLabel('top', 'Recorded data')
        self.setLabel('left', 'Amplitude', 'V')
        self.setLabel('bottom', 'Time', 's')
        # Curve is created lazily by initData(); None means "never started".
        self.curve = None
        self.main_window = main_window
        # NOTE(review): `global` without an assignment in this scope is a
        # no-op; kept as-is for byte-identical code.
        global global_buffer

    def initData(self):
        """(Re)initialize the plot buffers and start the refresh timer."""
        # Forces update at 20 FPS, shouldn't be taxing to most systems
        self._interval = int(1 / 20 * 1000)
        self._bufsize = int(self.time_window / self.sample_interval)
        self.x = np.linspace(0.0, self.time_window, self._bufsize)
        self.setDownsampling(mode='peak')
        self.databuffer = collections.deque([0.0] * self._bufsize, self._bufsize)
        self.setClipToView(True)
        # Data array grows in chunks of 5 in updateplot() as samples arrive.
        self.data = np.empty(5)
        self.ptr = 0
        self.counter = 0
        self.curve = self.plot(self.x[:self.ptr], self.data[:self.ptr], antialias=True)
        self.timer = QTimer()
        self.timer.timeout.connect(self.updateplot)
        self.timer.start(self._interval)

    def setSampleInterval(self, sample_interval):
        # Takes effect on the next initData() call.
        self.sample_interval = sample_interval

    def setTimeWindow(self, time_window):
        # Changing the window restarts the plot from scratch.
        self.time_window = time_window
        self.curve.clear()
        self.initData()

    def getdata(self):
        """Return the next recorded sample from the shared global buffer;
        stop the acquisition if the configured time limit was reached."""
        if global_buffer.time_limit != 0 and self.x[self.ptr] >= global_buffer.time_limit:
            # TODO: this is not a good way to stop because you need the parent,
            # and the parent's stop method calls your methods.
            # We need to think about something different here.
            self.main_window.stop()
        # Catch up with the writer: never read ahead of the write counter.
        while (self.counter > global_buffer.counter and self.counter > 0):
            self.counter -= 1
        return global_buffer.data[(self.counter % global_buffer.buffer_size)]

    def updateplot(self):
        """Update plot."""
        self.data[self.ptr] = self.getdata()
        self.x[self.ptr + 1] = self.x[self.ptr] + self.sample_interval
        self.ptr += 1
        self.counter += 1
        # Grow both arrays by 5 slots when the data buffer is full.
        if self.ptr >= self.data.shape[0]:
            tmp = self.data
            xtmp = self.x
            self.data = np.empty(self.data.shape[0] + 5)
            self.x = np.empty(self.x.shape[0] + 5)
            self.data[:tmp.shape[0]] = tmp
            self.x[:xtmp.shape[0]] = xtmp
        self.curve.setData(self.x[:self.ptr], self.data[:self.ptr])

    def setCurveColor(self, r, g, b):
        """Set curve color (RGB, 0-255 per channel)."""
        self.curve.setPen(pg.mkPen(color=(r, g, b)))
class RealTimeRecordingPlotter(pg.PlotWidget):
    """Plots data (audio) in real time.

    Reads samples from the audio input on a QTimer and shows them in a
    scrolling window; while recording, samples are mirrored into the shared
    ``global_buffer`` for the RecordingPlotter to consume.

    Parameters:
        sample_interval (float): sample interval. Default 0.02 seconds.
        time_window (float): size (in time) for the main window. Default 20 seconds.
        parent (QWidget): parent.
    """

    def __init__(self, sample_interval=0.02, time_window=20., parent=None):
        super(RealTimeRecordingPlotter, self).__init__(parent)
        self.sample_interval = sample_interval
        self.time_window = time_window
        self.showGrid(x=True, y=True)
        self.setLabel('top', 'Input Real Time')
        self.setLabel('left', 'Amplitude', 'V')
        self.setLabel('bottom', 'Time', 's')
        # Curve is created lazily by initData(); None means "never started".
        self.curve = None

    def initData(self):
        """Initialize data for plotting, open the audio input and start the
        refresh timer."""
        # Forces update at 20 FPS, shouldn't be taxing to most systems
        self._interval = int(1 / 20 * 1000)
        self._bufsize = int(self.time_window / self.sample_interval)
        self.databuffer = collections.deque([0.0] * self._bufsize, self._bufsize)
        self.x = np.linspace(-self.time_window, 0.0, self._bufsize)
        # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin float is the documented equivalent (float64).
        self.y = np.zeros(self._bufsize, dtype=float)
        # Initializes audio listener
        self.audio = AudioRecord("output.wav", self.sample_interval)
        try:
            self.audio.begin_audio()
        except IOError as e:
            QMessageBox.information(self,
                                    self.tr('Information'),
                                    self.tr('No input device found, please make sure to plug it before open the '
                                            'program. Please, restart the program and try again.\n{}'.format(e)),
                                    QMessageBox.Ok)
            # BUG FIX: use sys.exit instead of the site-provided exit(),
            # which may not exist in non-interactive/frozen environments.
            sys.exit(1)
        # :TODO: needs to be separated the interval of plotting data from the acquire data.
        # Initializes the timer
        self.timer = QTimer()
        self.timer.timeout.connect(self.updateplot)
        self.timer.start(self._interval)
        # Plot for the first time
        self.curve = self.plot(self.x, self.y, pen=(0, 255, 255), antialias=True)
        self.curve.clear()

    def setSampleInterval(self, sample_interval):
        """Sets the sample interval for plotting.

        Parameters:
            sample_interval (float): sample interval in seconds
        """
        self.sample_interval = sample_interval
        self.curve.clear()
        self.initData()

    def setTimeWindow(self, time_window):
        """Sets the time window for plotting.

        Parameters:
            time_window (float): size (in time) for the main window, in seconds.
        """
        self.time_window = time_window
        self.curve.clear()
        self.initData()

    def getdata(self):
        """Read one sample from the audio device, clip it, and mirror it
        into the shared global buffer while recording."""
        b = self.audio.get_data_from_audio()[1]
        new = b[0]
        # This clipping of the signal prevents pyqtgraph from breaking due
        # to large random noise when some soundcards are initiated.
        # Prevents input overflow when program starts.
        if new > 1e+150:
            new = 1e+150
        if global_buffer.recording is True:
            global_buffer.counter += 1
            if global_buffer.counter >= global_buffer.buffer_size:
                global_buffer.clear()
            global_buffer.data[global_buffer.counter] = new
        return new

    def updateplot(self):
        """Update plot."""
        stp = self.getdata()
        self.databuffer.append(stp)
        self.y[:] = self.databuffer
        self.curve.setData(self.x, self.y)
class MainWindow(QMainWindow):
"""Main window class.
Parameters:
parent (QWidget): parent
"""
    def __init__(self, parent=None):
        """Build the UI, wire up all actions and create both plot widgets.

        Parameters:
            parent (QWidget): parent widget, or None for a top-level window.
        """
        global global_buffer
        super(MainWindow, self).__init__(parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Check new version
        self.setWindowTitle(app_name + ' ' + version)
        # NOTE(review): `self.update` shadows QWidget.update(); it holds the
        # update-check status string shown in the About label.
        self.update = "NOT CHECKED ..."
        self.checkUpdate()
        self.filepath = ""
        # Initial state is none because there is no data acquired yet
        self.isSaved = None
        # Sample interval should be 0.02s to not overflow in XP
        self.ui.doubleSpinBoxSampleInterval.setMinimum(0.02)
        self.ui.doubleSpinBoxSampleInterval.setMaximum(0.5)
        self.ui.doubleSpinBoxSampleInterval.setValue(0.02)
        self.ui.doubleSpinBoxSampleInterval.setSingleStep(0.01)
        # Connecting actions
        # File actions
        # self.ui.actionNew.triggered.connect(self.newFile)
        # For now it cannot open a file
        # self.ui.actionOpen.triggered.connect(self.openFile)
        # self.ui.actionSave.triggered.connect(self.saveFile)
        self.ui.actionSave_As.triggered.connect(self.saveFileAs)
        self.ui.actionSave_As.setEnabled(False)
        self.ui.actionPrint_graph.triggered.connect(self.saveImageAs)
        self.ui.actionPrint_graph.setEnabled(False)
        # Acquire actions
        self.ui.actionRecord.triggered.connect(self.record)
        self.ui.actionRecord.setCheckable(True)
        self.ui.actionPause.triggered.connect(self.pause)
        self.ui.actionPause.setCheckable(True)
        self.ui.actionPause.setEnabled(False)
        self.ui.actionStop.triggered.connect(self.stop)
        self.ui.actionStop.setEnabled(False)
        # Tools actions
        self.ui.actionConvert_Wav_to_Dat.triggered.connect(self.callTools)
        # Program actions
        self.ui.actionQuit.triggered.connect(self.close)
        self.ui.actionAbout_Wavy.triggered.connect(self.about)
        # Plot widget
        self.plot_widget = RealTimeRecordingPlotter(sample_interval=0.02, time_window=20.)
        self.plot_widget.initData()
        self.ui.gridLayout_2.addWidget(self.plot_widget, 0, 1)
        self.plot_widget_rec = RecordingPlotter(sample_interval=0.02, time_window=5., main_window=self)
        self.ui.gridLayout_2.addWidget(self.plot_widget_rec, 1, 1)
        # Inputs
        self.ui.doubleSpinBoxSampleInterval.valueChanged.connect(self.plot_widget.setSampleInterval)
        self.ui.doubleSpinBoxSampleInterval.valueChanged.connect(self.plot_widget_rec.setSampleInterval)
        self.ui.doubleSpinBoxSampleInterval.valueChanged.connect(self.setSampleRate)
        # self.ui.doubleSpinBoxSampleRate.valueChanged.connect(self.setSampleInterval)
        self.ui.spinBoxWindowTime.valueChanged.connect(self.plot_widget.setTimeWindow)
        self.setSampleRate(self.ui.doubleSpinBoxSampleInterval.value())
def checkUpdate(self):
"""Check update from internet."""
url = 'https://api.github.com/repos/dpizetta/wavy/releases/latest'
try:
response = urllib.request.urlopen(url, timeout=20)
tag_version = json.loads(response.read())
except Exception:
pass
else:
if str(version) >= str(tag_version['tag_name'][1:]):
self.update = "Up to date!"
else:
self.update = "New version ({}) is available!".format(str(tag_version['tag_name']))
QMessageBox.information(self,
self.tr('Information'),
self.tr('<p>Oh, there is a new version ({}) avaliable.\n'
'Go to <a href="https://github.com/dpizetta/wavy/releases/latest">'
'download!</a></p>.'.format(tag_version['tag_name'])),
QMessageBox.Ok)
self.ui.labelAbout.setText(self.tr(about + "\nVersion status: " + self.update))
def setSampleRate(self, sample_interval):
"""Sets sample rate."""
self.ui.doubleSpinBoxSampleRate.setValue(1. / sample_interval)
def setSampleInterval(self, sample_rate):
"""Sets sample interval."""
self.ui.doubleSpinBoxSampleInterval.setValue(1. / sample_rate)
def callTools(self):
"""Call converting tool."""
dlg = ConvertWave2Data()
dlg.exec_()
def createFileName(self):
"""Construct a new file name to save the data."""
# Creates auto naming filename
filename = 'new_wavy_data_' + time.strftime("%Y%m%d%H%M%S", time.gmtime())
# Gets the current directory
# base_path = os.path.abspath(".")
self.filepath = os.path.join(self.base_path, filename)
self.setWindowFilePath(self.filepath)
def record(self):
    """Starts acquiring.

    Generates a fresh auto-named file path, offers to save any unsaved
    data, resets the recording plot, locks down conflicting UI controls
    and starts the shared acquisition buffer.
    """
    # Create a new filename for the current acquisition
    self.createFileName()
    # Checks if is saved before start a new recording
    if self.isSaved is False:
        answer = QMessageBox.question(
            self,
            self.tr('Question'),
            self.tr('Do you want to save your data before start a new record?'),
            QMessageBox.Yes | QMessageBox.No)
        if answer == QMessageBox.Yes:
            self.saveFileAs()
    # Drop the previous recording curve (if any) before starting fresh.
    if self.plot_widget_rec.curve is not None:
        self.plot_widget_rec.curve.clear()
    self.plot_widget_rec.initData()
    self.plot_widget_rec.setCurveColor(255, 0, 0)  # red signals "recording"
    self.plot_widget_rec.setLabel('top', 'Recording ...')
    # Set enabled buttons
    self.ui.actionPause.setEnabled(True)
    self.ui.actionStop.setEnabled(True)
    self.ui.actionRecord.setEnabled(False)
    # Set enabled inputs — acquisition parameters are frozen while recording.
    self.ui.spinBoxWindowTime.setEnabled(False)
    self.ui.doubleSpinBoxSampleInterval.setEnabled(False)
    self.ui.doubleSpinBoxSampleRate.setEnabled(False)
    self.ui.spinBoxStopRecordingAfter.setEnabled(False)
    # Set enabled tool bar and menu
    self.ui.toolBarFile.setEnabled(False)
    self.ui.menuFile.setEnabled(False)
    self.ui.menuTools.setEnabled(False)
    # NOTE(review): global_buffer appears to be a module-level acquisition
    # buffer defined elsewhere in this file — confirm its contract.
    global_buffer.time_limit = self.ui.spinBoxStopRecordingAfter.value()
    global_buffer.startRecording()
    self.isSaved = False
def pause(self):
    """Pauses acquiring (toggle, driven by the checkable Pause action).

    Checked -> stop recording and mark the plot orange/"Paused";
    unchecked -> resume recording and mark it red/"Recording".
    """
    # TODO: We need to discuss if this is needed
    # because the time is not correctly saved
    if self.ui.actionPause.isChecked():
        # Stopping changing color and label
        self.plot_widget_rec.timer.stop()
        self.plot_widget_rec.setCurveColor(255, 153, 0)  # orange = paused
        self.plot_widget_rec.setLabel('top', 'Paused ...')
        global_buffer.stopRecording()
    else:
        # Starting changing color and label
        self.plot_widget_rec.timer.start()
        self.plot_widget_rec.setCurveColor(255, 0, 0)  # red = recording
        self.plot_widget_rec.setLabel('top', 'Recording ...')
        global_buffer.startRecording()
    # Set enabled tool bar — file operations stay locked either way.
    self.ui.toolBarFile.setEnabled(False)
    self.ui.menuFile.setEnabled(False)
    self.ui.menuTools.setEnabled(False)
def stop(self):
    """Stops acquiring.

    Halts the recording plot, restores the idle UI state (inputs,
    tool bar and menus re-enabled) and stops the shared buffer.
    """
    # Stopping changing color and label
    self.plot_widget_rec.timer.stop()
    self.plot_widget_rec.setCurveColor(0, 255, 0)  # green = stopped
    self.plot_widget_rec.setLabel('top', 'Stopped ...')
    # Set checked — reset both toggle actions.
    self.ui.actionRecord.setChecked(False)
    self.ui.actionPause.setChecked(False)
    # Set enabled buttons
    self.ui.actionPause.setEnabled(False)
    self.ui.actionStop.setEnabled(False)
    self.ui.actionRecord.setEnabled(True)
    # Set enabled inputs — parameters may be changed again once stopped.
    self.ui.doubleSpinBoxSampleInterval.setEnabled(True)
    self.ui.doubleSpinBoxSampleRate.setEnabled(True)
    self.ui.spinBoxWindowTime.setEnabled(True)
    self.ui.spinBoxStopRecordingAfter.setEnabled(True)
    # Set enabled tool bar
    self.ui.toolBarFile.setEnabled(True)
    self.ui.menuFile.setEnabled(True)
    self.ui.menuTools.setEnabled(True)
    self.ui.actionSave_As.setEnabled(True)
    self.ui.actionPrint_graph.setEnabled(True)
    global_buffer.stopRecording()
def savePNGFile(self, filepath):
    """Export the recording plot to *filepath* + '.png'.

    The exported image uses a white background; the widget's normal
    black background is restored afterwards even if the export fails.
    """
    # This extension should not be removed
    # Exporter needs the extension to save correctly.
    filepath += ".png"
    logging.info('File path to save image: %s', filepath)
    # Switch to white only for the duration of the export. The original
    # code restored black *before* calling export(), so the snapshot was
    # taken on the normal background; export now happens while white is
    # active, and the restore is in a finally so errors can't leave the
    # widget white.
    self.plot_widget_rec.setBackground('w')
    try:
        exporter = pg.exporters.ImageExporter(self.plot_widget_rec.plotItem)
        exporter.export(filepath)
    finally:
        self.plot_widget_rec.setBackground('k')
def saveCSVFile(self, filepath):
    """Export the recording plot's data to *filepath* + '.csv'.

    The '.csv' suffix must be kept: the exporter relies on the extension
    to write the file correctly.
    """
    target = filepath + ".csv"
    logging.info('File path to save data: %s', target)
    csv_exporter = pg.exporters.CSVExporter(self.plot_widget_rec.plotItem)
    csv_exporter.export(target)
def getDataFolder(self):
    """Get data folder option (first-run setup).

    Asks the user whether to pick a folder for exported data. On Yes,
    stores the chosen folder in self.base_path and persists it to
    'wavytool.config' (JSON); on No, falls back to the current directory.

    NOTE(review): if the user answers Yes but cancels the directory
    dialog, self.base_path is never assigned here — confirm a default
    exists elsewhere.
    """
    answer = QMessageBox.question(self,
                                  self.tr('Question'),
                                  self.tr('It seems the first time you run WavyTool. Do you want to choose '
                                          'a folder to keep exported data?'),
                                  QMessageBox.Yes | QMessageBox.No)
    if answer == QMessageBox.Yes:
        path = QFileDialog.getExistingDirectory(self,
                                                self.tr('Data folder'),
                                                os.path.expanduser('~'))
        if path:
            try:
                # This string converting is needed because the return is a QString
                self.base_path = os.path.splitext(str(path))[0]
                # Persist the choice so the question is not asked again.
                with open('wavytool.config', 'w') as outfile:
                    json.dump({'data_folder': self.base_path}, outfile)
            except Exception as e:
                QMessageBox.critical(self,
                                     self.tr('Critical'),
                                     self.tr('There was a problem set the default folder to save data:\n '
                                             '{}'.format(str(e))),
                                     QMessageBox.Ok)
            else:
                logging.info('The default folder is: %s', self.base_path)
                QMessageBox.information(self,
                                        self.tr('Information'),
                                        self.tr('Default folder to save data was set up to:\n'
                                                '{}.'.format(self.base_path)),
                                        QMessageBox.Ok)
    else:
        self.base_path = '.'
def saveImageAs(self):
    """Saves image as.

    Prompts for a destination, then delegates to savePNGFile. Success
    and failure are both reported via message boxes.
    """
    path = QFileDialog.getSaveFileName(self,
                                       self.tr('Export recorded image ...'),
                                       os.path.splitext(self.filepath)[0] + '.png',
                                       self.tr("Image File (*.png)"))
    if path:
        try:
            # This string converting is needed because the return is a QString
            # (extension stripped; savePNGFile re-appends '.png').
            self.filepath = os.path.splitext(str(path))[0]
            self.savePNGFile(self.filepath)
        except Exception as e:
            QMessageBox.critical(self,
                                 self.tr('Critical'),
                                 self.tr('There was a problem to save image:\n {}'.format(str(e))),
                                 QMessageBox.Ok)
        else:
            logging.info('The image was saved in the file: %s', self.filepath)
            QMessageBox.information(self,
                                    self.tr('Information'),
                                    self.tr('Image was successfully exported.'),
                                    QMessageBox.Ok)
def saveFileAs(self):
    """Saves data file as.

    Prompts for a destination, delegates to saveCSVFile, and tracks
    self.isSaved so other actions know whether data is persisted.
    """
    path = QFileDialog.getSaveFileName(self,
                                       self.tr('Save recorded data ...'),
                                       os.path.splitext(self.filepath)[0] + '.csv',
                                       self.tr("Data File (*.csv)"))
    if path:
        try:
            # This string converting is needed because the return is a QString
            # (extension stripped; saveCSVFile re-appends '.csv').
            self.filepath = os.path.splitext(str(path))[0]
            self.saveCSVFile(self.filepath)
        except Exception as e:
            self.isSaved = False
            QMessageBox.critical(self,
                                 self.tr('Critical'),
                                 self.tr('There was a problem to save data\n {}'.format(str(e))),
                                 QMessageBox.Ok)
        else:
            self.isSaved = True
            # self.ui.actionSave_As.setEnabled(False)
            logging.info('The data was saved in the file: %s', self.filepath)
            QMessageBox.information(self,
                                    self.tr('Information'),
                                    self.tr('Data was successfully saved.\n\nDATA SAVED IS REPRESENTED '
                                            'BY THE WINDOW RECORDING, IF YOU APPLY ZOOM ON IT, JUST '
                                            'DATA VISIBLE WILL BE SAVED!'),
                                    QMessageBox.Ok)
def about(self):
    """Show the dialog about.

    `about` here is the module-level about text, not this method.
    """
    QMessageBox.about(self, self.tr('About'),
                      self.tr(about))
def closeQuestion(self):
    """Asks about to close.

    Offers to save unsaved data first, then asks for exit confirmation.

    Returns:
        bool: True if the user confirmed exiting (default answer is No).
    """
    if self.isSaved is False:
        answer = QMessageBox.question(
            self,
            self.tr('Question'),
            self.tr('Do you want to save your data before exit?'),
            QMessageBox.Yes | QMessageBox.No)
        if answer == QMessageBox.Yes:
            self.saveFileAs()
    answer = QMessageBox.question(self,
                                  self.tr('Close'),
                                  self.tr('Do you want to exit?'),
                                  QMessageBox.Yes | QMessageBox.No,
                                  QMessageBox.No)
    return answer == QMessageBox.Yes
def closeEvent(self, event):
    """Re implements close event.

    Confirms with the user, stops timers and releases the audio device
    before accepting; ignores the event if the user declines.
    """
    if self.closeQuestion():
        self.plot_widget.timer.stop()
        self.plot_widget.audio.end_audio()
        # Recording timer only exists once a recording has been started.
        if self.plot_widget_rec.curve is not None:
            self.plot_widget_rec.timer.stop()
        event.accept()
    else:
        event.ignore()
|
|
"""
==========================
Mel To Python Translator
==========================
Convert mel scripts into python scripts.
Known Limitations
=================
array index assignment
----------------------
In mel, you can directly assign the value of any element in an array, and all intermediate elements will be
automatically filled. This is not the case in python: if the list index is out of range an IndexError will be
raised. I've added fixes for several common array assignment conventions:
append new element
~~~~~~~~~~~~~~~~~~
**MEL**
.. python::
string $strArray[];
$strArray[`size $strArray`] = "foo";
Python
>>> strArray = [] #doctest: +SKIP
>>> strArray.append("foo") #doctest: +SKIP
assignment relative to end of array
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**MEL**
.. python::
strArray[`size $strArray`-3] = "foo";
Python
>>> strArray[-3] = "foo" #doctest: +SKIP
However, since the translator does not track values of variables, it does not know if any given index is out of
range or not. so, the following would raise a 'list assignment index out of range' error when converted to
python and would need to be manually fixed:
.. python::
string $strArray[];
for ($i=0; $i<5; $i++)
$strArray[$i] = "foo"
for(init; condition; update)
----------------------------
the closest equivalent to this in python is something akin to:
>>> for i in range(start, end): #doctest: +SKIP
in order for this type of for loop to be translated into a python for loop it must meet several requirements:
1. the initialization, condition, and update expressions must not be empty.
not translatable:
.. python::
for(; ; $i++) print $i;
2. there can be only one conditional expression.
not translatable:
.. python::
for($i=0; $i<10, $j<20; $i++) print $i;
3. the variable which is being updated and tested in the condition (aka, the iterator) must exist alone on one
side of the conditional expression. this one is easy enough to fix, just do some algebra:
not translatable:
.. python::
for($i=0; ($i-2)<10, $i++) print $i;
translatable:
.. python::
for($i=0; $i<(10+2), $i++) print $i;
4. the iterator can appear only once in the update expression:
not translatable:
.. python::
for($i=0; $i<10; $i++, $i+=2) print $i;
if these conditions are not met, the for loop will be converted into a while loop:
>>> i=0
>>> while 1: #doctest: +SKIP
... if not ( (i - 2)<10 ):
... break
... print i
... i+=1
Inconveniences
==============
Switch Statements
-----------------
Alas, switch statements are not supported by python. the translator will convert them into an if/elif/else statement.
Global Variables
----------------
Global variables are not shared between mel and python. two functions have been added to pymel for this purpose:
`getMelGlobal` and `setMelGlobal`. by default, the translator will convert mel global variables into python global
variables AND intialize them to the value of their corresponding mel global variable using getMelGlobal(). if your
python global variable does not need to be shared with other mel scripts, you can remove the get- and
setMelGlobals lines (for how to filter global variables, see below). however, if it does need to be shared, it is very
important that you manually add setMelGlobal() to update the variable in the mel environment before calling any mel
procedures that will use the global variable.
In order to hone the accuracy of the translation of global variables, you will find two dictionary parameters below --
`global_var_include_regex` and `global_var_exclude_regex` -- which you can use to set a regular expression string
to tell the translator which global variables to share with the mel environment (i.e. which will use the get and set
methods described above) and which to not. for instance, in my case, it is desirable for all of maya's global
variables to be initialized from their mel value but for our in-house variables not to be, since the latter are often
used to pass values within a single script. see below for the actual regular expressions used to accomplish this.
Comments
--------
Rules on where comments may be placed is more strict in python, so expect your comments to be shifted around a bit
after translation.
Formatting
----------
Much of the formatting of your original script will be lost. I apologize for this, but python is much more strict
about formatting than mel, so the conversion is infinitely simpler when the formatting is largely discarded
and reconstructed based on pythonic rules.
Solutions and Caveats
=====================
catch and catchQuiet
--------------------
There is no direct equivalent in python to the catch and catchQuiet command and it does not exist in maya.cmds so i wrote two
python commands of the same name and put them into pymel. these are provided primarily for compatibility with
automatically translated scripts. try/except statements should be used instead of catch or catchQuiet if coding
from scratch.
for( $elem in $list )
---------------------
This variety of for loop has a direct syntactical equivalent in python. the only catch here is that maya.cmds
functions which are supposed to return lists, return None when there are no matches. life would be much simpler
if they returned empty lists instead. the solution currently lies in pymel, where i have begun
correcting all of these command to return proper results. i've started with the obvious ones, but there
are many more that i need to fix. you'll know you hit the problem when you get this error: 'TypeError: iteration
over non-sequence'. just email me with commands that are giving you problems and i'll fix them as
quickly as i can.
"""
from melparse import *
try:
from pymel.util.external.ply.lex import LexError
except ImportError:
from ply.lex import LexError
import pymel.util as util
import pymel.internal as internal
import pymel.internal.factories as _factories
import pymel
import os
log = internal.getLogger(__name__)
"""
This is a dictionary for custom remappings of mel procedures into python functions, classes, etc. If you are like me you probably have a
library of helper mel scripts to make your life a bit easier. you will probably find that python has a built-in equivalent for many of
these.
i've provided a few entries as examples to show you how to implement remappings in mel2py. the first procedure in the dictionary is
'firstElem', which simply returns the first element of a string array, useful when the first element of a command is all you need. as you
can see, the key in the dictionary is the procedure name, and the value is a function which takes two inputs: a list of arguments to the
procedure being remapped, and a ply yacc token object, which you probably will not need to use. the function should return a string
representing the new command. also, note that the list of arguments will all be strings and will already be converted into their python
equivalents. in the case of 'firstElem', it will perform conversions like the following:
firstElem( ls(sl=1) ) --> ls(sl=1)[0]
firstElem( myListVar ) --> myListVar[0]
"""
# Maps mel procedure name -> (mel return type, formatter). Each formatter
# receives the list of already-converted python argument strings plus the
# ply yacc token and returns the replacement python expression as a string.
custom_proc_remap = {
    'firstElem' : ( 'string', lambda args, t: '%s[0]' % (args[0]) ),
    'firstFloatElem' : ( 'float', lambda args, t: '%s[0]' % (args[0]) ),
    'stringArrayAppend' : ( 'string[]', lambda args, t: '%s + %s' % (args[0], args[1]) ),
    'stringInArray' : ( 'int', lambda args, t: '%s in %s' % (args[0], args[1]) ),
    'stringInStringArray' : ( 'int', lambda args, t: '%s in %s' % (args[0], args[1]) ),
    'stringArrayPrefix' : ( 'string[]', lambda args, t: '[ %s + x for x in %s ]' % (args[0], args[1]) ),
    'stringArraySuffix' : ( 'string[]', lambda args, t: '[ x + %s for x in %s ]' % (args[0], args[1]) ),
    'addPad' : ( 'string', lambda args, t: "'%0" + args[1] + "d' % " + args[0] ),
    'getRefFileFromObject' : ( 'string', lambda args, t: '%s.referenceFile()' % (args[0]) )
}

# do not change the following line !!!
# (merges the custom remaps into melparse's global procedure remap table)
proc_remap.update(custom_proc_remap)
def resolvePath( melobj, recurse=False, exclude=(), melPathOnly=False, basePackage='' ):
    """
    if passed a directory, get all mel files in the directory
    if passed a file, ensure it is a mel file
    if passed a procedure name, find its file

    Returns tuples of the form (moduleName, melfile).

    NOTE(review): when `exclude` is non-empty it is mutated in place
    (`exclude[i] = badFile`), so callers must pass a list, not a tuple.
    """
    if basePackage is None:
        basePackage = ''
    files = []
    recursedResults = []
    filepath = util.path( melobj )
    if filepath.isfile():
        if filepath.ext == '.mel':
            files = [ filepath.canonicalpath() ]
        else:
            log.warning( "File is not a mel script: %s" % (filepath) )
            files = []
    elif filepath.isdir():
        # All mel files starting with a letter (skips dot/temp files).
        files = [ f.canonicalpath() for f in filepath.files( '[a-zA-Z]*.mel') ]
        if recurse:
            # Each subdirectory becomes a sub-package of basePackage.
            for dir in filepath.dirs():
                recursedResults.extend(resolvePath(dir, recurse=recurse,
                                                   exclude=exclude, melPathOnly=melPathOnly,
                                                   basePackage = basePackage + '.' + pythonizeName(dir.basename())))
    #elif not filepath.exists():
    else:
        # see if it's a procedure that we can derive a path from
        try:
            info = mel.whatIs( melobj ).split(': ')[-1]
            assert info != 'Unknown', "If providing a procedure or a short file name, ensure the appropriate script is sourced"
            melfile = util.path( info )
            files = [ melfile.canonicalpath() ]
        except Exception, msg:
            log.warning( "Could not determine mel script from input '%s': %s." % (filepath, msg) )
    if exclude:
        # Canonicalize excludes; trailing os.sep marks directory prefixes.
        for i, badFile in enumerate(exclude):
            badFile = util.path(badFile).canonicalpath()
            if badFile.isdir():
                badFile = badFile + os.sep
            exclude[i] = badFile
        # Keep files that neither match an exclude nor live under an
        # excluded directory.
        filteredFiles = []
        for f in files:
            fileGood = True
            for badFile in exclude:
                if f.samepath(badFile) \
                        or (badFile.isdir()
                            and f.startswith(badFile)):
                    fileGood = False
            if fileGood:
                filteredFiles.append(f)
        files = filteredFiles
    if melPathOnly:
        files = [x for x in files if fileOnMelPath(x)]
    if basePackage and basePackage[-1] != '.':
        basePackage = basePackage + '.'
    return [ (basePackage + getModuleBasename(x), x) for x in files] + recursedResults
def fileOnMelPath( file ):
    """
    Return True if this file is on the mel path.
    """
    melfile = util.path(file)
    # mel's whatIs reports e.g. "Script found in: <path>"; split the label
    # from the location on the first ': ' only.
    reply = mel.whatIs(melfile.basename()).split(': ', 1)
    if len(reply) < 2:
        # No ': ' in the reply (probably 'Unknown' or similar) —
        # not what we're looking for.
        return False
    label, location = reply
    if label not in ('Mel procedure found in', 'Script found in'):
        return False
    return util.path(location).samepath(melfile)
def _updateCurrentModules( newResults ):
    """Merge (moduleName, melfile) pairs into batchData.currentModules.

    When the same mel file maps to several module names, the shallowest
    name (fewest dots) wins; a name collision between two different mel
    files is a fatal error.
    """
    currentModules = melparse.batchData.currentModules
    for moduleName, melfile in newResults:
        if not isinstance(melfile, Path):
            melfile = util.path(melfile)
        if melfile in currentModules.values():
            registeredName = currentModules.get_key(melfile)
            if registeredName == moduleName:
                continue
            # Prefer the shallower package path; ties keep the existing one.
            if moduleName.count('.') >= registeredName.count('.'):
                continue
        elif moduleName in currentModules:
            raise RuntimeError('two mel files result in same python module name: %s, %s => %s' % (currentModules[moduleName], melfile, moduleName))
        currentModules[moduleName] = melfile
def _makePackages():
    """Create package directories (with __init__.py files) for every
    dotted module name registered in batchData.currentModules."""
    # Maps from a package (in tuple form) to base directory
    packages = {}
    for moduleName, melfile in melparse.batchData.currentModules.iteritems():
        if moduleName.count('.') < 1:
            # Top-level module: no package directory needed.
            continue
        package = tuple(moduleName.split('.')[:-1])
        if melparse.batchData.outputDir:
            packages[package] = melparse.batchData.outputDir
        else:
            # Without an explicit output dir, the package layout must
            # mirror the mel file's own directory structure.
            assert package == tuple(melfile.splitall()[-(len(package)+1):-1]), \
                "package %s did not match melfile %s directory structure" % ('.'.join(package), melfile)
            packages[package] = util.path.joinpath( *(melfile.splitall()[:-(len(package)+1)]) )
    for packageTuple, baseDir in packages.iteritems():
        if not baseDir.isdir():
            baseDir.makedirs()
        # Walk down the package path, creating each directory and an
        # (empty) __init__.py so python recognizes it as a package.
        curDir = baseDir
        for nextDir in packageTuple:
            curDir = curDir / nextDir
            if not curDir.isdir():
                curDir.mkdir()
            initFile = curDir / '__init__.py'
            if not initFile.isfile():
                initFile.touch()
def _getInputFiles( input, recurse=False, exclude=(), melPathOnly=False, basePackage='' ):
    """
    Returns tuples of the form (packageName, melfile)
    """
    # Accept a single item or any iterable of items uniformly.
    items = input if util.isIterable(input) else [input]
    results = []
    for item in items:
        resolved = resolvePath(item, recurse=recurse, exclude=exclude,
                               melPathOnly=melPathOnly, basePackage=basePackage)
        results.extend(resolved)
    return results
def melInfo( input ):
    """
    Get information about procedures in a mel file.

        >>> import pymel.tools.mel2py as mel2py
        >>> mel2py.melInfo('attributeExists')
        (['attributeExists'], {'attributeExists': {'returnType': 'int', 'args': [('string', '$attr'), ('string', '$node')]}}, {})

    :Parameters:
        input
            can be a mel file or a sourced mel procedure

    :return:
        A 3-element tuple:
            1. the list of procedures in the order the are defined
            2. a dictionary of global procedures, with the following entries:
                - returnType: mel type to be returned
                - args: a list of (type, variable_name) pairs
            3. a dictionary of local procedures, formatted the same as with globals
    """
    # TODO: change this to use _getInputFiles, with an option to prevent recursing directories
    res = resolvePath(input)
    if len(res) != 1:
        # Exactly one script must resolve, otherwise the input is ambiguous.
        raise ValueError, "input must be a mel script or a known procedure from a sourced mel script."
    f = res[0]
    # Scan (not translate) the script to collect procedure signatures.
    cbParser = MelScanner()
    cbParser.build()
    return cbParser.parse( f.bytes() )
def mel2pyStr( data, currentModule=None, pymelNamespace='', forceCompatibility=False, verbosity=0, basePackage=None ):
    """
    convert a string representing mel code into a string representing python code

        >>> import pymel.tools.mel2py as mel2py
        >>> print mel2py.mel2pyStr('paneLayout -e -configuration "top3" test;')
        from pymel.all import *
        paneLayout('test',configuration="top3",e=1)
        <BLANKLINE>

    Note that when converting single lines, the lines must end in a semi-colon, otherwise it is technically
    invalid syntax.

    :Parameters:
        data : `str`
            string representing code to convert
        currentModule : `str`
            the name of the module that the hypothetical code is executing in. In most cases you will
            leave it at its default, the __main__ namespace.
        pymelNamespace : `str`
            the namespace into which pymel will be imported. the default is '', which means ``from pymel.all import *``
        forceCompatibility : `bool`
            If True, the translator will attempt to use non-standard python types in order to produce
            python code which more exactly reproduces the behavior of the original mel file, but which
            will produce "uglier" code. Use this option if you wish to produce the most reliable code
            without any manual cleanup.
        verbosity : `int`
            Set to non-zero for a *lot* of feedback
    """
    translator = MelParser()
    translator.build(currentModule, pymelNamespace=pymelNamespace,
                     forceCompatibility=forceCompatibility, verbosity=verbosity)
    return translator.parse( data )
def mel2py( input, outputDir=None,
            pymelNamespace='', forceCompatibility=False,
            verbosity=0 , test=False,
            recurse=False, exclude=(), melPathOnly=False,
            basePackage=None):
    """
    Batch convert an entire directory

    :Parameters:
        input
            May be a directory, a list of directories, the name of a mel file, a list of mel files, or the name of a sourced procedure.
            If only the name of the mel file is passed, mel2py will attempt to determine the location
            of the file using the 'whatIs' mel command, which relies on the script already being sourced by maya.

        outputDir : `str`
            Directory where resulting python files will be written to

        pymelNamespace : `str`
            the namespace into which pymel will be imported. the default is '', which means ``from pymel.all import *``

        forceCompatibility : `bool`
            If True, the translator will attempt to use non-standard python types in order to produce
            python code which more exactly reproduces the behavior of the original mel file, but which
            will produce "uglier" code. Use this option if you wish to produce the most reliable code
            without any manual cleanup.

        verbosity : `int`
            Set to non-zero for a *lot* of feedback

        test : `bool`
            After translation, attempt to import the modules to test for errors

        recurse : `bool`
            If the input is a directory, whether or not to recursively search subdirectories as well.
            Subdirectories will be converted into packages, and any mel files within those subdirectories
            will be submodules of that package.

        exclude : `str`
            A comma-separated list of files/directories to exclude from processing, if input is a directory.

        melPathOnly : `bool`
            If true, will only translate mel files found on the mel script path.

        basePackage : `str`
            Gives the package that all translated modules will be a part of; if None or an empty string, all
            translated modules are assumed to have no base package.
    """
    if basePackage is None:
        basePackage = ''
    # Fresh batch state for this run (shared with the parser via melparse).
    melparse.batchData = BatchData()
    batchData = melparse.batchData
    batchData.basePackage = basePackage
    if outputDir is not None:
        outputDir = util.path(outputDir)
    batchData.outputDir = outputDir
    if outputDir and not os.path.exists(outputDir):
        os.makedirs(outputDir)
    # Resolve the input(s) to (moduleName, melfile) pairs and register them.
    currentFiles = _getInputFiles( input, recurse=recurse, exclude=exclude, melPathOnly=melPathOnly, basePackage=basePackage )
    if not currentFiles:
        raise ValueError, "Could not find any scripts to operate on. Please pass a directory, a list of directories, the name of a mel file, a list of mel files, or the name of a sourced procedure"
    _updateCurrentModules(currentFiles)
    _makePackages()
    importCnt = 0
    succeeded = []
    for moduleName, melfile in batchData.currentModules.iteritems():
        print melfile, moduleName
        if melfile in batchData.scriptPath_to_moduleText:
            # The parser may have already converted this script as a
            # dependency of another one.
            print "Using pre-converted mel script", melfile
            converted = batchData.scriptPath_to_moduleText[melfile]
        else:
            data = melfile.bytes()
            print "Converting mel script", melfile
            try:
                converted = mel2pyStr( data, moduleName, pymelNamespace=pymelNamespace, verbosity=verbosity )
            except MelParseError, e:
                # Attach the failing file to the error before propagating.
                if e.file is None:
                    e.file = melfile
                raise
        # NOTE(review): `tag` is presumably a module-level constant from
        # `melparse` (imported via *) — confirm.
        header = """%s from mel file:
# %s
""" % (tag, melfile)
        converted = header + converted
        # Write <package dirs>/<module>.py next to the mel file unless an
        # explicit output directory was given.
        splitModule = moduleName.split('.')
        if outputDir is None:
            currOutDir = melfile.parent
        else:
            currOutDir = outputDir
        if len(splitModule) > 1:
            currOutDir = currOutDir.joinpath(*splitModule[:-1])
        pyfile = currOutDir.joinpath(splitModule[-1] + '.py')
        print "Writing converted python script: %s" % pyfile
        pyfile.write_bytes(converted)
        succeeded.append( pyfile )
        #except (ValueError, IndexError, TypeError, LexError), msg:
        #    if ignoreErrors:
        #        print 'failed:', msg
        #    else:
        #        raise Exception, msg
        #
    if test:
        # Smoke-test: importing a translated module executes its top level.
        for pyfile in succeeded:
            print "Testing", pyfile
            try:
                __import__( pyfile.namebase )
            except (SyntaxError, IndentationError), msg:
                print 'A syntax error exists in this file that will need to be manually fixed: %s' % msg
            except RuntimeError, msg:
                print 'This file has code which executed on import and failed: %s' % msg
            except ImportError, msg:
                print '%s' % msg
            except Exception, msg:
                print 'This file has code which executed on import and failed: %s' % msg
            else:
                importCnt += 1
    # Summary of this batch run.
    succCnt = len(succeeded)
    print "%d total processed for conversion" % len(batchData.currentModules)
    print "%d files succeeded" % succCnt
    print "%d files failed" % (len(batchData.currentModules)-succCnt)
    if test:
        print "%d files imported without error" % (importCnt)
    succCnt = 0
def findMelOnlyCommands():
    """
    Using maya's documentation, find commands which were not ported to python.

    Compares the 'Commands' and 'CommandsPython' doc directories and
    returns a list of (command, type, info) tuples, where info is the
    pymel module providing the command, 'builtin', or a bool telling
    whether a remap entry exists.
    """
    docs = util.path( _factories.mayaDocsLocation() )
    melCmds = set([ x.namebase for x in ( docs / 'Commands').files('*.html') ])
    pyCmds = set([ x.namebase for x in ( docs / 'CommandsPython').files('*.html') ])
    result = []
    for cmd in sorted(melCmds.difference(pyCmds)):
        typ = pymel.mel.whatIs(cmd)
        if typ.startswith( 'Script') or typ.startswith( 'Mel' ):
            typ = 'Mel'
        try:
            func = getattr( pymel, cmd)
            info = func.__module__
        except AttributeError:
            if hasattr( builtin_module, cmd):
                info = 'builtin'
            else:
                # `in` replaces dict.has_key(), which is deprecated in
                # python 2 and removed in python 3; behavior is identical.
                info = cmd in proc_remap
        result.append( (cmd, typ, info ) )
    return result
if __name__ == '__main__':
    # Command-line entry point: delegate to the mel2pyCommand driver.
    import pymel.tools.mel2pyCommand
    pymel.tools.mel2pyCommand.main()
|
|
import json
import logging
import webapp2
from google.appengine.ext import ndb
from consts.auth_type import AuthType
from controllers.api.api_base_controller import ApiTrustedBaseController
from datafeeds.parsers.json.json_alliance_selections_parser import JSONAllianceSelectionsParser
from datafeeds.parsers.json.json_awards_parser import JSONAwardsParser
from datafeeds.parsers.json.json_matches_parser import JSONMatchesParser
from datafeeds.parsers.json.json_rankings_parser import JSONRankingsParser
from datafeeds.parsers.json.json_team_list_parser import JSONTeamListParser
from helpers.award_manipulator import AwardManipulator
from helpers.event_manipulator import EventManipulator
from helpers.event_team_manipulator import EventTeamManipulator
from helpers.match_helper import MatchHelper
from helpers.match_manipulator import MatchManipulator
from models.award import Award
from models.event import Event
from models.event_team import EventTeam
from models.match import Match
from models.sitevar import Sitevar
from models.team import Team
class ApiTrustedEventAllianceSelectionsUpdate(ApiTrustedBaseController):
    """
    Overwrites an event's alliance_selections_json with new data
    """
    REQUIRED_AUTH_TYPES = {AuthType.EVENT_ALLIANCES}

    def _process_request(self, request, event_key):
        # Parse first so a malformed payload fails before the event is touched.
        parsed_selections = JSONAllianceSelectionsParser.parse(request.body)
        event = Event.get_by_id(event_key)
        event.alliance_selections_json = json.dumps(parsed_selections)
        EventManipulator.createOrUpdate(event)
        self.response.out.write(
            json.dumps({'Success': "Alliance selections successfully updated"}))
class ApiTrustedEventAwardsUpdate(ApiTrustedBaseController):
    """
    Removes all awards for an event and adds the awards given in the request
    """
    REQUIRED_AUTH_TYPES = {AuthType.EVENT_AWARDS}

    def _build_award(self, event, parsed):
        """Build one Award model from a parsed award dict."""
        return Award(
            id=Award.render_key_name(event.key_name, parsed['award_type_enum']),
            name_str=parsed['name_str'],
            award_type_enum=parsed['award_type_enum'],
            year=event.year,
            event=event.key,
            event_type_enum=event.event_type_enum,
            team_list=[ndb.Key(Team, team_key) for team_key in parsed['team_key_list']],
            recipient_json_list=parsed['recipient_json_list'])

    def _process_request(self, request, event_key):
        event = Event.get_by_id(event_key)
        awards = [self._build_award(event, parsed)
                  for parsed in JSONAwardsParser.parse(request.body, event_key)]
        # it's easier to clear all awards and add new ones than try to find the difference
        old_award_keys = Award.query(Award.event == event.key).fetch(None, keys_only=True)
        AwardManipulator.delete_keys(old_award_keys)
        AwardManipulator.createOrUpdate(awards)
        self.response.out.write(json.dumps({'Success': "Awards successfully updated"}))
class ApiTrustedEventMatchesUpdate(ApiTrustedBaseController):
    """
    Creates/updates matches
    """
    REQUIRED_AUTH_TYPES = {AuthType.EVENT_MATCHES}

    def _process_request(self, request, event_key):
        event = Event.get_by_id(event_key)
        year = int(event_key[:4])  # event keys start with the 4-digit year

        matches = []
        needs_time = []
        # `parsed` is the raw dict from the parser; the old code rebound the
        # loop variable `match` from dict to Match model mid-iteration, which
        # was confusing though not incorrect.
        for parsed in JSONMatchesParser.parse(request.body, year):
            match = Match(
                id=Match.renderKeyName(
                    event.key.id(),
                    parsed.get("comp_level", None),
                    parsed.get("set_number", 0),
                    parsed.get("match_number", 0)),
                event=event.key,
                year=event.year,
                set_number=parsed.get("set_number", 0),
                match_number=parsed.get("match_number", 0),
                comp_level=parsed.get("comp_level", None),
                team_key_names=parsed.get("team_key_names", None),
                alliances_json=parsed.get("alliances_json", None),
                score_breakdown_json=parsed.get("score_breakdown_json", None),
                time_string=parsed.get("time_string", None),
                time=parsed.get("time", None),
            )
            if (not match.time or match.time == "") and match.time_string:
                # We can calculate the real time from the time string
                needs_time.append(match)
            matches.append(match)

        if needs_time:
            try:
                logging.debug("Calculating time!")
                MatchHelper.add_match_times(event, needs_time)
            except Exception:
                # Best effort: replaces py2-only `except Exception, e` (e was
                # unused); logging.exception also records the traceback.
                logging.exception("Failed to calculate match times")

        MatchManipulator.createOrUpdate(matches)
        self.response.out.write(json.dumps({'Success': "Matches successfully updated"}))
class ApiTrustedEventMatchesDelete(ApiTrustedBaseController):
    """
    Deletes given match keys
    """
    REQUIRED_AUTH_TYPES = {AuthType.EVENT_MATCHES}

    def _process_request(self, request, event_key):
        try:
            partial_keys = json.loads(request.body)
        except Exception:
            self._errors = json.dumps({"Error": "'keys_to_delete' could not be parsed"})
            self.abort(400)  # raises; nothing below runs on bad input

        # Full match keys are "<event_key>_<partial_key>".
        keys_to_delete = {ndb.Key(Match, '{}_{}'.format(event_key, partial_key))
                          for partial_key in partial_keys}
        MatchManipulator.delete_keys(keys_to_delete)

        deleted_partials = [key.id().split('_')[1] for key in keys_to_delete]
        self.response.out.write(json.dumps({"keys_deleted": deleted_partials}))
class ApiTrustedEventMatchesDeleteAll(ApiTrustedBaseController):
    """
    Deletes all matches
    """
    REQUIRED_AUTH_TYPES = {AuthType.EVENT_MATCHES}

    def _process_request(self, request, event_key):
        # Safety check: the caller must echo the event key in the body to
        # confirm a full wipe.
        if request.body != event_key:
            self._errors = json.dumps({"Error": "To delete all matches for this event, the body of the request must be the event key."})
            self.abort(400)

        match_keys = Match.query(
            Match.event == ndb.Key(Event, event_key)).fetch(keys_only=True)
        MatchManipulator.delete_keys(match_keys)
        self.response.out.write(
            json.dumps({'Success': "All matches for {} deleted".format(event_key)}))
class ApiTrustedEventRankingsUpdate(ApiTrustedBaseController):
    """
    Overwrites an event's rankings_json with new data
    """
    REQUIRED_AUTH_TYPES = {AuthType.EVENT_RANKINGS}

    def _process_request(self, request, event_key):
        # Parse first so a malformed payload fails before the event is touched.
        parsed_rankings = JSONRankingsParser.parse(request.body)
        event = Event.get_by_id(event_key)
        event.rankings_json = json.dumps(parsed_rankings)
        EventManipulator.createOrUpdate(event)
        self.response.out.write(
            json.dumps({'Success': "Rankings successfully updated"}))
class ApiTrustedEventTeamListUpdate(ApiTrustedBaseController):
    """
    Creates/updates EventTeams for teams given in the request
    and removes EventTeams for teams not in the request
    """
    REQUIRED_AUTH_TYPES = {AuthType.EVENT_TEAMS}

    def _process_request(self, request, event_key):
        team_keys = JSONTeamListParser.parse(request.body)
        event = Event.get_by_id(event_key)

        # Build EventTeams only for teams that actually exist in the datastore.
        event_teams = [
            EventTeam(id=event.key.id() + '_{}'.format(team_key),
                      event=event.key,
                      team=ndb.Key(Team, team_key),
                      year=event.year)
            for team_key in team_keys
            if Team.get_by_id(team_key)
        ]

        # Remove stale EventTeams that are no longer in the submitted list.
        existing_keys = EventTeam.query(EventTeam.event == event.key).fetch(None, keys_only=True)
        stale_keys = set(existing_keys) - {et.key for et in event_teams}
        EventTeamManipulator.delete_keys(stale_keys)

        EventTeamManipulator.createOrUpdate(event_teams)
        self.response.out.write(json.dumps({'Success': "Event teams successfully updated"}))
class ApiTrustedAddMatchYoutubeVideo(ApiTrustedBaseController):
    """
    Adds YouTube videos to matches.
    """
    REQUIRED_AUTH_TYPES = {AuthType.MATCH_VIDEO}

    def _process_request(self, request, event_key):
        # Body is a JSON object mapping partial match keys -> YouTube video ids.
        try:
            video_map = json.loads(request.body)
        except Exception:
            self._errors = json.dumps({"Error": "Invalid JSON. Please check input."})
            self.abort(400)

        updated_matches = []
        for partial_key, youtube_id in video_map.items():
            # Partial keys are qualified with the event key to look up the Match.
            full_key = '{}_{}'.format(event_key, partial_key)
            match = Match.get_by_id(full_key)
            if match is None:
                self._errors = json.dumps({"Error": "Match {} does not exist!".format(full_key)})
                self.abort(400)
            if youtube_id in match.youtube_videos:
                continue  # video already attached; nothing to write
            match.youtube_videos.append(youtube_id)
            updated_matches.append(match)

        MatchManipulator.createOrUpdate(updated_matches)
        self.response.out.write(json.dumps({'Success': "Match videos successfully updated"}))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.